Diffstat (limited to 'system/easy-kernel/0100-linux-6.6.6.patch')
-rw-r--r--  system/easy-kernel/0100-linux-6.6.6.patch  64421
1 file changed, 64421 insertions, 0 deletions
diff --git a/system/easy-kernel/0100-linux-6.6.6.patch b/system/easy-kernel/0100-linux-6.6.6.patch
new file mode 100644
index 000000000..5192da321
--- /dev/null
+++ b/system/easy-kernel/0100-linux-6.6.6.patch
@@ -0,0 +1,64421 @@
+diff --git a/Documentation/ABI/testing/sysfs-class-led b/Documentation/ABI/testing/sysfs-class-led
+index b2ff0012c0f2b..2e24ac3bd7efa 100644
+--- a/Documentation/ABI/testing/sysfs-class-led
++++ b/Documentation/ABI/testing/sysfs-class-led
+@@ -59,15 +59,6 @@ Description:
+ brightness. Reading this file when no hw brightness change
+ event has happened will return an ENODATA error.
+
+-What: /sys/class/leds/<led>/color
+-Date: June 2023
+-KernelVersion: 6.5
+-Description:
+- Color of the LED.
+-
+- This is a read-only file. Reading this file returns the color
+- of the LED as a string (e.g: "red", "green", "multicolor").
+-
+ What: /sys/class/leds/<led>/trigger
+ Date: March 2006
+ KernelVersion: 2.6.17
+diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat
+index ef6d6c57105ef..96834d103a09e 100644
+--- a/Documentation/ABI/testing/sysfs-driver-qat
++++ b/Documentation/ABI/testing/sysfs-driver-qat
+@@ -29,6 +29,8 @@ Description: (RW) Reports the current configuration of the QAT device.
+ services
+ * asym;sym: identical to sym;asym
+ * dc: the device is configured for running compression services
++ * dcc: identical to dc but enables the dc chaining feature,
++ hash then compression. If this is not required, choose dc
+ * sym: the device is configured for running symmetric crypto
+ services
+ * asym: the device is configured for running asymmetric crypto
+diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
+index b6cfb51cb0b46..e715bfc09879a 100644
+--- a/Documentation/admin-guide/hw-vuln/srso.rst
++++ b/Documentation/admin-guide/hw-vuln/srso.rst
+@@ -46,12 +46,22 @@ The possible values in this file are:
+
+ The processor is not vulnerable
+
+- * 'Vulnerable: no microcode':
++* 'Vulnerable':
++
++ The processor is vulnerable and no mitigations have been applied.
++
++ * 'Vulnerable: No microcode':
+
+ The processor is vulnerable, no microcode extending IBPB
+ functionality to address the vulnerability has been applied.
+
+- * 'Mitigation: microcode':
++ * 'Vulnerable: Safe RET, no microcode':
++
++ The "Safe RET" mitigation (see below) has been applied to protect the
++ kernel, but the IBPB-extending microcode has not been applied. User
++ space tasks may still be vulnerable.
++
++ * 'Vulnerable: Microcode, no safe RET':
+
+ Extended IBPB functionality microcode patch has been applied. It does
+ not address User->Kernel and Guest->Host transitions protection but it
+@@ -72,11 +82,11 @@ The possible values in this file are:
+
+ (spec_rstack_overflow=microcode)
+
+- * 'Mitigation: safe RET':
++ * 'Mitigation: Safe RET':
+
+- Software-only mitigation. It complements the extended IBPB microcode
+- patch functionality by addressing User->Kernel and Guest->Host
+- transitions protection.
++ Combined microcode/software mitigation. It complements the
++ extended IBPB microcode patch functionality by addressing
++ User->Kernel and Guest->Host transitions protection.
+
+ Selected by default or by spec_rstack_overflow=safe-ret
+
+@@ -129,7 +139,7 @@ an indrect branch prediction barrier after having applied the required
+ microcode patch for one's system. This mitigation comes also at
+ a performance cost.
+
+-Mitigation: safe RET
++Mitigation: Safe RET
+ --------------------
+
+ The mitigation works by ensuring all RET instructions speculate to
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 0a1731a0f0ef3..41644336e3587 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5858,6 +5858,13 @@
+ This feature may be more efficiently disabled
+ using the csdlock_debug- kernel parameter.
+
++ smp.panic_on_ipistall= [KNL]
++ If a csd_lock_timeout extends for more than
++ the specified number of milliseconds, panic the
++ system. By default, let CSD-lock acquisitions
++ take as long as they take. Specifying 300,000
++ for this value provides a 5-minute timeout.
++
+ smsc-ircc2.nopnp [HW] Don't use PNP to discover SMC devices
+ smsc-ircc2.ircc_cfg= [HW] Device configuration I/O port
+ smsc-ircc2.ircc_sir= [HW] SIR base I/O port
+diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt
+index 294693a8906cf..10540aa7afa1a 100644
+--- a/Documentation/devicetree/bindings/mfd/mt6397.txt
++++ b/Documentation/devicetree/bindings/mfd/mt6397.txt
+@@ -22,8 +22,9 @@ compatible:
+ "mediatek,mt6323" for PMIC MT6323
+ "mediatek,mt6331" for PMIC MT6331 and MT6332
+ "mediatek,mt6357" for PMIC MT6357
+- "mediatek,mt6358" for PMIC MT6358 and MT6366
++ "mediatek,mt6358" for PMIC MT6358
+ "mediatek,mt6359" for PMIC MT6359
++ "mediatek,mt6366", "mediatek,mt6358" for PMIC MT6366
+ "mediatek,mt6397" for PMIC MT6397
+
+ Optional subnodes:
+@@ -40,6 +41,7 @@ Optional subnodes:
+ - compatible: "mediatek,mt6323-regulator"
+ see ../regulator/mt6323-regulator.txt
+ - compatible: "mediatek,mt6358-regulator"
++ - compatible: "mediatek,mt6366-regulator", "mediatek-mt6358-regulator"
+ see ../regulator/mt6358-regulator.txt
+ - compatible: "mediatek,mt6397-regulator"
+ see ../regulator/mt6397-regulator.txt
+diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
+index 029569d5fcf35..24c733c10e0e9 100644
+--- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
++++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
+@@ -32,6 +32,27 @@ properties:
+
+ vdd3-supply: true
+
++ qcom,tune-usb2-disc-thres:
++ $ref: /schemas/types.yaml#/definitions/uint8
++ description: High-Speed disconnect threshold
++ minimum: 0
++ maximum: 7
++ default: 0
++
++ qcom,tune-usb2-amplitude:
++ $ref: /schemas/types.yaml#/definitions/uint8
++ description: High-Speed transmit amplitude
++ minimum: 0
++ maximum: 15
++ default: 8
++
++ qcom,tune-usb2-preem:
++ $ref: /schemas/types.yaml#/definitions/uint8
++ description: High-Speed TX pre-emphasis tuning
++ minimum: 0
++ maximum: 7
++ default: 5
++
+ required:
+ - compatible
+ - reg
+diff --git a/Documentation/devicetree/bindings/serial/rs485.yaml b/Documentation/devicetree/bindings/serial/rs485.yaml
+index 303a443d9e29b..9418fd66a8e95 100644
+--- a/Documentation/devicetree/bindings/serial/rs485.yaml
++++ b/Documentation/devicetree/bindings/serial/rs485.yaml
+@@ -29,6 +29,10 @@ properties:
+ default: 0
+ maximum: 100
+
++ rs485-rts-active-high:
++ description: drive RTS high when sending (this is the default).
++ $ref: /schemas/types.yaml#/definitions/flag
++
+ rs485-rts-active-low:
+ description: drive RTS low when sending (default is high).
+ $ref: /schemas/types.yaml#/definitions/flag
+diff --git a/Documentation/devicetree/bindings/serial/serial.yaml b/Documentation/devicetree/bindings/serial/serial.yaml
+index ea277560a5966..5727bd549deca 100644
+--- a/Documentation/devicetree/bindings/serial/serial.yaml
++++ b/Documentation/devicetree/bindings/serial/serial.yaml
+@@ -96,7 +96,7 @@ then:
+ rts-gpios: false
+
+ patternProperties:
+- "^bluetooth|gnss|gps|mcu$":
++ "^(bluetooth|gnss|gps|mcu)$":
+ if:
+ type: object
+ then:
+diff --git a/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml b/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
+index bffdab0b01859..fbac40b958dde 100644
+--- a/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
++++ b/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
+@@ -169,27 +169,27 @@ properties:
+ - const: tgib0
+ - const: tgic0
+ - const: tgid0
+- - const: tgiv0
++ - const: tciv0
+ - const: tgie0
+ - const: tgif0
+ - const: tgia1
+ - const: tgib1
+- - const: tgiv1
+- - const: tgiu1
++ - const: tciv1
++ - const: tciu1
+ - const: tgia2
+ - const: tgib2
+- - const: tgiv2
+- - const: tgiu2
++ - const: tciv2
++ - const: tciu2
+ - const: tgia3
+ - const: tgib3
+ - const: tgic3
+ - const: tgid3
+- - const: tgiv3
++ - const: tciv3
+ - const: tgia4
+ - const: tgib4
+ - const: tgic4
+ - const: tgid4
+- - const: tgiv4
++ - const: tciv4
+ - const: tgiu5
+ - const: tgiv5
+ - const: tgiw5
+@@ -197,18 +197,18 @@ properties:
+ - const: tgib6
+ - const: tgic6
+ - const: tgid6
+- - const: tgiv6
++ - const: tciv6
+ - const: tgia7
+ - const: tgib7
+ - const: tgic7
+ - const: tgid7
+- - const: tgiv7
++ - const: tciv7
+ - const: tgia8
+ - const: tgib8
+ - const: tgic8
+ - const: tgid8
+- - const: tgiv8
+- - const: tgiu8
++ - const: tciv8
++ - const: tciu8
+
+ clocks:
+ maxItems: 1
+@@ -285,16 +285,16 @@ examples:
+ <GIC_SPI 211 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 212 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 213 IRQ_TYPE_EDGE_RISING>;
+- interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", "tgiv0", "tgie0",
++ interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", "tciv0", "tgie0",
+ "tgif0",
+- "tgia1", "tgib1", "tgiv1", "tgiu1",
+- "tgia2", "tgib2", "tgiv2", "tgiu2",
+- "tgia3", "tgib3", "tgic3", "tgid3", "tgiv3",
+- "tgia4", "tgib4", "tgic4", "tgid4", "tgiv4",
++ "tgia1", "tgib1", "tciv1", "tciu1",
++ "tgia2", "tgib2", "tciv2", "tciu2",
++ "tgia3", "tgib3", "tgic3", "tgid3", "tciv3",
++ "tgia4", "tgib4", "tgic4", "tgid4", "tciv4",
+ "tgiu5", "tgiv5", "tgiw5",
+- "tgia6", "tgib6", "tgic6", "tgid6", "tgiv6",
+- "tgia7", "tgib7", "tgic7", "tgid7", "tgiv7",
+- "tgia8", "tgib8", "tgic8", "tgid8", "tgiv8", "tgiu8";
++ "tgia6", "tgib6", "tgic6", "tgid6", "tciv6",
++ "tgia7", "tgib7", "tgic7", "tgid7", "tciv7",
++ "tgia8", "tgib8", "tgic8", "tgid8", "tciv8", "tciu8";
+ clocks = <&cpg CPG_MOD R9A07G044_MTU_X_MCK_MTU3>;
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G044_MTU_X_PRESET_MTU3>;
+diff --git a/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml b/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
+index ff3a1707ef570..6d4cfd943f584 100644
+--- a/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
++++ b/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
+@@ -36,7 +36,11 @@ properties:
+
+ vdd-supply:
+ description:
+- VDD power supply to the hub
++ 3V3 power supply to the hub
++
++ vdd2-supply:
++ description:
++ 1V2 power supply to the hub
+
+ peer-hub:
+ $ref: /schemas/types.yaml#/definitions/phandle
+@@ -62,6 +66,7 @@ allOf:
+ properties:
+ reset-gpios: false
+ vdd-supply: false
++ vdd2-supply: false
+ peer-hub: false
+ i2c-bus: false
+ else:
+diff --git a/Documentation/i2c/busses/i2c-i801.rst b/Documentation/i2c/busses/i2c-i801.rst
+index e76e68ccf7182..10eced6c2e462 100644
+--- a/Documentation/i2c/busses/i2c-i801.rst
++++ b/Documentation/i2c/busses/i2c-i801.rst
+@@ -47,6 +47,7 @@ Supported adapters:
+ * Intel Alder Lake (PCH)
+ * Intel Raptor Lake (PCH)
+ * Intel Meteor Lake (SOC and PCH)
++ * Intel Birch Stream (SOC)
+
+ Datasheets: Publicly available at the Intel website
+
+diff --git a/Makefile b/Makefile
+index 5c418efbe89b6..1eefa893f048b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 0
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi b/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi
+index 42bcbf10957c4..9f9084269ef58 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi
++++ b/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi
+@@ -181,5 +181,13 @@
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts
+index e04d2e5ea51aa..72e960c888ac8 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts
+@@ -85,5 +85,13 @@
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts
+index a399800139d9c..750e17482371c 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts
+@@ -88,5 +88,13 @@
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts
+index fad3473810a2e..2bdbc7d18b0eb 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts
+@@ -122,5 +122,13 @@
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts
+index 5b2b7b8b3b123..b226bef3369cf 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts
+@@ -145,6 +145,14 @@
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts
+index d0a26b643b82f..192b8db5a89c3 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts
+@@ -145,5 +145,13 @@
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts
+index 9f21d6d6d35b7..0198b5f9e4a75 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts
+@@ -81,5 +81,13 @@
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts
+index 2561072917021..73ff1694a4a0b 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts
+@@ -148,5 +148,13 @@
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts
+index 707c561703ed8..55fc9f44cbc7f 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts
+@@ -227,6 +227,14 @@
+ label = "wan";
+ };
+
++ port@5 {
++ status = "disabled";
++ };
++
++ port@7 {
++ status = "disabled";
++ };
++
+ port@8 {
+ label = "cpu";
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts
+index c914569ddd5ec..e6d26987865d0 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts
+@@ -144,6 +144,14 @@
+ label = "wan";
+ };
+
++ port@5 {
++ status = "disabled";
++ };
++
++ port@7 {
++ status = "disabled";
++ };
++
+ port@8 {
+ label = "cpu";
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts
+index f050acbea0b20..3124dfd01b944 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts
+@@ -192,6 +192,14 @@
+ label = "wan";
+ };
+
++ port@5 {
++ status = "disabled";
++ };
++
++ port@7 {
++ status = "disabled";
++ };
++
+ port@8 {
+ label = "cpu";
+ phy-mode = "rgmii";
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts
+index e8991d4e248ce..e374062eb5b76 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts
+@@ -107,5 +107,13 @@
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts
+index afc635c8cdebb..badafa024d24c 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts
+@@ -120,5 +120,13 @@
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts
+index 7cfa4607ef311..cf95af9db1e66 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts
+@@ -107,5 +107,13 @@
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts
+index d55e10095eae7..992c19e1cfa17 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts
+@@ -75,5 +75,13 @@
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts
+index ccf031c0e276d..4d0ba315a2049 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts
+@@ -147,5 +147,13 @@
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts
+index e28f7a3501179..83c429afc2974 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts
+@@ -158,5 +158,13 @@
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts b/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
+index 03ad614e6b721..0bf5106f7012c 100644
+--- a/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
++++ b/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
+@@ -124,6 +124,14 @@
+ full-duplex;
+ };
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts b/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
+index 26c12bfb0bdd4..25eeacf6a2484 100644
+--- a/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
++++ b/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
+@@ -185,6 +185,14 @@
+ full-duplex;
+ };
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/broadcom/bcm953012er.dts b/arch/arm/boot/dts/broadcom/bcm953012er.dts
+index 4fe3b36533767..d939ec9f4a9e7 100644
+--- a/arch/arm/boot/dts/broadcom/bcm953012er.dts
++++ b/arch/arm/boot/dts/broadcom/bcm953012er.dts
+@@ -84,6 +84,14 @@
+ label = "cpu";
+ ethernet = <&gmac0>;
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
+index 884d99297d4cf..f516e0426bb9e 100644
+--- a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
++++ b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
+@@ -45,11 +45,11 @@
+
+ event-hall-sensor {
+ label = "Hall Effect Sensor";
+- gpios = <&tlmm 110 GPIO_ACTIVE_HIGH>;
+- interrupts = <&tlmm 110 IRQ_TYPE_EDGE_FALLING>;
++ gpios = <&tlmm 110 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ debounce-interval = <15>;
++ linux,can-disable;
+ wakeup-source;
+ };
+ };
+diff --git a/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
+index fc4f52f9e9f7d..63e21aa236429 100644
+--- a/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
+@@ -47,14 +47,12 @@
+ };
+ };
+
+- regulators {
+- vsdcc_fixed: vsdcc-regulator {
+- compatible = "regulator-fixed";
+- regulator-name = "SDCC Power";
+- regulator-min-microvolt = <2700000>;
+- regulator-max-microvolt = <2700000>;
+- regulator-always-on;
+- };
++ vsdcc_fixed: vsdcc-regulator {
++ compatible = "regulator-fixed";
++ regulator-name = "SDCC Power";
++ regulator-min-microvolt = <2700000>;
++ regulator-max-microvolt = <2700000>;
++ regulator-always-on;
+ };
+
+ soc: soc {
+diff --git a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
+index c66de9dd12dfc..6a83923aa4612 100644
+--- a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
++++ b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
+@@ -239,7 +239,7 @@
+ };
+
+ keyboard_pins: keyboard {
+- pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_02";
++ pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_2";
+ bias-pull-up;
+ };
+
+diff --git a/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi
+index 65480a9f5cc4e..842f2b17c4a81 100644
+--- a/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi
++++ b/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi
+@@ -376,7 +376,6 @@
+ };
+ };
+
+-
+ ltdc_pins_a: ltdc-0 {
+ pins {
+ pinmux = <STM32_PINMUX('E', 4, AF14)>, /* LCD_B0 */
+diff --git a/arch/arm/boot/dts/ti/omap/am3517-evm.dts b/arch/arm/boot/dts/ti/omap/am3517-evm.dts
+index af9df15274bed..866f68c5b504d 100644
+--- a/arch/arm/boot/dts/ti/omap/am3517-evm.dts
++++ b/arch/arm/boot/dts/ti/omap/am3517-evm.dts
+@@ -271,13 +271,6 @@
+ >;
+ };
+
+- leds_pins: leds-pins {
+- pinctrl-single,pins = <
+- OMAP3_WKUP_IOPAD(0x2a24, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu0.gpio_11 */
+- OMAP3_WKUP_IOPAD(0x2a26, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu1.gpio_31 */
+- >;
+- };
+-
+ mmc1_pins: mmc1-pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x2144, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_clk.sdmmc1_clk */
+@@ -355,3 +348,12 @@
+ >;
+ };
+ };
++
++&omap3_pmx_wkup {
++ leds_pins: leds-pins {
++ pinctrl-single,pins = <
++ OMAP3_WKUP_IOPAD(0x2a24, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu0.gpio_11 */
++ OMAP3_WKUP_IOPAD(0x2a26, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu1.gpio_31 */
++ >;
++ };
++};
+diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
+index 72529f5e2bed9..a41b503b7dcde 100644
+--- a/arch/arm/include/asm/arm_pmuv3.h
++++ b/arch/arm/include/asm/arm_pmuv3.h
+@@ -23,6 +23,8 @@
+ #define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0)
+ #define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1)
+ #define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2)
++#define PMCEID2 __ACCESS_CP15(c9, 0, c14, 4)
++#define PMCEID3 __ACCESS_CP15(c9, 0, c14, 5)
+ #define PMMIR __ACCESS_CP15(c9, 0, c14, 6)
+ #define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7)
+
+@@ -150,21 +152,6 @@ static inline u64 read_pmccntr(void)
+ return read_sysreg(PMCCNTR);
+ }
+
+-static inline void write_pmxevcntr(u32 val)
+-{
+- write_sysreg(val, PMXEVCNTR);
+-}
+-
+-static inline u32 read_pmxevcntr(void)
+-{
+- return read_sysreg(PMXEVCNTR);
+-}
+-
+-static inline void write_pmxevtyper(u32 val)
+-{
+- write_sysreg(val, PMXEVTYPER);
+-}
+-
+ static inline void write_pmcntenset(u32 val)
+ {
+ write_sysreg(val, PMCNTENSET);
+@@ -205,16 +192,6 @@ static inline void write_pmuserenr(u32 val)
+ write_sysreg(val, PMUSERENR);
+ }
+
+-static inline u32 read_pmceid0(void)
+-{
+- return read_sysreg(PMCEID0);
+-}
+-
+-static inline u32 read_pmceid1(void)
+-{
+- return read_sysreg(PMCEID1);
+-}
+-
+ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
+ static inline void kvm_clr_pmu_events(u32 clr) {}
+ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
+@@ -231,6 +208,7 @@ static inline void kvm_vcpu_pmu_resync_el0(void) {}
+
+ /* PMU Version in DFR Register */
+ #define ARMV8_PMU_DFR_VER_NI 0
++#define ARMV8_PMU_DFR_VER_V3P1 0x4
+ #define ARMV8_PMU_DFR_VER_V3P4 0x5
+ #define ARMV8_PMU_DFR_VER_V3P5 0x6
+ #define ARMV8_PMU_DFR_VER_IMP_DEF 0xF
+@@ -251,4 +229,24 @@ static inline bool is_pmuv3p5(int pmuver)
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
+ }
+
++static inline u64 read_pmceid0(void)
++{
++ u64 val = read_sysreg(PMCEID0);
++
++ if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
++ val |= (u64)read_sysreg(PMCEID2) << 32;
++
++ return val;
++}
++
++static inline u64 read_pmceid1(void)
++{
++ u64 val = read_sysreg(PMCEID1);
++
++ if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
++ val |= (u64)read_sysreg(PMCEID3) << 32;
++
++ return val;
++}
++
+ #endif
+diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
+index c6aded1b069cf..e2a1916013e75 100644
+--- a/arch/arm/include/asm/dma.h
++++ b/arch/arm/include/asm/dma.h
+@@ -12,6 +12,9 @@
+ extern phys_addr_t arm_dma_zone_size; \
+ arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \
+ (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
++
++extern phys_addr_t arm_dma_limit;
++#define ARCH_LOW_ADDRESS_LIMIT arm_dma_limit
+ #endif
+
+ #ifdef CONFIG_ISA_DMA_API
+diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
+index 58e039a851af0..3c82975d46db3 100644
+--- a/arch/arm/include/asm/exception.h
++++ b/arch/arm/include/asm/exception.h
+@@ -10,10 +10,6 @@
+
+ #include <linux/interrupt.h>
+
+-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ #define __exception_irq_entry __irq_entry
+-#else
+-#define __exception_irq_entry
+-#endif
+
+ #endif /* __ASM_ARM_EXCEPTION_H */
+diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
+index d71ab61430b26..de75ae4d5ab41 100644
+--- a/arch/arm/lib/memset.S
++++ b/arch/arm/lib/memset.S
+@@ -17,6 +17,7 @@ ENTRY(__memset)
+ ENTRY(mmioset)
+ WEAK(memset)
+ UNWIND( .fnstart )
++ and r1, r1, #255 @ cast to unsigned char
+ ands r3, r0, #3 @ 1 unaligned?
+ mov ip, r0 @ preserve r0 as return value
+ bne 6f @ 1
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index c392e18f1e431..a395b6c0aae2a 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -164,9 +164,6 @@ static int xen_starting_cpu(unsigned int cpu)
+ BUG_ON(err);
+ per_cpu(xen_vcpu, cpu) = vcpup;
+
+- if (!xen_kernel_unmapped_at_usr())
+- xen_setup_runstate_info(cpu);
+-
+ after_register_vcpu_info:
+ enable_percpu_irq(xen_events_irq, 0);
+ return 0;
+@@ -487,7 +484,8 @@ static int __init xen_guest_init(void)
+ * for secondary CPUs as they are brought up.
+ * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
+ */
+- xen_vcpu_info = alloc_percpu(struct vcpu_info);
++ xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
++ 1 << fls(sizeof(struct vcpu_info) - 1));
+ if (xen_vcpu_info == NULL)
+ return -ENOMEM;
+
+@@ -523,9 +521,6 @@ static int __init xen_guest_init(void)
+ return -EINVAL;
+ }
+
+- if (!xen_kernel_unmapped_at_usr())
+- xen_time_setup_guest();
+-
+ if (xen_initial_domain())
+ pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
+
+@@ -535,7 +530,13 @@ static int __init xen_guest_init(void)
+ }
+ early_initcall(xen_guest_init);
+
+-static int __init xen_pm_init(void)
++static int xen_starting_runstate_cpu(unsigned int cpu)
++{
++ xen_setup_runstate_info(cpu);
++ return 0;
++}
++
++static int __init xen_late_init(void)
+ {
+ if (!xen_domain())
+ return -ENODEV;
+@@ -548,9 +549,16 @@ static int __init xen_pm_init(void)
+ do_settimeofday64(&ts);
+ }
+
+- return 0;
++ if (xen_kernel_unmapped_at_usr())
++ return 0;
++
++ xen_time_setup_guest();
++
++ return cpuhp_setup_state(CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
++ "arm/xen_runstate:starting",
++ xen_starting_runstate_cpu, NULL);
+ }
+-late_initcall(xen_pm_init);
++late_initcall(xen_late_init);
+
+
+ /* empty stubs */
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 78f20e6327120..6062a52a084ff 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1368,6 +1368,8 @@ choice
+ config CPU_BIG_ENDIAN
+ bool "Build big-endian kernel"
+ depends on !LD_IS_LLD || LLD_VERSION >= 130000
++ # https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
++ depends on AS_IS_GNU || AS_VERSION >= 150000
+ help
+ Say Y if you plan on running a kernel with a big-endian userspace.
+
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+index d2f5345d05600..717288bbdb8b6 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+@@ -1186,26 +1186,34 @@
+ dma-coherent;
+ };
+
+- usb0: usb@3100000 {
+- status = "disabled";
+- compatible = "snps,dwc3";
+- reg = <0x0 0x3100000 0x0 0x10000>;
+- interrupts = <0 80 0x4>; /* Level high type */
+- dr_mode = "host";
+- snps,quirk-frame-length-adjustment = <0x20>;
+- snps,dis_rxdet_inp3_quirk;
+- snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
+- };
++ bus: bus {
++ #address-cells = <2>;
++ #size-cells = <2>;
++ compatible = "simple-bus";
++ ranges;
++ dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x00000000>;
++
++ usb0: usb@3100000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3100000 0x0 0x10000>;
++ interrupts = <0 80 0x4>; /* Level high type */
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ status = "disabled";
++ };
+
+- usb1: usb@3110000 {
+- status = "disabled";
+- compatible = "snps,dwc3";
+- reg = <0x0 0x3110000 0x0 0x10000>;
+- interrupts = <0 81 0x4>; /* Level high type */
+- dr_mode = "host";
+- snps,quirk-frame-length-adjustment = <0x20>;
+- snps,dis_rxdet_inp3_quirk;
+- snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ usb1: usb@3110000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3110000 0x0 0x10000>;
++ interrupts = <0 81 0x4>; /* Level high type */
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ status = "disabled";
++ };
+ };
+
+ ccn@4000000 {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+index 236fe44f779df..738024baaa578 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+@@ -399,6 +399,7 @@
+ "pll8k", "pll11k", "clkext3";
+ dmas = <&sdma2 24 25 0x80000000>;
+ dma-names = "rx";
++ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+index aa38dd6dc9ba5..1bb1d0c1bae4d 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+@@ -371,6 +371,7 @@
+ "pll8k", "pll11k", "clkext3";
+ dmas = <&sdma2 24 25 0x80000000>;
+ dma-names = "rx";
++ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
+index 28db9349ed62c..267ceffc02d84 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
+@@ -284,7 +284,6 @@
+ usb_hub_2_x: hub@1 {
+ compatible = "usbbda,5411";
+ reg = <1>;
+- reset-gpios = <&gpio4 25 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_usb_hub>;
+ peer-hub = <&usb_hub_3_x>;
+ };
+@@ -293,7 +292,6 @@
+ usb_hub_3_x: hub@2 {
+ compatible = "usbbda,411";
+ reg = <2>;
+- reset-gpios = <&gpio4 25 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_usb_hub>;
+ peer-hub = <&usb_hub_2_x>;
+ };
+@@ -443,7 +441,6 @@
+ pinctrl_usb1: usb1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO14__USB2_OTG_PWR 0x10
+- MX8MP_IOMUXC_SAI2_TXC__GPIO4_IO25 0x19
+ >;
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
+index 7764b4146e0ab..2bbdacb1313f9 100644
+--- a/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
+@@ -8,5 +8,5 @@
+ };
+
+ &jpegenc {
+- compatible = "nxp,imx8qm-jpgdec", "nxp,imx8qxp-jpgenc";
++ compatible = "nxp,imx8qm-jpgenc", "nxp,imx8qxp-jpgenc";
+ };
+diff --git a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
+index 32cfb3e2efc3a..47d45ff3d6f57 100644
+--- a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
++++ b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
+@@ -120,7 +120,7 @@
+ "mpp59", "mpp60", "mpp61";
+ marvell,function = "sdio";
+ };
+- cp0_spi0_pins: cp0-spi-pins-0 {
++ cp0_spi1_pins: cp0-spi-pins-1 {
+ marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
+ marvell,function = "spi1";
+ };
+@@ -170,7 +170,7 @@
+
+ &cp0_spi1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&cp0_spi0_pins>;
++ pinctrl-0 = <&cp0_spi1_pins>;
+ reg = <0x700680 0x50>, /* control */
+ <0x2000000 0x1000000>; /* CS0 */
+ status = "okay";
+diff --git a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
+index c7de1ea0d470a..6eb6a175de38d 100644
+--- a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
++++ b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
+@@ -307,7 +307,7 @@
+ &cp0_spi1 {
+ status = "disabled";
+ pinctrl-names = "default";
+- pinctrl-0 = <&cp0_spi0_pins>;
++ pinctrl-0 = <&cp0_spi1_pins>;
+ reg = <0x700680 0x50>;
+
+ flash@0 {
+@@ -371,7 +371,7 @@
+ "mpp59", "mpp60", "mpp61";
+ marvell,function = "sdio";
+ };
+- cp0_spi0_pins: cp0-spi-pins-0 {
++ cp0_spi1_pins: cp0-spi-pins-1 {
+ marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
+ marvell,function = "spi1";
+ };
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi
+index 5f592f1d81e2e..fe08e131b7b9e 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi
+@@ -28,7 +28,7 @@
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+- spi-max-frequency = <136000000>;
++ spi-max-frequency = <102000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ };
+@@ -42,7 +42,7 @@
+ mmc@3400000 {
+ status = "okay";
+ bus-width = <4>;
+- cd-gpios = <&gpio TEGRA234_MAIN_GPIO(G, 7) GPIO_ACTIVE_HIGH>;
++ cd-gpios = <&gpio TEGRA234_MAIN_GPIO(G, 7) GPIO_ACTIVE_LOW>;
+ disable-wp;
+ };
+
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+index 95524e5bce826..ac69eacf8a6ba 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+@@ -43,12 +43,12 @@
+ <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+index 4f5541e9be0e9..dabe9f42a63ad 100644
+--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
++++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+@@ -172,6 +172,9 @@
+ pd-gpios = <&tlmm 32 GPIO_ACTIVE_HIGH>;
+
+ avdd-supply = <&pm8916_l6>;
++ a2vdd-supply = <&pm8916_l6>;
++ dvdd-supply = <&pm8916_l6>;
++ pvdd-supply = <&pm8916_l6>;
+ v1p2-supply = <&pm8916_l6>;
+ v3p3-supply = <&pm8916_l17>;
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq5332.dtsi b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+index 8bfc2db44624a..e40c55adff23d 100644
+--- a/arch/arm64/boot/dts/qcom/ipq5332.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+@@ -135,7 +135,7 @@
+ reg = <0x0 0x4a800000 0x0 0x100000>;
+ no-map;
+
+- hwlocks = <&tcsr_mutex 0>;
++ hwlocks = <&tcsr_mutex 3>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+index 47b8b1d6730ac..264845cecf925 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -211,7 +211,7 @@
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_region>;
+- hwlocks = <&tcsr_mutex 0>;
++ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ soc: soc@0 {
+@@ -393,7 +393,7 @@
+
+ tcsr_mutex: hwlock@1905000 {
+ compatible = "qcom,ipq6018-tcsr-mutex", "qcom,tcsr-mutex";
+- reg = <0x0 0x01905000 0x0 0x1000>;
++ reg = <0x0 0x01905000 0x0 0x20000>;
+ #hwlock-cells = <1>;
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+index 00ed71936b472..92fd924bbdbe5 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -101,7 +101,7 @@
+ reg = <0x0 0x4ab00000 0x0 0x100000>;
+ no-map;
+
+- hwlocks = <&tcsr_mutex 0>;
++ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ memory@4ac00000 {
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index 51aba071c1eb3..8a72ad4afd032 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -195,7 +195,7 @@
+ smem@4aa00000 {
+ compatible = "qcom,smem";
+ reg = <0x0 0x4aa00000 0x0 0x100000>;
+- hwlocks = <&tcsr_mutex 0>;
++ hwlocks = <&tcsr_mutex 3>;
+ no-map;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 33fb65d731046..3c934363368c3 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -1813,7 +1813,7 @@
+ #size-cells = <1>;
+ #iommu-cells = <1>;
+ compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1";
+- ranges = <0 0x01e20000 0x40000>;
++ ranges = <0 0x01e20000 0x20000>;
+ reg = <0x01ef0000 0x3000>;
+ clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ <&gcc GCC_APSS_TCU_CLK>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+index 6e24f0f2374fe..5a6b1942cfaa5 100644
+--- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+@@ -1447,7 +1447,7 @@
+ apps_iommu: iommu@1ef0000 {
+ compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1";
+ reg = <0x01ef0000 0x3000>;
+- ranges = <0 0x01e20000 0x40000>;
++ ranges = <0 0x01e20000 0x20000>;
+ clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ <&gcc GCC_APSS_TCU_CLK>;
+ clock-names = "iface", "bus";
+diff --git a/arch/arm64/boot/dts/qcom/msm8976.dtsi b/arch/arm64/boot/dts/qcom/msm8976.dtsi
+index f9f5afbcc52bb..4c5be22b47fee 100644
+--- a/arch/arm64/boot/dts/qcom/msm8976.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8976.dtsi
+@@ -379,7 +379,7 @@
+ smp2p-modem {
+ compatible = "qcom,smp2p";
+ interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
+- qcom,ipc = <&apcs 8 13>;
++ qcom,ipc = <&apcs 8 14>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+@@ -402,7 +402,7 @@
+ smp2p-wcnss {
+ compatible = "qcom,smp2p";
+ interrupts = <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>;
+- qcom,ipc = <&apcs 8 17>;
++ qcom,ipc = <&apcs 8 18>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <4>;
+@@ -428,9 +428,9 @@
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- qcom,ipc-1 = <&apcs 8 12>;
++ qcom,ipc-1 = <&apcs 8 13>;
+ qcom,ipc-2 = <&apcs 8 9>;
+- qcom,ipc-3 = <&apcs 8 18>;
++ qcom,ipc-3 = <&apcs 8 19>;
+
+ apps_smsm: apps@0 {
+ reg = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+index fcca1ba94da69..5fe5de9ceef99 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
++++ b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+@@ -109,11 +109,6 @@
+ qcom,client-id = <1>;
+ };
+
+- audio_mem: audio@cb400000 {
+- reg = <0 0xcb000000 0 0x400000>;
+- no-mem;
+- };
+-
+ qseecom_mem: qseecom@cb400000 {
+ reg = <0 0xcb400000 0 0x1c00000>;
+ no-mem;
+diff --git a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
+index eadba066972e8..0f7c591878962 100644
+--- a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
++++ b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
+@@ -13,7 +13,7 @@
+ compatible = "qcom,qrb2210-rb1", "qcom,qrb2210", "qcom,qcm2290";
+
+ aliases {
+- serial0 = &uart0;
++ serial0 = &uart4;
+ sdhc1 = &sdhc_1;
+ sdhc2 = &sdhc_2;
+ };
+@@ -150,15 +150,15 @@
+
+ pm2250_s3: s3 {
+ /* 0.4V-1.6625V -> 1.3V (Power tree requirements) */
+- regulator-min-microvolts = <1350000>;
+- regulator-max-microvolts = <1350000>;
++ regulator-min-microvolt = <1352000>;
++ regulator-max-microvolt = <1352000>;
+ regulator-boot-on;
+ };
+
+ pm2250_s4: s4 {
+ /* 1.2V-2.35V -> 2.05V (Power tree requirements) */
+- regulator-min-microvolts = <2072000>;
+- regulator-max-microvolts = <2072000>;
++ regulator-min-microvolt = <2072000>;
++ regulator-max-microvolt = <2072000>;
+ regulator-boot-on;
+ };
+
+@@ -166,47 +166,47 @@
+
+ pm2250_l2: l2 {
+ /* LPDDR4X VDD2 */
+- regulator-min-microvolts = <1136000>;
+- regulator-max-microvolts = <1136000>;
++ regulator-min-microvolt = <1136000>;
++ regulator-max-microvolt = <1136000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm2250_l3: l3 {
+ /* LPDDR4X VDDQ */
+- regulator-min-microvolts = <616000>;
+- regulator-max-microvolts = <616000>;
++ regulator-min-microvolt = <616000>;
++ regulator-max-microvolt = <616000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm2250_l4: l4 {
+- /* max = 3.05V -> max = just below 3V (SDHCI2) */
+- regulator-min-microvolts = <1648000>;
+- regulator-max-microvolts = <2992000>;
++ /* max = 3.05V -> max = 2.7V to disable 3V signaling (SDHCI2) */
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <2700000>;
+ regulator-allow-set-load;
+ };
+
+ pm2250_l5: l5 {
+ /* CSI/DSI */
+- regulator-min-microvolts = <1232000>;
+- regulator-max-microvolts = <1232000>;
++ regulator-min-microvolt = <1232000>;
++ regulator-max-microvolt = <1232000>;
+ regulator-allow-set-load;
+ regulator-boot-on;
+ };
+
+ pm2250_l6: l6 {
+ /* DRAM PLL */
+- regulator-min-microvolts = <928000>;
+- regulator-max-microvolts = <928000>;
++ regulator-min-microvolt = <928000>;
++ regulator-max-microvolt = <928000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm2250_l7: l7 {
+ /* Wi-Fi CX/MX */
+- regulator-min-microvolts = <664000>;
+- regulator-max-microvolts = <664000>;
++ regulator-min-microvolt = <664000>;
++ regulator-max-microvolt = <664000>;
+ };
+
+ /*
+@@ -216,37 +216,37 @@
+
+ pm2250_l10: l10 {
+ /* Wi-Fi RFA */
+- regulator-min-microvolts = <1300000>;
+- regulator-max-microvolts = <1300000>;
++ regulator-min-microvolt = <1304000>;
++ regulator-max-microvolt = <1304000>;
+ };
+
+ pm2250_l11: l11 {
+ /* GPS RF1 */
+- regulator-min-microvolts = <1000000>;
+- regulator-max-microvolts = <1000000>;
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <1000000>;
+ regulator-boot-on;
+ };
+
+ pm2250_l12: l12 {
+ /* USB PHYs */
+- regulator-min-microvolts = <928000>;
+- regulator-max-microvolts = <928000>;
++ regulator-min-microvolt = <928000>;
++ regulator-max-microvolt = <928000>;
+ regulator-allow-set-load;
+ regulator-boot-on;
+ };
+
+ pm2250_l13: l13 {
+ /* USB/QFPROM/PLLs */
+- regulator-min-microvolts = <1800000>;
+- regulator-max-microvolts = <1800000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
+ regulator-boot-on;
+ };
+
+ pm2250_l14: l14 {
+ /* SDHCI1 VQMMC */
+- regulator-min-microvolts = <1800000>;
+- regulator-max-microvolts = <1800000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
+ /* Broken hardware, never turn it off! */
+ regulator-always-on;
+@@ -254,8 +254,8 @@
+
+ pm2250_l15: l15 {
+ /* WCD/DSI/BT VDDIO */
+- regulator-min-microvolts = <1800000>;
+- regulator-max-microvolts = <1800000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
+ regulator-always-on;
+ regulator-boot-on;
+@@ -263,47 +263,47 @@
+
+ pm2250_l16: l16 {
+ /* GPS RF2 */
+- regulator-min-microvolts = <1800000>;
+- regulator-max-microvolts = <1800000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ };
+
+ pm2250_l17: l17 {
+- regulator-min-microvolts = <3000000>;
+- regulator-max-microvolts = <3000000>;
++ regulator-min-microvolt = <3000000>;
++ regulator-max-microvolt = <3000000>;
+ };
+
+ pm2250_l18: l18 {
+ /* VDD_PXn */
+- regulator-min-microvolts = <1800000>;
+- regulator-max-microvolts = <1800000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
+ };
+
+ pm2250_l19: l19 {
+ /* VDD_PXn */
+- regulator-min-microvolts = <1800000>;
+- regulator-max-microvolts = <1800000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
+ };
+
+ pm2250_l20: l20 {
+ /* SDHCI1 VMMC */
+- regulator-min-microvolts = <2856000>;
+- regulator-max-microvolts = <2856000>;
++ regulator-min-microvolt = <2400000>;
++ regulator-max-microvolt = <3600000>;
+ regulator-allow-set-load;
+ };
+
+ pm2250_l21: l21 {
+ /* SDHCI2 VMMC */
+- regulator-min-microvolts = <2960000>;
+- regulator-max-microvolts = <3300000>;
++ regulator-min-microvolt = <2960000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-allow-set-load;
+ regulator-boot-on;
+ };
+
+ pm2250_l22: l22 {
+ /* Wi-Fi */
+- regulator-min-microvolts = <3312000>;
+- regulator-max-microvolts = <3312000>;
++ regulator-min-microvolt = <3312000>;
++ regulator-max-microvolt = <3312000>;
+ };
+ };
+ };
+@@ -357,7 +357,7 @@
+ };
+
+ /* UART connected to the Micro-USB port via a FTDI chip */
+-&uart0 {
++&uart4 {
+ compatible = "qcom,geni-debug-uart";
+ status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 925428a5f6aea..91bb58c6b1a61 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -649,18 +649,6 @@
+ };
+ };
+
+- eud_typec: connector {
+- compatible = "usb-c-connector";
+-
+- ports {
+- port@0 {
+- con_eud: endpoint {
+- remote-endpoint = <&eud_con>;
+- };
+- };
+- };
+- };
+-
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+@@ -869,7 +857,8 @@
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>, <&sleep_clk>,
+ <0>, <&pcie1_lane>,
+- <0>, <0>, <0>, <0>;
++ <0>, <0>, <0>,
++ <&usb_1_ssphy>;
+ clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk",
+ "pcie_0_pipe_clk", "pcie_1_pipe_clk",
+ "ufs_phy_rx_symbol_0_clk", "ufs_phy_rx_symbol_1_clk",
+@@ -3624,6 +3613,8 @@
+ <0 0x88e2000 0 0x1000>;
+ interrupts-extended = <&pdc 11 IRQ_TYPE_LEVEL_HIGH>;
+
++ status = "disabled";
++
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -3634,13 +3625,6 @@
+ remote-endpoint = <&usb2_role_switch>;
+ };
+ };
+-
+- port@1 {
+- reg = <1>;
+- eud_con: endpoint {
+- remote-endpoint = <&con_eud>;
+- };
+- };
+ };
+ };
+
+@@ -5363,6 +5347,14 @@
+ reg = <0 0x18591000 0 0x1000>,
+ <0 0x18592000 0 0x1000>,
+ <0 0x18593000 0 0x1000>;
++
++ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "dcvsh-irq-0",
++ "dcvsh-irq-1",
++ "dcvsh-irq-2";
++
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
+ clock-names = "xo", "alternate";
+ #freq-domain-cells = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
+index 84cd2e39266fe..ba2043d67370a 100644
+--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
+@@ -1328,7 +1328,8 @@
+ compatible = "qcom,sdm670-pdc", "qcom,pdc";
+ reg = <0 0x0b220000 0 0x30000>;
+ qcom,pdc-ranges = <0 480 40>, <41 521 7>, <49 529 4>,
+- <54 534 24>, <79 559 30>, <115 630 7>;
++ <54 534 24>, <79 559 15>, <94 609 15>,
++ <115 630 7>;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+index f86e7acdfd99f..0ab5e8f53ac9f 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+@@ -143,16 +143,20 @@
+ };
+ };
+
++&cpufreq_hw {
++ /delete-property/ interrupts-extended; /* reference to lmh_cluster[01] */
++};
++
+ &psci {
+- /delete-node/ cpu0;
+- /delete-node/ cpu1;
+- /delete-node/ cpu2;
+- /delete-node/ cpu3;
+- /delete-node/ cpu4;
+- /delete-node/ cpu5;
+- /delete-node/ cpu6;
+- /delete-node/ cpu7;
+- /delete-node/ cpu-cluster0;
++ /delete-node/ power-domain-cpu0;
++ /delete-node/ power-domain-cpu1;
++ /delete-node/ power-domain-cpu2;
++ /delete-node/ power-domain-cpu3;
++ /delete-node/ power-domain-cpu4;
++ /delete-node/ power-domain-cpu5;
++ /delete-node/ power-domain-cpu6;
++ /delete-node/ power-domain-cpu7;
++ /delete-node/ power-domain-cluster;
+ };
+
+ &cpus {
+@@ -275,6 +279,14 @@
+ &CLUSTER_SLEEP_0>;
+ };
+
++&lmh_cluster0 {
++ status = "disabled";
++};
++
++&lmh_cluster1 {
++ status = "disabled";
++};
++
+ /*
+ * Reserved memory changes
+ *
+@@ -338,6 +350,8 @@
+
+
+ &apps_rsc {
++ /delete-property/ power-domains;
++
+ regulators-0 {
+ compatible = "qcom,pm8998-rpmh-regulators";
+ qcom,pmic-id = "a";
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+index b3c27a5247429..1516113391edc 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+@@ -716,6 +716,8 @@
+ vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+ vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+ vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
++
++ qcom,snoc-host-cap-8bit-quirk;
+ };
+
+ /* PINCTRL - additions to nodes defined in sdm845.dtsi */
+diff --git a/arch/arm64/boot/dts/qcom/sdx75-idp.dts b/arch/arm64/boot/dts/qcom/sdx75-idp.dts
+index 10d15871f2c48..a14e0650c4a8a 100644
+--- a/arch/arm64/boot/dts/qcom/sdx75-idp.dts
++++ b/arch/arm64/boot/dts/qcom/sdx75-idp.dts
+@@ -44,7 +44,7 @@
+ };
+
+ &apps_rsc {
+- pmx75-rpmh-regulators {
++ regulators-0 {
+ compatible = "qcom,pmx75-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+index d7c1a40617c64..197f8fed19a29 100644
+--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+@@ -1208,7 +1208,7 @@
+
+ apps_smmu: iommu@c600000 {
+ compatible = "qcom,sm6125-smmu-500", "qcom,smmu-500", "arm,mmu-500";
+- reg = <0xc600000 0x80000>;
++ reg = <0x0c600000 0x80000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index 06c53000bb74d..19c6003dca153 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -1893,8 +1893,12 @@
+ ranges;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
++ <&gcc GCC_PCIE_0_CLKREF_CLK>,
+ <&gcc GCC_PCIE0_PHY_REFGEN_CLK>;
+- clock-names = "aux", "cfg_ahb", "refgen";
++ clock-names = "aux",
++ "cfg_ahb",
++ "ref",
++ "refgen";
+
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "phy";
+@@ -1991,8 +1995,12 @@
+ ranges;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
++ <&gcc GCC_PCIE_1_CLKREF_CLK>,
+ <&gcc GCC_PCIE1_PHY_REFGEN_CLK>;
+- clock-names = "aux", "cfg_ahb", "refgen";
++ clock-names = "aux",
++ "cfg_ahb",
++ "ref",
++ "refgen";
+
+ resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ reset-names = "phy";
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index 00604bf7724f4..a94e069da83d5 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -2964,7 +2964,7 @@
+ };
+
+ qup_uart18_default: qup-uart18-default-state {
+- pins = "gpio58", "gpio59";
++ pins = "gpio68", "gpio69";
+ function = "qup18";
+ drive-strength = <2>;
+ bias-disable;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
+index 0bd80e5157544..97af4f9128285 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
+@@ -137,6 +137,18 @@
+ vin-supply = <&vcc5v0_sys>;
+ };
+
++ vcc3v3_pcie2x1l0: vcc3v3-pcie2x1l0-regulator {
++ compatible = "regulator-fixed";
++ enable-active-high;
++ gpio = <&gpio4 RK_PC2 GPIO_ACTIVE_HIGH>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pcie_m2_1_pwren>;
++ regulator-name = "vcc3v3_pcie2x1l0";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ vin-supply = <&vcc5v0_sys>;
++ };
++
+ vcc3v3_pcie30: vcc3v3-pcie30-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+@@ -421,6 +433,14 @@
+ status = "okay";
+ };
+
++&pcie2x1l1 {
++ reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
++ vpcie3v3-supply = <&vcc3v3_pcie2x1l0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pcie2_1_rst>;
++ status = "okay";
++};
++
+ &pcie2x1l2 {
+ reset-gpios = <&gpio4 RK_PA4 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc_3v3_pcie20>;
+@@ -467,6 +487,10 @@
+ rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
++ pcie2_1_rst: pcie2-1-rst {
++ rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
++ };
++
+ pcie2_2_rst: pcie2-2-rst {
+ rockchip,pins = <4 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+@@ -474,6 +498,10 @@
+ pcie_m2_0_pwren: pcie-m20-pwren {
+ rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
++
++ pcie_m2_1_pwren: pcie-m21-pwren {
++ rockchip,pins = <4 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
++ };
+ };
+
+ usb {
+diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile
+index e7b8e2e7f083d..8bd5acc6d6835 100644
+--- a/arch/arm64/boot/dts/ti/Makefile
++++ b/arch/arm64/boot/dts/ti/Makefile
+@@ -9,6 +9,8 @@
+ # alphabetically.
+
+ # Boards with AM62x SoC
++k3-am625-sk-hdmi-audio-dtbs := k3-am625-sk.dtb k3-am62x-sk-hdmi-audio.dtbo
++k3-am62-lp-sk-hdmi-audio-dtbs := k3-am62-lp-sk.dtb k3-am62x-sk-hdmi-audio.dtbo
+ dtb-$(CONFIG_ARCH_K3) += k3-am625-beagleplay.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-am625-phyboard-lyra-rdk.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-am625-sk.dtb
+@@ -19,7 +21,8 @@ dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-dahlia.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-dev.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-yavia.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-am62-lp-sk.dtb
+-dtb-$(CONFIG_ARCH_K3) += k3-am62x-sk-hdmi-audio.dtbo
++dtb-$(CONFIG_ARCH_K3) += k3-am625-sk-hdmi-audio.dtb
++dtb-$(CONFIG_ARCH_K3) += k3-am62-lp-sk-hdmi-audio.dtb
+
+ # Boards with AM62Ax SoC
+ dtb-$(CONFIG_ARCH_K3) += k3-am62a7-sk.dtb
+@@ -66,6 +69,8 @@ dtb-$(CONFIG_ARCH_K3) += k3-j721e-sk.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-am68-sk-base-board.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-j721s2-common-proc-board.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-j721s2-evm-gesi-exp-board.dtbo
++k3-j721s2-evm-dtbs := k3-j721s2-common-proc-board.dtb k3-j721s2-evm-gesi-exp-board.dtbo
++dtb-$(CONFIG_ARCH_K3) += k3-j721s2-evm.dtb
+
+ # Boards with J784s4 SoC
+ dtb-$(CONFIG_ARCH_K3) += k3-am69-sk.dtb
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+index 40992e7e4c308..5db52f2372534 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+@@ -1061,6 +1061,7 @@
+ vddc-supply = <&reg_1v2_dsi>;
+ vddmipi-supply = <&reg_1v2_dsi>;
+ vddio-supply = <&reg_1v8_dsi>;
++ status = "disabled";
+
+ dsi_bridge_ports: ports {
+ #address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+index 7cfdf562b53bf..2de74428a8bde 100644
+--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
++++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+@@ -58,7 +58,7 @@
+
+ ramoops: ramoops@9ca00000 {
+ compatible = "ramoops";
+- reg = <0x00 0x9c700000 0x00 0x00100000>;
++ reg = <0x00 0x9ca00000 0x00 0x00100000>;
+ record-size = <0x8000>;
+ console-size = <0x8000>;
+ ftrace-size = <0x00>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+index cff283c75f8ec..99f2878de4c67 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+@@ -250,7 +250,7 @@
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c1_pins_default>;
+- clock-frequency = <400000>;
++ clock-frequency = <100000>;
+
+ exp1: gpio@22 {
+ compatible = "ti,tca6424";
+diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
+index 18dc2fb3d7b7b..c27404fa4418a 100644
+--- a/arch/arm64/include/asm/arm_pmuv3.h
++++ b/arch/arm64/include/asm/arm_pmuv3.h
+@@ -46,12 +46,12 @@ static inline u32 read_pmuver(void)
+ ID_AA64DFR0_EL1_PMUVer_SHIFT);
+ }
+
+-static inline void write_pmcr(u32 val)
++static inline void write_pmcr(u64 val)
+ {
+ write_sysreg(val, pmcr_el0);
+ }
+
+-static inline u32 read_pmcr(void)
++static inline u64 read_pmcr(void)
+ {
+ return read_sysreg(pmcr_el0);
+ }
+@@ -71,21 +71,6 @@ static inline u64 read_pmccntr(void)
+ return read_sysreg(pmccntr_el0);
+ }
+
+-static inline void write_pmxevcntr(u32 val)
+-{
+- write_sysreg(val, pmxevcntr_el0);
+-}
+-
+-static inline u32 read_pmxevcntr(void)
+-{
+- return read_sysreg(pmxevcntr_el0);
+-}
+-
+-static inline void write_pmxevtyper(u32 val)
+-{
+- write_sysreg(val, pmxevtyper_el0);
+-}
+-
+ static inline void write_pmcntenset(u32 val)
+ {
+ write_sysreg(val, pmcntenset_el0);
+@@ -106,7 +91,7 @@ static inline void write_pmintenclr(u32 val)
+ write_sysreg(val, pmintenclr_el1);
+ }
+
+-static inline void write_pmccfiltr(u32 val)
++static inline void write_pmccfiltr(u64 val)
+ {
+ write_sysreg(val, pmccfiltr_el0);
+ }
+@@ -126,12 +111,12 @@ static inline void write_pmuserenr(u32 val)
+ write_sysreg(val, pmuserenr_el0);
+ }
+
+-static inline u32 read_pmceid0(void)
++static inline u64 read_pmceid0(void)
+ {
+ return read_sysreg(pmceid0_el0);
+ }
+
+-static inline u32 read_pmceid1(void)
++static inline u64 read_pmceid1(void)
+ {
+ return read_sysreg(pmceid1_el0);
+ }
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 74d00feb62f03..7c7493cb571f9 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -86,7 +86,8 @@
+ #define ARM_CPU_PART_NEOVERSE_N2 0xD49
+ #define ARM_CPU_PART_CORTEX_A78C 0xD4B
+
+-#define APM_CPU_PART_POTENZA 0x000
++#define APM_CPU_PART_XGENE 0x000
++#define APM_CPU_VAR_POTENZA 0x00
+
+ #define CAVIUM_CPU_PART_THUNDERX 0x0A1
+ #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
+diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h
+index f4af547ef54ca..2e4d7da74fb87 100644
+--- a/arch/arm64/include/asm/setup.h
++++ b/arch/arm64/include/asm/setup.h
+@@ -21,9 +21,22 @@ static inline bool arch_parse_debug_rodata(char *arg)
+ extern bool rodata_enabled;
+ extern bool rodata_full;
+
+- if (arg && !strcmp(arg, "full")) {
++ if (!arg)
++ return false;
++
++ if (!strcmp(arg, "full")) {
++ rodata_enabled = rodata_full = true;
++ return true;
++ }
++
++ if (!strcmp(arg, "off")) {
++ rodata_enabled = rodata_full = false;
++ return true;
++ }
++
++ if (!strcmp(arg, "on")) {
+ rodata_enabled = true;
+- rodata_full = true;
++ rodata_full = false;
+ return true;
+ }
+
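A stand-alone sketch of the flag mapping the hunk above implements. The helper name and the out-parameters are illustrative only; the three string values and the two flags come from the patch itself:

    static bool parse_rodata_sketch(const char *arg, bool *enabled, bool *full)
    {
            if (!strcmp(arg, "full")) {     /* rodata=full */
                    *enabled = *full = true;
                    return true;
            }
            if (!strcmp(arg, "on")) {       /* rodata=on */
                    *enabled = true;
                    *full = false;
                    return true;
            }
            if (!strcmp(arg, "off")) {      /* rodata=off */
                    *enabled = *full = false;
                    return true;
            }
            return false;                   /* defer to the generic parser */
    }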
+diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
+index bd69a4e7cd605..79200f21e1239 100644
+--- a/arch/arm64/kernel/module-plts.c
++++ b/arch/arm64/kernel/module-plts.c
+@@ -167,9 +167,6 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
+ switch (ELF64_R_TYPE(rela[i].r_info)) {
+ case R_AARCH64_JUMP26:
+ case R_AARCH64_CALL26:
+- if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+- break;
+-
+ /*
+ * We only have to consider branch targets that resolve
+ * to symbols that are defined in a different section.
+@@ -269,9 +266,6 @@ static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
+ {
+ int i = 0, j = numrels - 1;
+
+- if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+- return 0;
+-
+ while (i < j) {
+ if (branch_rela_needs_plt(syms, &rela[i], dstidx))
+ i++;
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index 95f6945c44325..a1710e5fa72b6 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -874,7 +874,7 @@ u32 __attribute_const__ kvm_target_cpu(void)
+ break;
+ case ARM_CPU_IMP_APM:
+ switch (part_number) {
+- case APM_CPU_PART_POTENZA:
++ case APM_CPU_PART_XGENE:
+ return KVM_ARM_TARGET_XGENE_POTENZA;
+ }
+ break;
+diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
+index 8e2017ba5f1b1..924843f1f661b 100644
+--- a/arch/arm64/mm/pageattr.c
++++ b/arch/arm64/mm/pageattr.c
+@@ -29,8 +29,8 @@ bool can_set_direct_map(void)
+ *
+ * KFENCE pool requires page-granular mapping if initialized late.
+ */
+- return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
+- arm64_kfence_can_set_direct_map();
++ return rodata_full || debug_pagealloc_enabled() ||
++ arm64_kfence_can_set_direct_map();
+ }
+
+ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
+@@ -105,8 +105,7 @@ static int change_memory_common(unsigned long addr, int numpages,
+ * If we are manipulating read-only permissions, apply the same
+ * change to the linear mapping of the pages that back this VM area.
+ */
+- if (rodata_enabled &&
+- rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
++ if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
+ pgprot_val(clear_mask) == PTE_RDONLY)) {
+ for (i = 0; i < area->nr_pages; i++) {
+ __change_memory_common((u64)page_address(area->pages[i]),
+diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
+index b9f567e660166..ed5da02b1cf6f 100644
+--- a/arch/loongarch/include/asm/percpu.h
++++ b/arch/loongarch/include/asm/percpu.h
+@@ -32,7 +32,7 @@ static inline void set_my_cpu_offset(unsigned long off)
+ #define __my_cpu_offset __my_cpu_offset
+
+ #define PERCPU_OP(op, asm_op, c_op) \
+-static inline unsigned long __percpu_##op(void *ptr, \
++static __always_inline unsigned long __percpu_##op(void *ptr, \
+ unsigned long val, int size) \
+ { \
+ unsigned long ret; \
+@@ -63,7 +63,7 @@ PERCPU_OP(and, and, &)
+ PERCPU_OP(or, or, |)
+ #undef PERCPU_OP
+
+-static inline unsigned long __percpu_read(void *ptr, int size)
++static __always_inline unsigned long __percpu_read(void *ptr, int size)
+ {
+ unsigned long ret;
+
+@@ -100,7 +100,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
+ return ret;
+ }
+
+-static inline void __percpu_write(void *ptr, unsigned long val, int size)
++static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
+ {
+ switch (size) {
+ case 1:
+@@ -132,8 +132,8 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
+ }
+ }
+
+-static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+- int size)
++static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
++ int size)
+ {
+ switch (size) {
+ case 1:
+diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
+index 02042100e2671..7f830634dbe7d 100644
+--- a/arch/mips/mm/cache.c
++++ b/arch/mips/mm/cache.c
+@@ -117,7 +117,7 @@ void __flush_dcache_pages(struct page *page, unsigned int nr)
+ * get faulted into the tlb (and thus flushed) anyways.
+ */
+ for (i = 0; i < nr; i++) {
+- addr = (unsigned long)kmap_local_page(page + i);
++ addr = (unsigned long)kmap_local_page(nth_page(page, i));
+ flush_data_cache_page(addr);
+ kunmap_local((void *)addr);
+ }
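Context for the one-liner above: with CONFIG_SPARSEMEM and no vmemmap, the struct pages backing a large folio need not be virtually contiguous, so plain "page + i" arithmetic can step off the end of a memmap section. nth_page() avoids that by going through the pfn; its two definitions in include/linux/mm.h are essentially:

    /* CONFIG_SPARSEMEM without CONFIG_SPARSEMEM_VMEMMAP */
    #define nth_page(page, n)       pfn_to_page(page_to_pfn((page)) + (n))

    /* flat or vmemmap layouts, where the memmap is contiguous */
    #define nth_page(page, n)       ((page) + (n))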
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index a15ab147af2e0..68cbe666510a3 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -138,11 +138,11 @@ config ARCH_MMAP_RND_COMPAT_BITS_MIN
+ default 8
+
+ config ARCH_MMAP_RND_BITS_MAX
+- default 24 if 64BIT
+- default 17
++ default 18 if 64BIT
++ default 13
+
+ config ARCH_MMAP_RND_COMPAT_BITS_MAX
+- default 17
++ default 13
+
+ # unless you want to implement ACPI on PA-RISC ... ;-)
+ config PM
+diff --git a/arch/parisc/include/asm/alternative.h b/arch/parisc/include/asm/alternative.h
+index 1ed45fd085d3b..1eb488f25b838 100644
+--- a/arch/parisc/include/asm/alternative.h
++++ b/arch/parisc/include/asm/alternative.h
+@@ -34,7 +34,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
+
+ /* Alternative SMP implementation. */
+ #define ALTERNATIVE(cond, replacement) "!0:" \
+- ".section .altinstructions, \"aw\" !" \
++ ".section .altinstructions, \"a\" !" \
++ ".align 4 !" \
+ ".word (0b-4-.) !" \
+ ".hword 1, " __stringify(cond) " !" \
+ ".word " __stringify(replacement) " !" \
+@@ -44,7 +45,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
+
+ /* to replace one single instructions by a new instruction */
+ #define ALTERNATIVE(from, to, cond, replacement)\
+- .section .altinstructions, "aw" ! \
++ .section .altinstructions, "a" ! \
++ .align 4 ! \
+ .word (from - .) ! \
+ .hword (to - from)/4, cond ! \
+ .word replacement ! \
+@@ -52,7 +54,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
+
+ /* to replace multiple instructions by new code */
+ #define ALTERNATIVE_CODE(from, num_instructions, cond, new_instr_ptr)\
+- .section .altinstructions, "aw" ! \
++ .section .altinstructions, "a" ! \
++ .align 4 ! \
+ .word (from - .) ! \
+ .hword -num_instructions, cond ! \
+ .word (new_instr_ptr - .) ! \
+diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
+index 75677b526b2bb..74d17d7e759da 100644
+--- a/arch/parisc/include/asm/assembly.h
++++ b/arch/parisc/include/asm/assembly.h
+@@ -574,6 +574,7 @@
+ */
+ #define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr) \
+ .section __ex_table,"aw" ! \
++ .align 4 ! \
+ .word (fault_addr - .), (except_addr - .) ! \
+ .previous
+
+diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
+index 4b6d60b941247..b9cad0bb4461b 100644
+--- a/arch/parisc/include/asm/bug.h
++++ b/arch/parisc/include/asm/bug.h
+@@ -28,13 +28,15 @@
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"aw\"\n" \
++ "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.align %4\n" \
+ "2:\t" ASM_WORD_INSN "1b, %c0\n" \
+- "\t.short %c1, %c2\n" \
+- "\t.org 2b+%c3\n" \
++ "\t.short %1, %2\n" \
++ "\t.blockz %3-2*%4-2*2\n" \
+ "\t.popsection" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+- "i" (0), "i" (sizeof(struct bug_entry)) ); \
++ "i" (0), "i" (sizeof(struct bug_entry)), \
++ "i" (sizeof(long)) ); \
+ unreachable(); \
+ } while(0)
+
+@@ -51,27 +53,31 @@
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"aw\"\n" \
++ "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.align %4\n" \
+ "2:\t" ASM_WORD_INSN "1b, %c0\n" \
+- "\t.short %c1, %c2\n" \
+- "\t.org 2b+%c3\n" \
++ "\t.short %1, %2\n" \
++ "\t.blockz %3-2*%4-2*2\n" \
+ "\t.popsection" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (BUGFLAG_WARNING|(flags)), \
+- "i" (sizeof(struct bug_entry)) ); \
++ "i" (sizeof(struct bug_entry)), \
++ "i" (sizeof(long)) ); \
+ } while(0)
+ #else
+ #define __WARN_FLAGS(flags) \
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"aw\"\n" \
++ "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.align %2\n" \
+ "2:\t" ASM_WORD_INSN "1b\n" \
+- "\t.short %c0\n" \
+- "\t.org 2b+%c1\n" \
++ "\t.short %0\n" \
++ "\t.blockz %1-%2-2\n" \
+ "\t.popsection" \
+ : : "i" (BUGFLAG_WARNING|(flags)), \
+- "i" (sizeof(struct bug_entry)) ); \
++ "i" (sizeof(struct bug_entry)), \
++ "i" (sizeof(long)) ); \
+ } while(0)
+ #endif
+
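The .blockz operands above pad each record out to sizeof(struct bug_entry): on parisc the table stores absolute pointers, so a full entry is two words plus two shorts and the residue is sizeof(struct bug_entry) - 2*sizeof(long) - 2*2, exactly the %3-2*%4-2*2 expression. A sketch of the layout being padded, assuming the verbose, non-relative-pointer configuration of include/linux/bug.h:

    struct bug_entry {
            unsigned long   bug_addr;       /* address of the break insn */
            const char      *file;          /* __FILE__ */
            unsigned short  line;           /* __LINE__ */
            unsigned short  flags;          /* BUGFLAG_* */
    };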
+diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
+index 140eaa97bf215..2d73d3c3cd37f 100644
+--- a/arch/parisc/include/asm/elf.h
++++ b/arch/parisc/include/asm/elf.h
+@@ -349,15 +349,7 @@ struct pt_regs; /* forward declaration... */
+
+ #define ELF_HWCAP 0
+
+-/* Masks for stack and mmap randomization */
+-#define BRK_RND_MASK (is_32bit_task() ? 0x07ffUL : 0x3ffffUL)
+-#define MMAP_RND_MASK (is_32bit_task() ? 0x1fffUL : 0x3ffffUL)
+-#define STACK_RND_MASK MMAP_RND_MASK
+-
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *);
+-#define arch_randomize_brk arch_randomize_brk
+-
++#define STACK_RND_MASK 0x7ff /* 8MB of VA */
+
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ struct linux_binprm;
+diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h
+index af2a598bc0f81..94428798b6aa6 100644
+--- a/arch/parisc/include/asm/jump_label.h
++++ b/arch/parisc/include/asm/jump_label.h
+@@ -15,10 +15,12 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
++ ".align %1\n\t"
+ ".word 1b - ., %l[l_yes] - .\n\t"
+ __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
+ ".popsection\n\t"
+- : : "i" (&((char *)key)[branch]) : : l_yes);
++ : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
++ : : l_yes);
+
+ return false;
+ l_yes:
+@@ -30,10 +32,12 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
+ asm_volatile_goto("1:\n\t"
+ "b,n %l[l_yes]\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
++ ".align %1\n\t"
+ ".word 1b - ., %l[l_yes] - .\n\t"
+ __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
+ ".popsection\n\t"
+- : : "i" (&((char *)key)[branch]) : : l_yes);
++ : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
++ : : l_yes);
+
+ return false;
+ l_yes:
+diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
+index ee9e071859b2f..47ebc4c91eaff 100644
+--- a/arch/parisc/include/asm/ldcw.h
++++ b/arch/parisc/include/asm/ldcw.h
+@@ -55,7 +55,7 @@
+ })
+
+ #ifdef CONFIG_SMP
+-# define __lock_aligned __section(".data..lock_aligned")
++# define __lock_aligned __section(".data..lock_aligned") __aligned(16)
+ #endif
+
+ #endif /* __PARISC_LDCW_H */
+diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
+index ff6cbdb6903bc..ece4b3046515c 100644
+--- a/arch/parisc/include/asm/processor.h
++++ b/arch/parisc/include/asm/processor.h
+@@ -47,6 +47,8 @@
+
+ #ifndef __ASSEMBLY__
+
++struct rlimit;
++unsigned long mmap_upper_limit(struct rlimit *rlim_stack);
+ unsigned long calc_max_stack_size(unsigned long stack_max);
+
+ /*
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 2bf660eabe421..4165079898d9e 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -41,6 +41,7 @@ struct exception_table_entry {
+
+ #define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
+ ".section __ex_table,\"aw\"\n" \
++ ".align 4\n" \
+ ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
+ ".previous\n"
+
+diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
+index 87245c584784e..8d94739d75c67 100644
+--- a/arch/parisc/include/uapi/asm/errno.h
++++ b/arch/parisc/include/uapi/asm/errno.h
+@@ -75,7 +75,6 @@
+
+ /* We now return you to your regularly scheduled HPUX. */
+
+-#define ENOSYM 215 /* symbol does not exist in executable */
+ #define ENOTSOCK 216 /* Socket operation on non-socket */
+ #define EDESTADDRREQ 217 /* Destination address required */
+ #define EMSGSIZE 218 /* Message too long */
+@@ -101,7 +100,6 @@
+ #define ETIMEDOUT 238 /* Connection timed out */
+ #define ECONNREFUSED 239 /* Connection refused */
+ #define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
+-#define EREMOTERELEASE 240 /* Remote peer released connection */
+ #define EHOSTDOWN 241 /* Host is down */
+ #define EHOSTUNREACH 242 /* No route to host */
+
+diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h
+index 7a90070136e82..8e38a86996fc6 100644
+--- a/arch/parisc/include/uapi/asm/pdc.h
++++ b/arch/parisc/include/uapi/asm/pdc.h
+@@ -472,6 +472,7 @@ struct pdc_model { /* for PDC_MODEL */
+ unsigned long arch_rev;
+ unsigned long pot_key;
+ unsigned long curr_key;
++ unsigned long width; /* default of PSW_W bit (1=enabled) */
+ };
+
+ struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index ae03b8679696e..ab23e61a6f016 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -36,6 +36,24 @@
+ .level 2.0
+ #endif
+
++/*
++ * We need seven instructions after a TLB insert for it to take effect.
++ * The PA8800/PA8900 processors are an exception and need 12 instructions.
++ * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one.
++ */
++#ifdef CONFIG_64BIT
++#define NUM_PIPELINE_INSNS 12
++#else
++#define NUM_PIPELINE_INSNS 7
++#endif
++
++ /* Insert num nops */
++ .macro insert_nops num
++ .rept \num
++ nop
++ .endr
++ .endm
++
+ /* Get aligned page_table_lock address for this mm from cr28/tr4 */
+ .macro get_ptl reg
+ mfctl %cr28,\reg
+@@ -415,24 +433,20 @@
+ 3:
+ .endm
+
+- /* Release page_table_lock without reloading lock address.
+- We use an ordered store to ensure all prior accesses are
+- performed prior to releasing the lock. */
+- .macro ptl_unlock0 spc,tmp,tmp2
++ /* Release page_table_lock if for user space. We use an ordered
++ store to ensure all prior accesses are performed prior to
++ releasing the lock. Note stw may not be executed, so we
++ provide one extra nop when CONFIG_TLB_PTLOCK is defined. */
++ .macro ptl_unlock spc,tmp,tmp2
+ #ifdef CONFIG_TLB_PTLOCK
+-98: ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
++98: get_ptl \tmp
++ ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
+ or,COND(=) %r0,\spc,%r0
+ stw,ma \tmp2,0(\tmp)
+ 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+-#endif
+- .endm
+-
+- /* Release page_table_lock. */
+- .macro ptl_unlock1 spc,tmp,tmp2
+-#ifdef CONFIG_TLB_PTLOCK
+-98: get_ptl \tmp
+- ptl_unlock0 \spc,\tmp,\tmp2
+-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
++ insert_nops NUM_PIPELINE_INSNS - 4
++#else
++ insert_nops NUM_PIPELINE_INSNS - 1
+ #endif
+ .endm
+
+@@ -461,13 +475,13 @@
+ * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
+ #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
+ #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
++ #define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
+
+ /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+ .macro convert_for_tlb_insert20 pte,tmp
+ #ifdef CONFIG_HUGETLB_PAGE
+ copy \pte,\tmp
+- extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+- 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
++ extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
+
+ depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
+ (63-58)+PAGE_ADD_SHIFT,\pte
+@@ -475,8 +489,7 @@
+ depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
+ (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
+ #else /* Huge pages disabled */
+- extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+- 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
++ extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte
+ depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
+ (63-58)+PAGE_ADD_SHIFT,\pte
+ #endif
+@@ -1124,7 +1137,7 @@ dtlb_miss_20w:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1133,6 +1146,7 @@ dtlb_check_alias_20w:
+
+ idtlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1150,7 +1164,7 @@ nadtlb_miss_20w:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1159,6 +1173,7 @@ nadtlb_check_alias_20w:
+
+ idtlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1184,7 +1199,7 @@ dtlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1194,6 +1209,7 @@ dtlb_check_alias_11:
+ idtlba pte,(va)
+ idtlbp prot,(va)
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1217,7 +1233,7 @@ nadtlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1227,6 +1243,7 @@ nadtlb_check_alias_11:
+ idtlba pte,(va)
+ idtlbp prot,(va)
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1246,7 +1263,7 @@ dtlb_miss_20:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1255,6 +1272,7 @@ dtlb_check_alias_20:
+
+ idtlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1274,7 +1292,7 @@ nadtlb_miss_20:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1283,6 +1301,7 @@ nadtlb_check_alias_20:
+
+ idtlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1319,7 +1338,7 @@ itlb_miss_20w:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1343,7 +1362,7 @@ naitlb_miss_20w:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1352,6 +1371,7 @@ naitlb_check_alias_20w:
+
+ iitlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1377,7 +1397,7 @@ itlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1401,7 +1421,7 @@ naitlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1411,6 +1431,7 @@ naitlb_check_alias_11:
+ iitlba pte,(%sr0, va)
+ iitlbp prot,(%sr0, va)
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1431,7 +1452,7 @@ itlb_miss_20:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1451,7 +1472,7 @@ naitlb_miss_20:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1460,6 +1481,7 @@ naitlb_check_alias_20:
+
+ iitlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1481,7 +1503,7 @@ dbit_trap_20w:
+
+ idtlbt pte,prot
+
+- ptl_unlock0 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+ #else
+@@ -1507,7 +1529,7 @@ dbit_trap_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock0 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1527,7 +1549,7 @@ dbit_trap_20:
+
+ idtlbt pte,prot
+
+- ptl_unlock0 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+ #endif
+diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
+index a171bf3c6b318..96e0264ac9616 100644
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -70,9 +70,8 @@ $bss_loop:
+ stw,ma %arg2,4(%r1)
+ stw,ma %arg3,4(%r1)
+
+-#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
+- /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
+- * and halt kernel if we detect a PA1.x CPU. */
++#if defined(CONFIG_PA20)
++ /* check for 64-bit capable CPU as required by current kernel */
+ ldi 32,%r10
+ mtctl %r10,%cr11
+ .level 2.0
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index ab896eff7a1de..98af719d5f85b 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -77,7 +77,7 @@ unsigned long calc_max_stack_size(unsigned long stack_max)
+ * indicating that "current" should be used instead of a passed-in
+ * value from the exec bprm as done with arch_pick_mmap_layout().
+ */
+-static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
++unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
+ {
+ unsigned long stack_base;
+
+diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
+index 58694d1989c23..548051b0b4aff 100644
+--- a/arch/parisc/kernel/vmlinux.lds.S
++++ b/arch/parisc/kernel/vmlinux.lds.S
+@@ -130,6 +130,7 @@ SECTIONS
+ RO_DATA(8)
+
+ /* unwind info */
++ . = ALIGN(4);
+ .PARISC.unwind : {
+ __start___unwind = .;
+ *(.PARISC.unwind)
+diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
+index 6fe46e7545566..0b4e5f8ce3e8a 100644
+--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
++++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
+@@ -69,9 +69,6 @@
+
+ #define _PTE_NONE_MASK 0
+
+-/* Until my rework is finished, 40x still needs atomic PTE updates */
+-#define PTE_ATOMIC_UPDATES 1
+-
+ #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
+ #define _PAGE_BASE (_PAGE_BASE_NC)
+
+diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
+index 6a9acfb690c9f..2f8f3f93cbb67 100644
+--- a/arch/powerpc/kernel/fpu.S
++++ b/arch/powerpc/kernel/fpu.S
+@@ -23,6 +23,15 @@
+ #include <asm/feature-fixups.h>
+
+ #ifdef CONFIG_VSX
++#define __REST_1FPVSR(n,c,base) \
++BEGIN_FTR_SECTION \
++ b 2f; \
++END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
++ REST_FPR(n,base); \
++ b 3f; \
++2: REST_VSR(n,c,base); \
++3:
++
+ #define __REST_32FPVSRS(n,c,base) \
+ BEGIN_FTR_SECTION \
+ b 2f; \
+@@ -41,9 +50,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
+ 2: SAVE_32VSRS(n,c,base); \
+ 3:
+ #else
++#define __REST_1FPVSR(n,b,base) REST_FPR(n, base)
+ #define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
+ #define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
+ #endif
++#define REST_1FPVSR(n,c,base) __REST_1FPVSR(n,__REG_##c,__REG_##base)
+ #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
+ #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
+
+@@ -67,6 +78,7 @@ _GLOBAL(store_fp_state)
+ SAVE_32FPVSRS(0, R4, R3)
+ mffs fr0
+ stfd fr0,FPSTATE_FPSCR(r3)
++ REST_1FPVSR(0, R4, R3)
+ blr
+ EXPORT_SYMBOL(store_fp_state)
+
+@@ -138,4 +150,5 @@ _GLOBAL(save_fpu)
+ 2: SAVE_32FPVSRS(0, R4, R6)
+ mffs fr0
+ stfd fr0,FPSTATE_FPSCR(r6)
++ REST_1FPVSR(0, R4, R6)
+ blr
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index b68898ac07e19..9452a54d356c9 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1198,11 +1198,11 @@ void kvmppc_save_user_regs(void)
+
+ usermsr = current->thread.regs->msr;
+
++ /* Caller has enabled FP/VEC/VSX/TM in MSR */
+ if (usermsr & MSR_FP)
+- save_fpu(current);
+-
++ __giveup_fpu(current);
+ if (usermsr & MSR_VEC)
+- save_altivec(current);
++ __giveup_altivec(current);
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (usermsr & MSR_TM) {
+@@ -2258,6 +2258,22 @@ unsigned long __get_wchan(struct task_struct *p)
+ return ret;
+ }
+
++static bool empty_user_regs(struct pt_regs *regs, struct task_struct *tsk)
++{
++ unsigned long stack_page;
++
++ // A non-empty pt_regs should never have a zero MSR or TRAP value.
++ if (regs->msr || regs->trap)
++ return false;
++
++ // Check it sits at the very base of the stack
++ stack_page = (unsigned long)task_stack_page(tsk);
++ if ((unsigned long)(regs + 1) != stack_page + THREAD_SIZE)
++ return false;
++
++ return true;
++}
++
+ static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
+
+ void __no_sanitize_address show_stack(struct task_struct *tsk,
+@@ -2322,9 +2338,13 @@ void __no_sanitize_address show_stack(struct task_struct *tsk,
+ lr = regs->link;
+ printk("%s--- interrupt: %lx at %pS\n",
+ loglvl, regs->trap, (void *)regs->nip);
+- __show_regs(regs);
+- printk("%s--- interrupt: %lx\n",
+- loglvl, regs->trap);
++
++ // Detect the case of an empty pt_regs at the very base
++ // of the stack and suppress showing it in full.
++ if (!empty_user_regs(regs, tsk)) {
++ __show_regs(regs);
++ printk("%s--- interrupt: %lx\n", loglvl, regs->trap);
++ }
+
+ firstframe = 1;
+ }
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 64ff37721fd06..fe3f720c9cd61 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -1164,6 +1164,7 @@ void emulate_single_step(struct pt_regs *regs)
+ __single_step_exception(regs);
+ }
+
++#ifdef CONFIG_PPC_FPU_REGS
+ static inline int __parse_fpscr(unsigned long fpscr)
+ {
+ int ret = FPE_FLTUNK;
+@@ -1190,6 +1191,7 @@ static inline int __parse_fpscr(unsigned long fpscr)
+
+ return ret;
+ }
++#endif
+
+ static void parse_fpe(struct pt_regs *regs)
+ {
+diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
+index 4094e4c4c77a7..80b3f6e476b66 100644
+--- a/arch/powerpc/kernel/vector.S
++++ b/arch/powerpc/kernel/vector.S
+@@ -33,6 +33,7 @@ _GLOBAL(store_vr_state)
+ mfvscr v0
+ li r4, VRSTATE_VSCR
+ stvx v0, r4, r3
++ lvx v0, 0, r3
+ blr
+ EXPORT_SYMBOL(store_vr_state)
+
+@@ -109,6 +110,7 @@ _GLOBAL(save_altivec)
+ mfvscr v0
+ li r4,VRSTATE_VSCR
+ stvx v0,r4,r7
++ lvx v0,0,r7
+ blr
+
+ #ifdef CONFIG_VSX
+diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
+index de64c79629912..005269ac3244c 100644
+--- a/arch/powerpc/kexec/core.c
++++ b/arch/powerpc/kexec/core.c
+@@ -74,6 +74,9 @@ void arch_crash_save_vmcoreinfo(void)
+ VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
+ VMCOREINFO_OFFSET(mmu_psize_def, shift);
+ #endif
++ VMCOREINFO_SYMBOL(cur_cpu_spec);
++ VMCOREINFO_OFFSET(cpu_spec, mmu_features);
++ vmcoreinfo_append_str("NUMBER(RADIX_MMU)=%d\n", early_radix_enabled());
+ vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+ }
+
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 8c1f7def596e4..10b946e9c6e75 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -1371,8 +1371,7 @@ static void power_pmu_disable(struct pmu *pmu)
+ /*
+ * Disable instruction sampling if it was enabled
+ */
+- if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE)
+- val &= ~MMCRA_SAMPLE_ENABLE;
++ val &= ~MMCRA_SAMPLE_ENABLE;
+
+ /* Disable BHRB via mmcra (BHRBRD) for p10 */
+ if (ppmu->flags & PPMU_ARCH_31)
+@@ -1383,7 +1382,7 @@ static void power_pmu_disable(struct pmu *pmu)
+ * instruction sampling or BHRB.
+ */
+ if (val != mmcra) {
+- mtspr(SPRN_MMCRA, mmcra);
++ mtspr(SPRN_MMCRA, val);
+ mb();
+ isync();
+ }
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index 9d229ef7f86ef..ada817c49b722 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -51,7 +51,7 @@ static int trace_imc_mem_size;
+ * core and trace-imc
+ */
+ static struct imc_pmu_ref imc_global_refc = {
+- .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
++ .lock = __SPIN_LOCK_UNLOCKED(imc_global_refc.lock),
+ .id = 0,
+ .refc = 0,
+ };
+diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
+index 77ea9335fd049..f381b177ea06a 100644
+--- a/arch/powerpc/platforms/book3s/vas-api.c
++++ b/arch/powerpc/platforms/book3s/vas-api.c
+@@ -4,6 +4,8 @@
+ * Copyright (C) 2019 Haren Myneni, IBM Corp
+ */
+
++#define pr_fmt(fmt) "vas-api: " fmt
++
+ #include <linux/kernel.h>
+ #include <linux/device.h>
+ #include <linux/cdev.h>
+@@ -78,7 +80,7 @@ int get_vas_user_win_ref(struct vas_user_win_ref *task_ref)
+ task_ref->mm = get_task_mm(current);
+ if (!task_ref->mm) {
+ put_pid(task_ref->pid);
+- pr_err("VAS: pid(%d): mm_struct is not found\n",
++ pr_err("pid(%d): mm_struct is not found\n",
+ current->pid);
+ return -EPERM;
+ }
+@@ -235,8 +237,7 @@ void vas_update_csb(struct coprocessor_request_block *crb,
+ rc = kill_pid_info(SIGSEGV, &info, pid);
+ rcu_read_unlock();
+
+- pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
+- pid_vnr(pid), rc);
++ pr_devel("pid %d kill_proc_info() rc %d\n", pid_vnr(pid), rc);
+ }
+
+ void vas_dump_crb(struct coprocessor_request_block *crb)
+@@ -294,7 +295,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+
+ rc = copy_from_user(&uattr, uptr, sizeof(uattr));
+ if (rc) {
+- pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
++ pr_err("copy_from_user() returns %d\n", rc);
+ return -EFAULT;
+ }
+
+@@ -311,7 +312,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+ txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags,
+ cp_inst->coproc->cop_type);
+ if (IS_ERR(txwin)) {
+- pr_err("%s() VAS window open failed, %ld\n", __func__,
++ pr_err_ratelimited("VAS window open failed rc=%ld\n",
+ PTR_ERR(txwin));
+ return PTR_ERR(txwin);
+ }
+@@ -405,8 +406,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
+ * window is not opened. Shouldn't expect this error.
+ */
+ if (!cp_inst || !cp_inst->txwin) {
+- pr_err("%s(): Unexpected fault on paste address with TX window closed\n",
+- __func__);
++ pr_err("Unexpected fault on paste address with TX window closed\n");
+ return VM_FAULT_SIGBUS;
+ }
+
+@@ -421,8 +421,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
+ * issue NX request.
+ */
+ if (txwin->task_ref.vma != vmf->vma) {
+- pr_err("%s(): No previous mapping with paste address\n",
+- __func__);
++ pr_err("No previous mapping with paste address\n");
+ return VM_FAULT_SIGBUS;
+ }
+
+@@ -481,19 +480,19 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ txwin = cp_inst->txwin;
+
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+- pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
++ pr_debug("size 0x%zx, PAGE_SIZE 0x%zx\n",
+ (vma->vm_end - vma->vm_start), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /* Ensure instance has an open send window */
+ if (!txwin) {
+- pr_err("%s(): No send window open?\n", __func__);
++ pr_err("No send window open?\n");
+ return -EINVAL;
+ }
+
+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
+- pr_err("%s(): VAS API is not registered\n", __func__);
++ pr_err("VAS API is not registered\n");
+ return -EACCES;
+ }
+
+@@ -510,14 +509,14 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ */
+ mutex_lock(&txwin->task_ref.mmap_mutex);
+ if (txwin->status != VAS_WIN_ACTIVE) {
+- pr_err("%s(): Window is not active\n", __func__);
++ pr_err("Window is not active\n");
+ rc = -EACCES;
+ goto out;
+ }
+
+ paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
+ if (!paste_addr) {
+- pr_err("%s(): Window paste address failed\n", __func__);
++ pr_err("Window paste address failed\n");
+ rc = -EINVAL;
+ goto out;
+ }
+@@ -533,8 +532,8 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, prot);
+
+- pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
+- paste_addr, vma->vm_start, rc);
++ pr_devel("paste addr %llx at %lx, rc %d\n", paste_addr,
++ vma->vm_start, rc);
+
+ txwin->task_ref.vma = vma;
+ vma->vm_ops = &vas_vm_ops;
+@@ -609,8 +608,7 @@ int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
+ goto err;
+ }
+
+- pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
+- MINOR(devno));
++ pr_devel("Added dev [%d,%d]\n", MAJOR(devno), MINOR(devno));
+
+ return 0;
+
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 16d93b580f61f..496e16c588aaa 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -914,7 +914,8 @@ static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_
+ return 0;
+ }
+
+-static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift)
++static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift,
++ bool *direct_mapping)
+ {
+ struct dma_win *window;
+ const struct dynamic_dma_window_prop *dma64;
+@@ -927,6 +928,7 @@ static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *windo
+ dma64 = window->prop;
+ *dma_addr = be64_to_cpu(dma64->dma_base);
+ *window_shift = be32_to_cpu(dma64->window_shift);
++ *direct_mapping = window->direct;
+ found = true;
+ break;
+ }
+@@ -1270,10 +1272,8 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+
+ mutex_lock(&dma_win_init_mutex);
+
+- if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) {
+- direct_mapping = (len >= max_ram_len);
++ if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len, &direct_mapping))
+ goto out_unlock;
+- }
+
+ /*
+ * If we already went through this for a previous function of
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index f2cb62148f36f..d4d6de0628b05 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -526,8 +526,10 @@ static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
+
+ if (cmd) {
+ rc = init_cpu_associativity();
+- if (rc)
++ if (rc) {
++ destroy_cpu_associativity();
+ goto out;
++ }
+
+ for_each_possible_cpu(cpu) {
+ disp = per_cpu_ptr(&vcpu_disp_data, cpu);
+diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
+index e25ac52acf507..b1f25bac280b4 100644
+--- a/arch/powerpc/platforms/pseries/vas.c
++++ b/arch/powerpc/platforms/pseries/vas.c
+@@ -341,7 +341,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+
+ if (atomic_inc_return(&cop_feat_caps->nr_used_credits) >
+ atomic_read(&cop_feat_caps->nr_total_credits)) {
+- pr_err("Credits are not available to allocate window\n");
++ pr_err_ratelimited("Credits are not available to allocate window\n");
+ rc = -EINVAL;
+ goto out;
+ }
+@@ -424,7 +424,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+
+ put_vas_user_win_ref(&txwin->vas_win.task_ref);
+ rc = -EBUSY;
+- pr_err("No credit is available to allocate window\n");
++ pr_err_ratelimited("No credit is available to allocate window\n");
+
+ out_free:
+ /*
+diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
+index 9f0af4d795d88..f1c0fa6ece21d 100644
+--- a/arch/powerpc/sysdev/xive/native.c
++++ b/arch/powerpc/sysdev/xive/native.c
+@@ -802,7 +802,7 @@ int xive_native_get_queue_info(u32 vp_id, u32 prio,
+ if (out_qpage)
+ *out_qpage = be64_to_cpu(qpage);
+ if (out_qsize)
+- *out_qsize = be32_to_cpu(qsize);
++ *out_qsize = be64_to_cpu(qsize);
+ if (out_qeoi_page)
+ *out_qeoi_page = be64_to_cpu(qeoi_page);
+ if (out_escalate_irq)
+diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
+index 22b13947bd131..8e7fc0edf21d3 100644
+--- a/arch/riscv/boot/Makefile
++++ b/arch/riscv/boot/Makefile
+@@ -17,6 +17,7 @@
+ KCOV_INSTRUMENT := n
+
+ OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
++OBJCOPYFLAGS_loader.bin :=-O binary
+ OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+ targets := Image Image.* loader loader.o loader.lds loader.bin
+diff --git a/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi b/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
+index 8275630af977d..b8684312593e5 100644
+--- a/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
++++ b/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
+@@ -30,7 +30,6 @@
+ cpu0_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+- #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
+index 61ba8ed43d8fe..36b955c762ba0 100644
+--- a/arch/riscv/include/asm/asm-prototypes.h
++++ b/arch/riscv/include/asm/asm-prototypes.h
+@@ -25,7 +25,6 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
+ DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
+ DECLARE_DO_ERROR_INFO(do_trap_break);
+
+-asmlinkage unsigned long get_overflow_stack(void);
+ asmlinkage void handle_bad_stack(struct pt_regs *regs);
+ asmlinkage void do_page_fault(struct pt_regs *regs);
+ asmlinkage void do_irq(struct pt_regs *regs);
+diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
+index 114bbadaef41e..bfb4c26f113c4 100644
+--- a/arch/riscv/include/asm/asm.h
++++ b/arch/riscv/include/asm/asm.h
+@@ -82,6 +82,28 @@
+ .endr
+ .endm
+
++#ifdef CONFIG_SMP
++#ifdef CONFIG_32BIT
++#define PER_CPU_OFFSET_SHIFT 2
++#else
++#define PER_CPU_OFFSET_SHIFT 3
++#endif
++
++.macro asm_per_cpu dst sym tmp
++ REG_L \tmp, TASK_TI_CPU_NUM(tp)
++ slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
++ la \dst, __per_cpu_offset
++ add \dst, \dst, \tmp
++ REG_L \tmp, 0(\dst)
++ la \dst, \sym
++ add \dst, \dst, \tmp
++.endm
++#else /* CONFIG_SMP */
++.macro asm_per_cpu dst sym tmp
++ la \dst, \sym
++.endm
++#endif /* CONFIG_SMP */
++
+ /* save all GPs except x1 ~ x5 */
+ .macro save_from_x6_to_x31
+ REG_S x6, PT_T1(sp)
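The new asm_per_cpu macro gives assembly code the usual per-CPU address computation while needing only a destination and one scratch register. An illustrative C rendering (the helper name is made up; __per_cpu_offset is the real kernel array the macro indexes, and the PER_CPU_OFFSET_SHIFT above is just sizeof(unsigned long) expressed as a shift):

    static inline void *per_cpu_sketch(void *sym, unsigned int cpu)
    {
            extern unsigned long __per_cpu_offset[];

            /* link-time address of the symbol plus this CPU's offset */
            return (char *)sym + __per_cpu_offset[cpu];
    }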
+diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
+index 78936f4ff5133..7cad513538d8d 100644
+--- a/arch/riscv/include/asm/hwprobe.h
++++ b/arch/riscv/include/asm/hwprobe.h
+@@ -10,4 +10,9 @@
+
+ #define RISCV_HWPROBE_MAX_KEY 5
+
++static inline bool riscv_hwprobe_key_is_valid(__s64 key)
++{
++ return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
++}
++
+ #endif
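The helper checks both bounds because hwprobe keys are signed 64-bit values chosen by userspace: an upper-bound check alone would let key = -1 index in front of the per-key value array, which is why the vDSO hunk for arch/riscv/kernel/vdso/hwprobe.c below switches to this helper. A minimal sketch of a lookup guarded by it; lookup() and the values array are illustrative:

    static __s64 lookup(const __u64 *values, __s64 key)
    {
            if (!riscv_hwprobe_key_is_valid(key))
                    return -1;              /* unknown key */
            return (__s64)values[key];      /* safe: 0 <= key <= MAX_KEY */
    }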
+diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
+index 5488ecc337b63..57e887bfa34cb 100644
+--- a/arch/riscv/include/asm/page.h
++++ b/arch/riscv/include/asm/page.h
+@@ -33,8 +33,8 @@
+ #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+ #endif
+ /*
+- * By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so
+- * define the PAGE_OFFSET value for SV39.
++ * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
++ * define the PAGE_OFFSET value for SV48 and SV39.
+ */
+ #define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
+ #define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL)
+diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
+index 1833beb00489c..d18ce0113ca1f 100644
+--- a/arch/riscv/include/asm/thread_info.h
++++ b/arch/riscv/include/asm/thread_info.h
+@@ -34,9 +34,6 @@
+
+ #ifndef __ASSEMBLY__
+
+-extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
+-extern unsigned long spin_shadow_stack;
+-
+ #include <asm/processor.h>
+ #include <asm/csr.h>
+
+diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
+index 14f5d27783b85..96b65a5396dfc 100644
+--- a/arch/riscv/include/asm/vdso/processor.h
++++ b/arch/riscv/include/asm/vdso/processor.h
+@@ -14,7 +14,7 @@ static inline void cpu_relax(void)
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+ #endif
+
+-#ifdef __riscv_zihintpause
++#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
+ /*
+ * Reduce instruction retirement.
+ * This assumes the PC changes.
+diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
+index d6a75aac1d27a..9f535d5de33f9 100644
+--- a/arch/riscv/kernel/asm-offsets.c
++++ b/arch/riscv/kernel/asm-offsets.c
+@@ -39,6 +39,7 @@ void asm_offsets(void)
+ OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
+ OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
+
++ OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
+ OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
+ OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
+ OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);
+diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
+index c17dacb1141cb..157ace8b262c2 100644
+--- a/arch/riscv/kernel/cpu.c
++++ b/arch/riscv/kernel/cpu.c
+@@ -125,13 +125,14 @@ old_interface:
+ */
+ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
+ {
+- int rc;
+-
+ for (; node; node = node->parent) {
+ if (of_device_is_compatible(node, "riscv")) {
+- rc = riscv_of_processor_hartid(node, hartid);
+- if (!rc)
+- return 0;
++ *hartid = (unsigned long)of_get_cpu_hwid(node, 0);
++ if (*hartid == ~0UL) {
++ pr_warn("Found CPU without hart ID\n");
++ return -ENODEV;
++ }
++ return 0;
+ }
+ }
+
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index 143a2bb3e6976..278d01d2911fd 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -10,9 +10,13 @@
+ #include <asm/asm.h>
+ #include <asm/csr.h>
+ #include <asm/unistd.h>
++#include <asm/page.h>
+ #include <asm/thread_info.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/errata_list.h>
++#include <linux/sizes.h>
++
++ .section .irqentry.text, "ax"
+
+ SYM_CODE_START(handle_exception)
+ /*
+@@ -170,67 +174,15 @@ SYM_CODE_END(ret_from_exception)
+
+ #ifdef CONFIG_VMAP_STACK
+ SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
+- /*
+- * Takes the pseudo-spinlock for the shadow stack, in case multiple
+- * harts are concurrently overflowing their kernel stacks. We could
+- * store any value here, but since we're overflowing the kernel stack
+- * already we only have SP to use as a scratch register. So we just
+- * swap in the address of the spinlock, as that's definitely non-zero.
+- *
+- * Pairs with a store_release in handle_bad_stack().
+- */
+-1: la sp, spin_shadow_stack
+- REG_AMOSWAP_AQ sp, sp, (sp)
+- bnez sp, 1b
+-
+- la sp, shadow_stack
+- addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
+-
+- //save caller register to shadow stack
+- addi sp, sp, -(PT_SIZE_ON_STACK)
+- REG_S x1, PT_RA(sp)
+- REG_S x5, PT_T0(sp)
+- REG_S x6, PT_T1(sp)
+- REG_S x7, PT_T2(sp)
+- REG_S x10, PT_A0(sp)
+- REG_S x11, PT_A1(sp)
+- REG_S x12, PT_A2(sp)
+- REG_S x13, PT_A3(sp)
+- REG_S x14, PT_A4(sp)
+- REG_S x15, PT_A5(sp)
+- REG_S x16, PT_A6(sp)
+- REG_S x17, PT_A7(sp)
+- REG_S x28, PT_T3(sp)
+- REG_S x29, PT_T4(sp)
+- REG_S x30, PT_T5(sp)
+- REG_S x31, PT_T6(sp)
+-
+- la ra, restore_caller_reg
+- tail get_overflow_stack
+-
+-restore_caller_reg:
+- //save per-cpu overflow stack
+- REG_S a0, -8(sp)
+- //restore caller register from shadow_stack
+- REG_L x1, PT_RA(sp)
+- REG_L x5, PT_T0(sp)
+- REG_L x6, PT_T1(sp)
+- REG_L x7, PT_T2(sp)
+- REG_L x10, PT_A0(sp)
+- REG_L x11, PT_A1(sp)
+- REG_L x12, PT_A2(sp)
+- REG_L x13, PT_A3(sp)
+- REG_L x14, PT_A4(sp)
+- REG_L x15, PT_A5(sp)
+- REG_L x16, PT_A6(sp)
+- REG_L x17, PT_A7(sp)
+- REG_L x28, PT_T3(sp)
+- REG_L x29, PT_T4(sp)
+- REG_L x30, PT_T5(sp)
+- REG_L x31, PT_T6(sp)
++ /* we reach here from kernel context, sscratch must be 0 */
++ csrrw x31, CSR_SCRATCH, x31
++ asm_per_cpu sp, overflow_stack, x31
++ li x31, OVERFLOW_STACK_SIZE
++ add sp, sp, x31
++ /* zero out x31 again and restore x31 */
++ xor x31, x31, x31
++ csrrw x31, CSR_SCRATCH, x31
+
+- //load per-cpu overflow stack
+- REG_L sp, -8(sp)
+ addi sp, sp, -(PT_SIZE_ON_STACK)
+
+ //save context to overflow stack
+diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
+index d3099d67816d0..6c166029079c4 100644
+--- a/arch/riscv/kernel/probes/simulate-insn.c
++++ b/arch/riscv/kernel/probes/simulate-insn.c
+@@ -24,7 +24,7 @@ static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index,
+ unsigned long val)
+ {
+ if (index == 0)
+- return false;
++ return true;
+ else if (index <= 31)
+ *((unsigned long *)regs + index) = val;
+ else
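Returning true for index 0 reflects the architecture: x0 is hard-wired to zero, so a simulated write to it is a legal no-op rather than a failure, and only indices above 31 name a nonexistent register. A sketch of the intended semantics (the helper name is made up):

    static bool sim_set_gpr(unsigned long *gprs, u32 index, unsigned long val)
    {
            if (index > 31)
                    return false;           /* no such register */
            if (index != 0)
                    gprs[index] = val;      /* x0 always reads as zero */
            return true;
    }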
+diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c
+index 194f166b2cc40..4b3dc8beaf77d 100644
+--- a/arch/riscv/kernel/probes/uprobes.c
++++ b/arch/riscv/kernel/probes/uprobes.c
+@@ -3,6 +3,7 @@
+ #include <linux/highmem.h>
+ #include <linux/ptrace.h>
+ #include <linux/uprobes.h>
++#include <asm/insn.h>
+
+ #include "decode-insn.h"
+
+@@ -17,6 +18,11 @@ bool is_swbp_insn(uprobe_opcode_t *insn)
+ #endif
+ }
+
++bool is_trap_insn(uprobe_opcode_t *insn)
++{
++ return riscv_insn_is_ebreak(*insn) || riscv_insn_is_c_ebreak(*insn);
++}
++
+ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+ {
+ return instruction_pointer(regs);
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index fae8f610d867f..67d0073fb624d 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -410,48 +410,14 @@ int is_valid_bugaddr(unsigned long pc)
+ #endif /* CONFIG_GENERIC_BUG */
+
+ #ifdef CONFIG_VMAP_STACK
+-/*
+- * Extra stack space that allows us to provide panic messages when the kernel
+- * has overflowed its stack.
+- */
+-static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
++DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
+ overflow_stack)__aligned(16);
+-/*
+- * A temporary stack for use by handle_kernel_stack_overflow. This is used so
+- * we can call into C code to get the per-hart overflow stack. Usage of this
+- * stack must be protected by spin_shadow_stack.
+- */
+-long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
+-
+-/*
+- * A pseudo spinlock to protect the shadow stack from being used by multiple
+- * harts concurrently. This isn't a real spinlock because the lock side must
+- * be taken without a valid stack and only a single register, it's only taken
+- * while in the process of panicking anyway so the performance and error
+- * checking a proper spinlock gives us doesn't matter.
+- */
+-unsigned long spin_shadow_stack;
+-
+-asmlinkage unsigned long get_overflow_stack(void)
+-{
+- return (unsigned long)this_cpu_ptr(overflow_stack) +
+- OVERFLOW_STACK_SIZE;
+-}
+
+ asmlinkage void handle_bad_stack(struct pt_regs *regs)
+ {
+ unsigned long tsk_stk = (unsigned long)current->stack;
+ unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
+
+- /*
+- * We're done with the shadow stack by this point, as we're on the
+- * overflow stack. Tell any other concurrent overflowing harts that
+- * they can proceed with panicking by releasing the pseudo-spinlock.
+- *
+- * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
+- */
+- smp_store_release(&spin_shadow_stack, 0);
+-
+ console_verbose();
+
+ pr_emerg("Insufficient stack space to handle exception!\n");
+diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c
+index d40bec6ac0786..cadf725ef7983 100644
+--- a/arch/riscv/kernel/vdso/hwprobe.c
++++ b/arch/riscv/kernel/vdso/hwprobe.c
+@@ -37,7 +37,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
+
+ /* This is something we can handle, fill out the pairs. */
+ while (p < end) {
+- if (p->key <= RISCV_HWPROBE_MAX_KEY) {
++ if (riscv_hwprobe_key_is_valid(p->key)) {
+ p->value = avd->all_cpu_hwprobe_values[p->key];
+
+ } else {
+diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
+index 9c454f90fd3da..3a4dfc8babcf8 100644
+--- a/arch/riscv/mm/Makefile
++++ b/arch/riscv/mm/Makefile
+@@ -36,3 +36,4 @@ endif
+
+ obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
+ obj-$(CONFIG_RISCV_DMA_NONCOHERENT) += dma-noncoherent.o
++obj-$(CONFIG_RISCV_NONSTANDARD_CACHE_OPS) += cache-ops.o
+diff --git a/arch/riscv/mm/cache-ops.c b/arch/riscv/mm/cache-ops.c
+new file mode 100644
+index 0000000000000..a993ad11d0eca
+--- /dev/null
++++ b/arch/riscv/mm/cache-ops.c
+@@ -0,0 +1,17 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
++ */
++
++#include <asm/dma-noncoherent.h>
++
++struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init;
++
++void
++riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
++{
++ if (!ops)
++ return;
++ noncoherent_cache_ops = *ops;
++}
++EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
+diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
+index b76e7e192eb18..341bd6706b4c5 100644
+--- a/arch/riscv/mm/dma-noncoherent.c
++++ b/arch/riscv/mm/dma-noncoherent.c
+@@ -15,12 +15,6 @@ static bool noncoherent_supported __ro_after_init;
+ int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN;
+ EXPORT_SYMBOL_GPL(dma_cache_alignment);
+
+-struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init = {
+- .wback = NULL,
+- .inv = NULL,
+- .wback_inv = NULL,
+-};
+-
+ static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
+ {
+ void *vaddr = phys_to_virt(paddr);
+@@ -162,12 +156,3 @@ void __init riscv_set_dma_cache_alignment(void)
+ if (!noncoherent_supported)
+ dma_cache_alignment = 1;
+ }
+-
+-void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
+-{
+- if (!ops)
+- return;
+-
+- noncoherent_cache_ops = *ops;
+-}
+-EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
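With the ops table moved into its own translation unit (cache-ops.c, added above), a platform with non-standard cache maintenance registers its callbacks once during early init. A hedged sketch: the my_* names and callback bodies are placeholders, while the field names and the (phys_addr_t, size_t) signatures follow struct riscv_nonstd_cache_ops from asm/dma-noncoherent.h:

    static void my_wback(phys_addr_t paddr, size_t size)
    {
            /* write dirty lines in [paddr, paddr + size) back to memory */
    }

    static void my_inv(phys_addr_t paddr, size_t size)
    {
            /* drop any cached lines for the range */
    }

    static const struct riscv_nonstd_cache_ops my_cache_ops = {
            .wback          = my_wback,
            .inv            = my_inv,
            .wback_inv      = NULL,         /* optional; unset in this sketch */
    };

    /* from the platform's early init code: */
    /* riscv_noncoherent_register_cache_ops(&my_cache_ops); */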
+diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
+index 20a9f991a6d74..e9090b38f8117 100644
+--- a/arch/riscv/mm/ptdump.c
++++ b/arch/riscv/mm/ptdump.c
+@@ -384,6 +384,9 @@ static int __init ptdump_init(void)
+
+ kernel_ptd_info.base_addr = KERN_VIRT_START;
+
++ pg_level[1].name = pgtable_l5_enabled ? "P4D" : "PGD";
++ pg_level[2].name = pgtable_l4_enabled ? "PUD" : "PGD";
++
+ for (i = 0; i < ARRAY_SIZE(pg_level); i++)
+ for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
+ pg_level[i].mask |= pte_bits[j].mask;
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index 05e51666db033..8d0b95c173129 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -666,6 +666,7 @@ static int __init ipl_init(void)
+ &ipl_ccw_attr_group_lpar);
+ break;
+ case IPL_TYPE_ECKD:
++ case IPL_TYPE_ECKD_DUMP:
+ rc = sysfs_create_group(&ipl_kset->kobj, &ipl_eckd_attr_group);
+ break;
+ case IPL_TYPE_FCP:
+diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
+index 906a7bfc2a787..20786f6883b29 100644
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -21,10 +21,22 @@
+
+ #include <asm/pgalloc.h>
+ #include <asm/gmap.h>
++#include <asm/page.h>
+ #include <asm/tlb.h>
+
+ #define GMAP_SHADOW_FAKE_TABLE 1ULL
+
++static struct page *gmap_alloc_crst(void)
++{
++ struct page *page;
++
++ page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
++ if (!page)
++ return NULL;
++ arch_set_page_dat(page, CRST_ALLOC_ORDER);
++ return page;
++}
++
+ /**
+ * gmap_alloc - allocate and initialize a guest address space
+ * @limit: maximum address of the gmap address space
+@@ -67,7 +79,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
+ spin_lock_init(&gmap->guest_table_lock);
+ spin_lock_init(&gmap->shadow_lock);
+ refcount_set(&gmap->ref_count, 1);
+- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
++ page = gmap_alloc_crst();
+ if (!page)
+ goto out_free;
+ page->index = 0;
+@@ -308,7 +320,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
+ unsigned long *new;
+
+ /* since we dont free the gmap table until gmap_free we can unlock */
+- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
++ page = gmap_alloc_crst();
+ if (!page)
+ return -ENOMEM;
+ new = page_to_virt(page);
+@@ -1759,7 +1771,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
+
+ BUG_ON(!gmap_is_shadow(sg));
+ /* Allocate a shadow region second table */
+- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
++ page = gmap_alloc_crst();
+ if (!page)
+ return -ENOMEM;
+ page->index = r2t & _REGION_ENTRY_ORIGIN;
+@@ -1843,7 +1855,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
+
+ BUG_ON(!gmap_is_shadow(sg));
+ /* Allocate a shadow region second table */
+- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
++ page = gmap_alloc_crst();
+ if (!page)
+ return -ENOMEM;
+ page->index = r3t & _REGION_ENTRY_ORIGIN;
+@@ -1927,7 +1939,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
+
+ BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
+ /* Allocate a shadow segment table */
+- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
++ page = gmap_alloc_crst();
+ if (!page)
+ return -ENOMEM;
+ page->index = sgt & _REGION_ENTRY_ORIGIN;
+@@ -2855,7 +2867,7 @@ int s390_replace_asce(struct gmap *gmap)
+ if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
+ return -EINVAL;
+
+- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
++ page = gmap_alloc_crst();
+ if (!page)
+ return -ENOMEM;
+ page->index = 0;
+diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
+index 1e2ea706aa228..79a037f49f707 100644
+--- a/arch/s390/mm/page-states.c
++++ b/arch/s390/mm/page-states.c
+@@ -121,7 +121,7 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
+ continue;
+ if (!pud_folded(*pud)) {
+ page = phys_to_page(pud_val(*pud));
+- for (i = 0; i < 3; i++)
++ for (i = 0; i < 4; i++)
+ set_bit(PG_arch_1, &page[i].flags);
+ }
+ mark_kernel_pmd(pud, addr, next);
+@@ -142,7 +142,7 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
+ continue;
+ if (!p4d_folded(*p4d)) {
+ page = phys_to_page(p4d_val(*p4d));
+- for (i = 0; i < 3; i++)
++ for (i = 0; i < 4; i++)
+ set_bit(PG_arch_1, &page[i].flags);
+ }
+ mark_kernel_pud(p4d, addr, next);
+@@ -164,7 +164,7 @@ static void mark_kernel_pgd(void)
+ continue;
+ if (!pgd_folded(*pgd)) {
+ page = phys_to_page(pgd_val(*pgd));
+- for (i = 0; i < 3; i++)
++ for (i = 0; i < 4; i++)
+ set_bit(PG_arch_1, &page[i].flags);
+ }
+ mark_kernel_p4d(pgd, addr, next);
+@@ -181,6 +181,12 @@ void __init cmma_init_nodat(void)
+ return;
+ /* Mark pages used in kernel page tables */
+ mark_kernel_pgd();
++ page = virt_to_page(&swapper_pg_dir);
++ for (i = 0; i < 4; i++)
++ set_bit(PG_arch_1, &page[i].flags);
++ page = virt_to_page(&invalid_pg_dir);
++ for (i = 0; i < 4; i++)
++ set_bit(PG_arch_1, &page[i].flags);
+
+ /* Set all kernel pages not used for page tables to stable/no-dat */
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
+diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
+index 07fc660a24aa2..6396d6b06a3a2 100644
+--- a/arch/s390/mm/pgalloc.c
++++ b/arch/s390/mm/pgalloc.c
+@@ -146,6 +146,7 @@ struct page *page_table_alloc_pgste(struct mm_struct *mm)
+ ptdesc = pagetable_alloc(GFP_KERNEL, 0);
+ if (ptdesc) {
+ table = (u64 *)ptdesc_to_virt(ptdesc);
++ arch_set_page_dat(virt_to_page(table), 0);
+ memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
+ memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
+ }
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index 6957d2ed97bf0..6d276103c6d58 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -12,6 +12,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/slab.h>
+ #include <linux/sort.h>
++#include <asm/page-states.h>
+ #include <asm/cacheflush.h>
+ #include <asm/nospec-branch.h>
+ #include <asm/pgalloc.h>
+@@ -45,8 +46,11 @@ void *vmem_crst_alloc(unsigned long val)
+ unsigned long *table;
+
+ table = vmem_alloc_pages(CRST_ALLOC_ORDER);
+- if (table)
+- crst_table_init(table, val);
++ if (!table)
++ return NULL;
++ crst_table_init(table, val);
++ if (slab_is_available())
++ arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
+ return table;
+ }
+
+diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
+index c449e7c1b20ff..8bcd6c1431a95 100644
+--- a/arch/sh/Kconfig.debug
++++ b/arch/sh/Kconfig.debug
+@@ -22,6 +22,17 @@ config STACK_DEBUG
+ every function call and will therefore incur a major
+ performance hit. Most users should say N.
+
++config EARLY_PRINTK
++ bool "Early printk"
++ depends on SH_STANDARD_BIOS
++ help
++ Say Y here to redirect kernel printk messages to the serial port
++ used by the SH-IPL bootloader, starting very early in the boot
++ process and ending when the kernel's serial console is initialised.
++ This option is only useful while porting the kernel to a new machine,
++ when the kernel may crash or hang before the serial console is
++ initialised. If unsure, say N.
++
+ config 4KSTACKS
+ bool "Use 4Kb for kernel stacks instead of 8Kb"
+ depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB
+diff --git a/arch/x86/coco/tdx/tdcall.S b/arch/x86/coco/tdx/tdcall.S
+index b193c0a1d8db3..2eca5f43734fe 100644
+--- a/arch/x86/coco/tdx/tdcall.S
++++ b/arch/x86/coco/tdx/tdcall.S
+@@ -195,6 +195,7 @@ SYM_FUNC_END(__tdx_module_call)
+ xor %r10d, %r10d
+ xor %r11d, %r11d
+ xor %rdi, %rdi
++ xor %rsi, %rsi
+ xor %rdx, %rdx
+
+ /* Restore callee-saved GPRs as mandated by the x86_64 ABI */
+diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
+index 44340a1139e0b..959afa705e95c 100644
+--- a/arch/x86/crypto/sha1_ssse3_glue.c
++++ b/arch/x86/crypto/sha1_ssse3_glue.c
+@@ -24,8 +24,17 @@
+ #include <linux/types.h>
+ #include <crypto/sha1.h>
+ #include <crypto/sha1_base.h>
++#include <asm/cpu_device_id.h>
+ #include <asm/simd.h>
+
++static const struct x86_cpu_id module_cpu_ids[] = {
++ X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
++ X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
++ X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
++ {}
++};
++MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
++
+ static int sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len, sha1_block_fn *sha1_xform)
+ {
+@@ -301,6 +310,9 @@ static inline void unregister_sha1_ni(void) { }
+
+ static int __init sha1_ssse3_mod_init(void)
+ {
++ if (!x86_match_cpu(module_cpu_ids))
++ return -ENODEV;
++
+ if (register_sha1_ssse3())
+ goto fail;
+
+diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
+index 3a5f6be7dbba4..d25235f0ccafc 100644
+--- a/arch/x86/crypto/sha256_ssse3_glue.c
++++ b/arch/x86/crypto/sha256_ssse3_glue.c
+@@ -38,11 +38,20 @@
+ #include <crypto/sha2.h>
+ #include <crypto/sha256_base.h>
+ #include <linux/string.h>
++#include <asm/cpu_device_id.h>
+ #include <asm/simd.h>
+
+ asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
+ const u8 *data, int blocks);
+
++static const struct x86_cpu_id module_cpu_ids[] = {
++ X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
++ X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
++ X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
++ {}
++};
++MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
++
+ static int _sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len, sha256_block_fn *sha256_xform)
+ {
+@@ -366,6 +375,9 @@ static inline void unregister_sha256_ni(void) { }
+
+ static int __init sha256_ssse3_mod_init(void)
+ {
++ if (!x86_match_cpu(module_cpu_ids))
++ return -ENODEV;
++
+ if (register_sha256_ssse3())
+ goto fail;
+
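Both crypto hunks above add the same two-part gate: an x86_cpu_id table exported through MODULE_DEVICE_TABLE() so userspace can autoload the module on CPUs that advertise a matching feature flag, and an x86_match_cpu() check in the init function so the module refuses to load anywhere else. A minimal sketch of the pattern follows; the module name and the choice of SSSE3 as the sole gating feature are illustrative, not taken from the patch.

#include <linux/module.h>
#include <asm/cpu_device_id.h>

/* Autoload table: generates a modalias matched against CPU feature flags. */
static const struct x86_cpu_id demo_cpu_ids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, demo_cpu_ids);

static int __init demo_init(void)
{
	/* x86_match_cpu() succeeds if any table entry matches this CPU. */
	if (!x86_match_cpu(demo_cpu_ids))
		return -ENODEV;
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");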
+diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
+index c8a7fc23f63c6..f896eed4516c7 100644
+--- a/arch/x86/include/asm/acpi.h
++++ b/arch/x86/include/asm/acpi.h
+@@ -16,6 +16,9 @@
+ #include <asm/x86_init.h>
+ #include <asm/cpufeature.h>
+ #include <asm/irq_vectors.h>
++#include <asm/xen/hypervisor.h>
++
++#include <xen/xen.h>
+
+ #ifdef CONFIG_ACPI_APEI
+ # include <asm/pgtable_types.h>
+@@ -127,6 +130,17 @@ static inline void arch_acpi_set_proc_cap_bits(u32 *cap)
+ if (!cpu_has(c, X86_FEATURE_MWAIT) ||
+ boot_option_idle_override == IDLE_NOMWAIT)
+ *cap &= ~(ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH);
++
++ if (xen_initial_domain()) {
++ /*
++ * When Linux is running as Xen dom0, the hypervisor is the
++ * entity in charge of the processor power management, and so
++		 * Xen needs to check that the OS capabilities reported in the
++		 * processor capabilities buffer match what the hypervisor
++ * driver supports.
++ */
++ xen_sanitize_proc_cap_bits(cap);
++ }
+ }
+
+ static inline bool acpi_has_cpu_in_madt(void)
+diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
+index e3054e3e46d52..9b419f0de713c 100644
+--- a/arch/x86/include/asm/kvm-x86-ops.h
++++ b/arch/x86/include/asm/kvm-x86-ops.h
+@@ -108,6 +108,7 @@ KVM_X86_OP_OPTIONAL(vcpu_blocking)
+ KVM_X86_OP_OPTIONAL(vcpu_unblocking)
+ KVM_X86_OP_OPTIONAL(pi_update_irte)
+ KVM_X86_OP_OPTIONAL(pi_start_assignment)
++KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
+ KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
+ KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
+ KVM_X86_OP_OPTIONAL(set_hv_timer)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 70d139406bc80..fb9f5fa96cc96 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1708,6 +1708,7 @@ struct kvm_x86_ops {
+ int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set);
+ void (*pi_start_assignment)(struct kvm *kvm);
++ void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
+ void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
+ bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
+
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index b37abb55e948b..389f9594746ef 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -553,6 +553,7 @@
+ #define MSR_AMD64_CPUID_FN_1 0xc0011004
+ #define MSR_AMD64_LS_CFG 0xc0011020
+ #define MSR_AMD64_DC_CFG 0xc0011022
++#define MSR_AMD64_TW_CFG 0xc0011023
+
+ #define MSR_AMD64_DE_CFG 0xc0011029
+ #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index c55cc243592e9..197ff4f4d1ceb 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -271,7 +271,7 @@
+ .Lskip_rsb_\@:
+ .endm
+
+-#ifdef CONFIG_CPU_UNRET_ENTRY
++#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
+ #define CALL_UNTRAIN_RET "call entry_untrain_ret"
+ #else
+ #define CALL_UNTRAIN_RET ""
+@@ -312,7 +312,7 @@
+
+ .macro UNTRAIN_RET_FROM_CALL
+ #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
+- defined(CONFIG_CALL_DEPTH_TRACKING)
++ defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
+ VALIDATE_UNRET_END
+ ALTERNATIVE_3 "", \
+ CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
+diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
+index e3bae2b60a0db..ef2844d691735 100644
+--- a/arch/x86/include/asm/numa.h
++++ b/arch/x86/include/asm/numa.h
+@@ -12,13 +12,6 @@
+
+ #define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
+
+-/*
+- * Too small node sizes may confuse the VM badly. Usually they
+- * result from BIOS bugs. So dont recognize nodes as standalone
+- * NUMA entities that have less than this amount of RAM listed:
+- */
+-#define NODE_MIN_SIZE (4*1024*1024)
+-
+ extern int numa_off;
+
+ /*
+diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
+index 64df897c0ee30..1be13b2dfe8bf 100644
+--- a/arch/x86/include/asm/sparsemem.h
++++ b/arch/x86/include/asm/sparsemem.h
+@@ -37,6 +37,8 @@ extern int phys_to_target_node(phys_addr_t start);
+ #define phys_to_target_node phys_to_target_node
+ extern int memory_add_physaddr_to_nid(u64 start);
+ #define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
++extern int numa_fill_memblks(u64 start, u64 end);
++#define numa_fill_memblks numa_fill_memblks
+ #endif
+ #endif /* __ASSEMBLY__ */
+
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 8bae40a662827..5c367c1290c35 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -496,7 +496,7 @@ copy_mc_to_kernel(void *to, const void *from, unsigned len);
+ #define copy_mc_to_kernel copy_mc_to_kernel
+
+ unsigned long __must_check
+-copy_mc_to_user(void *to, const void *from, unsigned len);
++copy_mc_to_user(void __user *to, const void *from, unsigned len);
+ #endif
+
+ /*
+diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
+index 7048dfacc04b2..a9088250770f2 100644
+--- a/arch/x86/include/asm/xen/hypervisor.h
++++ b/arch/x86/include/asm/xen/hypervisor.h
+@@ -100,4 +100,13 @@ static inline void leave_lazy(enum xen_lazy_mode mode)
+
+ enum xen_lazy_mode xen_get_lazy_mode(void);
+
++#if defined(CONFIG_XEN_DOM0) && defined(CONFIG_ACPI)
++void xen_sanitize_proc_cap_bits(uint32_t *buf);
++#else
++static inline void xen_sanitize_proc_cap_bits(uint32_t *buf)
++{
++ BUG();
++}
++#endif
++
+ #endif /* _ASM_X86_XEN_HYPERVISOR_H */
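The hypervisor.h hunk uses the kernel's usual config-gated stub idiom: a real prototype when CONFIG_XEN_DOM0 and CONFIG_ACPI are both enabled, and an inline BUG() stub otherwise, so the caller added to asm/acpi.h above needs no #ifdef of its own; when dom0 support is compiled out, xen_initial_domain() is constant-false and the stub is unreachable. A generic sketch of the idiom, with hypothetical config and function names:

#include <linux/bug.h>

#ifdef CONFIG_DEMO_FEATURE
void demo_sanitize(unsigned int *buf);	/* real implementation elsewhere */
#else
static inline void demo_sanitize(unsigned int *buf)
{
	/* Only reachable if a caller's own config guard is wrong. */
	BUG();
}
#endif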
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 356de955e78dd..cab4d8b1535d6 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -112,6 +112,9 @@ static const struct pci_device_id amd_nb_link_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 760adac3d1a82..3cdf48493546d 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -36,6 +36,8 @@
+ #include <linux/smp.h>
+ #include <linux/mm.h>
+
++#include <xen/xen.h>
++
+ #include <asm/trace/irq_vectors.h>
+ #include <asm/irq_remapping.h>
+ #include <asm/pc-conf-reg.h>
+@@ -2344,6 +2346,15 @@ static int __init smp_init_primary_thread_mask(void)
+ {
+ unsigned int cpu;
+
++ /*
++	 * XEN/PV provides either no or useless topology information.
++ * Pretend that all vCPUs are primary threads.
++ */
++ if (xen_pv_domain()) {
++ cpumask_copy(&__cpu_primary_thread_mask, cpu_possible_mask);
++ return 0;
++ }
++
+ for (cpu = 0; cpu < nr_logical_cpuids; cpu++)
+ cpu_mark_primary_thread(cpu, cpuid_to_apicid[cpu]);
+ return 0;
+diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
+index 6b6b711678fe0..d9651f15ae4f7 100644
+--- a/arch/x86/kernel/apic/msi.c
++++ b/arch/x86/kernel/apic/msi.c
+@@ -55,14 +55,14 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
+ * caused by the non-atomic update of the address/data pair.
+ *
+ * Direct update is possible when:
+- * - The MSI is maskable (remapped MSI does not use this code path)).
+- * The quirk bit is not set in this case.
++ * - The MSI is maskable (remapped MSI does not use this code path).
++ * The reservation mode bit is set in this case.
+ * - The new vector is the same as the old vector
+ * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
+ * - The interrupt is not yet started up
+ * - The new destination CPU is the same as the old destination CPU
+ */
+- if (!irqd_msi_nomask_quirk(irqd) ||
++ if (!irqd_can_reserve(irqd) ||
+ cfg->vector == old_cfg.vector ||
+ old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
+ !irqd_is_started(irqd) ||
+@@ -215,8 +215,6 @@ static bool x86_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ if (WARN_ON_ONCE(domain != real_parent))
+ return false;
+ info->chip->irq_set_affinity = msi_set_affinity;
+- /* See msi_set_affinity() for the gory details */
+- info->flags |= MSI_FLAG_NOMASK_QUIRK;
+ break;
+ case DOMAIN_BUS_DMAR:
+ case DOMAIN_BUS_AMDVI:
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 10499bcd4e396..0bc55472f303a 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2353,6 +2353,8 @@ early_param("l1tf", l1tf_cmdline);
+
+ enum srso_mitigation {
+ SRSO_MITIGATION_NONE,
++ SRSO_MITIGATION_UCODE_NEEDED,
++ SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
+ SRSO_MITIGATION_MICROCODE,
+ SRSO_MITIGATION_SAFE_RET,
+ SRSO_MITIGATION_IBPB,
+@@ -2368,11 +2370,13 @@ enum srso_mitigation_cmd {
+ };
+
+ static const char * const srso_strings[] = {
+- [SRSO_MITIGATION_NONE] = "Vulnerable",
+- [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode",
+- [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET",
+- [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
+- [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
++ [SRSO_MITIGATION_NONE] = "Vulnerable",
++ [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
++ [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode",
++ [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
++ [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
++ [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
++ [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
+ };
+
+ static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
+@@ -2409,10 +2413,7 @@ static void __init srso_select_mitigation(void)
+ if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
+ goto pred_cmd;
+
+- if (!has_microcode) {
+- pr_warn("IBPB-extending microcode not applied!\n");
+- pr_warn(SRSO_NOTICE);
+- } else {
++ if (has_microcode) {
+ /*
+ * Zen1/2 with SMT off aren't vulnerable after the right
+ * IBPB microcode has been applied.
+@@ -2421,14 +2422,17 @@ static void __init srso_select_mitigation(void)
+ setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
+ return;
+ }
+- }
+
+- if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+- if (has_microcode) {
+- pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n");
++ if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+ srso_mitigation = SRSO_MITIGATION_IBPB;
+- goto pred_cmd;
++ goto out;
+ }
++ } else {
++ pr_warn("IBPB-extending microcode not applied!\n");
++ pr_warn(SRSO_NOTICE);
++
++ /* may be overwritten by SRSO_CMD_SAFE_RET below */
++ srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
+ }
+
+ switch (srso_cmd) {
+@@ -2458,7 +2462,10 @@ static void __init srso_select_mitigation(void)
+ setup_force_cpu_cap(X86_FEATURE_SRSO);
+ x86_return_thunk = srso_return_thunk;
+ }
+- srso_mitigation = SRSO_MITIGATION_SAFE_RET;
++ if (has_microcode)
++ srso_mitigation = SRSO_MITIGATION_SAFE_RET;
++ else
++ srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
+ } else {
+ pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
+ goto pred_cmd;
+@@ -2493,10 +2500,11 @@ static void __init srso_select_mitigation(void)
+ break;
+ }
+
+- pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
++out:
++ pr_info("%s\n", srso_strings[srso_mitigation]);
+
+ pred_cmd:
+- if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) &&
++ if ((!boot_cpu_has_bug(X86_BUG_SRSO) || srso_cmd == SRSO_CMD_OFF) &&
+ boot_cpu_has(X86_FEATURE_SBPB))
+ x86_pred_cmd = PRED_CMD_SBPB;
+ }
+@@ -2704,9 +2712,7 @@ static ssize_t srso_show_state(char *buf)
+ if (boot_cpu_has(X86_FEATURE_SRSO_NO))
+ return sysfs_emit(buf, "Mitigation: SMT disabled\n");
+
+- return sysfs_emit(buf, "%s%s\n",
+- srso_strings[srso_mitigation],
+- boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
++ return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
+ }
+
+ static ssize_t gds_show_state(char *buf)
+diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
+index defdc594be14d..a7b3ef4c4de91 100644
+--- a/arch/x86/kernel/cpu/hygon.c
++++ b/arch/x86/kernel/cpu/hygon.c
+@@ -87,8 +87,12 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
+ if (!err)
+ c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+
+- /* Socket ID is ApicId[6] for these processors. */
+- c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
++ /*
++ * Socket ID is ApicId[6] for the processors with model <= 0x3
++ * when running on host.
++ */
++ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
++ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
+
+ cacheinfo_hygon_init_llc_id(c, cpu);
+ } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 49f7629b17f73..bbc21798df10e 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -80,7 +80,7 @@ static struct desc_struct startup_gdt[GDT_ENTRIES] = {
+ * while the kernel still uses a direct mapping.
+ */
+ static struct desc_ptr startup_gdt_descr = {
+- .size = sizeof(startup_gdt),
++ .size = sizeof(startup_gdt)-1,
+ .address = 0,
+ };
+
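The startup_gdt_descr change is an off-by-one fix: the size field loaded into GDTR is architecturally a limit, i.e. the offset of the last valid byte, so it must be the table size minus one; plain sizeof(startup_gdt) declared the table one byte larger than it is. A small userspace illustration, with the struct simplified from the kernel's struct desc_ptr:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct desc_ptr. */
struct demo_desc_ptr {
	uint16_t size;		/* GDTR "limit": offset of the last valid byte */
	uint64_t address;
} __attribute__((packed));

int main(void)
{
	static uint64_t gdt[16];		/* 16 eight-byte descriptors */
	struct demo_desc_ptr d = {
		.size = sizeof(gdt) - 1,	/* 128 bytes -> limit 127 */
		.address = (uintptr_t)gdt,
	};

	printf("limit=%u for a %zu-byte table\n", d.size, sizeof(gdt));
	return 0;
}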
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index a0c551846b35f..4766b6bed4439 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -507,12 +507,13 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
+ }
+ this_cpu_write(nmi_state, NMI_EXECUTING);
+ this_cpu_write(nmi_cr2, read_cr2());
++
++nmi_restart:
+ if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) {
+ WRITE_ONCE(nsp->idt_seq, nsp->idt_seq + 1);
+ WARN_ON_ONCE(!(nsp->idt_seq & 0x1));
+ WRITE_ONCE(nsp->recv_jiffies, jiffies);
+ }
+-nmi_restart:
+
+ /*
+ * Needs to happen before DR7 is accessed, because the hypervisor can
+@@ -548,16 +549,16 @@ nmi_restart:
+
+ if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
+ write_cr2(this_cpu_read(nmi_cr2));
+- if (this_cpu_dec_return(nmi_state))
+- goto nmi_restart;
+-
+- if (user_mode(regs))
+- mds_user_clear_cpu_buffers();
+ if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) {
+ WRITE_ONCE(nsp->idt_seq, nsp->idt_seq + 1);
+ WARN_ON_ONCE(nsp->idt_seq & 0x1);
+ WRITE_ONCE(nsp->recv_jiffies, jiffies);
+ }
++ if (this_cpu_dec_return(nmi_state))
++ goto nmi_restart;
++
++ if (user_mode(regs))
++ mds_user_clear_cpu_buffers();
+ }
+
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
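The nmi.c reordering protects the idt_seq protocol: the counter is incremented once on entry to and once on exit from each handler pass, so its value is odd exactly while the CPU is inside the handler; moving the entry increment below the nmi_restart label (and the exit increment past the bookkeeping) keeps that parity correct when a nested NMI forces another loop iteration. A trivial userspace sketch of the odd-while-busy convention, with made-up names:

#include <stdio.h>

static unsigned long seq;

static void handler_pass(void)
{
	seq++;			/* entry: seq becomes odd */
	/* ... handle the event; observers read odd as "in handler" ... */
	seq++;			/* exit: seq becomes even again */
}

int main(void)
{
	handler_pass();
	handler_pass();		/* second pass, as after a goto nmi_restart */
	printf("seq=%lu (even, so not in handler)\n", seq);
	return 0;
}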
+diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
+index cacf2ede62175..23d8aaf8d9fd1 100644
+--- a/arch/x86/kernel/signal_64.c
++++ b/arch/x86/kernel/signal_64.c
+@@ -175,9 +175,6 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
+ frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe), &fp);
+ uc_flags = frame_uc_flags(regs);
+
+- if (setup_signal_shadow_stack(ksig))
+- return -EFAULT;
+-
+ if (!user_access_begin(frame, sizeof(*frame)))
+ return -EFAULT;
+
+@@ -198,6 +195,9 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
+ return -EFAULT;
+ }
+
++ if (setup_signal_shadow_stack(ksig))
++ return -EFAULT;
++
+ /* Set up registers for signal handler */
+ regs->di = ksig->sig;
+ /* In case the signal handler was declared without prototypes */
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index 7c2dac6824e26..238afd7335e46 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -727,10 +727,12 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
+
+ stimer_cleanup(stimer);
+ stimer->count = count;
+- if (stimer->count == 0)
+- stimer->config.enable = 0;
+- else if (stimer->config.auto_enable)
+- stimer->config.enable = 1;
++ if (!host) {
++ if (stimer->count == 0)
++ stimer->config.enable = 0;
++ else if (stimer->config.auto_enable)
++ stimer->config.enable = 1;
++ }
+
+ if (stimer->config.enable)
+ stimer_mark_pending(stimer, false);
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 3e977dbbf9933..245b20973caee 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2444,22 +2444,22 @@ EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
+ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
+ {
+ struct kvm_lapic *apic = vcpu->arch.apic;
+- u64 val;
+
+ /*
+- * ICR is a single 64-bit register when x2APIC is enabled. For legacy
+- * xAPIC, ICR writes need to go down the common (slightly slower) path
+- * to get the upper half from ICR2.
++	 * ICR is a single 64-bit register when x2APIC is enabled; all other
++ * registers hold 32-bit values. For legacy xAPIC, ICR writes need to
++ * go down the common path to get the upper half from ICR2.
++ *
++ * Note, using the write helpers may incur an unnecessary write to the
++ * virtual APIC state, but KVM needs to conditionally modify the value
++ * in certain cases, e.g. to clear the ICR busy bit. The cost of extra
++ * conditional branches is likely a wash relative to the cost of the
++	 * maybe-unnecessary write, and both are in the noise anyway.
+ */
+- if (apic_x2apic_mode(apic) && offset == APIC_ICR) {
+- val = kvm_lapic_get_reg64(apic, APIC_ICR);
+- kvm_apic_send_ipi(apic, (u32)val, (u32)(val >> 32));
+- trace_kvm_apic_write(APIC_ICR, val);
+- } else {
+- /* TODO: optimize to just emulate side effect w/o one more write */
+- val = kvm_lapic_get_reg(apic, offset);
+- kvm_lapic_reg_write(apic, offset, (u32)val);
+- }
++ if (apic_x2apic_mode(apic) && offset == APIC_ICR)
++ kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
++ else
++ kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
+ }
+ EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
+
+@@ -2670,6 +2670,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
+ u64 msr_val;
+ int i;
+
++ static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
++
+ if (!init_event) {
+ msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
+ if (kvm_vcpu_is_reset_bsp(vcpu))
+@@ -2981,6 +2983,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ int r;
+
++ static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
++
+ kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
+ /* set SPIV separately to get count of SW disabled APICs right */
+ apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 72e3943f36935..9bba5352582c3 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6912,7 +6912,7 @@ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
+ }
+
+-static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
++static void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+@@ -8286,7 +8286,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
+ .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
+ .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
+ .load_eoi_exitmap = vmx_load_eoi_exitmap,
+- .apicv_post_state_restore = vmx_apicv_post_state_restore,
++ .apicv_pre_state_restore = vmx_apicv_pre_state_restore,
+ .required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
+ .hwapic_irr_update = vmx_hwapic_irr_update,
+ .hwapic_isr_update = vmx_hwapic_isr_update,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 41cce5031126a..e179db7c17dad 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3641,6 +3641,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ case MSR_AMD64_PATCH_LOADER:
+ case MSR_AMD64_BU_CFG2:
+ case MSR_AMD64_DC_CFG:
++ case MSR_AMD64_TW_CFG:
+ case MSR_F15H_EX_CFG:
+ break;
+
+@@ -4065,6 +4066,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ case MSR_AMD64_BU_CFG2:
+ case MSR_IA32_PERF_CTL:
+ case MSR_AMD64_DC_CFG:
++ case MSR_AMD64_TW_CFG:
+ case MSR_F15H_EX_CFG:
+ /*
+ * Intel Sandy Bridge CPUs must support the RAPL (running average power
+diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
+index 80efd45a77617..6e8b7e600def5 100644
+--- a/arch/x86/lib/copy_mc.c
++++ b/arch/x86/lib/copy_mc.c
+@@ -70,23 +70,23 @@ unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigne
+ }
+ EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
+
+-unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
++unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len)
+ {
+ unsigned long ret;
+
+ if (copy_mc_fragile_enabled) {
+ __uaccess_begin();
+- ret = copy_mc_fragile(dst, src, len);
++ ret = copy_mc_fragile((__force void *)dst, src, len);
+ __uaccess_end();
+ return ret;
+ }
+
+ if (static_cpu_has(X86_FEATURE_ERMS)) {
+ __uaccess_begin();
+- ret = copy_mc_enhanced_fast_string(dst, src, len);
++ ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
+ __uaccess_end();
+ return ret;
+ }
+
+- return copy_user_generic(dst, src, len);
++ return copy_user_generic((__force void *)dst, src, len);
+ }
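The copy_mc changes are an annotation fix rather than a behavioral one: the destination really is a user pointer, so the prototype gains __user and each internal call strips the address space with a (__force void *) cast once the access window has been opened by __uaccess_begin(). A hedged sketch of how those annotations behave under sparse; the macro definitions are simplified from the kernel's compiler_types.h:

/* Simplified versions of the kernel's sparse annotations. */
#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(__user)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

static unsigned long raw_copy(void *dst, const void *src, unsigned long len)
{
	char *d = dst;
	const char *s = src;

	while (len--)
		*d++ = *s++;
	return 0;			/* bytes left uncopied */
}

unsigned long copy_out(void __user *dst, const void *src, unsigned long len)
{
	/*
	 * Passing 'dst' as a plain void * would make sparse warn about a
	 * dropped address space; the __force cast marks the strip as
	 * deliberate, mirroring the casts added in the hunk above.
	 */
	return raw_copy((__force void *)dst, src, len);
}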
+diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
+index 5a53c2cc169cc..6993f026adec9 100644
+--- a/arch/x86/mm/maccess.c
++++ b/arch/x86/mm/maccess.c
+@@ -9,12 +9,21 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+ unsigned long vaddr = (unsigned long)unsafe_src;
+
+ /*
+- * Range covering the highest possible canonical userspace address
+- * as well as non-canonical address range. For the canonical range
+- * we also need to include the userspace guard page.
++ * Do not allow userspace addresses. This disallows
++ * normal userspace and the userspace guard page:
+ */
+- return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
+- __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
++ if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
++ return false;
++
++ /*
++ * Allow everything during early boot before 'x86_virt_bits'
++ * is initialized. Needed for instruction decoding in early
++ * exception handlers.
++ */
++ if (!boot_cpu_data.x86_virt_bits)
++ return true;
++
++ return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
+ }
+ #else
+ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index 2aadb2019b4f2..aa39d678fe81d 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -11,6 +11,7 @@
+ #include <linux/nodemask.h>
+ #include <linux/sched.h>
+ #include <linux/topology.h>
++#include <linux/sort.h>
+
+ #include <asm/e820/api.h>
+ #include <asm/proto.h>
+@@ -601,13 +602,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
+ if (start >= end)
+ continue;
+
+- /*
+- * Don't confuse VM with a node that doesn't have the
+- * minimum amount of memory:
+- */
+- if (end && (end - start) < NODE_MIN_SIZE)
+- continue;
+-
+ alloc_node_data(nid);
+ }
+
+@@ -961,4 +955,83 @@ int memory_add_physaddr_to_nid(u64 start)
+ return nid;
+ }
+ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
++
++static int __init cmp_memblk(const void *a, const void *b)
++{
++ const struct numa_memblk *ma = *(const struct numa_memblk **)a;
++ const struct numa_memblk *mb = *(const struct numa_memblk **)b;
++
++	return (ma->start > mb->start) - (ma->start < mb->start);
++}
++
++static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
++
++/**
++ * numa_fill_memblks - Fill gaps in numa_meminfo memblks
++ * @start: address to begin fill
++ * @end: address to end fill
++ *
++ * Find and extend numa_meminfo memblks to cover the @start-@end
++ * physical address range, such that the first memblk includes
++ * @start, the last memblk includes @end, and any gaps in between
++ * are filled.
++ *
++ * RETURNS:
++ * 0 : Success
++ * NUMA_NO_MEMBLK : No memblk exists in @start-@end range
++ */
++
++int __init numa_fill_memblks(u64 start, u64 end)
++{
++ struct numa_memblk **blk = &numa_memblk_list[0];
++ struct numa_meminfo *mi = &numa_meminfo;
++ int count = 0;
++ u64 prev_end;
++
++ /*
++ * Create a list of pointers to numa_meminfo memblks that
++ * overlap start, end. Exclude (start == bi->end) since
++ * end addresses in both a CFMWS range and a memblk range
++ * are exclusive.
++ *
++ * This list of pointers is used to make in-place changes
++ * that fill out the numa_meminfo memblks.
++ */
++ for (int i = 0; i < mi->nr_blks; i++) {
++ struct numa_memblk *bi = &mi->blk[i];
++
++ if (start < bi->end && end >= bi->start) {
++ blk[count] = &mi->blk[i];
++ count++;
++ }
++ }
++ if (!count)
++ return NUMA_NO_MEMBLK;
++
++ /* Sort the list of pointers in memblk->start order */
++ sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL);
++
++ /* Make sure the first/last memblks include start/end */
++ blk[0]->start = min(blk[0]->start, start);
++ blk[count - 1]->end = max(blk[count - 1]->end, end);
++
++ /*
++	 * Fill any gaps by tracking the previous memblk's
++ * end address and backfilling to it if needed.
++ */
++ prev_end = blk[0]->end;
++ for (int i = 1; i < count; i++) {
++ struct numa_memblk *curr = blk[i];
++
++ if (prev_end >= curr->start) {
++ if (prev_end < curr->end)
++ prev_end = curr->end;
++ } else {
++ curr->start = prev_end;
++ prev_end = curr->end;
++ }
++ }
++ return 0;
++}
++
+ #endif
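numa_fill_memblks() is easiest to follow on concrete numbers. The hedged userspace re-run of its core loop below uses made-up ranges: two memblks [0x000,0x100) and [0x300,0x500) inside a window [0x000,0x600) come out as [0x000,0x100) and [0x100,0x600), i.e. the first blk already covers @start, the last is extended to @end, and the gap is backfilled from the previous blk's end.

#include <stdio.h>

struct blk { unsigned long long start, end; };

int main(void)
{
	struct blk b[2] = { { 0x000, 0x100 }, { 0x300, 0x500 } };
	unsigned long long start = 0x000, end = 0x600, prev_end;
	int count = 2;

	/* Make sure the first/last memblks include start/end. */
	if (b[0].start > start)
		b[0].start = start;
	if (b[count - 1].end < end)
		b[count - 1].end = end;		/* 0x500 -> 0x600 */

	/* Backfill gaps exactly as the kernel loop above does. */
	prev_end = b[0].end;
	for (int i = 1; i < count; i++) {
		if (prev_end >= b[i].start) {
			if (prev_end < b[i].end)
				prev_end = b[i].end;
		} else {
			b[i].start = prev_end;	/* 0x300 -> 0x100 */
			prev_end = b[i].end;
		}
	}

	for (int i = 0; i < count; i++)
		printf("blk%d: [%#llx, %#llx)\n", i, b[i].start, b[i].end);
	return 0;
}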
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index a5930042139d3..52f36c48c1b9e 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -1018,6 +1018,10 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
+
+ #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
+
++/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
++#define RESTORE_TAIL_CALL_CNT(stack) \
++ EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
++
+ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
+ int oldproglen, struct jit_context *ctx, bool jmp_padding)
+ {
+@@ -1623,9 +1627,7 @@ st: if (is_imm8(insn->off))
+
+ func = (u8 *) __bpf_call_base + imm32;
+ if (tail_call_reachable) {
+- /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
+- EMIT3_off32(0x48, 0x8B, 0x85,
+- -round_up(bpf_prog->aux->stack_depth, 8) - 8);
++ RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
+ if (!imm32)
+ return -EINVAL;
+ offs = 7 + x86_call_depth_emit_accounting(&prog, func);
+@@ -2400,6 +2402,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ * [ ... ]
+ * [ stack_arg2 ]
+ * RBP - arg_stack_off [ stack_arg1 ]
++ * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
+ */
+
+ /* room for return value of orig_call or fentry prog */
+@@ -2464,6 +2467,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ else
+ /* sub rsp, stack_size */
+ EMIT4(0x48, 0x83, 0xEC, stack_size);
++ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
++ EMIT1(0x50); /* push rax */
+ /* mov QWORD PTR [rbp - rbx_off], rbx */
+ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
+
+@@ -2516,9 +2521,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ restore_regs(m, &prog, regs_off);
+ save_args(m, &prog, arg_stack_off, true);
+
++ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
++ /* Before calling the original function, restore the
++ * tail_call_cnt from stack to rax.
++ */
++ RESTORE_TAIL_CALL_CNT(stack_size);
++
+ if (flags & BPF_TRAMP_F_ORIG_STACK) {
+- emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
+- EMIT2(0xff, 0xd0); /* call *rax */
++ emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
++ EMIT2(0xff, 0xd3); /* call *rbx */
+ } else {
+ /* call original function */
+ if (emit_rsb_call(&prog, orig_call, prog)) {
+@@ -2569,7 +2580,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ ret = -EINVAL;
+ goto cleanup;
+ }
+- }
++ } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
++ /* Before running the original function, restore the
++ * tail_call_cnt from stack to rax.
++ */
++ RESTORE_TAIL_CALL_CNT(stack_size);
++
+ /* restore return value of orig_call or fentry prog back into RAX */
+ if (save_ret)
+ emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index e3ec02e6ac9fe..f347c20247d30 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -3,9 +3,11 @@
+ * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+ #include <linux/dmi.h>
+ #include <linux/pci.h>
++#include <linux/suspend.h>
+ #include <linux/vgaarb.h>
+ #include <asm/amd_nb.h>
+ #include <asm/hpet.h>
+@@ -904,3 +906,60 @@ static void chromeos_fixup_apl_pci_l1ss_capability(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_save_apl_pci_l1ss_capability);
+ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_fixup_apl_pci_l1ss_capability);
++
++#ifdef CONFIG_SUSPEND
++/*
++ * Root Ports on some AMD SoCs advertise PME_Support for D3hot and D3cold, but
++ * if the SoC is put into a hardware sleep state by the amd-pmc driver, the
++ * Root Ports don't generate wakeup interrupts for USB devices.
++ *
++ * When suspending, remove D3hot and D3cold from the PME_Support advertised
++ * by the Root Port so we don't use those states if we're expecting wakeup
++ * interrupts. Restore the advertised PME_Support when resuming.
++ */
++static void amd_rp_pme_suspend(struct pci_dev *dev)
++{
++ struct pci_dev *rp;
++
++ /*
++ * PM_SUSPEND_ON means we're doing runtime suspend, which means
++ * amd-pmc will not be involved so PMEs during D3 work as advertised.
++ *
++ * The PMEs *do* work if amd-pmc doesn't put the SoC in the hardware
++ * sleep state, but we assume amd-pmc is always present.
++ */
++ if (pm_suspend_target_state == PM_SUSPEND_ON)
++ return;
++
++ rp = pcie_find_root_port(dev);
++ if (!rp->pm_cap)
++ return;
++
++ rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >>
++ PCI_PM_CAP_PME_SHIFT);
++ dev_info_once(&rp->dev, "quirk: disabling D3cold for suspend\n");
++}
++
++static void amd_rp_pme_resume(struct pci_dev *dev)
++{
++ struct pci_dev *rp;
++ u16 pmc;
++
++ rp = pcie_find_root_port(dev);
++ if (!rp->pm_cap)
++ return;
++
++ pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc);
++ rp->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
++}
++/* Rembrandt (yellow_carp) */
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_resume);
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_resume);
++/* Phoenix (pink_sardine) */
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_resume);
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_resume);
++#endif /* CONFIG_SUSPEND */
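The quirk operates on pci_dev's cached PME_Support value, a 5-bit field decoded from bits 15:11 of the PMC register: the suspend hook clears the D3hot/D3cold bits in the cache, and the resume hook re-reads PMC to restore them. A userspace sketch of the decode, with the constant values copied from <uapi/linux/pci_regs.h> and an arbitrary example PMC value:

#include <stdio.h>

#define PCI_PM_CAP_PME_MASK	0xf800	/* bits 15:11 of PMC */
#define PCI_PM_CAP_PME_SHIFT	11
#define PCI_PM_CAP_PME_D3hot	0x4000	/* PME# can be asserted from D3hot */
#define PCI_PM_CAP_PME_D3cold	0x8000	/* PME# can be asserted from D3cold */

int main(void)
{
	unsigned short pmc = 0xf800;	/* example: PME# from every state */
	unsigned char pme_support = (pmc & PCI_PM_CAP_PME_MASK) >>
				    PCI_PM_CAP_PME_SHIFT;

	/* What amd_rp_pme_suspend() does to the cached field: */
	pme_support &= ~((PCI_PM_CAP_PME_D3hot | PCI_PM_CAP_PME_D3cold) >>
			 PCI_PM_CAP_PME_SHIFT);
	printf("pme_support after quirk: %#x\n", pme_support);	/* 0x7 */
	return 0;
}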
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 0337392a31214..3c61bb98c10e2 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -33,9 +33,12 @@ EXPORT_SYMBOL_GPL(hypercall_page);
+ * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
+ * but during boot it is switched to point to xen_vcpu_info.
+ * The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
++ * Make sure that xen_vcpu_info doesn't cross a page boundary by making it
++ * cache-line aligned (the struct is guaranteed to have a size of 64 bytes,
++ * which matches the cache line size of 64-bit x86 processors).
+ */
+ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
+-DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
++DEFINE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
+
+ /* Linux <-> Xen vCPU id mapping */
+ DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
+@@ -160,6 +163,7 @@ void xen_vcpu_setup(int cpu)
+ int err;
+ struct vcpu_info *vcpup;
+
++ BUILD_BUG_ON(sizeof(*vcpup) > SMP_CACHE_BYTES);
+ BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
+
+ /*
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index 408a2aa66c692..a87ab36889e76 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -21,7 +21,7 @@ extern void *xen_initial_gdt;
+ struct trap_info;
+ void xen_copy_trap_info(struct trap_info *traps);
+
+-DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
++DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
+ DECLARE_PER_CPU(unsigned long, xen_cr3);
+ DECLARE_PER_CPU(unsigned long, xen_current_cr3);
+
+diff --git a/block/bdev.c b/block/bdev.c
+index f3b13aa1b7d42..04dba25b0019e 100644
+--- a/block/bdev.c
++++ b/block/bdev.c
+@@ -425,6 +425,8 @@ void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
+
+ void bdev_add(struct block_device *bdev, dev_t dev)
+ {
++ if (bdev_stable_writes(bdev))
++ mapping_set_stable_writes(bdev->bd_inode->i_mapping);
+ bdev->bd_dev = dev;
+ bdev->bd_inode->i_rdev = dev;
+ bdev->bd_inode->i_ino = dev;
+diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
+index 624c03c8fe64e..fd482439afbc9 100644
+--- a/block/blk-cgroup.h
++++ b/block/blk-cgroup.h
+@@ -249,8 +249,6 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
+ {
+ struct blkcg_gq *blkg;
+
+- WARN_ON_ONCE(!rcu_read_lock_held());
+-
+ if (blkcg == &blkcg_root)
+ return q->root_blkg;
+
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 9d51e9894ece7..fdf25b8d6e784 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -501,8 +501,8 @@ static inline void bio_check_ro(struct bio *bio)
+ if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+ return;
+- pr_warn("Trying to write to read-only block-device %pg\n",
+- bio->bi_bdev);
++ pr_warn_ratelimited("Trying to write to read-only block-device %pg\n",
++ bio->bi_bdev);
+ /* Older lvm-tools actually trigger this */
+ }
+ }
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 1fafd54dce3cb..6ab7f360ff2ac 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2875,11 +2875,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
+ };
+ struct request *rq;
+
+- if (unlikely(bio_queue_enter(bio)))
+- return NULL;
+-
+ if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+- goto queue_exit;
++ return NULL;
+
+ rq_qos_throttle(q, bio);
+
+@@ -2895,35 +2892,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
+ rq_qos_cleanup(q, bio);
+ if (bio->bi_opf & REQ_NOWAIT)
+ bio_wouldblock_error(bio);
+-queue_exit:
+- blk_queue_exit(q);
+ return NULL;
+ }
+
+-static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+- struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
++/* return true if this @rq can be used for @bio */
++static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
++ struct bio *bio)
+ {
+- struct request *rq;
+- enum hctx_type type, hctx_type;
++ enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
++ enum hctx_type hctx_type = rq->mq_hctx->type;
+
+- if (!plug)
+- return NULL;
+- rq = rq_list_peek(&plug->cached_rq);
+- if (!rq || rq->q != q)
+- return NULL;
++ WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
+
+- if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
+- *bio = NULL;
+- return NULL;
+- }
+-
+- type = blk_mq_get_hctx_type((*bio)->bi_opf);
+- hctx_type = rq->mq_hctx->type;
+ if (type != hctx_type &&
+ !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
+- return NULL;
+- if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
+- return NULL;
++ return false;
++ if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
++ return false;
+
+ /*
+ * If any qos ->throttle() end up blocking, we will have flushed the
+@@ -2931,12 +2916,12 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+ * before we throttle.
+ */
+ plug->cached_rq = rq_list_next(rq);
+- rq_qos_throttle(q, *bio);
++ rq_qos_throttle(rq->q, bio);
+
+ blk_mq_rq_time_init(rq, 0);
+- rq->cmd_flags = (*bio)->bi_opf;
++ rq->cmd_flags = bio->bi_opf;
+ INIT_LIST_HEAD(&rq->queuelist);
+- return rq;
++ return true;
+ }
+
+ static void bio_set_ioprio(struct bio *bio)
+@@ -2966,7 +2951,7 @@ void blk_mq_submit_bio(struct bio *bio)
+ struct blk_plug *plug = blk_mq_plug(bio);
+ const int is_sync = op_is_sync(bio->bi_opf);
+ struct blk_mq_hw_ctx *hctx;
+- struct request *rq;
++ struct request *rq = NULL;
+ unsigned int nr_segs = 1;
+ blk_status_t ret;
+
+@@ -2977,20 +2962,36 @@ void blk_mq_submit_bio(struct bio *bio)
+ return;
+ }
+
+- if (!bio_integrity_prep(bio))
+- return;
+-
+ bio_set_ioprio(bio);
+
+- rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
+- if (!rq) {
+- if (!bio)
++ if (plug) {
++ rq = rq_list_peek(&plug->cached_rq);
++ if (rq && rq->q != q)
++ rq = NULL;
++ }
++ if (rq) {
++ if (!bio_integrity_prep(bio))
+ return;
+- rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+- if (unlikely(!rq))
++ if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+ return;
++ if (blk_mq_can_use_cached_rq(rq, plug, bio))
++ goto done;
++ percpu_ref_get(&q->q_usage_counter);
++ } else {
++ if (unlikely(bio_queue_enter(bio)))
++ return;
++ if (!bio_integrity_prep(bio))
++ goto fail;
++ }
++
++ rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
++ if (unlikely(!rq)) {
++fail:
++ blk_queue_exit(q);
++ return;
+ }
+
++done:
+ trace_block_getrq(bio);
+
+ rq_qos_track(q, rq, bio);
+diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
+index 1ef3b46d6f6e5..59ec726b7c770 100644
+--- a/crypto/asymmetric_keys/Kconfig
++++ b/crypto/asymmetric_keys/Kconfig
+@@ -76,7 +76,7 @@ config SIGNED_PE_FILE_VERIFICATION
+ signed PE binary.
+
+ config FIPS_SIGNATURE_SELFTEST
+- bool "Run FIPS selftests on the X.509+PKCS7 signature verification"
++ tristate "Run FIPS selftests on the X.509+PKCS7 signature verification"
+ help
+ This option causes some selftests to be run on the signature
+ verification code, using some built in data. This is required
+@@ -84,5 +84,6 @@ config FIPS_SIGNATURE_SELFTEST
+ depends on KEYS
+ depends on ASYMMETRIC_KEY_TYPE
+ depends on PKCS7_MESSAGE_PARSER=X509_CERTIFICATE_PARSER
++ depends on X509_CERTIFICATE_PARSER
+
+ endif # ASYMMETRIC_KEY_TYPE
+diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile
+index 0d1fa1b692c6b..1a273d6df3ebf 100644
+--- a/crypto/asymmetric_keys/Makefile
++++ b/crypto/asymmetric_keys/Makefile
+@@ -22,7 +22,8 @@ x509_key_parser-y := \
+ x509_cert_parser.o \
+ x509_loader.o \
+ x509_public_key.o
+-x509_key_parser-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += selftest.o
++obj-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += x509_selftest.o
++x509_selftest-y += selftest.o
+
+ $(obj)/x509_cert_parser.o: \
+ $(obj)/x509.asn1.h \
+diff --git a/crypto/asymmetric_keys/selftest.c b/crypto/asymmetric_keys/selftest.c
+index fa0bf7f242849..c50da7ef90ae9 100644
+--- a/crypto/asymmetric_keys/selftest.c
++++ b/crypto/asymmetric_keys/selftest.c
+@@ -4,10 +4,11 @@
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+-#include <linux/kernel.h>
++#include <crypto/pkcs7.h>
+ #include <linux/cred.h>
++#include <linux/kernel.h>
+ #include <linux/key.h>
+-#include <crypto/pkcs7.h>
++#include <linux/module.h>
+ #include "x509_parser.h"
+
+ struct certs_test {
+@@ -175,7 +176,7 @@ static const struct certs_test certs_tests[] __initconst = {
+ TEST(certs_selftest_1_data, certs_selftest_1_pkcs7),
+ };
+
+-int __init fips_signature_selftest(void)
++static int __init fips_signature_selftest(void)
+ {
+ struct key *keyring;
+ int ret, i;
+@@ -222,3 +223,9 @@ int __init fips_signature_selftest(void)
+ key_put(keyring);
+ return 0;
+ }
++
++late_initcall(fips_signature_selftest);
++
++MODULE_DESCRIPTION("X.509 self tests");
++MODULE_AUTHOR("Red Hat, Inc.");
++MODULE_LICENSE("GPL");
+diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
+index a299c9c56f409..97a886cbe01c3 100644
+--- a/crypto/asymmetric_keys/x509_parser.h
++++ b/crypto/asymmetric_keys/x509_parser.h
+@@ -40,15 +40,6 @@ struct x509_certificate {
+ bool blacklisted;
+ };
+
+-/*
+- * selftest.c
+- */
+-#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST
+-extern int __init fips_signature_selftest(void);
+-#else
+-static inline int fips_signature_selftest(void) { return 0; }
+-#endif
+-
+ /*
+ * x509_cert_parser.c
+ */
+diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
+index 7c71db3ac23d4..6a4f00be22fc1 100644
+--- a/crypto/asymmetric_keys/x509_public_key.c
++++ b/crypto/asymmetric_keys/x509_public_key.c
+@@ -262,15 +262,9 @@ static struct asymmetric_key_parser x509_key_parser = {
+ /*
+ * Module stuff
+ */
+-extern int __init certs_selftest(void);
+ static int __init x509_key_init(void)
+ {
+- int ret;
+-
+- ret = register_asymmetric_key_parser(&x509_key_parser);
+- if (ret < 0)
+- return ret;
+- return fips_signature_selftest();
++ return register_asymmetric_key_parser(&x509_key_parser);
+ }
+
+ static void __exit x509_key_exit(void)
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
+index 8c1d0ca412137..d0d954fe9d54f 100644
+--- a/crypto/pcrypt.c
++++ b/crypto/pcrypt.c
+@@ -117,6 +117,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
+ err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
+ if (!err)
+ return -EINPROGRESS;
++ if (err == -EBUSY)
++ return -EAGAIN;
+
+ return err;
+ }
+@@ -164,6 +166,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
+ err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
+ if (!err)
+ return -EINPROGRESS;
++ if (err == -EBUSY)
++ return -EAGAIN;
+
+ return err;
+ }
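The pcrypt change is an error-code translation: in the crypto API, -EBUSY from an async operation means 'request queued, backlog engaged', while padata_do_parallel() returns -EBUSY to mean 'parallel queues full, try again'. Forwarding it unchanged could leave a backlog-aware caller sleeping on a completion that will never fire, so it is remapped to -EAGAIN. The idiom, sketched with made-up names:

#include <errno.h>
#include <stdio.h>

/* Stand-in backend: 0 on success, -EBUSY when its queue is full. */
static int backend_enqueue(int slot_free)
{
	return slot_free ? 0 : -EBUSY;
}

static int frontend_submit(int slot_free)
{
	int err = backend_enqueue(slot_free);

	if (!err)
		return -EINPROGRESS;	/* accepted: completion will fire */
	if (err == -EBUSY)
		return -EAGAIN;		/* backend full: caller must retry */
	return err;
}

int main(void)
{
	printf("%d %d\n", frontend_submit(1), frontend_submit(0));
	return 0;
}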
+diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
+index 20c4583f12b0d..31c74ca70a2e5 100644
+--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
++++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
+@@ -8149,11 +8149,11 @@ static int gaudi2_psoc_razwi_get_engines(struct gaudi2_razwi_info *razwi_info, u
+ eng_id[num_of_eng] = razwi_info[i].eng_id;
+ base[num_of_eng] = razwi_info[i].rtr_ctrl;
+ if (!num_of_eng)
+- str_size += snprintf(eng_name + str_size,
++ str_size += scnprintf(eng_name + str_size,
+ PSOC_RAZWI_ENG_STR_SIZE - str_size, "%s",
+ razwi_info[i].eng_name);
+ else
+- str_size += snprintf(eng_name + str_size,
++ str_size += scnprintf(eng_name + str_size,
+ PSOC_RAZWI_ENG_STR_SIZE - str_size, " or %s",
+ razwi_info[i].eng_name);
+ num_of_eng++;
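The gaudi2 switch from snprintf() to scnprintf() matters because snprintf() returns the length the output would have had without truncation; once eng_name fills up, str_size can exceed PSOC_RAZWI_ENG_STR_SIZE, and the next iteration computes an out-of-bounds pointer plus a size that wraps to a huge value. scnprintf() returns the number of characters actually stored, so str_size stays bounded. A userspace demonstration of the pitfall (the buffer size and strings are made up; scnprintf itself is kernel-internal):

#include <stdio.h>

int main(void)
{
	char buf[8];
	int used = 0;

	/* snprintf returns the untruncated length: 12 here, not 7. */
	used += snprintf(buf + used, sizeof(buf) - used, "%s", "first-engine");

	/*
	 * used == 12 > sizeof(buf): a second append would pass buf + 12
	 * and sizeof(buf) - used, which wraps to an enormous size_t.
	 * The kernel's scnprintf() would have returned 7 instead.
	 */
	printf("used=%d buf=\"%s\"\n", used, buf);
	return 0;
}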
+diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
+index 18be8b98e9a8b..b8010c07eec17 100644
+--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
++++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
+@@ -536,6 +536,16 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
+ return ret;
+ }
+
++static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
++{
++ ivpu_boot_dpu_active_drive(vdev, false);
++ ivpu_boot_pwr_island_isolation_drive(vdev, true);
++ ivpu_boot_pwr_island_trickle_drive(vdev, false);
++ ivpu_boot_pwr_island_drive(vdev, false);
++
++ return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
++}
++
+ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
+ {
+ u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
+@@ -625,30 +635,26 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
+ ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
+ ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G);
+
++ ivpu_hw_read_platform(vdev);
++ ivpu_hw_wa_init(vdev);
++ ivpu_hw_timeouts_init(vdev);
++
+ return 0;
+ }
+
+ static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
+ {
+- int ret;
+- u32 val;
+-
+- if (IVPU_WA(punit_disabled))
+- return 0;
++ int ret = 0;
+
+- ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+- if (ret) {
+- ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
+- return ret;
++ if (ivpu_boot_pwr_domain_disable(vdev)) {
++ ivpu_err(vdev, "Failed to disable power domain\n");
++ ret = -EIO;
+ }
+
+- val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
+- val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
+- REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
+-
+- ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+- if (ret)
+- ivpu_err(vdev, "Timed out waiting for RESET completion\n");
++ if (ivpu_pll_disable(vdev)) {
++ ivpu_err(vdev, "Failed to disable PLL\n");
++ ret = -EIO;
++ }
+
+ return ret;
+ }
+@@ -681,14 +687,6 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
+ {
+ int ret;
+
+- ivpu_hw_read_platform(vdev);
+- ivpu_hw_wa_init(vdev);
+- ivpu_hw_timeouts_init(vdev);
+-
+- ret = ivpu_hw_37xx_reset(vdev);
+- if (ret)
+- ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
+-
+ ret = ivpu_hw_37xx_d0i3_disable(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
+@@ -756,11 +754,11 @@ static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
+ {
+ int ret = 0;
+
+- if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev))
+- ivpu_err(vdev, "Failed to reset the VPU\n");
++ if (!ivpu_hw_37xx_is_idle(vdev))
++ ivpu_warn(vdev, "VPU not idle during power down\n");
+
+- if (ivpu_pll_disable(vdev)) {
+- ivpu_err(vdev, "Failed to disable PLL\n");
++ if (ivpu_hw_37xx_reset(vdev)) {
++ ivpu_err(vdev, "Failed to reset VPU\n");
+ ret = -EIO;
+ }
+
+diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
+index 85171a408363f..7c3ff25232a2c 100644
+--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
++++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
+@@ -728,6 +728,10 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
+ ivpu_hw_init_range(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
+ ivpu_hw_init_range(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
+
++ ivpu_hw_read_platform(vdev);
++ ivpu_hw_wa_init(vdev);
++ ivpu_hw_timeouts_init(vdev);
++
+ return 0;
+ }
+
+@@ -819,10 +823,6 @@ static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev)
+ return ret;
+ }
+
+- ivpu_hw_read_platform(vdev);
+- ivpu_hw_wa_init(vdev);
+- ivpu_hw_timeouts_init(vdev);
+-
+ ret = ivpu_hw_40xx_d0i3_disable(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
+diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c
+index a2056c4c8cb70..271092f2700a1 100644
+--- a/drivers/acpi/acpi_fpdt.c
++++ b/drivers/acpi/acpi_fpdt.c
+@@ -194,12 +194,19 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ record_header = (void *)subtable_header + offset;
+ offset += record_header->length;
+
++ if (!record_header->length) {
++			pr_err(FW_BUG "Zero-length record found in FPDT.\n");
++ result = -EINVAL;
++ goto err;
++ }
++
+ switch (record_header->type) {
+ case RECORD_S3_RESUME:
+ if (subtable_type != SUBTABLE_S3PT) {
+ pr_err(FW_BUG "Invalid record %d for subtable %s\n",
+ record_header->type, signature);
+- return -EINVAL;
++ result = -EINVAL;
++ goto err;
+ }
+ if (record_resume) {
+ pr_err("Duplicate resume performance record found.\n");
+@@ -208,7 +215,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ record_resume = (struct resume_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &resume_attr_group);
+ if (result)
+- return result;
++ goto err;
+ break;
+ case RECORD_S3_SUSPEND:
+ if (subtable_type != SUBTABLE_S3PT) {
+@@ -223,13 +230,14 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ record_suspend = (struct suspend_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &suspend_attr_group);
+ if (result)
+- return result;
++ goto err;
+ break;
+ case RECORD_BOOT:
+ if (subtable_type != SUBTABLE_FBPT) {
+ pr_err(FW_BUG "Invalid %d for subtable %s\n",
+ record_header->type, signature);
+- return -EINVAL;
++ result = -EINVAL;
++ goto err;
+ }
+ if (record_boot) {
+ pr_err("Duplicate boot performance record found.\n");
+@@ -238,7 +246,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ record_boot = (struct boot_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &boot_attr_group);
+ if (result)
+- return result;
++ goto err;
+ break;
+
+ default:
+@@ -247,6 +255,18 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ }
+ }
+ return 0;
++
++err:
++ if (record_boot)
++ sysfs_remove_group(fpdt_kobj, &boot_attr_group);
++
++ if (record_suspend)
++ sysfs_remove_group(fpdt_kobj, &suspend_attr_group);
++
++ if (record_resume)
++ sysfs_remove_group(fpdt_kobj, &resume_attr_group);
++
++ return result;
+ }
+
+ static int __init acpi_init_fpdt(void)
+@@ -255,6 +275,7 @@ static int __init acpi_init_fpdt(void)
+ struct acpi_table_header *header;
+ struct fpdt_subtable_entry *subtable;
+ u32 offset = sizeof(*header);
++ int result;
+
+ status = acpi_get_table(ACPI_SIG_FPDT, 0, &header);
+
+@@ -263,8 +284,8 @@ static int __init acpi_init_fpdt(void)
+
+ fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
+ if (!fpdt_kobj) {
+- acpi_put_table(header);
+- return -ENOMEM;
++ result = -ENOMEM;
++ goto err_nomem;
+ }
+
+ while (offset < header->length) {
+@@ -272,8 +293,10 @@ static int __init acpi_init_fpdt(void)
+ switch (subtable->type) {
+ case SUBTABLE_FBPT:
+ case SUBTABLE_S3PT:
+- fpdt_process_subtable(subtable->address,
++ result = fpdt_process_subtable(subtable->address,
+ subtable->type);
++ if (result)
++ goto err_subtable;
+ break;
+ default:
+ /* Other types are reserved in ACPI 6.4 spec. */
+@@ -282,6 +305,12 @@ static int __init acpi_init_fpdt(void)
+ offset += sizeof(*subtable);
+ }
+ return 0;
++err_subtable:
++ kobject_put(fpdt_kobj);
++
++err_nomem:
++ acpi_put_table(header);
++ return result;
+ }
+
+ fs_initcall(acpi_init_fpdt);
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index b411948594ff8..35f071ad95324 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -253,8 +253,7 @@ static const struct backlight_ops acpi_backlight_ops = {
+ static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
+ unsigned long *state)
+ {
+- struct acpi_device *device = cooling_dev->devdata;
+- struct acpi_video_device *video = acpi_driver_data(device);
++ struct acpi_video_device *video = cooling_dev->devdata;
+
+ *state = video->brightness->count - ACPI_VIDEO_FIRST_LEVEL - 1;
+ return 0;
+@@ -263,8 +262,7 @@ static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
+ static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
+ unsigned long *state)
+ {
+- struct acpi_device *device = cooling_dev->devdata;
+- struct acpi_video_device *video = acpi_driver_data(device);
++ struct acpi_video_device *video = cooling_dev->devdata;
+ unsigned long long level;
+ int offset;
+
+@@ -283,8 +281,7 @@ static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
+ static int
+ video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long state)
+ {
+- struct acpi_device *device = cooling_dev->devdata;
+- struct acpi_video_device *video = acpi_driver_data(device);
++ struct acpi_video_device *video = cooling_dev->devdata;
+ int level;
+
+ if (state >= video->brightness->count - ACPI_VIDEO_FIRST_LEVEL)
+@@ -1125,7 +1122,6 @@ static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg)
+
+ strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
+- device->driver_data = data;
+
+ data->device_id = device_id;
+ data->video = video;
+@@ -1747,8 +1743,8 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
+ device->backlight->props.brightness =
+ acpi_video_get_brightness(device->backlight);
+
+- device->cooling_dev = thermal_cooling_device_register("LCD",
+- device->dev, &video_cooling_ops);
++ device->cooling_dev = thermal_cooling_device_register("LCD", device,
++ &video_cooling_ops);
+ if (IS_ERR(device->cooling_dev)) {
+ /*
+ * Set cooling_dev to NULL so we don't crash trying to free it.
+@@ -2031,7 +2027,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
+ * HP ZBook Fury 16 G10 requires ACPI video's child devices have _PS0
+ * evaluated to have functional panel brightness control.
+ */
+- acpi_device_fix_up_power_extended(device);
++ acpi_device_fix_up_power_children(device);
+
+ pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
+ ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index ef59d6ea16da0..63ad0541db381 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -209,6 +209,20 @@ err_pool_alloc:
+ return -ENOMEM;
+ }
+
++/**
++ * ghes_estatus_pool_region_free - free previously allocated memory
++ * from the ghes_estatus_pool.
++ * @addr: address of memory to free.
++ * @size: size of memory to free.
++ *
++ * Return: none.
++ */
++void ghes_estatus_pool_region_free(unsigned long addr, u32 size)
++{
++ gen_pool_free(ghes_estatus_pool, addr, size);
++}
++EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free);
++
+ static int map_gen_v2(struct ghes *ghes)
+ {
+ return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
+@@ -564,6 +578,7 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
+ pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+ unsigned int devfn;
+ int aer_severity;
++ u8 *aer_info;
+
+ devfn = PCI_DEVFN(pcie_err->device_id.device,
+ pcie_err->device_id.function);
+@@ -577,11 +592,17 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
+ if (gdata->flags & CPER_SEC_RESET)
+ aer_severity = AER_FATAL;
+
++ aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
++ sizeof(struct aer_capability_regs));
++ if (!aer_info)
++ return;
++ memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));
++
+ aer_recover_queue(pcie_err->device_id.segment,
+ pcie_err->device_id.bus,
+ devfn, aer_severity,
+ (struct aer_capability_regs *)
+- pcie_err->aer_info);
++ aer_info);
+ }
+ #endif
+ }
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index f007116a84276..3b4d048c49417 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -397,6 +397,19 @@ void acpi_device_fix_up_power_extended(struct acpi_device *adev)
+ }
+ EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended);
+
++/**
++ * acpi_device_fix_up_power_children - Force a device's children into D0.
++ * @adev: Parent device object whose children's power state is to be fixed up.
++ *
++ * Call acpi_device_fix_up_power() for @adev's children so long as they
++ * are reported as present and enabled.
++ */
++void acpi_device_fix_up_power_children(struct acpi_device *adev)
++{
++ acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL);
++}
++EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_children);
++
+ int acpi_device_update_power(struct acpi_device *device, int *state_p)
+ {
+ int state;
+diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
+index b9bbf07461992..a34d8578b3da6 100644
+--- a/drivers/acpi/device_sysfs.c
++++ b/drivers/acpi/device_sysfs.c
+@@ -158,8 +158,8 @@ static int create_pnp_modalias(const struct acpi_device *acpi_dev, char *modalia
+ return 0;
+
+ len = snprintf(modalias, size, "acpi:");
+- if (len <= 0)
+- return len;
++ if (len >= size)
++ return -ENOMEM;
+
+ size -= len;
+
+@@ -212,8 +212,10 @@ static int create_of_modalias(const struct acpi_device *acpi_dev, char *modalias
+ len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
+ ACPI_FREE(buf.pointer);
+
+- if (len <= 0)
+- return len;
++ if (len >= size)
++ return -ENOMEM;
++
++ size -= len;
+
+ of_compatible = acpi_dev->data.of_compatible;
+ if (of_compatible->type == ACPI_TYPE_PACKAGE) {
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index c95d0edb0be9e..a59c11df73754 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1924,6 +1924,16 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
+ },
+ },
++ {
++ /*
++ * HP 250 G7 Notebook PC
++ */
++ .callback = ec_honor_dsdt_gpe,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G7 Notebook PC"),
++ },
++ },
+ {
+ /*
+ * Samsung hardware
+diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
+index 1f4fc5f8a819d..12f330b0eac01 100644
+--- a/drivers/acpi/numa/srat.c
++++ b/drivers/acpi/numa/srat.c
+@@ -310,11 +310,16 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
+ start = cfmws->base_hpa;
+ end = cfmws->base_hpa + cfmws->window_size;
+
+- /* Skip if the SRAT already described the NUMA details for this HPA */
+- node = phys_to_target_node(start);
+- if (node != NUMA_NO_NODE)
++ /*
++ * The SRAT may have already described NUMA details for all,
++ * or a portion of, this CFMWS HPA range. Extend the memblks
++ * found for any portion of the window to cover the entire
++ * window.
++ */
++ if (!numa_fill_memblks(start, end))
+ return 0;
+
++ /* No SRAT description. Create a new node. */
+ node = acpi_map_pxm_to_node(*fake_pxm);
+
+ if (node == NUMA_NO_NODE) {
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 3a34a8c425fe4..55437f5e0c3ae 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -592,7 +592,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
+ while (1) {
+
+ if (cx->entry_method == ACPI_CSTATE_HALT)
+- safe_halt();
++ raw_safe_halt();
+ else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
+ io_idle(cx->address);
+ } else
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 413e4fcadcaf7..99b4e33554355 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -1102,25 +1102,26 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
+ switch (proptype) {
+ case DEV_PROP_STRING:
+ break;
+- case DEV_PROP_U8 ... DEV_PROP_U64:
++ default:
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ if (nval > obj->buffer.length)
+ return -EOVERFLOW;
+- break;
++ } else {
++ if (nval > obj->package.count)
++ return -EOVERFLOW;
+ }
+- fallthrough;
+- default:
+- if (nval > obj->package.count)
+- return -EOVERFLOW;
+ break;
+ }
+ if (nval == 0)
+ return -EINVAL;
+
+- if (obj->type != ACPI_TYPE_BUFFER)
+- items = obj->package.elements;
+- else
++ if (obj->type == ACPI_TYPE_BUFFER) {
++ if (proptype != DEV_PROP_U8)
++ return -EPROTO;
+ items = obj;
++ } else {
++ items = obj->package.elements;
++ }
+
+ switch (proptype) {
+ case DEV_PROP_U8:
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 297a88587031e..d09e3e7bcb585 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -446,6 +446,13 @@ static const struct dmi_system_id asus_laptop[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
+ },
+ },
++ {
++ /* Asus ExpertBook B1402CVA */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
++ },
++ },
+ {
+ .ident = "Asus ExpertBook B1502CBA",
+ .matches = {
+@@ -495,6 +502,18 @@ static const struct dmi_system_id maingear_laptop[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
+ }
+ },
++ {
++ /* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
++ },
++ },
++ {
++ /* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
++ },
++ },
+ {
+ .ident = "MAINGEAR Vector Pro 2 17",
+ .matches = {
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 691d4b7686ee7..1d249d0f61ae4 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -1568,17 +1568,22 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
+ int err;
+ const struct iommu_ops *ops;
+
++ /* Serialise to make dev->iommu stable under our potential fwspec */
++ mutex_lock(&iommu_probe_device_lock);
+ /*
+ * If we already translated the fwspec there is nothing left to do,
+ * return the iommu_ops.
+ */
+ ops = acpi_iommu_fwspec_ops(dev);
+- if (ops)
++ if (ops) {
++ mutex_unlock(&iommu_probe_device_lock);
+ return ops;
++ }
+
+ err = iort_iommu_configure_id(dev, id_in);
+ if (err && err != -EPROBE_DEFER)
+ err = viot_iommu_configure(dev);
++ mutex_unlock(&iommu_probe_device_lock);
+
+ /*
+ * If we have reason to believe the IOMMU driver missed the initial
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 442396f6ed1f9..31205fee59d4a 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -130,6 +130,16 @@ static int video_detect_force_native(const struct dmi_system_id *d)
+ return 0;
+ }
+
++static int video_detect_portege_r100(const struct dmi_system_id *d)
++{
++ struct pci_dev *dev;
++ /* Search for Trident CyberBlade XP4m32 to confirm Portégé R100 */
++ dev = pci_get_device(PCI_VENDOR_ID_TRIDENT, 0x2100, NULL);
++ if (dev)
++ acpi_backlight_dmi = acpi_backlight_vendor;
++ return 0;
++}
++
+ static const struct dmi_system_id video_detect_dmi_table[] = {
+ /*
+ * Models which should use the vendor backlight interface,
+@@ -270,6 +280,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ },
+ },
+
++ /*
++ * Both acpi_video and the toshiba_acpi vendor driver work on the
++ * Toshiba Portégé R100, but neither gets activated because the machine
++ * has a VGA chip with no kernel driver (Trident CyberBlade XP4m32).
++ * The DMI strings are generic, so check for the VGA chip in the callback.
++ */
++ {
++ .callback = video_detect_portege_r100,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"),
++ DMI_MATCH(DMI_BOARD_NAME, "Portable PC")
++ },
++ },
++
+ /*
+ * Models which need acpi_video backlight control where the GPU drivers
+ * do not call acpi_video_register_backlight() because no internal panel
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 3a957c4da4092..4209fb39f6442 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1055,9 +1055,14 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+ * Ask the sd driver to issue START STOP UNIT on runtime suspend
+ * and resume and shutdown only. For system level suspend/resume,
+ * devices power state is handled directly by libata EH.
++ * Given that disks are always spun up on system resume, also
++ * make sure that the sd driver forces runtime suspended disks
++ * to be resumed to correctly reflect the power state of the
++ * device.
+ */
+- sdev->manage_runtime_start_stop = true;
+- sdev->manage_shutdown = true;
++ sdev->manage_runtime_start_stop = 1;
++ sdev->manage_shutdown = 1;
++ sdev->force_runtime_start_on_system_start = 1;
+ }
+
+ /*
+diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
+index 25a63d043c8e1..0f77e04240661 100644
+--- a/drivers/ata/pata_isapnp.c
++++ b/drivers/ata/pata_isapnp.c
+@@ -82,6 +82,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
+ if (pnp_port_valid(idev, 1)) {
+ ctl_addr = devm_ioport_map(&idev->dev,
+ pnp_port_start(idev, 1), 1);
++ if (!ctl_addr)
++ return -ENOMEM;
++
+ ap->ioaddr.altstatus_addr = ctl_addr;
+ ap->ioaddr.ctl_addr = ctl_addr;
+ ap->ops = &isapnp_port_ops;
+diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
+index 3241486869530..9bba8f280a4d4 100644
+--- a/drivers/atm/iphase.c
++++ b/drivers/atm/iphase.c
+@@ -2291,19 +2291,21 @@ static int get_esi(struct atm_dev *dev)
+ static int reset_sar(struct atm_dev *dev)
+ {
+ IADEV *iadev;
+- int i, error = 1;
++ int i, error;
+ unsigned int pci[64];
+
+ iadev = INPH_IA_DEV(dev);
+- for(i=0; i<64; i++)
+- if ((error = pci_read_config_dword(iadev->pci,
+- i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
+- return error;
++ for (i = 0; i < 64; i++) {
++ error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
++ if (error != PCIBIOS_SUCCESSFUL)
++ return error;
++ }
+ writel(0, iadev->reg+IPHASE5575_EXT_RESET);
+- for(i=0; i<64; i++)
+- if ((error = pci_write_config_dword(iadev->pci,
+- i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
+- return error;
++ for (i = 0; i < 64; i++) {
++ error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
++ if (error != PCIBIOS_SUCCESSFUL)
++ return error;
++ }
+ udelay(5);
+ return 0;
+ }
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index a528cec24264a..0c3725c3eefa4 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -1274,8 +1274,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
+ if (dev->bus && dev->bus->dma_cleanup)
+ dev->bus->dma_cleanup(dev);
+
+- device_links_driver_cleanup(dev);
+ device_unbind_cleanup(dev);
++ device_links_driver_cleanup(dev);
+
+ klist_remove(&dev->p->knode_driver);
+ device_pm_check_callbacks(dev);
+diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
+index c5d151e9c4815..92592f944a3df 100644
+--- a/drivers/base/regmap/regcache.c
++++ b/drivers/base/regmap/regcache.c
+@@ -334,6 +334,11 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
+ return 0;
+ }
+
++static int rbtree_all(const void *key, const struct rb_node *node)
++{
++ return 0;
++}
++
+ /**
+ * regcache_sync - Sync the register cache with the hardware.
+ *
+@@ -351,6 +356,7 @@ int regcache_sync(struct regmap *map)
+ unsigned int i;
+ const char *name;
+ bool bypass;
++ struct rb_node *node;
+
+ if (WARN_ON(map->cache_type == REGCACHE_NONE))
+ return -EINVAL;
+@@ -392,6 +398,30 @@ out:
+ /* Restore the bypass state */
+ map->cache_bypass = bypass;
+ map->no_sync_defaults = false;
++
++ /*
++	 * If we did any paging with the cache bypassed and a cached
++	 * paging register, the register and cache state might have
++	 * gone out of sync; force writes of all the paging
++	 * registers.
++ */
++ rb_for_each(node, 0, &map->range_tree, rbtree_all) {
++ struct regmap_range_node *this =
++ rb_entry(node, struct regmap_range_node, node);
++
++ /* If there's nothing in the cache there's nothing to sync */
++ ret = regcache_read(map, this->selector_reg, &i);
++ if (ret != 0)
++ continue;
++
++ ret = _regmap_write(map, this->selector_reg, i);
++ if (ret != 0) {
++ dev_err(map->dev, "Failed to write %x = %x: %d\n",
++ this->selector_reg, i, ret);
++ break;
++ }
++ }
++
+ map->unlock(map->lock_arg);
+
+ regmap_async_complete(map);
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index f36027591e1a8..bdd80b73c3e6c 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -48,7 +48,7 @@ static ssize_t regmap_name_read_file(struct file *file,
+ name = map->dev->driver->name;
+
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
+- if (ret < 0) {
++ if (ret >= PAGE_SIZE) {
+ kfree(buf);
+ return ret;
+ }
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 234a84ecde8b1..ea61577471994 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1620,17 +1620,19 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+ }
+
+ if (!map->cache_bypass && map->format.parse_val) {
+- unsigned int ival;
++ unsigned int ival, offset;
+ int val_bytes = map->format.val_bytes;
+- for (i = 0; i < val_len / val_bytes; i++) {
+- ival = map->format.parse_val(val + (i * val_bytes));
+- ret = regcache_write(map,
+- reg + regmap_get_offset(map, i),
+- ival);
++
++ /* Cache the last written value for noinc writes */
++ i = noinc ? val_len - val_bytes : 0;
++ for (; i < val_len; i += val_bytes) {
++ ival = map->format.parse_val(val + i);
++ offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
++ ret = regcache_write(map, reg + offset, ival);
+ if (ret) {
+ dev_err(map->dev,
+ "Error in caching of register: %x ret: %d\n",
+- reg + regmap_get_offset(map, i), ret);
++ reg + offset, ret);
+ return ret;
+ }
+ }
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 800f131222fc8..855fdf5c3b4ea 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -250,7 +250,6 @@ static void nbd_dev_remove(struct nbd_device *nbd)
+ struct gendisk *disk = nbd->disk;
+
+ del_gendisk(disk);
+- put_disk(disk);
+ blk_mq_free_tag_set(&nbd->tag_set);
+
+ /*
+@@ -261,7 +260,7 @@ static void nbd_dev_remove(struct nbd_device *nbd)
+ idr_remove(&nbd_index_idr, nbd->index);
+ mutex_unlock(&nbd_index_mutex);
+ destroy_workqueue(nbd->recv_workq);
+- kfree(nbd);
++ put_disk(disk);
+ }
+
+ static void nbd_dev_remove_work(struct work_struct *work)
+@@ -1608,6 +1607,13 @@ static void nbd_release(struct gendisk *disk)
+ nbd_put(nbd);
+ }
+
++static void nbd_free_disk(struct gendisk *disk)
++{
++ struct nbd_device *nbd = disk->private_data;
++
++ kfree(nbd);
++}
++
+ static const struct block_device_operations nbd_fops =
+ {
+ .owner = THIS_MODULE,
+@@ -1615,6 +1621,7 @@ static const struct block_device_operations nbd_fops =
+ .release = nbd_release,
+ .ioctl = nbd_ioctl,
+ .compat_ioctl = nbd_ioctl,
++ .free_disk = nbd_free_disk,
+ };
+
+ #if IS_ENABLED(CONFIG_DEBUG_FS)
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 1fe011676d070..4a4b9bad551e8 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -1313,6 +1313,7 @@ static int virtblk_probe(struct virtio_device *vdev)
+ u16 min_io_size;
+ u8 physical_block_exp, alignment_offset;
+ unsigned int queue_depth;
++ size_t max_dma_size;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+@@ -1411,7 +1412,8 @@ static int virtblk_probe(struct virtio_device *vdev)
+ /* No real sector limit. */
+ blk_queue_max_hw_sectors(q, UINT_MAX);
+
+- max_size = virtio_max_dma_size(vdev);
++ max_dma_size = virtio_max_dma_size(vdev);
++ max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;
+
+ /* Host can optionally specify maximum segment size and number of
+ * segments. */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 499f4809fcdf3..66080fae072f2 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -543,6 +543,10 @@ static const struct usb_device_id quirks_table[] = {
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
++ { USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
++ BTUSB_WIDEBAND_SPEECH },
++ { USB_DEVICE(0x13d3, 0x3570), .driver_info = BTUSB_REALTEK |
++ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
+@@ -2818,6 +2822,9 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
+ goto err_free_wc;
+ }
+
++ if (data->evt_skb == NULL)
++ goto err_free_wc;
++
+ /* Parse and handle the return WMT event */
+ wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
+ if (wmt_evt->whdr.op != hdr->op) {
+diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
+index 19ad0e7886462..a617578356953 100644
+--- a/drivers/bluetooth/hci_bcm4377.c
++++ b/drivers/bluetooth/hci_bcm4377.c
+@@ -512,6 +512,7 @@ struct bcm4377_hw {
+ unsigned long disable_aspm : 1;
+ unsigned long broken_ext_scan : 1;
+ unsigned long broken_mws_transport_config : 1;
++ unsigned long broken_le_coded : 1;
+
+ int (*send_calibration)(struct bcm4377_data *bcm4377);
+ int (*send_ptb)(struct bcm4377_data *bcm4377,
+@@ -2372,6 +2373,8 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
+ if (bcm4377->hw->broken_ext_scan)
+ set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
++ if (bcm4377->hw->broken_le_coded)
++ set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+
+ pci_set_drvdata(pdev, bcm4377);
+ hci_set_drvdata(hdev, bcm4377);
+@@ -2461,6 +2464,7 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
+ .bar0_core2_window2 = 0x18107000,
+ .has_bar0_core2_window2 = true,
+ .broken_mws_transport_config = true,
++ .broken_le_coded = true,
+ .send_calibration = bcm4378_send_calibration,
+ .send_ptb = bcm4378_send_ptb,
+ },
+@@ -2474,6 +2478,7 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
+ .has_bar0_core2_window2 = true,
+ .clear_pciecfg_subsystem_ctrl_bit19 = true,
+ .broken_mws_transport_config = true,
++ .broken_le_coded = true,
+ .send_calibration = bcm4387_send_calibration,
+ .send_ptb = bcm4378_send_ptb,
+ },
+diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
+index c6f181702b9a7..edbc4d3381177 100644
+--- a/drivers/char/agp/parisc-agp.c
++++ b/drivers/char/agp/parisc-agp.c
+@@ -38,7 +38,7 @@ static struct _parisc_agp_info {
+
+ int lba_cap_offset;
+
+- u64 *gatt;
++ __le64 *gatt;
+ u64 gatt_entries;
+
+ u64 gart_base;
+@@ -104,7 +104,7 @@ parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
+ int i;
+
+ for (i = 0; i < info->gatt_entries; i++) {
+- info->gatt[i] = (unsigned long)agp_bridge->scratch_page;
++ info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
+ }
+
+ return 0;
+@@ -158,9 +158,9 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+ for (k = 0;
+ k < info->io_pages_per_kpage;
+ k++, j++, paddr += info->io_page_size) {
+- info->gatt[j] =
++ info->gatt[j] = cpu_to_le64(
+ parisc_agp_mask_memory(agp_bridge,
+- paddr, type);
++ paddr, type));
+ asm_io_fdc(&info->gatt[j]);
+ }
+ }
+@@ -184,7 +184,7 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+ io_pg_start = info->io_pages_per_kpage * pg_start;
+ io_pg_count = info->io_pages_per_kpage * mem->page_count;
+ for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
+- info->gatt[i] = agp_bridge->scratch_page;
++ info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
+ }
+
+ agp_bridge->driver->tlb_flush(mem);
+@@ -204,7 +204,8 @@ parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+ pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */
+ pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
+
+- return cpu_to_le64(pa);
++ /* return native (big-endian) PDIR entry */
++ return pa;
+ }
+
+ static void
+@@ -251,7 +252,8 @@ static int __init
+ agp_ioc_init(void __iomem *ioc_regs)
+ {
+ struct _parisc_agp_info *info = &parisc_agp_info;
+- u64 iova_base, *io_pdir, io_tlb_ps;
++ u64 iova_base, io_tlb_ps;
++ __le64 *io_pdir;
+ int io_tlb_shift;
+
+ printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");
+diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
+index e19b0f9f48b97..4c08efe7f3753 100644
+--- a/drivers/char/hw_random/bcm2835-rng.c
++++ b/drivers/char/hw_random/bcm2835-rng.c
+@@ -70,7 +70,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
+ while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
+ if (!wait)
+ return 0;
+- hwrng_msleep(rng, 1000);
++ hwrng_yield(rng);
+ }
+
+ num_words = rng_readl(priv, RNG_STATUS) >> 24;
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index e3598ec9cfca8..420f155d251fb 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -678,6 +678,12 @@ long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
+ }
+ EXPORT_SYMBOL_GPL(hwrng_msleep);
+
++long hwrng_yield(struct hwrng *rng)
++{
++ return wait_for_completion_interruptible_timeout(&rng->dying, 1);
++}
++EXPORT_SYMBOL_GPL(hwrng_yield);
++
+ static int __init hwrng_modinit(void)
+ {
+ int ret;
+diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
+index 12fbe80918319..159baf00a8675 100644
+--- a/drivers/char/hw_random/geode-rng.c
++++ b/drivers/char/hw_random/geode-rng.c
+@@ -58,7 +58,8 @@ struct amd_geode_priv {
+
+ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+ {
+- void __iomem *mem = (void __iomem *)rng->priv;
++ struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
++ void __iomem *mem = priv->membase;
+
+ *data = readl(mem + GEODE_RNG_DATA_REG);
+
+@@ -67,7 +68,8 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+
+ static int geode_rng_data_present(struct hwrng *rng, int wait)
+ {
+- void __iomem *mem = (void __iomem *)rng->priv;
++ struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
++ void __iomem *mem = priv->membase;
+ int data, i;
+
+ for (i = 0; i < 20; i++) {
+diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
+index e319cfa51a8a3..030186def9c69 100644
+--- a/drivers/clk/clk-npcm7xx.c
++++ b/drivers/clk/clk-npcm7xx.c
+@@ -510,7 +510,7 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
+ return;
+
+ npcm7xx_init_fail:
+- kfree(npcm7xx_clk_data->hws);
++ kfree(npcm7xx_clk_data);
+ npcm7xx_init_np_err:
+ iounmap(clk_base);
+ npcm7xx_init_error:
+diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
+index 2c7a830ce3080..fdec715c9ba9b 100644
+--- a/drivers/clk/clk-scmi.c
++++ b/drivers/clk/clk-scmi.c
+@@ -213,6 +213,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
+ sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
+ if (!sclk->info) {
+ dev_dbg(dev, "invalid clock info for idx %d\n", idx);
++ devm_kfree(dev, sclk);
+ continue;
+ }
+
+diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
+index f6b82e0b9703a..db3bca5f4ec9c 100644
+--- a/drivers/clk/imx/Kconfig
++++ b/drivers/clk/imx/Kconfig
+@@ -96,6 +96,7 @@ config CLK_IMX8QXP
+ depends on (ARCH_MXC && ARM64) || COMPILE_TEST
+ depends on IMX_SCU && HAVE_ARM_SMCCC
+ select MXC_CLK_SCU
++ select MXC_CLK
+ help
+ Build the driver for IMX8QXP SCU based clocks.
+
+diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c
+index 1e82f72b75c67..1c95ae905eec8 100644
+--- a/drivers/clk/imx/clk-imx8-acm.c
++++ b/drivers/clk/imx/clk-imx8-acm.c
+@@ -279,8 +279,10 @@ static int clk_imx_acm_attach_pm_domains(struct device *dev,
+
+ for (i = 0; i < dev_pm->num_domains; i++) {
+ dev_pm->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
+- if (IS_ERR(dev_pm->pd_dev[i]))
+- return PTR_ERR(dev_pm->pd_dev[i]);
++ if (IS_ERR(dev_pm->pd_dev[i])) {
++ ret = PTR_ERR(dev_pm->pd_dev[i]);
++ goto detach_pm;
++ }
+
+ dev_pm->pd_dev_link[i] = device_link_add(dev,
+ dev_pm->pd_dev[i],
+@@ -371,7 +373,7 @@ static int imx8_acm_clk_probe(struct platform_device *pdev)
+ sels[i].shift, sels[i].width,
+ 0, NULL, NULL);
+ if (IS_ERR(hws[sels[i].clkid])) {
+- pm_runtime_disable(&pdev->dev);
++ ret = PTR_ERR(hws[sels[i].clkid]);
+ goto err_clk_register;
+ }
+ }
+@@ -381,12 +383,16 @@ static int imx8_acm_clk_probe(struct platform_device *pdev)
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data);
+ if (ret < 0) {
+ dev_err(dev, "failed to register hws for ACM\n");
+- pm_runtime_disable(&pdev->dev);
++ goto err_clk_register;
+ }
+
+-err_clk_register:
++ pm_runtime_put_sync(&pdev->dev);
++ return 0;
+
++err_clk_register:
+ pm_runtime_put_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
++ clk_imx_acm_detach_pm_domains(&pdev->dev, &priv->dev_pm);
+
+ return ret;
+ }
+diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
+index 4bd65879fcd34..f70ed231b92d6 100644
+--- a/drivers/clk/imx/clk-imx8mq.c
++++ b/drivers/clk/imx/clk-imx8mq.c
+@@ -288,8 +288,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ void __iomem *base;
+ int err;
+
+- clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+- IMX8MQ_CLK_END), GFP_KERNEL);
++ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, IMX8MQ_CLK_END), GFP_KERNEL);
+ if (WARN_ON(!clk_hw_data))
+ return -ENOMEM;
+
+@@ -306,10 +305,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ hws[IMX8MQ_CLK_EXT4] = imx_get_clk_hw_by_name(np, "clk_ext4");
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-anatop");
+- base = of_iomap(np, 0);
++ base = devm_of_iomap(dev, np, 0, NULL);
+ of_node_put(np);
+- if (WARN_ON(!base))
+- return -ENOMEM;
++ if (WARN_ON(IS_ERR(base))) {
++ err = PTR_ERR(base);
++ goto unregister_hws;
++ }
+
+ hws[IMX8MQ_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x28, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MQ_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x18, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+@@ -395,8 +396,10 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+
+ np = dev->of_node;
+ base = devm_platform_ioremap_resource(pdev, 0);
+- if (WARN_ON(IS_ERR(base)))
+- return PTR_ERR(base);
++ if (WARN_ON(IS_ERR(base))) {
++ err = PTR_ERR(base);
++ goto unregister_hws;
++ }
+
+ /* CORE */
+ hws[IMX8MQ_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mq_a53_sels, base + 0x8000);
+diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
+index cadcbb318f5cf..4020aa4b79bf2 100644
+--- a/drivers/clk/imx/clk-imx8qxp.c
++++ b/drivers/clk/imx/clk-imx8qxp.c
+@@ -147,10 +147,10 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
+ imx_clk_scu("adc0_clk", IMX_SC_R_ADC_0, IMX_SC_PM_CLK_PER);
+ imx_clk_scu("adc1_clk", IMX_SC_R_ADC_1, IMX_SC_PM_CLK_PER);
+ imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
++ imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
+ imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
+ imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
+ imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS);
+- imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
+
+ /* Audio SS */
+ imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL);
+diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
+index ee5c72369334f..6bbdd4705d71f 100644
+--- a/drivers/clk/keystone/pll.c
++++ b/drivers/clk/keystone/pll.c
+@@ -281,12 +281,13 @@ static void __init of_pll_div_clk_init(struct device_node *node)
+
+ clk = clk_register_divider(NULL, clk_name, parent_name, 0, reg, shift,
+ mask, 0, NULL);
+- if (clk) {
+- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+- } else {
++ if (IS_ERR(clk)) {
+ pr_err("%s: error registering divider %s\n", __func__, clk_name);
+ iounmap(reg);
++ return;
+ }
++
++ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ }
+ CLK_OF_DECLARE(pll_divider_clock, "ti,keystone,pll-divider-clock", of_pll_div_clk_init);
+
+@@ -328,10 +329,12 @@ static void __init of_pll_mux_clk_init(struct device_node *node)
+ clk = clk_register_mux(NULL, clk_name, (const char **)&parents,
+ ARRAY_SIZE(parents) , 0, reg, shift, mask,
+ 0, NULL);
+- if (clk)
+- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+- else
++ if (IS_ERR(clk)) {
+ pr_err("%s: error registering mux %s\n", __func__, clk_name);
++ return;
++ }
++
++ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ }
+ CLK_OF_DECLARE(pll_mux_clock, "ti,keystone,pll-mux-clock", of_pll_mux_clk_init);
+
+diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
+index c81f3e33ce568..12d9560eb4ba2 100644
+--- a/drivers/clk/mediatek/clk-mt2701.c
++++ b/drivers/clk/mediatek/clk-mt2701.c
+@@ -667,6 +667,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+@@ -747,6 +749,8 @@ static void __init mtk_infrasys_init_early(struct device_node *node)
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++ if (!infra_clk_data)
++ return;
+
+ for (i = 0; i < CLK_INFRA_NR; i++)
+ infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+@@ -774,6 +778,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++ if (!infra_clk_data)
++ return -ENOMEM;
+ } else {
+ for (i = 0; i < CLK_INFRA_NR; i++) {
+ if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
+@@ -890,6 +896,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(&pdev->dev, node, peri_clks,
+ ARRAY_SIZE(peri_clks), clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
+index 1f4c8d0c041ab..9c7f7407d7980 100644
+--- a/drivers/clk/mediatek/clk-mt6765.c
++++ b/drivers/clk/mediatek/clk-mt6765.c
+@@ -737,6 +737,8 @@ static int clk_mt6765_apmixed_probe(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+@@ -769,6 +771,8 @@ static int clk_mt6765_top_probe(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
+ clk_data);
+@@ -807,6 +811,8 @@ static int clk_mt6765_ifr_probe(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(&pdev->dev, node, ifr_clks,
+ ARRAY_SIZE(ifr_clks), clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
+index 3ee2f5a2319a0..ffedb1fe3c672 100644
+--- a/drivers/clk/mediatek/clk-mt6779.c
++++ b/drivers/clk/mediatek/clk-mt6779.c
+@@ -1217,6 +1217,8 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev)
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+@@ -1237,6 +1239,8 @@ static int clk_mt6779_top_probe(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
+index 2ebd25f0ce71d..f12d4e9ff0bba 100644
+--- a/drivers/clk/mediatek/clk-mt6797.c
++++ b/drivers/clk/mediatek/clk-mt6797.c
+@@ -390,6 +390,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
+ clk_data);
+@@ -545,6 +547,8 @@ static void mtk_infrasys_init_early(struct device_node *node)
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++ if (!infra_clk_data)
++ return;
+
+ for (i = 0; i < CLK_INFRA_NR; i++)
+ infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+@@ -570,6 +574,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++ if (!infra_clk_data)
++ return -ENOMEM;
+ } else {
+ for (i = 0; i < CLK_INFRA_NR; i++) {
+ if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
+diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
+index fe714debdc9ec..1bfedc988cfe8 100644
+--- a/drivers/clk/mediatek/clk-mt7629-eth.c
++++ b/drivers/clk/mediatek/clk-mt7629-eth.c
+@@ -77,6 +77,8 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(&pdev->dev, node, eth_clks,
+ CLK_ETH_NR_CLK, clk_data);
+@@ -100,6 +102,8 @@ static int clk_mt7629_sgmiisys_init(struct platform_device *pdev)
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(&pdev->dev, node, sgmii_clks[id++],
+ CLK_SGMII_NR_CLK, clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
+index 2882107d0f240..b8a1f01bc974d 100644
+--- a/drivers/clk/mediatek/clk-mt7629.c
++++ b/drivers/clk/mediatek/clk-mt7629.c
+@@ -555,6 +555,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+@@ -579,6 +581,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+ struct clk_hw_onecell_data *clk_data;
+
+ clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), clk_data);
+@@ -602,6 +606,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(&pdev->dev, node, peri_clks,
+ ARRAY_SIZE(peri_clks), clk_data);
+diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
+index a4eca5fd539c8..513ab6b1b3229 100644
+--- a/drivers/clk/mediatek/clk-pll.c
++++ b/drivers/clk/mediatek/clk-pll.c
+@@ -321,10 +321,8 @@ struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
+
+ ret = clk_hw_register(NULL, &pll->hw);
+
+- if (ret) {
+- kfree(pll);
++ if (ret)
+ return ERR_PTR(ret);
+- }
+
+ return &pll->hw;
+ }
+@@ -340,6 +338,8 @@ struct clk_hw *mtk_clk_register_pll(const struct mtk_pll_data *data,
+ return ERR_PTR(-ENOMEM);
+
+ hw = mtk_clk_register_pll_ops(pll, data, base, &mtk_pll_ops);
++ if (IS_ERR(hw))
++ kfree(pll);
+
+ return hw;
+ }
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index 865db5202e4cf..a79b837583894 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -131,6 +131,7 @@ config IPQ_APSS_6018
+ tristate "IPQ APSS Clock Controller"
+ select IPQ_APSS_PLL
+ depends on QCOM_APCS_IPC || COMPILE_TEST
++ depends on QCOM_SMEM
+ help
+ Support for APSS clock controller on IPQ platforms. The
+ APSS clock controller manages the Mux and enable block that feeds the
+diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
+index e170331858cc1..41279e5437a62 100644
+--- a/drivers/clk/qcom/apss-ipq-pll.c
++++ b/drivers/clk/qcom/apss-ipq-pll.c
+@@ -68,13 +68,13 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_stromer_ops,
++ .ops = &clk_alpha_pll_stromer_plus_ops,
+ },
+ },
+ };
+
+ static const struct alpha_pll_config ipq5332_pll_config = {
+- .l = 0x3e,
++ .l = 0x2d,
+ .config_ctl_val = 0x4001075b,
+ .config_ctl_hi_val = 0x304,
+ .main_output_mask = BIT(0),
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index e4ef645f65d1f..892f2efc1c32c 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -2479,3 +2479,66 @@ const struct clk_ops clk_alpha_pll_stromer_ops = {
+ .set_rate = clk_alpha_pll_stromer_set_rate,
+ };
+ EXPORT_SYMBOL_GPL(clk_alpha_pll_stromer_ops);
++
++static int clk_alpha_pll_stromer_plus_set_rate(struct clk_hw *hw,
++ unsigned long rate,
++ unsigned long prate)
++{
++ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
++ u32 l, alpha_width = pll_alpha_width(pll);
++ int ret, pll_mode;
++ u64 a;
++
++ rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
++
++ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &pll_mode);
++ if (ret)
++ return ret;
++
++ regmap_write(pll->clkr.regmap, PLL_MODE(pll), 0);
++
++ /* Delay of 2 output clock ticks required until output is disabled */
++ udelay(1);
++
++ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
++
++ if (alpha_width > ALPHA_BITWIDTH)
++ a <<= alpha_width - ALPHA_BITWIDTH;
++
++ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
++ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
++ a >> ALPHA_BITWIDTH);
++
++ regmap_write(pll->clkr.regmap, PLL_MODE(pll), PLL_BYPASSNL);
++
++	/* Wait five microseconds or more */
++ udelay(5);
++ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_RESET_N,
++ PLL_RESET_N);
++
++	/* The lock time should be less than 50 microseconds, worst case */
++ usleep_range(50, 60);
++
++ ret = wait_for_pll_enable_lock(pll);
++ if (ret) {
++ pr_err("Wait for PLL enable lock failed [%s] %d\n",
++ clk_hw_get_name(hw), ret);
++ return ret;
++ }
++
++ if (pll_mode & PLL_OUTCTRL)
++ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_OUTCTRL,
++ PLL_OUTCTRL);
++
++ return 0;
++}
++
++const struct clk_ops clk_alpha_pll_stromer_plus_ops = {
++ .prepare = clk_alpha_pll_enable,
++ .unprepare = clk_alpha_pll_disable,
++ .is_enabled = clk_alpha_pll_is_enabled,
++ .recalc_rate = clk_alpha_pll_recalc_rate,
++ .determine_rate = clk_alpha_pll_stromer_determine_rate,
++ .set_rate = clk_alpha_pll_stromer_plus_set_rate,
++};
++EXPORT_SYMBOL_GPL(clk_alpha_pll_stromer_plus_ops);
+diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
+index e4bd863027ab6..903fbab9b58e9 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.h
++++ b/drivers/clk/qcom/clk-alpha-pll.h
+@@ -152,6 +152,7 @@ extern const struct clk_ops clk_alpha_pll_postdiv_ops;
+ extern const struct clk_ops clk_alpha_pll_huayra_ops;
+ extern const struct clk_ops clk_alpha_pll_postdiv_ro_ops;
+ extern const struct clk_ops clk_alpha_pll_stromer_ops;
++extern const struct clk_ops clk_alpha_pll_stromer_plus_ops;
+
+ extern const struct clk_ops clk_alpha_pll_fabia_ops;
+ extern const struct clk_ops clk_alpha_pll_fixed_fabia_ops;
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index e22baf3a7112a..5183c74b074f8 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -158,17 +158,11 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
+ static unsigned long
+ calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
+ {
+- if (hid_div) {
+- rate *= 2;
+- rate /= hid_div + 1;
+- }
++ if (hid_div)
++ rate = mult_frac(rate, 2, hid_div + 1);
+
+- if (mode) {
+- u64 tmp = rate;
+- tmp *= m;
+- do_div(tmp, n);
+- rate = tmp;
+- }
++ if (mode)
++ rate = mult_frac(rate, m, n);
+
+ return rate;
+ }
+diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c
+index 19dc2b71cacf0..2a3c0659b7008 100644
+--- a/drivers/clk/qcom/gcc-ipq5018.c
++++ b/drivers/clk/qcom/gcc-ipq5018.c
+@@ -128,7 +128,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -143,7 +142,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -158,7 +156,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c
+index b02026f8549b2..f98591148a976 100644
+--- a/drivers/clk/qcom/gcc-ipq5332.c
++++ b/drivers/clk/qcom/gcc-ipq5332.c
+@@ -71,7 +71,6 @@ static struct clk_fixed_factor gpll0_div2 = {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -85,7 +84,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -114,7 +112,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ &gpll2_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -154,7 +151,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ &gpll4_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
+index 6120fbbc5de05..f9494fa1b8716 100644
+--- a/drivers/clk/qcom/gcc-ipq6018.c
++++ b/drivers/clk/qcom/gcc-ipq6018.c
+@@ -72,7 +72,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -86,7 +85,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -161,7 +159,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
+ &gpll6_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -192,7 +189,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ &gpll4_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -243,7 +239,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ &gpll2_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -274,7 +269,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
+ &nss_crypto_pll_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
+index 63ac2ced76bb9..b7faf12a511a1 100644
+--- a/drivers/clk/qcom/gcc-ipq8074.c
++++ b/drivers/clk/qcom/gcc-ipq8074.c
+@@ -75,7 +75,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -121,7 +120,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ &gpll2_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -154,7 +152,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ &gpll4_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -188,7 +185,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
+ &gpll6_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -201,7 +197,6 @@ static struct clk_fixed_factor gpll6_out_main_div2 = {
+ &gpll6_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -266,7 +261,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
+ &nss_crypto_pll_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
+index 8f430367299e6..e8190108e1aef 100644
+--- a/drivers/clk/qcom/gcc-ipq9574.c
++++ b/drivers/clk/qcom/gcc-ipq9574.c
+@@ -87,7 +87,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
+ &gpll0_main.clkr.hw
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+ };
+@@ -102,7 +101,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
+ &gpll0_main.clkr.hw
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+ };
+@@ -132,7 +130,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ &gpll4_main.clkr.hw
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+ };
+@@ -162,7 +159,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ &gpll2_main.clkr.hw
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+ };
+diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
+index 14dcc3f036683..e7b03a17514a5 100644
+--- a/drivers/clk/qcom/gcc-msm8996.c
++++ b/drivers/clk/qcom/gcc-msm8996.c
+@@ -244,71 +244,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4_gpll0_early_div[] = {
+ { .hw = &gpll0_early_div.hw }
+ };
+
+-static const struct freq_tbl ftbl_system_noc_clk_src[] = {
+- F(19200000, P_XO, 1, 0, 0),
+- F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0),
+- F(100000000, P_GPLL0, 6, 0, 0),
+- F(150000000, P_GPLL0, 4, 0, 0),
+- F(200000000, P_GPLL0, 3, 0, 0),
+- F(240000000, P_GPLL0, 2.5, 0, 0),
+- { }
+-};
+-
+-static struct clk_rcg2 system_noc_clk_src = {
+- .cmd_rcgr = 0x0401c,
+- .hid_width = 5,
+- .parent_map = gcc_xo_gpll0_gpll0_early_div_map,
+- .freq_tbl = ftbl_system_noc_clk_src,
+- .clkr.hw.init = &(struct clk_init_data){
+- .name = "system_noc_clk_src",
+- .parent_data = gcc_xo_gpll0_gpll0_early_div,
+- .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_early_div),
+- .ops = &clk_rcg2_ops,
+- },
+-};
+-
+-static const struct freq_tbl ftbl_config_noc_clk_src[] = {
+- F(19200000, P_XO, 1, 0, 0),
+- F(37500000, P_GPLL0, 16, 0, 0),
+- F(75000000, P_GPLL0, 8, 0, 0),
+- { }
+-};
+-
+-static struct clk_rcg2 config_noc_clk_src = {
+- .cmd_rcgr = 0x0500c,
+- .hid_width = 5,
+- .parent_map = gcc_xo_gpll0_map,
+- .freq_tbl = ftbl_config_noc_clk_src,
+- .clkr.hw.init = &(struct clk_init_data){
+- .name = "config_noc_clk_src",
+- .parent_data = gcc_xo_gpll0,
+- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+- .ops = &clk_rcg2_ops,
+- },
+-};
+-
+-static const struct freq_tbl ftbl_periph_noc_clk_src[] = {
+- F(19200000, P_XO, 1, 0, 0),
+- F(37500000, P_GPLL0, 16, 0, 0),
+- F(50000000, P_GPLL0, 12, 0, 0),
+- F(75000000, P_GPLL0, 8, 0, 0),
+- F(100000000, P_GPLL0, 6, 0, 0),
+- { }
+-};
+-
+-static struct clk_rcg2 periph_noc_clk_src = {
+- .cmd_rcgr = 0x06014,
+- .hid_width = 5,
+- .parent_map = gcc_xo_gpll0_map,
+- .freq_tbl = ftbl_periph_noc_clk_src,
+- .clkr.hw.init = &(struct clk_init_data){
+- .name = "periph_noc_clk_src",
+- .parent_data = gcc_xo_gpll0,
+- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+- .ops = &clk_rcg2_ops,
+- },
+-};
+-
+ static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(120000000, P_GPLL0, 5, 0, 0),
+@@ -1297,11 +1232,7 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_noc_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
++ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1464,11 +1395,6 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1498,11 +1424,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1549,11 +1470,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1583,11 +1499,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc3_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1617,11 +1528,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1635,11 +1541,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1977,11 +1878,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2318,11 +2214,6 @@ static struct clk_branch gcc_pdm_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2353,11 +2244,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2370,11 +2256,6 @@ static struct clk_branch gcc_tsif_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2422,11 +2303,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2520,11 +2396,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2537,11 +2408,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2554,11 +2420,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2606,11 +2467,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2623,11 +2479,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2640,11 +2491,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2692,11 +2538,6 @@ static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_slv_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2709,11 +2550,6 @@ static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_mstr_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2726,11 +2562,6 @@ static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2778,11 +2609,6 @@ static struct clk_branch gcc_pcie_phy_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2829,11 +2655,6 @@ static struct clk_branch gcc_ufs_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3060,11 +2881,7 @@ static struct clk_branch gcc_aggre0_snoc_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre0_snoc_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3077,11 +2894,7 @@ static struct clk_branch gcc_aggre0_cnoc_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre0_cnoc_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3094,11 +2907,7 @@ static struct clk_branch gcc_smmu_aggre0_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_smmu_aggre0_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3111,11 +2920,7 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_smmu_aggre0_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3162,10 +2967,6 @@ static struct clk_branch gcc_dcc_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_dcc_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3178,10 +2979,6 @@ static struct clk_branch gcc_aggre0_noc_mpu_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre0_noc_mpu_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3194,11 +2991,6 @@ static struct clk_branch gcc_qspi_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3347,10 +3139,6 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3363,10 +3151,6 @@ static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mnoc_bimc_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3379,10 +3163,6 @@ static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3395,10 +3175,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3495,9 +3271,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+- [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
+- [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
+- [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
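
/*
 * A note on the gcc-msm8996 hunks above, with an illustrative sketch: once
 * .parent_hws, .num_parents and CLK_SET_RATE_PARENT are dropped (and the
 * three NoC RCGs leave the clock table), each of these branches is just a
 * parentless gate -- clk_set_rate() requests terminate at the branch instead
 * of propagating into bus clocks that are now managed elsewhere. Field
 * values below are placeholders, not taken from the driver:
 */
static struct clk_branch example_ahb_clk = {
	.halt_reg = 0x1234,
	.clkr = {
		.enable_reg = 0x1234,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "example_ahb_clk",
			/* no parent_hws: nothing for set_rate to walk up to */
			.ops = &clk_branch2_ops,
		},
	},
};
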
+diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
+index 41ab210875fb2..05d115c52dfeb 100644
+--- a/drivers/clk/qcom/gcc-sm8150.c
++++ b/drivers/clk/qcom/gcc-sm8150.c
+@@ -774,7 +774,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parents_6,
+ .num_parents = ARRAY_SIZE(gcc_parents_6),
+- .flags = CLK_SET_RATE_PARENT,
++ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_floor_ops,
+ },
+ };
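
/*
 * The sm8150 flag swap above is behavioural, not cosmetic. Both flags come
 * from include/linux/clk-provider.h:
 *
 *   CLK_SET_RATE_PARENT   - a clk_set_rate() on this clock may adjust the
 *                           parent's rate to satisfy the request;
 *   CLK_OPS_PARENT_ENABLE - the framework enables the parent around
 *                           gate/ungate, set_rate and re-parent operations,
 *                           for hardware that must be reprogrammed while
 *                           its source clock is running.
 *
 * So after this change the SDCC2 RCG no longer pushes rate requests into
 * its parents, but it is guaranteed a live parent while switching.
 */
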
+diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
+index a023c4374be96..1180e48c687ac 100644
+--- a/drivers/clk/qcom/mmcc-msm8998.c
++++ b/drivers/clk/qcom/mmcc-msm8998.c
+@@ -2439,6 +2439,7 @@ static struct clk_branch fd_ahb_clk = {
+
+ static struct clk_branch mnoc_ahb_clk = {
+ .halt_reg = 0x5024,
++ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x5024,
+ .enable_mask = BIT(0),
+@@ -2454,6 +2455,7 @@ static struct clk_branch mnoc_ahb_clk = {
+
+ static struct clk_branch bimc_smmu_ahb_clk = {
+ .halt_reg = 0xe004,
++ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xe004,
+ .hwcg_bit = 1,
+ .clkr = {
+@@ -2471,6 +2473,7 @@ static struct clk_branch bimc_smmu_ahb_clk = {
+
+ static struct clk_branch bimc_smmu_axi_clk = {
+ .halt_reg = 0xe008,
++ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xe008,
+ .hwcg_bit = 1,
+ .clkr = {
+@@ -2607,11 +2610,13 @@ static struct gdsc camss_cpp_gdsc = {
+ static struct gdsc bimc_smmu_gdsc = {
+ .gdscr = 0xe020,
+ .gds_hw_ctrl = 0xe024,
++ .cxcs = (unsigned int []){ 0xe008 },
++ .cxc_count = 1,
+ .pd = {
+ .name = "bimc_smmu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+- .flags = HW_CTRL | ALWAYS_ON,
++ .flags = VOTABLE,
+ };
+
+ static struct clk_regmap *mmcc_msm8998_clocks[] = {
+diff --git a/drivers/clk/ralink/clk-mtmips.c b/drivers/clk/ralink/clk-mtmips.c
+index 1e7991439527a..50a443bf79ecd 100644
+--- a/drivers/clk/ralink/clk-mtmips.c
++++ b/drivers/clk/ralink/clk-mtmips.c
+@@ -821,6 +821,10 @@ static const struct mtmips_clk_data mt76x8_clk_data = {
+ };
+
+ static const struct of_device_id mtmips_of_match[] = {
++ {
++ .compatible = "ralink,rt2880-reset",
++ .data = NULL,
++ },
+ {
+ .compatible = "ralink,rt2880-sysc",
+ .data = &rt2880_clk_data,
+@@ -1088,25 +1092,11 @@ static int mtmips_clk_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static const struct of_device_id mtmips_clk_of_match[] = {
+- { .compatible = "ralink,rt2880-reset" },
+- { .compatible = "ralink,rt2880-sysc" },
+- { .compatible = "ralink,rt3050-sysc" },
+- { .compatible = "ralink,rt3052-sysc" },
+- { .compatible = "ralink,rt3352-sysc" },
+- { .compatible = "ralink,rt3883-sysc" },
+- { .compatible = "ralink,rt5350-sysc" },
+- { .compatible = "ralink,mt7620-sysc" },
+- { .compatible = "ralink,mt7628-sysc" },
+- { .compatible = "ralink,mt7688-sysc" },
+- {}
+-};
+-
+ static struct platform_driver mtmips_clk_driver = {
+ .probe = mtmips_clk_probe,
+ .driver = {
+ .name = "mtmips-clk",
+- .of_match_table = mtmips_clk_of_match,
++ .of_match_table = mtmips_of_match,
+ },
+ };
+
+diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
+index e2e0447de1901..5a15f8788b922 100644
+--- a/drivers/clk/renesas/rcar-cpg-lib.c
++++ b/drivers/clk/renesas/rcar-cpg-lib.c
+@@ -70,8 +70,21 @@ void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
+ #define STPnHCK BIT(9 - SDnSRCFC_SHIFT)
+
+ static const struct clk_div_table cpg_sdh_div_table[] = {
++ /*
++ * These values are recommended by the datasheet. Because they come
++ * first, Linux will only use these.
++ */
+ { 0, 1 }, { 1, 2 }, { STPnHCK | 2, 4 }, { STPnHCK | 3, 8 },
+- { STPnHCK | 4, 16 }, { 0, 0 },
++ { STPnHCK | 4, 16 },
++ /*
++	 * These values are not recommended because STPnHCK is set incorrectly.
++	 * They have nevertheless been seen in the wild, written by broken
++	 * firmware, so we support reading them; Linux sanitizes them at
++	 * initialization through recalc_rate.
++ */
++ { STPnHCK | 0, 1 }, { STPnHCK | 1, 2 }, { 2, 4 }, { 3, 8 }, { 4, 16 },
++ /* Sentinel */
++ { 0, 0 }
+ };
+
+ struct clk * __init cpg_sdh_clk_register(const char *name,
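
/*
 * How the reordered cpg_sdh_div_table above is consumed (a minimal sketch;
 * sdh_div_lookup() is hypothetical, the clk framework's divider helpers do
 * the real work): recalc_rate matches an entry by its register value, so
 * the broken-firmware encodings are recognised on read, while rate
 * selection walks the table in order and takes the first suitable divider,
 * so only the recommended entries are ever programmed.
 */
static unsigned int sdh_div_lookup(const struct clk_div_table *table, u32 val)
{
	const struct clk_div_table *t;

	for (t = table; t->div; t++)	/* { 0, 0 } terminates the table */
		if (t->val == val)
			return t->div;
	return 1;	/* unknown encoding: treat as divide-by-1 */
}
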
+diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
+index 47f488387f33a..3f01620e292b6 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.c
++++ b/drivers/clk/renesas/rzg2l-cpg.c
+@@ -11,6 +11,7 @@
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+ #include <linux/clk/renesas.h>
+@@ -38,14 +39,13 @@
+ #define WARN_DEBUG(x) do { } while (0)
+ #endif
+
+-#define DIV_RSMASK(v, s, m) ((v >> s) & m)
+ #define GET_SHIFT(val) ((val >> 12) & 0xff)
+ #define GET_WIDTH(val) ((val >> 8) & 0xf)
+
+-#define KDIV(val) DIV_RSMASK(val, 16, 0xffff)
+-#define MDIV(val) DIV_RSMASK(val, 6, 0x3ff)
+-#define PDIV(val) DIV_RSMASK(val, 0, 0x3f)
+-#define SDIV(val) DIV_RSMASK(val, 0, 0x7)
++#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), val))
++#define MDIV(val) FIELD_GET(GENMASK(15, 6), val)
++#define PDIV(val) FIELD_GET(GENMASK(5, 0), val)
++#define SDIV(val) FIELD_GET(GENMASK(2, 0), val)
+
+ #define CLK_ON_R(reg) (reg)
+ #define CLK_MON_R(reg) (0x180 + (reg))
+@@ -188,7 +188,9 @@ static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ u32 off = GET_REG_OFFSET(hwdata->conf);
+ u32 shift = GET_SHIFT(hwdata->conf);
+ const u32 clk_src_266 = 2;
+- u32 bitmask;
++ u32 msk, val, bitmask;
++ unsigned long flags;
++ int ret;
+
+ /*
+ * As per the HW manual, we should not directly switch from 533 MHz to
+@@ -202,26 +204,30 @@ static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ * the index to value mapping is done by adding 1 to the index.
+ */
+ bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
++ msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
++ spin_lock_irqsave(&priv->rmw_lock, flags);
+ if (index != clk_src_266) {
+- u32 msk, val;
+- int ret;
+-
+ writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);
+
+- msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
+-
+- ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val,
+- !(val & msk), 100,
+- CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
+- if (ret) {
+- dev_err(priv->dev, "failed to switch clk source\n");
+- return ret;
+- }
++ ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
++ !(val & msk), 10,
++ CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
++ if (ret)
++ goto unlock;
+ }
+
+ writel(bitmask | ((index + 1) << shift), priv->base + off);
+
+- return 0;
++ ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
++ !(val & msk), 10,
++ CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
++unlock:
++ spin_unlock_irqrestore(&priv->rmw_lock, flags);
++
++ if (ret)
++ dev_err(priv->dev, "failed to switch clk source\n");
++
++ return ret;
+ }
+
+ static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
+@@ -232,14 +238,8 @@ static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
+
+ val >>= GET_SHIFT(hwdata->conf);
+ val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
+- if (val) {
+- val--;
+- } else {
+- /* Prohibited clk source, change it to 533 MHz(reset value) */
+- rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);
+- }
+
+- return val;
++ return val ? val - 1 : 0;
+ }
+
+ static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
+@@ -695,18 +695,18 @@ static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzg2l_cpg_priv *priv = pll_clk->priv;
+ unsigned int val1, val2;
+- unsigned int mult = 1;
+- unsigned int div = 1;
++ u64 rate;
+
+ if (pll_clk->type != CLK_TYPE_SAM_PLL)
+ return parent_rate;
+
+ val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
+ val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
+- mult = MDIV(val1) + KDIV(val1) / 65536;
+- div = PDIV(val1) << SDIV(val2);
+
+- return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div);
++ rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
++ 16 + SDIV(val2));
++
++ return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
+ }
+
+ static const struct clk_ops rzg2l_cpg_pll_ops = {
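
/*
 * The recalc_rate rewrite above fixes the fractional PLL arithmetic. The
 * output frequency is
 *
 *   rate = parent * (MDIV + KDIV / 2^16) / (PDIV * 2^SDIV)
 *
 * where KDIV is a signed 16-bit fraction (hence the s16 cast in the new
 * macro). The old expression `MDIV + KDIV / 65536` was evaluated in integer
 * arithmetic, so the KDIV term always truncated to zero. The new code keeps
 * it: mul_u64_u32_shr(parent, (MDIV << 16) + KDIV, 16 + SDIV) computes
 * parent * (MDIV * 2^16 + KDIV) >> (16 + SDIV), then rounds by PDIV.
 * Illustrative values: parent = 24 MHz, MDIV = 133, KDIV = 21845 (~1/3),
 * PDIV = 2, SDIV = 1 gives 24 MHz * 133.333 / 4, i.e. about 800 MHz.
 */
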
+diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
+index 6cee9e56acc72..91e9c2569f801 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.h
++++ b/drivers/clk/renesas/rzg2l-cpg.h
+@@ -43,7 +43,7 @@
+ #define CPG_CLKSTATUS_SELSDHI0_STS BIT(28)
+ #define CPG_CLKSTATUS_SELSDHI1_STS BIT(29)
+
+-#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 20000
++#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 200
+
+ /* n = 0/1/2 for PLL1/4/6 */
+ #define CPG_SAMPLL_CLK1(n) (0x04 + (16 * n))
+diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
+index 75234e0783e1c..83fe4eb3133cb 100644
+--- a/drivers/clk/socfpga/stratix10-clk.h
++++ b/drivers/clk/socfpga/stratix10-clk.h
+@@ -7,8 +7,10 @@
+ #define __STRATIX10_CLK_H
+
+ struct stratix10_clock_data {
+- struct clk_hw_onecell_data clk_data;
+ void __iomem *base;
++
++ /* Must be last */
++ struct clk_hw_onecell_data clk_data;
+ };
+
+ struct stratix10_pll_clock {
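
/*
 * Why "Must be last": struct clk_hw_onecell_data ends in a flexible array
 * member (struct clk_hw *hws[]), so its storage extends past sizeof() and
 * would overlap any field placed after it. A sketch of the matching
 * allocation, assuming num_clks clocks (struct_size() is from
 * <linux/overflow.h>):
 */
struct stratix10_clock_data *data;

data = kzalloc(struct_size(data, clk_data.hws, num_clks), GFP_KERNEL);
if (!data)
	return -ENOMEM;
data->clk_data.num = num_clks;
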
+diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
+index 768a1f3398b47..5d5bb123ba949 100644
+--- a/drivers/clk/ti/divider.c
++++ b/drivers/clk/ti/divider.c
+@@ -309,7 +309,6 @@ static struct clk *_register_divider(struct device_node *node,
+ u32 flags,
+ struct clk_omap_divider *div)
+ {
+- struct clk *clk;
+ struct clk_init_data init;
+ const char *parent_name;
+ const char *name;
+@@ -326,12 +325,7 @@ static struct clk *_register_divider(struct device_node *node,
+ div->hw.init = &init;
+
+ /* register the clock */
+- clk = of_ti_clk_register(node, &div->hw, name);
+-
+- if (IS_ERR(clk))
+- kfree(div);
+-
+- return clk;
++ return of_ti_clk_register(node, &div->hw, name);
+ }
+
+ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
+diff --git a/drivers/clk/visconti/pll.h b/drivers/clk/visconti/pll.h
+index 01d07f1bf01b1..c4bd40676da4b 100644
+--- a/drivers/clk/visconti/pll.h
++++ b/drivers/clk/visconti/pll.h
+@@ -15,8 +15,10 @@
+
+ struct visconti_pll_provider {
+ void __iomem *reg_base;
+- struct clk_hw_onecell_data clk_data;
+ struct device_node *node;
++
++ /* Must be last */
++ struct clk_hw_onecell_data clk_data;
+ };
+
+ #define VISCONTI_PLL_RATE(_rate, _dacen, _dsmen, \
+diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
+index 7dd2c615bce23..071b04f1ee730 100644
+--- a/drivers/clocksource/arm_arch_timer.c
++++ b/drivers/clocksource/arm_arch_timer.c
+@@ -836,8 +836,9 @@ static u64 __arch_timer_check_delta(void)
+ * Note that TVAL is signed, thus has only 31 of its
+ * 32 bits to express magnitude.
+ */
+- MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
+- APM_CPU_PART_POTENZA)),
++ MIDR_REV_RANGE(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
++ APM_CPU_PART_XGENE),
++ APM_CPU_VAR_POTENZA, 0x0, 0xf),
+ {},
+ };
+
+diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
+index 27af17c995900..2a90c92a9182a 100644
+--- a/drivers/clocksource/timer-atmel-tcb.c
++++ b/drivers/clocksource/timer-atmel-tcb.c
+@@ -315,6 +315,7 @@ static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
+ writel(mck_divisor_idx /* likely divide-by-8 */
+ | ATMEL_TC_WAVE
+ | ATMEL_TC_WAVESEL_UP /* free-run */
++ | ATMEL_TC_ASWTRG_SET /* TIOA0 rises at software trigger */
+ | ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
+ | ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
+ tcaddr + ATMEL_TC_REG(0, CMR));
+diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
+index 28ab4f1a7c713..6a878d227a13b 100644
+--- a/drivers/clocksource/timer-imx-gpt.c
++++ b/drivers/clocksource/timer-imx-gpt.c
+@@ -434,12 +434,16 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
+ return -ENOMEM;
+
+ imxtm->base = of_iomap(np, 0);
+- if (!imxtm->base)
+- return -ENXIO;
++ if (!imxtm->base) {
++ ret = -ENXIO;
++ goto err_kfree;
++ }
+
+ imxtm->irq = irq_of_parse_and_map(np, 0);
+- if (imxtm->irq <= 0)
+- return -EINVAL;
++ if (imxtm->irq <= 0) {
++ ret = -EINVAL;
++ goto err_kfree;
++ }
+
+ imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
+
+@@ -452,11 +456,15 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
+
+ ret = _mxc_timer_init(imxtm);
+ if (ret)
+- return ret;
++ goto err_kfree;
+
+ initialized = 1;
+
+ return 0;
++
++err_kfree:
++ kfree(imxtm);
++ return ret;
+ }
+
+ static int __init imx1_timer_init_dt(struct device_node *np)
+diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
+index 09ab29cb7f641..5f60f6bd33866 100644
+--- a/drivers/clocksource/timer-ti-dm.c
++++ b/drivers/clocksource/timer-ti-dm.c
+@@ -140,6 +140,8 @@ struct dmtimer {
+ struct platform_device *pdev;
+ struct list_head node;
+ struct notifier_block nb;
++ struct notifier_block fclk_nb;
++ unsigned long fclk_rate;
+ };
+
+ static u32 omap_reserved_systimers;
+@@ -253,8 +255,7 @@ static inline void __omap_dm_timer_enable_posted(struct dmtimer *timer)
+ timer->posted = OMAP_TIMER_POSTED;
+ }
+
+-static inline void __omap_dm_timer_stop(struct dmtimer *timer,
+- unsigned long rate)
++static inline void __omap_dm_timer_stop(struct dmtimer *timer)
+ {
+ u32 l;
+
+@@ -269,7 +270,7 @@ static inline void __omap_dm_timer_stop(struct dmtimer *timer,
+ * Wait for functional clock period x 3.5 to make sure that
+ * timer is stopped
+ */
+- udelay(3500000 / rate + 1);
++ udelay(3500000 / timer->fclk_rate + 1);
+ #endif
+ }
+
+@@ -348,6 +349,21 @@ static int omap_timer_context_notifier(struct notifier_block *nb,
+ return NOTIFY_OK;
+ }
+
++static int omap_timer_fclk_notifier(struct notifier_block *nb,
++ unsigned long event, void *data)
++{
++ struct clk_notifier_data *clk_data = data;
++ struct dmtimer *timer = container_of(nb, struct dmtimer, fclk_nb);
++
++ switch (event) {
++ case POST_RATE_CHANGE:
++ timer->fclk_rate = clk_data->new_rate;
++ return NOTIFY_OK;
++ default:
++ return NOTIFY_DONE;
++ }
++}
++
+ static int omap_dm_timer_reset(struct dmtimer *timer)
+ {
+ u32 l, timeout = 100000;
+@@ -754,7 +770,6 @@ static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
+ {
+ struct dmtimer *timer;
+ struct device *dev;
+- unsigned long rate = 0;
+
+ timer = to_dmtimer(cookie);
+ if (unlikely(!timer))
+@@ -762,10 +777,7 @@ static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
+
+ dev = &timer->pdev->dev;
+
+- if (!timer->omap1)
+- rate = clk_get_rate(timer->fclk);
+-
+- __omap_dm_timer_stop(timer, rate);
++ __omap_dm_timer_stop(timer);
+
+ pm_runtime_put_sync(dev);
+
+@@ -1124,6 +1136,14 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
+ timer->fclk = devm_clk_get(dev, "fck");
+ if (IS_ERR(timer->fclk))
+ return PTR_ERR(timer->fclk);
++
++ timer->fclk_nb.notifier_call = omap_timer_fclk_notifier;
++ ret = devm_clk_notifier_register(dev, timer->fclk,
++ &timer->fclk_nb);
++ if (ret)
++ return ret;
++
++ timer->fclk_rate = clk_get_rate(timer->fclk);
+ } else {
+ timer->fclk = ERR_PTR(-ENODEV);
+ }
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 9a1e194d5cf88..1f6186475715e 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -307,11 +307,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
+ highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
+
+ WRITE_ONCE(cpudata->highest_perf, highest_perf);
+-
++ WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
+ WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
+ WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
+ WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
+-
++ WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
+ return 0;
+ }
+
+@@ -329,11 +329,12 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
+ highest_perf = cppc_perf.highest_perf;
+
+ WRITE_ONCE(cpudata->highest_perf, highest_perf);
+-
++ WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
+ WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
+ WRITE_ONCE(cpudata->lowest_nonlinear_perf,
+ cppc_perf.lowest_nonlinear_perf);
+ WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
++ WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ return 0;
+@@ -432,6 +433,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
+ u64 prev = READ_ONCE(cpudata->cppc_req_cached);
+ u64 value = prev;
+
++ min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
++ cpudata->max_limit_perf);
++ max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
++ cpudata->max_limit_perf);
+ des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+
+ if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
+@@ -470,6 +475,22 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
+ return 0;
+ }
+
++static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
++{
++ u32 max_limit_perf, min_limit_perf;
++ struct amd_cpudata *cpudata = policy->driver_data;
++
++ max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
++ min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
++
++ WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
++ WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
++ WRITE_ONCE(cpudata->max_limit_freq, policy->max);
++ WRITE_ONCE(cpudata->min_limit_freq, policy->min);
++
++ return 0;
++}
++
+ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq, bool fast_switch)
+ {
+@@ -480,6 +501,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
+ if (!cpudata->max_freq)
+ return -ENODEV;
+
++ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
++ amd_pstate_update_min_max_limit(policy);
++
+ cap_perf = READ_ONCE(cpudata->highest_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
+ max_perf = cap_perf;
+@@ -518,7 +542,9 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
+ static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+ {
+- return amd_pstate_update_freq(policy, target_freq, true);
++ if (!amd_pstate_update_freq(policy, target_freq, true))
++ return target_freq;
++ return policy->cur;
+ }
+
+ static void amd_pstate_adjust_perf(unsigned int cpu,
+@@ -532,6 +558,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+ struct amd_cpudata *cpudata = policy->driver_data;
+ unsigned int target_freq;
+
++ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
++ amd_pstate_update_min_max_limit(policy);
++
++
+ cap_perf = READ_ONCE(cpudata->highest_perf);
+ lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ max_freq = READ_ONCE(cpudata->max_freq);
+@@ -745,6 +775,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+ /* Initial processor data capability frequencies */
+ cpudata->max_freq = max_freq;
+ cpudata->min_freq = min_freq;
++ cpudata->max_limit_freq = max_freq;
++ cpudata->min_limit_freq = min_freq;
+ cpudata->nominal_freq = nominal_freq;
+ cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
+
+@@ -850,11 +882,16 @@ static ssize_t show_energy_performance_available_preferences(
+ {
+ int i = 0;
+ int offset = 0;
++ struct amd_cpudata *cpudata = policy->driver_data;
++
++ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
++ return sysfs_emit_at(buf, offset, "%s\n",
++ energy_perf_strings[EPP_INDEX_PERFORMANCE]);
+
+ while (energy_perf_strings[i] != NULL)
+ offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
+
+- sysfs_emit_at(buf, offset, "\n");
++ offset += sysfs_emit_at(buf, offset, "\n");
+
+ return offset;
+ }
+@@ -1183,16 +1220,25 @@ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
+ return 0;
+ }
+
+-static void amd_pstate_epp_init(unsigned int cpu)
++static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
+ {
+- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct amd_cpudata *cpudata = policy->driver_data;
+- u32 max_perf, min_perf;
++ u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
+ u64 value;
+ s16 epp;
+
+ max_perf = READ_ONCE(cpudata->highest_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
++ max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
++ min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
++
++ max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
++ cpudata->max_limit_perf);
++ min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
++ cpudata->max_limit_perf);
++
++ WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
++ WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+
+ value = READ_ONCE(cpudata->cppc_req_cached);
+
+@@ -1210,9 +1256,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
+ value &= ~AMD_CPPC_DES_PERF(~0L);
+ value |= AMD_CPPC_DES_PERF(0);
+
+- if (cpudata->epp_policy == cpudata->policy)
+- goto skip_epp;
+-
+ cpudata->epp_policy = cpudata->policy;
+
+ /* Get BIOS pre-defined epp value */
+@@ -1222,7 +1265,7 @@ static void amd_pstate_epp_init(unsigned int cpu)
+ * This return value can only be negative for shared_memory
+ * systems where EPP register read/write not supported.
+ */
+- goto skip_epp;
++ return;
+ }
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+@@ -1236,8 +1279,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+ amd_pstate_set_epp(cpudata, epp);
+-skip_epp:
+- cpufreq_cpu_put(policy);
+ }
+
+ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
+@@ -1252,7 +1293,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
+
+ cpudata->policy = policy->policy;
+
+- amd_pstate_epp_init(policy->cpu);
++ amd_pstate_epp_update_limit(policy);
+
+ return 0;
+ }
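
/*
 * The limit plumbing added above converts cpufreq policy limits (in kHz)
 * into CPPC performance levels by proportion against the highest level:
 *
 *   limit_perf = policy_limit_freq * highest_perf / max_freq
 *
 * Illustrative numbers (not from any real part): with highest_perf = 196
 * at max_freq = 4,000,000 kHz, a policy->max of 3,000,000 kHz yields
 * max_limit_perf = 3,000,000 * 196 / 4,000,000 = 147. Every subsequent
 * min/des/max request is then clamped into
 * [min_limit_perf, max_limit_perf] before the CPPC request is written.
 */
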
+diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
+index a33df3c66c88c..40a9ff18da068 100644
+--- a/drivers/cpufreq/cpufreq_stats.c
++++ b/drivers/cpufreq/cpufreq_stats.c
+@@ -131,23 +131,23 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
+ len += sysfs_emit_at(buf, len, " From : To\n");
+ len += sysfs_emit_at(buf, len, " : ");
+ for (i = 0; i < stats->state_num; i++) {
+- if (len >= PAGE_SIZE)
++ if (len >= PAGE_SIZE - 1)
+ break;
+ len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
+ }
+- if (len >= PAGE_SIZE)
+- return PAGE_SIZE;
++ if (len >= PAGE_SIZE - 1)
++ return PAGE_SIZE - 1;
+
+ len += sysfs_emit_at(buf, len, "\n");
+
+ for (i = 0; i < stats->state_num; i++) {
+- if (len >= PAGE_SIZE)
++ if (len >= PAGE_SIZE - 1)
+ break;
+
+ len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);
+
+ for (j = 0; j < stats->state_num; j++) {
+- if (len >= PAGE_SIZE)
++ if (len >= PAGE_SIZE - 1)
+ break;
+
+ if (pending)
+@@ -157,12 +157,12 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
+
+ len += sysfs_emit_at(buf, len, "%9u ", count);
+ }
+- if (len >= PAGE_SIZE)
++ if (len >= PAGE_SIZE - 1)
+ break;
+ len += sysfs_emit_at(buf, len, "\n");
+ }
+
+- if (len >= PAGE_SIZE) {
++ if (len >= PAGE_SIZE - 1) {
+ pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
+ return -EFBIG;
+ }
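
/*
 * Why the threshold above is PAGE_SIZE - 1: sysfs_emit_at() bounds its
 * output with vscnprintf(buf + at, PAGE_SIZE - at, ...), which always
 * reserves one byte for the terminating NUL. `len` therefore saturates at
 * PAGE_SIZE - 1 and can never reach PAGE_SIZE, so the old test was
 * unreachable and silent truncation went undetected. The guard pattern in
 * miniature:
 */
len += sysfs_emit_at(buf, len, "%9u ", count);
if (len >= PAGE_SIZE - 1)	/* buffer saturated: output was truncated */
	return -EFBIG;
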
+diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
+index 494d044b9e720..33728c242f66c 100644
+--- a/drivers/cpufreq/imx6q-cpufreq.c
++++ b/drivers/cpufreq/imx6q-cpufreq.c
+@@ -327,7 +327,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
+ imx6x_disable_freq_in_opp(dev, 696000000);
+
+ if (of_machine_is_compatible("fsl,imx6ull")) {
+- if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
++ if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
+ imx6x_disable_freq_in_opp(dev, 792000000);
+
+ if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
+diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
+index 88ef5e57ccd05..386aed3637b4e 100644
+--- a/drivers/cpufreq/tegra194-cpufreq.c
++++ b/drivers/cpufreq/tegra194-cpufreq.c
+@@ -450,6 +450,8 @@ static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
+ if (IS_ERR(opp))
+ continue;
+
++ dev_pm_opp_put(opp);
++
+ ret = dev_pm_opp_enable(cpu_dev, pos->frequency * KHZ);
+ if (ret < 0)
+ return ret;
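
/*
 * The dev_pm_opp_put() added above closes a reference leak: every
 * successful dev_pm_opp_find_freq_*() lookup returns an OPP with its
 * refcount raised, and the caller owns that reference. The canonical shape
 * of such a lookup:
 */
opp = dev_pm_opp_find_freq_exact(cpu_dev, pos->frequency * KHZ, true);
if (IS_ERR(opp))
	continue;
dev_pm_opp_put(opp);	/* drop the reference taken by the lookup */
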
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index eba2d750c3b07..066f08a3a040d 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -575,7 +575,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
+ return -EINVAL;
+
+- ctx->cdata.key_virt = key;
++ memcpy(ctx->key, key, keylen);
++ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.keylen = keylen - saltlen;
+
+ return chachapoly_set_sh_desc(aead);
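
/*
 * The memcpy() above (and its twin in caamalg_qi2.c below) fixes a
 * lifetime bug rather than a functional one: `key` belongs to the caller
 * and is only guaranteed valid for the duration of .setkey(), while
 * cdata.key_virt may be dereferenced again later, e.g. when the shared
 * descriptors are rebuilt. Caching the bytes in the ctx-owned buffer keeps
 * the pointer valid for the lifetime of the transform.
 */
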
+diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
+index 9156bbe038b7b..a148ff1f0872c 100644
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -641,7 +641,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
+ return -EINVAL;
+
+- ctx->cdata.key_virt = key;
++ memcpy(ctx->key, key, keylen);
++ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.keylen = keylen - saltlen;
+
+ return chachapoly_set_sh_desc(aead);
+diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c
+index 839ea14b9a853..6f33149ef80df 100644
+--- a/drivers/crypto/ccp/dbc.c
++++ b/drivers/crypto/ccp/dbc.c
+@@ -205,7 +205,7 @@ int dbc_dev_init(struct psp_device *psp)
+ return -ENOMEM;
+
+ BUILD_BUG_ON(sizeof(union dbc_buffer) > PAGE_SIZE);
+- dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
++ dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL | __GFP_ZERO, 0);
+ if (!dbc_dev->mbox) {
+ ret = -ENOMEM;
+ goto cleanup_dev;
+diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
+index 39297ce70f441..3dce35debf637 100644
+--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
++++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
+@@ -433,8 +433,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE;
+ module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
+ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+
++static bool pf_q_num_flag;
+ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++ pf_q_num_flag = true;
++
+ return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
+ }
+
+@@ -1033,7 +1036,7 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
+
+ for (i = 0; i < clusters_num; i++) {
+ ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
+- if (ret < 0)
++ if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
+ return -EINVAL;
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
+
+@@ -1157,6 +1160,8 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ qm->qm_list = &hpre_devices;
++ if (pf_q_num_flag)
++ set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ }
+
+ ret = hisi_qm_init(qm);
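
/*
 * The check rewritten above reflects how the kernel's snprintf() reports
 * truncation: it never returns a negative value, it returns the length the
 * formatted string *would* have had, so the old `ret < 0` test could not
 * fire. The idiomatic truncation test:
 */
ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
	return -EINVAL;	/* name did not fit, i.e. it was truncated */
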
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
+index a99fd589445ce..193b0b3a77cda 100644
+--- a/drivers/crypto/hisilicon/qm.c
++++ b/drivers/crypto/hisilicon/qm.c
+@@ -206,8 +206,6 @@
+ #define WAIT_PERIOD 20
+ #define REMOVE_WAIT_DELAY 10
+
+-#define QM_DRIVER_REMOVING 0
+-#define QM_RST_SCHED 1
+ #define QM_QOS_PARAM_NUM 2
+ #define QM_QOS_MAX_VAL 1000
+ #define QM_QOS_RATE 100
+@@ -849,6 +847,8 @@ static void qm_poll_req_cb(struct hisi_qp *qp)
+ qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
+ qp->qp_status.cq_head, 0);
+ atomic_dec(&qp->qp_status.used);
++
++ cond_resched();
+ }
+
+ /* set c_flag */
+@@ -2824,7 +2824,6 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
+ mutex_init(&qm->mailbox_lock);
+ init_rwsem(&qm->qps_lock);
+ qm->qp_in_used = 0;
+- qm->misc_ctl = false;
+ if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
+ if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
+ dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
+@@ -5093,6 +5092,7 @@ free_eq_irq:
+
+ static int qm_get_qp_num(struct hisi_qm *qm)
+ {
++ struct device *dev = &qm->pdev->dev;
+ bool is_db_isolation;
+
+ /* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. */
+@@ -5109,13 +5109,21 @@ static int qm_get_qp_num(struct hisi_qm *qm)
+ qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
+ QM_FUNC_MAX_QP_CAP, is_db_isolation);
+
+- /* check if qp number is valid */
+- if (qm->qp_num > qm->max_qp_num) {
+- dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
++ if (qm->qp_num <= qm->max_qp_num)
++ return 0;
++
++ if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) {
++ /* Check whether the set qp number is valid */
++ dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n",
+ qm->qp_num, qm->max_qp_num);
+ return -EINVAL;
+ }
+
++ dev_info(dev, "Default qp num(%u) is too big, reset it to Function's max qp num(%u)!\n",
++ qm->qp_num, qm->max_qp_num);
++ qm->qp_num = qm->max_qp_num;
++ qm->debug.curr_qm_qp_num = qm->qp_num;
++
+ return 0;
+ }
+
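
/*
 * pf_q_num_flag above records whether the user actually passed pf_q_num on
 * the command line: a module_param_cb() .set callback only runs for an
 * explicit parameter. Probe code can then treat an out-of-range value as a
 * hard error when it was the user's choice, but quietly shrink a built-in
 * default to the hardware maximum. The pattern in isolation (names here
 * are illustrative):
 */
static unsigned int q_num;
static bool q_num_from_user;

static int q_num_param_set(const char *val, const struct kernel_param *kp)
{
	q_num_from_user = true;		/* explicit "q_num=..." was given */
	return param_set_uint(val, kp);
}

static const struct kernel_param_ops q_num_ops = {
	.set = q_num_param_set,
	.get = param_get_uint,
};
module_param_cb(q_num, &q_num_ops, &q_num, 0444);
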
+diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h
+index 1406a422d4551..8e36aa9c681be 100644
+--- a/drivers/crypto/hisilicon/qm_common.h
++++ b/drivers/crypto/hisilicon/qm_common.h
+@@ -4,7 +4,6 @@
+ #define QM_COMMON_H
+
+ #define QM_DBG_READ_LEN 256
+-#define QM_RESETTING 2
+
+ struct qm_cqe {
+ __le32 rsvd0;
+diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
+index 77f9f131b8503..62bd8936a9154 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_main.c
++++ b/drivers/crypto/hisilicon/sec2/sec_main.c
+@@ -311,8 +311,11 @@ static int sec_diff_regs_show(struct seq_file *s, void *unused)
+ }
+ DEFINE_SHOW_ATTRIBUTE(sec_diff_regs);
+
++static bool pf_q_num_flag;
+ static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++ pf_q_num_flag = true;
++
+ return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
+ }
+
+@@ -1120,6 +1123,8 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ qm->qm_list = &sec_devices;
++ if (pf_q_num_flag)
++ set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ /*
+ * have no way to get qm configure in VM in v1 hardware,
+diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
+index f3ce34198775d..84dbaeb07ea83 100644
+--- a/drivers/crypto/hisilicon/zip/zip_main.c
++++ b/drivers/crypto/hisilicon/zip/zip_main.c
+@@ -364,8 +364,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE;
+ module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444);
+ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+
++static bool pf_q_num_flag;
+ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++ pf_q_num_flag = true;
++
+ return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
+ }
+
+@@ -1139,6 +1142,8 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ qm->qm_list = &zip_devices;
++ if (pf_q_num_flag)
++ set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ /*
+ * have no way to get qm configure in VM in v1 hardware,
+diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+index dd4464b7e00b1..a5691ba0b7244 100644
+--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
++++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+@@ -11,8 +11,13 @@
+ #include <adf_gen4_pm.h>
+ #include <adf_gen4_timer.h>
+ #include "adf_4xxx_hw_data.h"
++#include "adf_cfg_services.h"
+ #include "icp_qat_hw.h"
+
++#define ADF_AE_GROUP_0 GENMASK(3, 0)
++#define ADF_AE_GROUP_1 GENMASK(7, 4)
++#define ADF_AE_GROUP_2 BIT(8)
++
+ enum adf_fw_objs {
+ ADF_FW_SYM_OBJ,
+ ADF_FW_ASYM_OBJ,
+@@ -40,39 +45,45 @@ struct adf_fw_config {
+ };
+
+ static const struct adf_fw_config adf_fw_cy_config[] = {
+- {0xF0, ADF_FW_SYM_OBJ},
+- {0xF, ADF_FW_ASYM_OBJ},
+- {0x100, ADF_FW_ADMIN_OBJ},
++ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
++ {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
++ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+ };
+
+ static const struct adf_fw_config adf_fw_dc_config[] = {
+- {0xF0, ADF_FW_DC_OBJ},
+- {0xF, ADF_FW_DC_OBJ},
+- {0x100, ADF_FW_ADMIN_OBJ},
++ {ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
++ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
++ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+ };
+
+ static const struct adf_fw_config adf_fw_sym_config[] = {
+- {0xF0, ADF_FW_SYM_OBJ},
+- {0xF, ADF_FW_SYM_OBJ},
+- {0x100, ADF_FW_ADMIN_OBJ},
++ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
++ {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
++ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+ };
+
+ static const struct adf_fw_config adf_fw_asym_config[] = {
+- {0xF0, ADF_FW_ASYM_OBJ},
+- {0xF, ADF_FW_ASYM_OBJ},
+- {0x100, ADF_FW_ADMIN_OBJ},
++ {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
++ {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
++ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+ };
+
+ static const struct adf_fw_config adf_fw_asym_dc_config[] = {
+- {0xF0, ADF_FW_ASYM_OBJ},
+- {0xF, ADF_FW_DC_OBJ},
+- {0x100, ADF_FW_ADMIN_OBJ},
++ {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
++ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
++ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+ };
+
+ static const struct adf_fw_config adf_fw_sym_dc_config[] = {
+- {0xF0, ADF_FW_SYM_OBJ},
+- {0xF, ADF_FW_DC_OBJ},
+- {0x100, ADF_FW_ADMIN_OBJ},
++ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
++ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
++ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
++};
++
++static const struct adf_fw_config adf_fw_dcc_config[] = {
++ {ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
++ {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
++ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+ };
+
+ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dc_config));
+@@ -80,6 +91,7 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_config));
+ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_config));
+ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config));
+ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config));
++static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));
+
+ /* Worker thread to service arbiter mappings */
+ static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
+@@ -94,36 +106,18 @@ static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
+ 0x0
+ };
+
++static const u32 thrd_to_arb_map_dcc[ADF_4XXX_MAX_ACCELENGINES] = {
++ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
++ 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
++ 0x0
++};
++
+ static struct adf_hw_device_class adf_4xxx_class = {
+ .name = ADF_4XXX_DEVICE_NAME,
+ .type = DEV_4XXX,
+ .instances = 0,
+ };
+
+-enum dev_services {
+- SVC_CY = 0,
+- SVC_CY2,
+- SVC_DC,
+- SVC_SYM,
+- SVC_ASYM,
+- SVC_DC_ASYM,
+- SVC_ASYM_DC,
+- SVC_DC_SYM,
+- SVC_SYM_DC,
+-};
+-
+-static const char *const dev_cfg_services[] = {
+- [SVC_CY] = ADF_CFG_CY,
+- [SVC_CY2] = ADF_CFG_ASYM_SYM,
+- [SVC_DC] = ADF_CFG_DC,
+- [SVC_SYM] = ADF_CFG_SYM,
+- [SVC_ASYM] = ADF_CFG_ASYM,
+- [SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
+- [SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
+- [SVC_DC_SYM] = ADF_CFG_DC_SYM,
+- [SVC_SYM_DC] = ADF_CFG_SYM_DC,
+-};
+-
+ static int get_service_enabled(struct adf_accel_dev *accel_dev)
+ {
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+@@ -137,7 +131,7 @@ static int get_service_enabled(struct adf_accel_dev *accel_dev)
+ return ret;
+ }
+
+- ret = match_string(dev_cfg_services, ARRAY_SIZE(dev_cfg_services),
++ ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services),
+ services);
+ if (ret < 0)
+ dev_err(&GET_DEV(accel_dev),
+@@ -212,6 +206,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+ {
+ struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+ u32 capabilities_sym, capabilities_asym, capabilities_dc;
++ u32 capabilities_dcc;
+ u32 fusectl1;
+
+ /* Read accelerator capabilities mask */
+@@ -284,6 +279,14 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+ return capabilities_sym | capabilities_asym;
+ case SVC_DC:
+ return capabilities_dc;
++ case SVC_DCC:
++ /*
++ * Sym capabilities are available for chaining operations,
++ * but sym crypto instances cannot be supported
++ */
++ capabilities_dcc = capabilities_dc | capabilities_sym;
++ capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
++ return capabilities_dcc;
+ case SVC_SYM:
+ return capabilities_sym;
+ case SVC_ASYM:
+@@ -309,6 +312,8 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_DC:
+ return thrd_to_arb_map_dc;
++ case SVC_DCC:
++ return thrd_to_arb_map_dcc;
+ default:
+ return default_thrd_to_arb_map;
+ }
+@@ -393,38 +398,96 @@ static u32 uof_get_num_objs(void)
+ return ARRAY_SIZE(adf_fw_cy_config);
+ }
+
+-static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
+- const char * const fw_objs[], int num_objs)
++static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
+ {
+- int id;
+-
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ case SVC_CY2:
+- id = adf_fw_cy_config[obj_num].obj;
+- break;
++ return adf_fw_cy_config;
+ case SVC_DC:
+- id = adf_fw_dc_config[obj_num].obj;
+- break;
++ return adf_fw_dc_config;
++ case SVC_DCC:
++ return adf_fw_dcc_config;
+ case SVC_SYM:
+- id = adf_fw_sym_config[obj_num].obj;
+- break;
++ return adf_fw_sym_config;
+ case SVC_ASYM:
+- id = adf_fw_asym_config[obj_num].obj;
+- break;
++ return adf_fw_asym_config;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+- id = adf_fw_asym_dc_config[obj_num].obj;
+- break;
++ return adf_fw_asym_dc_config;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+- id = adf_fw_sym_dc_config[obj_num].obj;
+- break;
++ return adf_fw_sym_dc_config;
+ default:
+- id = -EINVAL;
+- break;
++ return NULL;
++ }
++}
++
++enum adf_rp_groups {
++ RP_GROUP_0 = 0,
++ RP_GROUP_1,
++ RP_GROUP_COUNT
++};
++
++static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
++{
++ enum adf_cfg_service_type rps[RP_GROUP_COUNT];
++ const struct adf_fw_config *fw_config;
++ u16 ring_to_svc_map;
++ int i, j;
++
++ fw_config = get_fw_config(accel_dev);
++ if (!fw_config)
++ return 0;
++
++ for (i = 0; i < RP_GROUP_COUNT; i++) {
++ switch (fw_config[i].ae_mask) {
++ case ADF_AE_GROUP_0:
++ j = RP_GROUP_0;
++ break;
++ case ADF_AE_GROUP_1:
++ j = RP_GROUP_1;
++ break;
++ default:
++ return 0;
++ }
++
++ switch (fw_config[i].obj) {
++ case ADF_FW_SYM_OBJ:
++ rps[j] = SYM;
++ break;
++ case ADF_FW_ASYM_OBJ:
++ rps[j] = ASYM;
++ break;
++ case ADF_FW_DC_OBJ:
++ rps[j] = COMP;
++ break;
++ default:
++ rps[j] = 0;
++ break;
++ }
+ }
+
++ ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
++ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
++ rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
++ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
++
++ return ring_to_svc_map;
++}
++
++static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
++ const char * const fw_objs[], int num_objs)
++{
++ const struct adf_fw_config *fw_config;
++ int id;
++
++ fw_config = get_fw_config(accel_dev);
++ if (fw_config)
++ id = fw_config[obj_num].obj;
++ else
++ id = -EINVAL;
++
+ if (id < 0 || id > num_objs)
+ return NULL;
+
+@@ -447,26 +510,13 @@ static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_n
+
+ static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+ {
+- switch (get_service_enabled(accel_dev)) {
+- case SVC_CY:
+- return adf_fw_cy_config[obj_num].ae_mask;
+- case SVC_DC:
+- return adf_fw_dc_config[obj_num].ae_mask;
+- case SVC_CY2:
+- return adf_fw_cy_config[obj_num].ae_mask;
+- case SVC_SYM:
+- return adf_fw_sym_config[obj_num].ae_mask;
+- case SVC_ASYM:
+- return adf_fw_asym_config[obj_num].ae_mask;
+- case SVC_ASYM_DC:
+- case SVC_DC_ASYM:
+- return adf_fw_asym_dc_config[obj_num].ae_mask;
+- case SVC_SYM_DC:
+- case SVC_DC_SYM:
+- return adf_fw_sym_dc_config[obj_num].ae_mask;
+- default:
++ const struct adf_fw_config *fw_config;
++
++ fw_config = get_fw_config(accel_dev);
++ if (!fw_config)
+ return 0;
+- }
++
++ return fw_config[obj_num].ae_mask;
+ }
+
+ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
+@@ -522,6 +572,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
+ hw_data->uof_get_ae_mask = uof_get_ae_mask;
+ hw_data->set_msix_rttable = set_msix_default_rttable;
+ hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
++ hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+ hw_data->enable_pm = adf_gen4_enable_pm;
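
/*
 * get_ring_to_svc_map() above packs one service id per ring pair. Assuming
 * the usual 3-bit encoding (ADF_CFG_SERV_RING_PAIR_n_SHIFT == 3 * n), the
 * default "cy" configuration -- AE group 0 running asym, group 1 running
 * sym -- yields
 *
 *   map = ASYM << 0 | SYM << 3 | ASYM << 6 | SYM << 9
 *
 * so ring pairs 0/2 serve asymmetric crypto and pairs 1/3 symmetric, which
 * is the alternation the firmware configs above describe.
 */
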
+diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+index 6d4e2e139ffa2..90f5c1ca7b8d8 100644
+--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
++++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+@@ -11,6 +11,7 @@
+ #include <adf_heartbeat.h>
+
+ #include "adf_4xxx_hw_data.h"
++#include "adf_cfg_services.h"
+ #include "qat_compression.h"
+ #include "qat_crypto.h"
+ #include "adf_transport_access_macros.h"
+@@ -23,30 +24,6 @@ static const struct pci_device_id adf_pci_tbl[] = {
+ };
+ MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+-enum configs {
+- DEV_CFG_CY = 0,
+- DEV_CFG_DC,
+- DEV_CFG_SYM,
+- DEV_CFG_ASYM,
+- DEV_CFG_ASYM_SYM,
+- DEV_CFG_ASYM_DC,
+- DEV_CFG_DC_ASYM,
+- DEV_CFG_SYM_DC,
+- DEV_CFG_DC_SYM,
+-};
+-
+-static const char * const services_operations[] = {
+- ADF_CFG_CY,
+- ADF_CFG_DC,
+- ADF_CFG_SYM,
+- ADF_CFG_ASYM,
+- ADF_CFG_ASYM_SYM,
+- ADF_CFG_ASYM_DC,
+- ADF_CFG_DC_ASYM,
+- ADF_CFG_SYM_DC,
+- ADF_CFG_DC_SYM,
+-};
+-
+ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+ {
+ if (accel_dev->hw_device) {
+@@ -292,16 +269,17 @@ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
+ if (ret)
+ goto err;
+
+- ret = sysfs_match_string(services_operations, services);
++ ret = sysfs_match_string(adf_cfg_services, services);
+ if (ret < 0)
+ goto err;
+
+ switch (ret) {
+- case DEV_CFG_CY:
+- case DEV_CFG_ASYM_SYM:
++ case SVC_CY:
++ case SVC_CY2:
+ ret = adf_crypto_dev_config(accel_dev);
+ break;
+- case DEV_CFG_DC:
++ case SVC_DC:
++ case SVC_DCC:
+ ret = adf_comp_dev_config(accel_dev);
+ break;
+ default:
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+index e57abde66f4fb..79d5a1535eda3 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
++++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+@@ -29,7 +29,7 @@
+ #define ADF_PCI_MAX_BARS 3
+ #define ADF_DEVICE_NAME_LENGTH 32
+ #define ADF_ETR_MAX_RINGS_PER_BANK 16
+-#define ADF_MAX_MSIX_VECTOR_NAME 16
++#define ADF_MAX_MSIX_VECTOR_NAME 48
+ #define ADF_DEVICE_NAME_PREFIX "qat_"
+
+ enum adf_accel_capabilities {
+@@ -182,6 +182,7 @@ struct adf_hw_device_data {
+ void (*get_arb_info)(struct arb_info *arb_csrs_info);
+ void (*get_admin_info)(struct admin_info *admin_csrs_info);
+ enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
++ u16 (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev);
+ int (*alloc_irq)(struct adf_accel_dev *accel_dev);
+ void (*free_irq)(struct adf_accel_dev *accel_dev);
+ void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
+index ff790823b8686..194d64d4b99a1 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
+@@ -8,6 +8,7 @@
+ #include <linux/dma-mapping.h>
+ #include "adf_accel_devices.h"
+ #include "adf_common_drv.h"
++#include "adf_cfg.h"
+ #include "adf_heartbeat.h"
+ #include "icp_qat_fw_init_admin.h"
+
+@@ -212,6 +213,17 @@ int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp)
+ return 0;
+ }
+
++static int adf_set_chaining(struct adf_accel_dev *accel_dev)
++{
++ u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask;
++ struct icp_qat_fw_init_admin_resp resp = { };
++ struct icp_qat_fw_init_admin_req req = { };
++
++ req.cmd_id = ICP_QAT_FW_DC_CHAIN_INIT;
++
++ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
++}
++
+ static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
+ u32 *capabilities)
+ {
+@@ -284,6 +296,19 @@ int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks)
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+ }
+
++static bool is_dcc_enabled(struct adf_accel_dev *accel_dev)
++{
++ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
++ int ret;
++
++ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
++ ADF_SERVICES_ENABLED, services);
++ if (ret)
++ return false;
++
++ return !strcmp(services, "dcc");
++}
++
+ /**
+ * adf_send_admin_init() - Function sends init message to FW
+ * @accel_dev: Pointer to acceleration device.
+@@ -297,6 +322,16 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+ u32 dc_capabilities = 0;
+ int ret;
+
++ ret = adf_set_fw_constants(accel_dev);
++ if (ret)
++ return ret;
++
++ if (is_dcc_enabled(accel_dev)) {
++ ret = adf_set_chaining(accel_dev);
++ if (ret)
++ return ret;
++ }
++
+ ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Cannot get dc capabilities\n");
+@@ -304,10 +339,6 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+ }
+ accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;
+
+- ret = adf_set_fw_constants(accel_dev);
+- if (ret)
+- return ret;
+-
+ return adf_init_ae(accel_dev);
+ }
+ EXPORT_SYMBOL_GPL(adf_send_admin_init);
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+new file mode 100644
+index 0000000000000..b353d40c5c6d0
+--- /dev/null
++++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+@@ -0,0 +1,34 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/* Copyright(c) 2023 Intel Corporation */
++#ifndef _ADF_CFG_SERVICES_H_
++#define _ADF_CFG_SERVICES_H_
++
++#include "adf_cfg_strings.h"
++
++enum adf_services {
++ SVC_CY = 0,
++ SVC_CY2,
++ SVC_DC,
++ SVC_DCC,
++ SVC_SYM,
++ SVC_ASYM,
++ SVC_DC_ASYM,
++ SVC_ASYM_DC,
++ SVC_DC_SYM,
++ SVC_SYM_DC,
++};
++
++static const char *const adf_cfg_services[] = {
++ [SVC_CY] = ADF_CFG_CY,
++ [SVC_CY2] = ADF_CFG_ASYM_SYM,
++ [SVC_DC] = ADF_CFG_DC,
++ [SVC_DCC] = ADF_CFG_DCC,
++ [SVC_SYM] = ADF_CFG_SYM,
++ [SVC_ASYM] = ADF_CFG_ASYM,
++ [SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
++ [SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
++ [SVC_DC_SYM] = ADF_CFG_DC_SYM,
++ [SVC_SYM_DC] = ADF_CFG_SYM_DC,
++};
++
++#endif
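
/*
 * The point of the new shared table: every site that used to carry its own
 * copy of this list (adf_drv.c, adf_sysfs.c, the 4xxx hw_data) now resolves
 * a user-supplied string to the same enum index. Typical lookup:
 */
int svc = sysfs_match_string(adf_cfg_services, "dcc");
/* svc == SVC_DCC on a match, or -EINVAL if the string is unknown */
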
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+index 6066dc637352c..322b76903a737 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
++++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+@@ -32,6 +32,7 @@
+ #define ADF_CFG_DC_ASYM "dc;asym"
+ #define ADF_CFG_SYM_DC "sym;dc"
+ #define ADF_CFG_DC_SYM "dc;sym"
++#define ADF_CFG_DCC "dcc"
+ #define ADF_SERVICES_ENABLED "ServicesEnabled"
+ #define ADF_PM_IDLE_SUPPORT "PmIdleSupport"
+ #define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+index 673b5044c62a5..79ff7982378d9 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
++++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+@@ -25,6 +25,8 @@
+ #define ADF_STATUS_AE_STARTED 6
+ #define ADF_STATUS_PF_RUNNING 7
+ #define ADF_STATUS_IRQ_ALLOCATED 8
++#define ADF_STATUS_CRYPTO_ALGS_REGISTERED 9
++#define ADF_STATUS_COMP_ALGS_REGISTERED 10
+
+ enum adf_dev_reset_mode {
+ ADF_DEV_RESET_ASYNC = 0,
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
+index 89001fe92e762..0f9e2d59ce385 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
+@@ -97,6 +97,9 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev)
+ return -EFAULT;
+ }
+
++ if (hw_data->get_ring_to_svc_map)
++ hw_data->ring_to_svc_map = hw_data->get_ring_to_svc_map(accel_dev);
++
+ if (adf_ae_init(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to initialise Acceleration Engine\n");
+@@ -231,6 +234,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ return -EFAULT;
+ }
++ set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
+
+ if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
+ dev_err(&GET_DEV(accel_dev),
+@@ -239,6 +243,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ return -EFAULT;
+ }
++ set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
+
+ adf_dbgfs_add(accel_dev);
+
+@@ -272,13 +277,17 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
+ clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+- if (!list_empty(&accel_dev->crypto_list)) {
++ if (!list_empty(&accel_dev->crypto_list) &&
++ test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) {
+ qat_algs_unregister();
+ qat_asym_algs_unregister();
+ }
++ clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
+
+- if (!list_empty(&accel_dev->compression_list))
++ if (!list_empty(&accel_dev->compression_list) &&
++ test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status))
+ qat_comp_algs_unregister();
++ clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
+
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+@@ -440,13 +449,6 @@ int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
+
+ mutex_lock(&accel_dev->state_lock);
+
+- if (!adf_dev_started(accel_dev)) {
+- dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
+- accel_dev->accel_id);
+- ret = -EINVAL;
+- goto out;
+- }
+-
+ if (reconfig) {
+ ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ goto out;
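
/*
 * The ADF_STATUS_*_ALGS_REGISTERED bits introduced above make teardown
 * idempotent: each unregister step runs only if its register step actually
 * completed, so a partially failed start followed by stop (or a repeated
 * stop) can no longer unregister algorithms that were never, or are no
 * longer, registered. The guard in miniature:
 */
if (test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status))
	qat_comp_algs_unregister();
clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
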
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+index a74d2f9303670..8f04b0d3c5ac8 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+@@ -5,6 +5,7 @@
+ #include <linux/pci.h>
+ #include "adf_accel_devices.h"
+ #include "adf_cfg.h"
++#include "adf_cfg_services.h"
+ #include "adf_common_drv.h"
+
+ static const char * const state_operations[] = {
+@@ -52,6 +53,13 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ case DEV_DOWN:
+ dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
+
++ if (!adf_dev_started(accel_dev)) {
++ dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
++ accel_id);
++
++ break;
++ }
++
+ ret = adf_dev_down(accel_dev, true);
+ if (ret < 0)
+ return -EINVAL;
+@@ -61,7 +69,9 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ dev_info(dev, "Starting device qat_dev%d\n", accel_id);
+
+ ret = adf_dev_up(accel_dev, true);
+- if (ret < 0) {
++ if (ret == -EALREADY) {
++ break;
++ } else if (ret) {
+ dev_err(dev, "Failed to start device qat_dev%d\n",
+ accel_id);
+ adf_dev_down(accel_dev, true);
+@@ -75,18 +85,6 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ return count;
+ }
+
+-static const char * const services_operations[] = {
+- ADF_CFG_CY,
+- ADF_CFG_DC,
+- ADF_CFG_SYM,
+- ADF_CFG_ASYM,
+- ADF_CFG_ASYM_SYM,
+- ADF_CFG_ASYM_DC,
+- ADF_CFG_DC_ASYM,
+- ADF_CFG_SYM_DC,
+- ADF_CFG_DC_SYM,
+-};
+-
+ static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+ {
+@@ -121,7 +119,7 @@ static ssize_t cfg_services_store(struct device *dev, struct device_attribute *a
+ struct adf_accel_dev *accel_dev;
+ int ret;
+
+- ret = sysfs_match_string(services_operations, buf);
++ ret = sysfs_match_string(adf_cfg_services, buf);
+ if (ret < 0)
+ return ret;
+
+@@ -135,7 +133,7 @@ static ssize_t cfg_services_store(struct device *dev, struct device_attribute *a
+ return -EINVAL;
+ }
+
+- ret = adf_sysfs_update_dev_config(accel_dev, services_operations[ret]);
++ ret = adf_sysfs_update_dev_config(accel_dev, adf_cfg_services[ret]);
+ if (ret < 0)
+ return ret;
+
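
The sysfs store above leans on sysfs_match_string() returning the index of the matching table entry, so the same index can be fed straight back into adf_cfg_services[]. A rough userspace model of that lookup-by-index idiom (the table contents and error value here are illustrative):

#include <stdio.h>
#include <string.h>

static const char * const services[] = { "sym", "asym", "dc", "dcc" };

/* Loose stand-in for sysfs_match_string(); -22 mimics -EINVAL. */
static int match_string(const char * const *tbl, int n, const char *buf)
{
        for (int i = 0; i < n; i++)
                if (!strcmp(tbl[i], buf))
                        return i;
        return -22;
}

int main(void)
{
        int ret = match_string(services, 4, "dcc");

        if (ret >= 0)
                printf("configure device for '%s'\n", services[ret]);
        return 0;
}
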
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
+index 08bca1c506c0e..e2dd568b87b51 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
+@@ -90,7 +90,7 @@ DEFINE_SEQ_ATTRIBUTE(adf_ring_debug);
+ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+ {
+ struct adf_etr_ring_debug_entry *ring_debug;
+- char entry_name[8];
++ char entry_name[16];
+
+ ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+ if (!ring_debug)
+@@ -192,7 +192,7 @@ int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+ {
+ struct adf_accel_dev *accel_dev = bank->accel_dev;
+ struct dentry *parent = accel_dev->transport->debug;
+- char name[8];
++ char name[16];
+
+ snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+ bank->bank_debug_dir = debugfs_create_dir(name, parent);
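
The bump from char name[8] to char name[16] matters because snprintf() truncates silently: "bank_%02d" only fits in 8 bytes while the number stays below 100, and the larger buffer also keeps -Wformat-truncation quiet. A standalone demonstration:

#include <stdio.h>

int main(void)
{
        char small[8], big[16];

        /* "bank_" + 3 digits + NUL = 9 bytes: does not fit in small[] */
        snprintf(small, sizeof(small), "bank_%02d", 123);
        snprintf(big, sizeof(big), "bank_%02d", 123);

        printf("small: '%s' (truncated)\n", small); /* bank_12 */
        printf("big:   '%s'\n", big);               /* bank_123 */
        return 0;
}
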
+diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
+index 3e968a4bcc9cd..019a6443834e0 100644
+--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
++++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
+@@ -16,6 +16,7 @@ enum icp_qat_fw_init_admin_cmd_id {
+ ICP_QAT_FW_HEARTBEAT_SYNC = 7,
+ ICP_QAT_FW_HEARTBEAT_GET = 8,
+ ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
++ ICP_QAT_FW_DC_CHAIN_INIT = 11,
+ ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13,
+ ICP_QAT_FW_TIMER_GET = 19,
+ ICP_QAT_FW_PM_STATE_CONFIG = 128,
+diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs_send.c b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
+index bb80455b3e81e..b97b678823a97 100644
+--- a/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
++++ b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
+@@ -40,40 +40,44 @@ void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
+ spin_unlock_bh(&backlog->lock);
+ }
+
+-static void qat_alg_backlog_req(struct qat_alg_req *req,
+- struct qat_instance_backlog *backlog)
+-{
+- INIT_LIST_HEAD(&req->list);
+-
+- spin_lock_bh(&backlog->lock);
+- list_add_tail(&req->list, &backlog->list);
+- spin_unlock_bh(&backlog->lock);
+-}
+-
+-static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
++static bool qat_alg_try_enqueue(struct qat_alg_req *req)
+ {
+ struct qat_instance_backlog *backlog = req->backlog;
+ struct adf_etr_ring_data *tx_ring = req->tx_ring;
+ u32 *fw_req = req->fw_req;
+
+- /* If any request is already backlogged, then add to backlog list */
++ /* Check if any request is already backlogged */
+ if (!list_empty(&backlog->list))
+- goto enqueue;
++ return false;
+
+- /* If ring is nearly full, then add to backlog list */
++ /* Check if ring is nearly full */
+ if (adf_ring_nearly_full(tx_ring))
+- goto enqueue;
++ return false;
+
+- /* If adding request to HW ring fails, then add to backlog list */
++ /* Try to enqueue to HW ring */
+ if (adf_send_message(tx_ring, fw_req))
+- goto enqueue;
++ return false;
+
+- return -EINPROGRESS;
++ return true;
++}
+
+-enqueue:
+- qat_alg_backlog_req(req, backlog);
+
+- return -EBUSY;
++static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
++{
++ struct qat_instance_backlog *backlog = req->backlog;
++ int ret = -EINPROGRESS;
++
++ if (qat_alg_try_enqueue(req))
++ return ret;
++
++ spin_lock_bh(&backlog->lock);
++ if (!qat_alg_try_enqueue(req)) {
++ list_add_tail(&req->list, &backlog->list);
++ ret = -EBUSY;
++ }
++ spin_unlock_bh(&backlog->lock);
++
++ return ret;
+ }
+
+ int qat_alg_send_message(struct qat_alg_req *req)
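
The qat_algs_send.c rework above closes a race: the old code decided between ring and backlog before taking the backlog lock, so a request could slip past a concurrent drain. The fix is the classic check / lock / re-check pattern. A self-contained pthreads sketch of the same shape (the "ring" is one toy slot; all names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t backlog_lock = PTHREAD_MUTEX_INITIALIZER;
static int ring_slots = 1;      /* toy stand-in for the HW ring */

/* Models qat_alg_try_enqueue(): succeed only if the ring has room. */
static int try_enqueue(void)
{
        if (ring_slots <= 0)
                return 0;
        ring_slots--;
        return 1;
}

/* 0: sent directly, 1: backlogged (think -EINPROGRESS vs -EBUSY). */
static int send_maybacklog(void)
{
        if (try_enqueue())
                return 0;

        pthread_mutex_lock(&backlog_lock);
        /* Re-check under the lock: a concurrent drain may have made room. */
        int queued = !try_enqueue();
        if (queued)
                printf("request backlogged\n");
        pthread_mutex_unlock(&backlog_lock);

        return queued;
}

int main(void)
{
        printf("first:  %d\n", send_maybacklog()); /* 0 */
        printf("second: %d\n", send_maybacklog()); /* 1 */
        return 0;
}
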
+diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
+index 45e7e044cf4a0..8e5f3d84311e5 100644
+--- a/drivers/cxl/core/core.h
++++ b/drivers/cxl/core/core.h
+@@ -75,6 +75,7 @@ resource_size_t __rcrb_to_component(struct device *dev,
+ enum cxl_rcrb which);
+
+ extern struct rw_semaphore cxl_dpa_rwsem;
++extern struct rw_semaphore cxl_region_rwsem;
+
+ int cxl_memdev_init(void);
+ void cxl_memdev_exit(void);
+diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
+index 4449b34a80cc9..64e86b786db52 100644
+--- a/drivers/cxl/core/hdm.c
++++ b/drivers/cxl/core/hdm.c
+@@ -85,7 +85,7 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
+ struct cxl_component_regs *regs)
+ {
+ struct cxl_register_map map = {
+- .dev = &port->dev,
++ .host = &port->dev,
+ .resource = port->component_reg_phys,
+ .base = crb,
+ .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
+@@ -575,17 +575,11 @@ static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
+ CXL_HDM_DECODER0_CTRL_HOSTONLY);
+ }
+
+-static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
++static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
+ {
+ struct cxl_dport **t = &cxlsd->target[0];
+ int ways = cxlsd->cxld.interleave_ways;
+
+- if (dev_WARN_ONCE(&cxlsd->cxld.dev,
+- ways > 8 || ways > cxlsd->nr_targets,
+- "ways: %d overflows targets: %d\n", ways,
+- cxlsd->nr_targets))
+- return -ENXIO;
+-
+ *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
+ if (ways > 1)
+ *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
+@@ -601,8 +595,6 @@ static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
+ *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
+ if (ways > 7)
+ *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
+-
+- return 0;
+ }
+
+ /*
+@@ -650,6 +642,25 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
+ return -EBUSY;
+ }
+
++ /*
++ * For endpoint decoders hosted on CXL memory devices that
++ * support the sanitize operation, make sure sanitize is not in-flight.
++ */
++ if (is_endpoint_decoder(&cxld->dev)) {
++ struct cxl_endpoint_decoder *cxled =
++ to_cxl_endpoint_decoder(&cxld->dev);
++ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
++ struct cxl_memdev_state *mds =
++ to_cxl_memdev_state(cxlmd->cxlds);
++
++ if (mds && mds->security.sanitize_active) {
++ dev_dbg(&cxlmd->dev,
++ "attempted to commit %s during sanitize\n",
++ dev_name(&cxld->dev));
++ return -EBUSY;
++ }
++ }
++
+ down_read(&cxl_dpa_rwsem);
+ /* common decoder settings */
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
+@@ -670,13 +681,7 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
+ void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
+ u64 targets;
+
+- rc = cxlsd_set_targets(cxlsd, &targets);
+- if (rc) {
+- dev_dbg(&port->dev, "%s: target configuration error\n",
+- dev_name(&cxld->dev));
+- goto err;
+- }
+-
++ cxlsd_set_targets(cxlsd, &targets);
+ writel(upper_32_bits(targets), tl_hi);
+ writel(lower_32_bits(targets), tl_lo);
+ } else {
+@@ -694,7 +699,6 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
+
+ port->commit_end++;
+ rc = cxld_await_commit(hdm, cxld->id);
+-err:
+ if (rc) {
+ dev_dbg(&port->dev, "%s: error %d committing decoder\n",
+ dev_name(&cxld->dev), rc);
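
In the hdm.c hunks above, cxlsd_set_targets() packs one 8-bit dport id per interleave way into a single 64-bit target-list value, and its overflow check moves out to the region code, which now rejects ways > 8 before commit. Stripped of FIELD_PREP/GENMASK, the packing is plain shifting; a hypothetical standalone version:

#include <stdint.h>
#include <stdio.h>

/* Pack one 8-bit port id per way; way 0 lands in bits 7:0. */
static uint64_t pack_targets(const uint8_t *port_id, int ways)
{
        uint64_t tgt = 0;

        for (int i = 0; i < ways; i++)
                tgt |= (uint64_t)port_id[i] << (8 * i);
        return tgt;
}

int main(void)
{
        uint8_t ids[4] = { 0x01, 0x05, 0x02, 0x06 };

        printf("target list: %#llx\n",
               (unsigned long long)pack_targets(ids, 4)); /* 0x6020501 */
        return 0;
}
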
+diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
+index 4df4f614f490e..b91bb98869917 100644
+--- a/drivers/cxl/core/mbox.c
++++ b/drivers/cxl/core/mbox.c
+@@ -1125,20 +1125,7 @@ int cxl_dev_state_identify(struct cxl_memdev_state *mds)
+ }
+ EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
+
+-/**
+- * cxl_mem_sanitize() - Send a sanitization command to the device.
+- * @mds: The device data for the operation
+- * @cmd: The specific sanitization command opcode
+- *
+- * Return: 0 if the command was executed successfully, regardless of
+- * whether or not the actual security operation is done in the background,
+- * such as for the Sanitize case.
+- * Error return values can be the result of the mailbox command, -EINVAL
+- * when security requirements are not met or invalid contexts.
+- *
+- * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
+- */
+-int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
++static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
+ {
+ int rc;
+ u32 sec_out = 0;
+@@ -1183,7 +1170,45 @@ int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
+
+ return 0;
+ }
+-EXPORT_SYMBOL_NS_GPL(cxl_mem_sanitize, CXL);
++
++
++/**
++ * cxl_mem_sanitize() - Send a sanitization command to the device.
++ * @cxlmd: The device for the operation
++ * @cmd: The specific sanitization command opcode
++ *
++ * Return: 0 if the command was executed successfully, regardless of
++ * whether or not the actual security operation is done in the background,
++ * such as for the Sanitize case.
++ * Error return values can be the result of the mailbox command, -EINVAL
++ * when security requirements are not met or an invalid context is passed,
++ * or -EBUSY if the sanitize operation is already in flight.
++ *
++ * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
++ */
++int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
++{
++ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
++ struct cxl_port *endpoint;
++ int rc;
++
++ /* synchronize with cxl_mem_probe() and decoder write operations */
++ device_lock(&cxlmd->dev);
++ endpoint = cxlmd->endpoint;
++ down_read(&cxl_region_rwsem);
++ /*
++	 * Require an endpoint to be safe; otherwise the driver cannot
++	 * be sure that the device is unmapped.
++ */
++ if (endpoint && endpoint->commit_end == -1)
++ rc = __cxl_mem_sanitize(mds, cmd);
++ else
++ rc = -EBUSY;
++ up_read(&cxl_region_rwsem);
++ device_unlock(&cxlmd->dev);
++
++ return rc;
++}
+
+ static int add_dpa_res(struct device *dev, struct resource *parent,
+ struct resource *res, resource_size_t start,
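
The relocated cxl_mem_sanitize() above centralizes the policy that a device may only be sanitized while nothing maps it: take the device lock and the region read lock, then proceed only when no decoder is committed. A compact model of that guard, with both kernel locks collapsed into one mutex and -EBUSY spelled out (purely illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int commit_end = -1;     /* -1: no decoder committed */

static int do_sanitize(void)
{
        printf("sanitize started\n");
        return 0;
}

static int mem_sanitize(void)
{
        int rc;

        pthread_mutex_lock(&dev_lock);
        /* Refuse while any region still maps this device. */
        rc = (commit_end == -1) ? do_sanitize() : -16 /* -EBUSY */;
        pthread_mutex_unlock(&dev_lock);

        return rc;
}

int main(void)
{
        printf("rc=%d\n", mem_sanitize()); /* 0 */
        commit_end = 0;  /* pretend a decoder was committed */
        printf("rc=%d\n", mem_sanitize()); /* -16 */
        return 0;
}
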
+diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
+index 14b547c07f547..fed9573cf355e 100644
+--- a/drivers/cxl/core/memdev.c
++++ b/drivers/cxl/core/memdev.c
+@@ -125,13 +125,16 @@ static ssize_t security_state_show(struct device *dev,
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+- u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+- u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);
+- u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
+ unsigned long state = mds->security.state;
++ int rc = 0;
+
+- if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100)
+- return sysfs_emit(buf, "sanitize\n");
++ /* sync with latest submission state */
++ mutex_lock(&mds->mbox_mutex);
++ if (mds->security.sanitize_active)
++ rc = sysfs_emit(buf, "sanitize\n");
++ mutex_unlock(&mds->mbox_mutex);
++ if (rc)
++ return rc;
+
+ if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
+ return sysfs_emit(buf, "disabled\n");
+@@ -152,24 +155,17 @@ static ssize_t security_sanitize_store(struct device *dev,
+ const char *buf, size_t len)
+ {
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+- struct cxl_port *port = cxlmd->endpoint;
+ bool sanitize;
+ ssize_t rc;
+
+ if (kstrtobool(buf, &sanitize) || !sanitize)
+ return -EINVAL;
+
+- if (!port || !is_cxl_endpoint(port))
+- return -EINVAL;
+-
+- /* ensure no regions are mapped to this memdev */
+- if (port->commit_end != -1)
+- return -EBUSY;
+-
+- rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE);
++ rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SANITIZE);
++ if (rc)
++ return rc;
+
+- return rc ? rc : len;
++ return len;
+ }
+ static struct device_attribute dev_attr_security_sanitize =
+ __ATTR(sanitize, 0200, NULL, security_sanitize_store);
+@@ -179,24 +175,17 @@ static ssize_t security_erase_store(struct device *dev,
+ const char *buf, size_t len)
+ {
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+- struct cxl_port *port = cxlmd->endpoint;
+ ssize_t rc;
+ bool erase;
+
+ if (kstrtobool(buf, &erase) || !erase)
+ return -EINVAL;
+
+- if (!port || !is_cxl_endpoint(port))
+- return -EINVAL;
+-
+- /* ensure no regions are mapped to this memdev */
+- if (port->commit_end != -1)
+- return -EBUSY;
+-
+- rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE);
++ rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SECURE_ERASE);
++ if (rc)
++ return rc;
+
+- return rc ? rc : len;
++ return len;
+ }
+ static struct device_attribute dev_attr_security_erase =
+ __ATTR(erase, 0200, NULL, security_erase_store);
+@@ -556,21 +545,11 @@ void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+ }
+ EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
+
+-static void cxl_memdev_security_shutdown(struct device *dev)
+-{
+- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+-
+- if (mds->security.poll)
+- cancel_delayed_work_sync(&mds->security.poll_dwork);
+-}
+-
+ static void cxl_memdev_shutdown(struct device *dev)
+ {
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+
+ down_write(&cxl_memdev_rwsem);
+- cxl_memdev_security_shutdown(dev);
+ cxlmd->cxlds = NULL;
+ up_write(&cxl_memdev_rwsem);
+ }
+@@ -580,8 +559,8 @@ static void cxl_memdev_unregister(void *_cxlmd)
+ struct cxl_memdev *cxlmd = _cxlmd;
+ struct device *dev = &cxlmd->dev;
+
+- cxl_memdev_shutdown(dev);
+ cdev_device_del(&cxlmd->cdev, dev);
++ cxl_memdev_shutdown(dev);
+ put_device(dev);
+ }
+
+@@ -961,17 +940,16 @@ static const struct fw_upload_ops cxl_memdev_fw_ops = {
+ .cleanup = cxl_fw_cleanup,
+ };
+
+-static void devm_cxl_remove_fw_upload(void *fwl)
++static void cxl_remove_fw_upload(void *fwl)
+ {
+ firmware_upload_unregister(fwl);
+ }
+
+-int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
++int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
+ {
+ struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct device *dev = &cxlds->cxlmd->dev;
+ struct fw_upload *fwl;
+- int rc;
+
+ if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
+ return 0;
+@@ -979,19 +957,10 @@ int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
+ fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
+ &cxl_memdev_fw_ops, mds);
+ if (IS_ERR(fwl))
+- return dev_err_probe(dev, PTR_ERR(fwl),
+- "Failed to register firmware loader\n");
+-
+- rc = devm_add_action_or_reset(cxlds->dev, devm_cxl_remove_fw_upload,
+- fwl);
+- if (rc)
+- dev_err(dev,
+- "Failed to add firmware loader remove action: %d\n",
+- rc);
+-
+- return rc;
++ return PTR_ERR(fwl);
++ return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl);
+ }
+-EXPORT_SYMBOL_NS_GPL(cxl_memdev_setup_fw_upload, CXL);
++EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, CXL);
+
+ static const struct file_operations cxl_memdev_fops = {
+ .owner = THIS_MODULE,
+@@ -1002,36 +971,8 @@ static const struct file_operations cxl_memdev_fops = {
+ .llseek = noop_llseek,
+ };
+
+-static void put_sanitize(void *data)
+-{
+- struct cxl_memdev_state *mds = data;
+-
+- sysfs_put(mds->security.sanitize_node);
+-}
+-
+-static int cxl_memdev_security_init(struct cxl_memdev *cxlmd)
+-{
+- struct cxl_dev_state *cxlds = cxlmd->cxlds;
+- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+- struct device *dev = &cxlmd->dev;
+- struct kernfs_node *sec;
+-
+- sec = sysfs_get_dirent(dev->kobj.sd, "security");
+- if (!sec) {
+- dev_err(dev, "sysfs_get_dirent 'security' failed\n");
+- return -ENODEV;
+- }
+- mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
+- sysfs_put(sec);
+- if (!mds->security.sanitize_node) {
+- dev_err(dev, "sysfs_get_dirent 'state' failed\n");
+- return -ENODEV;
+- }
+-
+- return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds);
+- }
+-
+-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
++struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
++ struct cxl_dev_state *cxlds)
+ {
+ struct cxl_memdev *cxlmd;
+ struct device *dev;
+@@ -1059,11 +1000,7 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
+ if (rc)
+ goto err;
+
+- rc = cxl_memdev_security_init(cxlmd);
+- if (rc)
+- goto err;
+-
+- rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
++ rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
+ if (rc)
+ return ERR_PTR(rc);
+ return cxlmd;
+@@ -1079,6 +1016,50 @@ err:
+ }
+ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
+
++static void sanitize_teardown_notifier(void *data)
++{
++ struct cxl_memdev_state *mds = data;
++ struct kernfs_node *state;
++
++ /*
++	 * Prevent new irq-triggered invocations of the workqueue and
++	 * flush in-flight invocations.
++ */
++ mutex_lock(&mds->mbox_mutex);
++ state = mds->security.sanitize_node;
++ mds->security.sanitize_node = NULL;
++ mutex_unlock(&mds->mbox_mutex);
++
++ cancel_delayed_work_sync(&mds->security.poll_dwork);
++ sysfs_put(state);
++}
++
++int devm_cxl_sanitize_setup_notifier(struct device *host,
++ struct cxl_memdev *cxlmd)
++{
++ struct cxl_dev_state *cxlds = cxlmd->cxlds;
++ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
++ struct kernfs_node *sec;
++
++ if (!test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
++ return 0;
++
++ /*
++ * Note, the expectation is that @cxlmd would have failed to be
++ * created if these sysfs_get_dirent calls fail.
++ */
++ sec = sysfs_get_dirent(cxlmd->dev.kobj.sd, "security");
++ if (!sec)
++ return -ENOENT;
++ mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
++ sysfs_put(sec);
++ if (!mds->security.sanitize_node)
++ return -ENOENT;
++
++ return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds);
++}
++EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, CXL);
++
+ __init int cxl_memdev_init(void)
+ {
+ dev_t devt;
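
Much of the memdev.c churn above re-parents cleanup onto @host through devm_add_action_or_reset(), which either queues a teardown callback or, if queuing fails, runs it immediately. A userspace model of that idiom and its reverse-order unwind (fixed-size table and error value are invented for the sketch):

#include <stdio.h>

typedef void (*action_fn)(void *);

static action_fn actions[8];
static void *action_data[8];
static int nr_actions;

/* Rough model of devm_add_action_or_reset(). */
static int add_action_or_reset(action_fn fn, void *data)
{
        if (nr_actions == 8) {
                fn(data);       /* could not register: undo right away */
                return -12;     /* -ENOMEM */
        }
        actions[nr_actions] = fn;
        action_data[nr_actions] = data;
        nr_actions++;
        return 0;
}

/* Devres unwinds in reverse registration order. */
static void unwind(void)
{
        while (nr_actions--)
                actions[nr_actions](action_data[nr_actions]);
}

static void teardown(void *what)
{
        printf("teardown: %s\n", (char *)what);
}

int main(void)
{
        add_action_or_reset(teardown, "fw upload");
        add_action_or_reset(teardown, "sanitize notifier");
        unwind();       /* notifier first, then fw upload */
        return 0;
}
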
+diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
+index 7ca01a834e188..6a75a3cb601ec 100644
+--- a/drivers/cxl/core/port.c
++++ b/drivers/cxl/core/port.c
+@@ -28,6 +28,12 @@
+ * instantiated by the core.
+ */
+
++/*
++ * All changes to the interleave configuration occur with this lock held
++ * for write.
++ */
++DECLARE_RWSEM(cxl_region_rwsem);
++
+ static DEFINE_IDA(cxl_port_ida);
+ static DEFINE_XARRAY(cxl_root_buses);
+
+@@ -691,14 +697,14 @@ err:
+ return ERR_PTR(rc);
+ }
+
+-static int cxl_setup_comp_regs(struct device *dev, struct cxl_register_map *map,
++static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
+ resource_size_t component_reg_phys)
+ {
+ if (component_reg_phys == CXL_RESOURCE_NONE)
+ return 0;
+
+ *map = (struct cxl_register_map) {
+- .dev = dev,
++ .host = host,
+ .reg_type = CXL_REGLOC_RBI_COMPONENT,
+ .resource = component_reg_phys,
+ .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
+@@ -716,13 +722,23 @@ static int cxl_port_setup_regs(struct cxl_port *port,
+ component_reg_phys);
+ }
+
+-static int cxl_dport_setup_regs(struct cxl_dport *dport,
++static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
+ resource_size_t component_reg_phys)
+ {
++ int rc;
++
+ if (dev_is_platform(dport->dport_dev))
+ return 0;
+- return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
+- component_reg_phys);
++
++ /*
++	 * Use @dport->dport_dev as the context for error messages during
++	 * register probing, and fix up @host after the fact, since @host
++	 * may be NULL.
++ */
++ rc = cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
++ component_reg_phys);
++ dport->comp_map.host = host;
++ return rc;
+ }
+
+ static struct cxl_port *__devm_cxl_add_port(struct device *host,
+@@ -983,7 +999,16 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
+ if (!dport)
+ return ERR_PTR(-ENOMEM);
+
+- if (rcrb != CXL_RESOURCE_NONE) {
++ dport->dport_dev = dport_dev;
++ dport->port_id = port_id;
++ dport->port = port;
++
++ if (rcrb == CXL_RESOURCE_NONE) {
++ rc = cxl_dport_setup_regs(&port->dev, dport,
++ component_reg_phys);
++ if (rc)
++ return ERR_PTR(rc);
++ } else {
+ dport->rcrb.base = rcrb;
+ component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
+ CXL_RCRB_DOWNSTREAM);
+@@ -992,6 +1017,14 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
+ return ERR_PTR(-ENXIO);
+ }
+
++ /*
++ * RCH @dport is not ready to map until associated with its
++ * memdev
++ */
++ rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys);
++ if (rc)
++ return ERR_PTR(rc);
++
+ dport->rch = true;
+ }
+
+@@ -999,14 +1032,6 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
+ dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
+ &component_reg_phys);
+
+- dport->dport_dev = dport_dev;
+- dport->port_id = port_id;
+- dport->port = port;
+-
+- rc = cxl_dport_setup_regs(dport, component_reg_phys);
+- if (rc)
+- return ERR_PTR(rc);
+-
+ cond_cxl_root_lock(port);
+ rc = add_dport(port, dport);
+ cond_cxl_root_unlock(port);
+@@ -1217,35 +1242,39 @@ static struct device *grandparent(struct device *dev)
+ return NULL;
+ }
+
++static struct device *endpoint_host(struct cxl_port *endpoint)
++{
++ struct cxl_port *port = to_cxl_port(endpoint->dev.parent);
++
++ if (is_cxl_root(port))
++ return port->uport_dev;
++ return &port->dev;
++}
++
+ static void delete_endpoint(void *data)
+ {
+ struct cxl_memdev *cxlmd = data;
+ struct cxl_port *endpoint = cxlmd->endpoint;
+- struct cxl_port *parent_port;
+- struct device *parent;
+-
+- parent_port = cxl_mem_find_port(cxlmd, NULL);
+- if (!parent_port)
+- goto out;
+- parent = &parent_port->dev;
++ struct device *host = endpoint_host(endpoint);
+
+- device_lock(parent);
+- if (parent->driver && !endpoint->dead) {
+- devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
+- devm_release_action(parent, cxl_unlink_uport, endpoint);
+- devm_release_action(parent, unregister_port, endpoint);
++ device_lock(host);
++ if (host->driver && !endpoint->dead) {
++ devm_release_action(host, cxl_unlink_parent_dport, endpoint);
++ devm_release_action(host, cxl_unlink_uport, endpoint);
++ devm_release_action(host, unregister_port, endpoint);
+ }
+ cxlmd->endpoint = NULL;
+- device_unlock(parent);
+- put_device(parent);
+-out:
++ device_unlock(host);
+ put_device(&endpoint->dev);
++ put_device(host);
+ }
+
+ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
+ {
++ struct device *host = endpoint_host(endpoint);
+ struct device *dev = &cxlmd->dev;
+
++ get_device(host);
+ get_device(&endpoint->dev);
+ cxlmd->endpoint = endpoint;
+ cxlmd->depth = endpoint->depth;
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index 6d63b8798c299..9d60020c5cb3b 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -28,12 +28,6 @@
+ * 3. Decoder targets
+ */
+
+-/*
+- * All changes to the interleave configuration occur with this lock held
+- * for write.
+- */
+-static DECLARE_RWSEM(cxl_region_rwsem);
+-
+ static struct cxl_region *to_cxl_region(struct device *dev);
+
+ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
+@@ -294,7 +288,7 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ */
+ rc = cxl_region_invalidate_memregion(cxlr);
+ if (rc)
+- return rc;
++ goto out;
+
+ if (commit) {
+ rc = cxl_region_decode_commit(cxlr);
+@@ -1133,7 +1127,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+ }
+
+ if (is_cxl_root(parent_port)) {
+- parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
++ /*
++ * Root decoder IG is always set to value in CFMWS which
++ * may be different than this region's IG. We can use the
++ * region's IG here since interleave_granularity_store()
++ * does not allow interleaved host-bridges with
++ * root IG != region IG.
++ */
++ parent_ig = p->interleave_granularity;
+ parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
+ /*
+ * For purposes of address bit routing, use power-of-2 math for
+@@ -1195,6 +1196,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+ return rc;
+ }
+
++ if (iw > 8 || iw > cxlsd->nr_targets) {
++ dev_dbg(&cxlr->dev,
++ "%s:%s:%s: ways: %d overflows targets: %d\n",
++ dev_name(port->uport_dev), dev_name(&port->dev),
++ dev_name(&cxld->dev), iw, cxlsd->nr_targets);
++ return -ENXIO;
++ }
++
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
+ if (cxld->interleave_ways != iw ||
+ cxld->interleave_granularity != ig ||
+@@ -1480,6 +1489,14 @@ static int cxl_region_attach_auto(struct cxl_region *cxlr,
+ return 0;
+ }
+
++static int cmp_interleave_pos(const void *a, const void *b)
++{
++ struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
++ struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
++
++ return cxled_a->pos - cxled_b->pos;
++}
++
+ static struct cxl_port *next_port(struct cxl_port *port)
+ {
+ if (!port->parent_dport)
+@@ -1487,119 +1504,127 @@ static struct cxl_port *next_port(struct cxl_port *port)
+ return port->parent_dport->port;
+ }
+
+-static int decoder_match_range(struct device *dev, void *data)
++static int match_switch_decoder_by_range(struct device *dev, void *data)
+ {
+- struct cxl_endpoint_decoder *cxled = data;
+ struct cxl_switch_decoder *cxlsd;
++ struct range *r1, *r2 = data;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxlsd = to_cxl_switch_decoder(dev);
+- return range_contains(&cxlsd->cxld.hpa_range, &cxled->cxld.hpa_range);
+-}
+-
+-static void find_positions(const struct cxl_switch_decoder *cxlsd,
+- const struct cxl_port *iter_a,
+- const struct cxl_port *iter_b, int *a_pos,
+- int *b_pos)
+-{
+- int i;
++ r1 = &cxlsd->cxld.hpa_range;
+
+- for (i = 0, *a_pos = -1, *b_pos = -1; i < cxlsd->nr_targets; i++) {
+- if (cxlsd->target[i] == iter_a->parent_dport)
+- *a_pos = i;
+- else if (cxlsd->target[i] == iter_b->parent_dport)
+- *b_pos = i;
+- if (*a_pos >= 0 && *b_pos >= 0)
+- break;
+- }
++ if (is_root_decoder(dev))
++ return range_contains(r1, r2);
++ return (r1->start == r2->start && r1->end == r2->end);
+ }
+
+-static int cmp_decode_pos(const void *a, const void *b)
++static int find_pos_and_ways(struct cxl_port *port, struct range *range,
++ int *pos, int *ways)
+ {
+- struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
+- struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
+- struct cxl_memdev *cxlmd_a = cxled_to_memdev(cxled_a);
+- struct cxl_memdev *cxlmd_b = cxled_to_memdev(cxled_b);
+- struct cxl_port *port_a = cxled_to_port(cxled_a);
+- struct cxl_port *port_b = cxled_to_port(cxled_b);
+- struct cxl_port *iter_a, *iter_b, *port = NULL;
+ struct cxl_switch_decoder *cxlsd;
++ struct cxl_port *parent;
+ struct device *dev;
+- int a_pos, b_pos;
+- unsigned int seq;
+-
+- /* Exit early if any prior sorting failed */
+- if (cxled_a->pos < 0 || cxled_b->pos < 0)
+- return 0;
++ int rc = -ENXIO;
+
+- /*
+- * Walk up the hierarchy to find a shared port, find the decoder that
+- * maps the range, compare the relative position of those dport
+- * mappings.
+- */
+- for (iter_a = port_a; iter_a; iter_a = next_port(iter_a)) {
+- struct cxl_port *next_a, *next_b;
++ parent = next_port(port);
++ if (!parent)
++ return rc;
+
+- next_a = next_port(iter_a);
+- if (!next_a)
+- break;
++ dev = device_find_child(&parent->dev, range,
++ match_switch_decoder_by_range);
++ if (!dev) {
++ dev_err(port->uport_dev,
++ "failed to find decoder mapping %#llx-%#llx\n",
++ range->start, range->end);
++ return rc;
++ }
++ cxlsd = to_cxl_switch_decoder(dev);
++ *ways = cxlsd->cxld.interleave_ways;
+
+- for (iter_b = port_b; iter_b; iter_b = next_port(iter_b)) {
+- next_b = next_port(iter_b);
+- if (next_a != next_b)
+- continue;
+- port = next_a;
++ for (int i = 0; i < *ways; i++) {
++ if (cxlsd->target[i] == port->parent_dport) {
++ *pos = i;
++ rc = 0;
+ break;
+ }
+-
+- if (port)
+- break;
+ }
++ put_device(dev);
+
+- if (!port) {
+- dev_err(cxlmd_a->dev.parent,
+- "failed to find shared port with %s\n",
+- dev_name(cxlmd_b->dev.parent));
+- goto err;
+- }
++ return rc;
++}
+
+- dev = device_find_child(&port->dev, cxled_a, decoder_match_range);
+- if (!dev) {
+- struct range *range = &cxled_a->cxld.hpa_range;
++/**
++ * cxl_calc_interleave_pos() - calculate an endpoint position in a region
++ * @cxled: endpoint decoder member of given region
++ *
++ * The endpoint position is calculated by traversing the topology from
++ * the endpoint to the root decoder and iteratively applying this
++ * calculation:
++ *
++ * position = position * parent_ways + parent_pos;
++ *
++ * ...where @position is inferred from switch and root decoder target lists.
++ *
++ * Return: position >= 0 on success
++ * -ENXIO on failure
++ */
++static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
++{
++ struct cxl_port *iter, *port = cxled_to_port(cxled);
++ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
++ struct range *range = &cxled->cxld.hpa_range;
++ int parent_ways = 0, parent_pos = 0, pos = 0;
++ int rc;
+
+- dev_err(port->uport_dev,
+- "failed to find decoder that maps %#llx-%#llx\n",
+- range->start, range->end);
+- goto err;
+- }
++ /*
++ * Example: the expected interleave order of the 4-way region shown
++ * below is: mem0, mem2, mem1, mem3
++ *
++ * root_port
++ * / \
++ * host_bridge_0 host_bridge_1
++ * | | | |
++ * mem0 mem1 mem2 mem3
++ *
++ * In the example the calculator will iterate twice. The first iteration
++ * uses the mem position in the host-bridge and the ways of the host-
++ * bridge to generate the first, or local, position. The second
++ * iteration uses the host-bridge position in the root_port and the ways
++ * of the root_port to refine the position.
++ *
++ * A trace of the calculation per endpoint looks like this:
++ * mem0: pos = 0 * 2 + 0 mem2: pos = 0 * 2 + 0
++ * pos = 0 * 2 + 0 pos = 0 * 2 + 1
++ * pos: 0 pos: 1
++ *
++ * mem1: pos = 0 * 2 + 1 mem3: pos = 0 * 2 + 1
++ * pos = 1 * 2 + 0 pos = 1 * 2 + 1
++	 *       pos: 2                   pos: 3
++ *
++ * Note that while this example is simple, the method applies to more
++ * complex topologies, including those with switches.
++ */
+
+- cxlsd = to_cxl_switch_decoder(dev);
+- do {
+- seq = read_seqbegin(&cxlsd->target_lock);
+- find_positions(cxlsd, iter_a, iter_b, &a_pos, &b_pos);
+- } while (read_seqretry(&cxlsd->target_lock, seq));
++ /* Iterate from endpoint to root_port refining the position */
++ for (iter = port; iter; iter = next_port(iter)) {
++ if (is_cxl_root(iter))
++ break;
+
+- put_device(dev);
++ rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways);
++ if (rc)
++ return rc;
+
+- if (a_pos < 0 || b_pos < 0) {
+- dev_err(port->uport_dev,
+- "failed to find shared decoder for %s and %s\n",
+- dev_name(cxlmd_a->dev.parent),
+- dev_name(cxlmd_b->dev.parent));
+- goto err;
++ pos = pos * parent_ways + parent_pos;
+ }
+
+- dev_dbg(port->uport_dev, "%s comes %s %s\n",
+- dev_name(cxlmd_a->dev.parent),
+- a_pos - b_pos < 0 ? "before" : "after",
+- dev_name(cxlmd_b->dev.parent));
++ dev_dbg(&cxlmd->dev,
++ "decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n",
++ dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent),
++ dev_name(&port->dev), range->start, range->end, pos);
+
+- return a_pos - b_pos;
+-err:
+- cxled_a->pos = -1;
+- return 0;
++ return pos;
+ }
+
+ static int cxl_region_sort_targets(struct cxl_region *cxlr)
+@@ -1607,22 +1632,21 @@ static int cxl_region_sort_targets(struct cxl_region *cxlr)
+ struct cxl_region_params *p = &cxlr->params;
+ int i, rc = 0;
+
+- sort(p->targets, p->nr_targets, sizeof(p->targets[0]), cmp_decode_pos,
+- NULL);
+-
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
++ cxled->pos = cxl_calc_interleave_pos(cxled);
+ /*
+- * Record that sorting failed, but still continue to restore
+- * cxled->pos with its ->targets[] position so that follow-on
+- * code paths can reliably do p->targets[cxled->pos] to
+- * self-reference their entry.
++ * Record that sorting failed, but still continue to calc
++ * cxled->pos so that follow-on code paths can reliably
++ * do p->targets[cxled->pos] to self-reference their entry.
+ */
+ if (cxled->pos < 0)
+ rc = -ENXIO;
+- cxled->pos = i;
+ }
++ /* Keep the cxlr target list in interleave position order */
++ sort(p->targets, p->nr_targets, sizeof(p->targets[0]),
++ cmp_interleave_pos, NULL);
+
+ dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
+ return rc;
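
Reduced to its arithmetic, cxl_calc_interleave_pos() folds each hop toward the root into pos = pos * parent_ways + parent_pos. A standalone version that reproduces the mem3 trace from the comment above (the two-level topology is hard-coded for illustration):

#include <stdio.h>

struct hop { int pos, ways; }; /* position within parent, parent's ways */

static int calc_pos(const struct hop *hops, int n)
{
        int pos = 0;

        /* Walk endpoint -> root, refining the position per level. */
        for (int i = 0; i < n; i++)
                pos = pos * hops[i].ways + hops[i].pos;
        return pos;
}

int main(void)
{
        /* mem3: second dport of host_bridge_1, second bridge at the root */
        struct hop mem3[] = { { 1, 2 }, { 1, 2 } };

        printf("mem3 pos: %d\n", calc_pos(mem3, 2)); /* 3 */
        return 0;
}
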
+@@ -1658,6 +1682,12 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ return -ENXIO;
+ }
+
++ if (p->nr_targets >= p->interleave_ways) {
++ dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
++ p->nr_targets);
++ return -EINVAL;
++ }
++
+ ep_port = cxled_to_port(cxled);
+ root_port = cxlrd_to_port(cxlrd);
+ dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
+@@ -1750,7 +1780,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ if (p->nr_targets == p->interleave_ways) {
+ rc = cxl_region_setup_targets(cxlr);
+ if (rc)
+- goto err_decrement;
++ return rc;
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
+@@ -1761,13 +1791,27 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ .end = p->res->end,
+ };
+
+- return 0;
++ if (p->nr_targets != p->interleave_ways)
++ return 0;
+
+-err_decrement:
+- p->nr_targets--;
+- cxled->pos = -1;
+- p->targets[pos] = NULL;
+- return rc;
++ /*
++ * Test the auto-discovery position calculator function
++ * against this successfully created user-defined region.
++ * A fail message here means that this interleave config
++ * will fail when presented as CXL_REGION_F_AUTO.
++ */
++ for (int i = 0; i < p->nr_targets; i++) {
++ struct cxl_endpoint_decoder *cxled = p->targets[i];
++ int test_pos;
++
++ test_pos = cxl_calc_interleave_pos(cxled);
++ dev_dbg(&cxled->cxld.dev,
++ "Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n",
++ (test_pos == cxled->pos) ? "success" : "fail",
++ test_pos, cxled->pos);
++ }
++
++ return 0;
+ }
+
+ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
+@@ -2696,7 +2740,7 @@ err:
+ return rc;
+ }
+
+-static int match_decoder_by_range(struct device *dev, void *data)
++static int match_root_decoder_by_range(struct device *dev, void *data)
+ {
+ struct range *r1, *r2 = data;
+ struct cxl_root_decoder *cxlrd;
+@@ -2827,7 +2871,7 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
+ int rc;
+
+ cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
+- match_decoder_by_range);
++ match_root_decoder_by_range);
+ if (!cxlrd_dev) {
+ dev_err(cxlmd->dev.parent,
+ "%s:%s no CXL window for range %#llx:%#llx\n",
+diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
+index 6281127b3e9d9..e0fbe964f6f0a 100644
+--- a/drivers/cxl/core/regs.c
++++ b/drivers/cxl/core/regs.c
+@@ -204,7 +204,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
+ struct cxl_component_regs *regs,
+ unsigned long map_mask)
+ {
+- struct device *dev = map->dev;
++ struct device *host = map->host;
+ struct mapinfo {
+ const struct cxl_reg_map *rmap;
+ void __iomem **addr;
+@@ -225,7 +225,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
+ continue;
+ phys_addr = map->resource + mi->rmap->offset;
+ length = mi->rmap->size;
+- *(mi->addr) = devm_cxl_iomap_block(dev, phys_addr, length);
++ *(mi->addr) = devm_cxl_iomap_block(host, phys_addr, length);
+ if (!*(mi->addr))
+ return -ENOMEM;
+ }
+@@ -237,7 +237,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
+ int cxl_map_device_regs(const struct cxl_register_map *map,
+ struct cxl_device_regs *regs)
+ {
+- struct device *dev = map->dev;
++ struct device *host = map->host;
+ resource_size_t phys_addr = map->resource;
+ struct mapinfo {
+ const struct cxl_reg_map *rmap;
+@@ -259,7 +259,7 @@ int cxl_map_device_regs(const struct cxl_register_map *map,
+
+ addr = phys_addr + mi->rmap->offset;
+ length = mi->rmap->size;
+- *(mi->addr) = devm_cxl_iomap_block(dev, addr, length);
++ *(mi->addr) = devm_cxl_iomap_block(host, addr, length);
+ if (!*(mi->addr))
+ return -ENOMEM;
+ }
+@@ -309,7 +309,7 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
+ int regloc, i;
+
+ *map = (struct cxl_register_map) {
+- .dev = &pdev->dev,
++ .host = &pdev->dev,
+ .resource = CXL_RESOURCE_NONE,
+ };
+
+@@ -403,15 +403,15 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, CXL);
+
+ static int cxl_map_regblock(struct cxl_register_map *map)
+ {
+- struct device *dev = map->dev;
++ struct device *host = map->host;
+
+ map->base = ioremap(map->resource, map->max_size);
+ if (!map->base) {
+- dev_err(dev, "failed to map registers\n");
++ dev_err(host, "failed to map registers\n");
+ return -ENOMEM;
+ }
+
+- dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
++ dev_dbg(host, "Mapped CXL Memory Device resource %pa\n", &map->resource);
+ return 0;
+ }
+
+@@ -425,28 +425,28 @@ static int cxl_probe_regs(struct cxl_register_map *map)
+ {
+ struct cxl_component_reg_map *comp_map;
+ struct cxl_device_reg_map *dev_map;
+- struct device *dev = map->dev;
++ struct device *host = map->host;
+ void __iomem *base = map->base;
+
+ switch (map->reg_type) {
+ case CXL_REGLOC_RBI_COMPONENT:
+ comp_map = &map->component_map;
+- cxl_probe_component_regs(dev, base, comp_map);
+- dev_dbg(dev, "Set up component registers\n");
++ cxl_probe_component_regs(host, base, comp_map);
++ dev_dbg(host, "Set up component registers\n");
+ break;
+ case CXL_REGLOC_RBI_MEMDEV:
+ dev_map = &map->device_map;
+- cxl_probe_device_regs(dev, base, dev_map);
++ cxl_probe_device_regs(host, base, dev_map);
+ if (!dev_map->status.valid || !dev_map->mbox.valid ||
+ !dev_map->memdev.valid) {
+- dev_err(dev, "registers not found: %s%s%s\n",
++ dev_err(host, "registers not found: %s%s%s\n",
+ !dev_map->status.valid ? "status " : "",
+ !dev_map->mbox.valid ? "mbox " : "",
+ !dev_map->memdev.valid ? "memdev " : "");
+ return -ENXIO;
+ }
+
+- dev_dbg(dev, "Probing device registers...\n");
++ dev_dbg(host, "Probing device registers...\n");
+ break;
+ default:
+ break;
+diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
+index 76d92561af294..b5b015b661eae 100644
+--- a/drivers/cxl/cxl.h
++++ b/drivers/cxl/cxl.h
+@@ -247,7 +247,7 @@ struct cxl_pmu_reg_map {
+
+ /**
+ * struct cxl_register_map - DVSEC harvested register block mapping parameters
+- * @dev: device for devm operations and logging
++ * @host: device for devm operations and logging
+ * @base: virtual base of the register-block-BAR + @block_offset
+ * @resource: physical resource base of the register block
+ * @max_size: maximum mapping size to perform register search
+@@ -257,7 +257,7 @@ struct cxl_pmu_reg_map {
+ * @pmu_map: cxl_reg_maps for CXL Performance Monitoring Units
+ */
+ struct cxl_register_map {
+- struct device *dev;
++ struct device *host;
+ void __iomem *base;
+ resource_size_t resource;
+ resource_size_t max_size;
+diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
+index 706f8a6d1ef43..6933bc20e76b6 100644
+--- a/drivers/cxl/cxlmem.h
++++ b/drivers/cxl/cxlmem.h
+@@ -84,9 +84,12 @@ static inline bool is_cxl_endpoint(struct cxl_port *port)
+ return is_cxl_memdev(port->uport_dev);
+ }
+
+-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
++struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
++ struct cxl_dev_state *cxlds);
++int devm_cxl_sanitize_setup_notifier(struct device *host,
++ struct cxl_memdev *cxlmd);
+ struct cxl_memdev_state;
+-int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds);
++int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds);
+ int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ resource_size_t base, resource_size_t len,
+ resource_size_t skipped);
+@@ -360,16 +363,16 @@ struct cxl_fw_state {
+ *
+ * @state: state of last security operation
+ * @enabled_cmds: All security commands enabled in the CEL
+- * @poll: polling for sanitization is enabled, device has no mbox irq support
+ * @poll_tmo_secs: polling timeout
++ * @sanitize_active: sanitize completion pending
+ * @poll_dwork: polling work item
+ * @sanitize_node: sanitation sysfs file to notify
+ */
+ struct cxl_security_state {
+ unsigned long state;
+ DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX);
+- bool poll;
+ int poll_tmo_secs;
++ bool sanitize_active;
+ struct delayed_work poll_dwork;
+ struct kernfs_node *sanitize_node;
+ };
+@@ -883,7 +886,7 @@ static inline void cxl_mem_active_dec(void)
+ }
+ #endif
+
+-int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd);
++int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd);
+
+ struct cxl_hdm {
+ struct cxl_component_regs regs;
+diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
+index 44a21ab7add51..8bece1e2e2491 100644
+--- a/drivers/cxl/pci.c
++++ b/drivers/cxl/pci.c
+@@ -128,10 +128,10 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
+ reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+ opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
+ if (opcode == CXL_MBOX_OP_SANITIZE) {
++ mutex_lock(&mds->mbox_mutex);
+ if (mds->security.sanitize_node)
+- sysfs_notify_dirent(mds->security.sanitize_node);
+-
+- dev_dbg(cxlds->dev, "Sanitization operation ended\n");
++ mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
++ mutex_unlock(&mds->mbox_mutex);
+ } else {
+ /* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
+ rcuwait_wake_up(&mds->mbox_wait);
+@@ -152,18 +152,16 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
+ mutex_lock(&mds->mbox_mutex);
+ if (cxl_mbox_background_complete(cxlds)) {
+ mds->security.poll_tmo_secs = 0;
+- put_device(cxlds->dev);
+-
+ if (mds->security.sanitize_node)
+ sysfs_notify_dirent(mds->security.sanitize_node);
++ mds->security.sanitize_active = false;
+
+ dev_dbg(cxlds->dev, "Sanitization operation ended\n");
+ } else {
+ int timeout = mds->security.poll_tmo_secs + 10;
+
+ mds->security.poll_tmo_secs = min(15 * 60, timeout);
+- queue_delayed_work(system_wq, &mds->security.poll_dwork,
+- timeout * HZ);
++ schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
+ }
+ mutex_unlock(&mds->mbox_mutex);
+ }
+@@ -295,18 +293,15 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
+ * and allow userspace to poll(2) for completion.
+ */
+ if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
+- if (mds->security.poll) {
+- /* hold the device throughout */
+- get_device(cxlds->dev);
+-
+- /* give first timeout a second */
+- timeout = 1;
+- mds->security.poll_tmo_secs = timeout;
+- queue_delayed_work(system_wq,
+- &mds->security.poll_dwork,
+- timeout * HZ);
+- }
+-
++ if (mds->security.sanitize_active)
++ return -EBUSY;
++
++ /* give first timeout a second */
++ timeout = 1;
++ mds->security.poll_tmo_secs = timeout;
++ mds->security.sanitize_active = true;
++ schedule_delayed_work(&mds->security.poll_dwork,
++ timeout * HZ);
+ dev_dbg(dev, "Sanitization operation started\n");
+ goto success;
+ }
+@@ -389,7 +384,9 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
+ const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
+ struct device *dev = cxlds->dev;
+ unsigned long timeout;
++ int irq, msgnum;
+ u64 md_status;
++ u32 ctrl;
+
+ timeout = jiffies + mbox_ready_timeout * HZ;
+ do {
+@@ -437,33 +434,26 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
+ dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
+
+ rcuwait_init(&mds->mbox_wait);
++ INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
+
+- if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) {
+- u32 ctrl;
+- int irq, msgnum;
+- struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+-
+- msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
+- irq = pci_irq_vector(pdev, msgnum);
+- if (irq < 0)
+- goto mbox_poll;
+-
+- if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
+- goto mbox_poll;
++ /* background command interrupts are optional */
++ if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
++ return 0;
+
+- /* enable background command mbox irq support */
+- ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+- ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
+- writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
++ msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
++ irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum);
++ if (irq < 0)
++ return 0;
+
++ if (cxl_request_irq(cxlds, irq, NULL, cxl_pci_mbox_irq))
+ return 0;
+- }
+
+-mbox_poll:
+- mds->security.poll = true;
+- INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
++ dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n");
++ /* enable background command mbox irq support */
++ ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
++ ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
++ writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+
+- dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
+ return 0;
+ }
+
+@@ -484,7 +474,7 @@ static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
+ resource_size_t component_reg_phys;
+
+ *map = (struct cxl_register_map) {
+- .dev = &pdev->dev,
++ .host = &pdev->dev,
+ .resource = CXL_RESOURCE_NONE,
+ };
+
+@@ -882,11 +872,15 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (rc)
+ return rc;
+
+- cxlmd = devm_cxl_add_memdev(cxlds);
++ cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
+ if (IS_ERR(cxlmd))
+ return PTR_ERR(cxlmd);
+
+- rc = cxl_memdev_setup_fw_upload(mds);
++ rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
++ if (rc)
++ return rc;
++
++ rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
+ if (rc)
+ return rc;
+
+diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
+index 39ac069cabc75..74893c06aa087 100644
+--- a/drivers/devfreq/event/rockchip-dfi.c
++++ b/drivers/devfreq/event/rockchip-dfi.c
+@@ -193,14 +193,15 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
+ return dev_err_probe(dev, PTR_ERR(data->clk),
+ "Cannot get the clk pclk_ddr_mon\n");
+
+- /* try to find the optional reference to the pmu syscon */
+ node = of_parse_phandle(np, "rockchip,pmu", 0);
+- if (node) {
+- data->regmap_pmu = syscon_node_to_regmap(node);
+- of_node_put(node);
+- if (IS_ERR(data->regmap_pmu))
+- return PTR_ERR(data->regmap_pmu);
+- }
++ if (!node)
++ return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n");
++
++ data->regmap_pmu = syscon_node_to_regmap(node);
++ of_node_put(node);
++ if (IS_ERR(data->regmap_pmu))
++ return PTR_ERR(data->regmap_pmu);
++
+ data->dev = dev;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
+index 38b4110378de0..eb8b733065b24 100644
+--- a/drivers/dma-buf/dma-resv.c
++++ b/drivers/dma-buf/dma-resv.c
+@@ -301,7 +301,7 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+
+ dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
+ if ((old->context == fence->context && old_usage >= usage &&
+- dma_fence_is_later(fence, old)) ||
++ dma_fence_is_later_or_same(fence, old)) ||
+ dma_fence_is_signaled(old)) {
+ dma_resv_list_set(fobj, i, fence, usage);
+ dma_fence_put(old);
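
The dma-resv change above widens the replacement test from "strictly later" to "later or the very same fence", so re-adding an identical fence reuses its slot instead of appending a duplicate. On one timeline that predicate is just >= on the sequence number; a simplified model (the real code compares contexts separately and handles seqno wraparound):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fence { uint64_t context, seqno; };

/* May f1 replace f2? Same timeline and not older. */
static bool is_later_or_same(const struct fence *f1, const struct fence *f2)
{
        return f1->context == f2->context && f1->seqno >= f2->seqno;
}

int main(void)
{
        struct fence a = { .context = 7, .seqno = 42 };

        /* The same fence re-added: a strict "is later" test says no. */
        printf("%d\n", is_later_or_same(&a, &a)); /* 1 */
        return 0;
}
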
+diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
+index dc096839ac637..c5e679070e463 100644
+--- a/drivers/dma/idxd/Makefile
++++ b/drivers/dma/idxd/Makefile
+@@ -1,12 +1,12 @@
+ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+
++obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
++idxd_bus-y := bus.o
++
+ obj-$(CONFIG_INTEL_IDXD) += idxd.o
+ idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o
+
+ idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
+
+-obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
+-idxd_bus-y := bus.o
+-
+ obj-$(CONFIG_INTEL_IDXD_COMPAT) += idxd_compat.o
+ idxd_compat-y := compat.o
+diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
+index 1b046d9a3a269..16d342654da2b 100644
+--- a/drivers/dma/pxa_dma.c
++++ b/drivers/dma/pxa_dma.c
+@@ -722,7 +722,6 @@ static void pxad_free_desc(struct virt_dma_desc *vd)
+ dma_addr_t dma;
+ struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
+
+- BUG_ON(sw_desc->nb_desc == 0);
+ for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
+ if (i > 0)
+ dma = sw_desc->hw_desc[i - 1]->ddadr;
+diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
+index bae08b3f55c73..f414efdbd809e 100644
+--- a/drivers/dma/stm32-mdma.c
++++ b/drivers/dma/stm32-mdma.c
+@@ -489,7 +489,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
+ src_maxburst = chan->dma_config.src_maxburst;
+ dst_maxburst = chan->dma_config.dst_maxburst;
+
+- ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
++ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
+ ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+ ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+
+@@ -965,7 +965,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
+ if (!desc)
+ return NULL;
+
+- ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
++ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
+ ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+ ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+ cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index aa8e2e8ac2609..33d6d931b33bb 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2401,7 +2401,7 @@ static int edma_probe(struct platform_device *pdev)
+ if (irq < 0 && node)
+ irq = irq_of_parse_and_map(node, 0);
+
+- if (irq >= 0) {
++ if (irq > 0) {
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
+ dev_name(dev));
+ ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
+@@ -2417,7 +2417,7 @@ static int edma_probe(struct platform_device *pdev)
+ if (irq < 0 && node)
+ irq = irq_of_parse_and_map(node, 2);
+
+- if (irq >= 0) {
++ if (irq > 0) {
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
+ dev_name(dev));
+ ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
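
The edma tweak above exists because the two lookup helpers disagree about failure: platform_get_irq_byname() returns a negative errno, while irq_of_parse_and_map() returns 0 when no mapping exists, so after the fallback only a strictly positive value is a usable vector. The decision table as a tiny program:

#include <stdio.h>

/* Toy classifier mirroring the irq > 0 test in the probe path. */
static const char *classify_irq(int irq)
{
        if (irq > 0)
                return "request handler";
        if (irq == 0)
                return "no IRQ wired up, skip";
        return "lookup failed (errno), skip";
}

int main(void)
{
        printf("25 -> %s\n", classify_irq(25));
        printf(" 0 -> %s\n", classify_irq(0));
        printf("-6 -> %s\n", classify_irq(-6)); /* e.g. -ENXIO */
        return 0;
}
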
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index aa597cda0d887..2828e9573e90b 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -717,14 +717,11 @@ static void create_units(struct fw_device *device)
+ fw_unit_attributes,
+ &unit->attribute_group);
+
+- if (device_register(&unit->device) < 0)
+- goto skip_unit;
+-
+ fw_device_get(device);
+- continue;
+-
+- skip_unit:
+- kfree(unit);
++ if (device_register(&unit->device) < 0) {
++ put_device(&unit->device);
++ continue;
++ }
+ }
+ }
+
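
The firewire fix follows the driver-core rule that device_register(), even when it fails, leaves the caller holding a reference that must be dropped with put_device() so the release callback frees the object; a bare kfree() would bypass the refcount. A toy refcount model of why put, not free, is the right undo:

#include <stdio.h>
#include <stdlib.h>

struct toy_dev { int refcount; };

static void toy_release(struct toy_dev *d)
{
        printf("release: freeing device\n");
        free(d);
}

static void toy_put(struct toy_dev *d)
{
        if (--d->refcount == 0)
                toy_release(d);
}

/* Takes its reference up front, like device_register(). */
static int toy_register(struct toy_dev *d, int fail)
{
        d->refcount = 1;
        return fail ? -22 : 0;  /* on failure the ref is still held */
}

int main(void)
{
        struct toy_dev *d = calloc(1, sizeof(*d));

        if (toy_register(d, 1) < 0)
                toy_put(d);     /* correct: drop the ref, never free() */
        return 0;
}
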
+diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
+index 7edf2c95282fa..e779d866022b9 100644
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -1519,9 +1519,9 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+ sdev->use_10_for_rw = 1;
+
+ if (sbp2_param_exclusive_login) {
+- sdev->manage_system_start_stop = true;
+- sdev->manage_runtime_start_stop = true;
+- sdev->manage_shutdown = true;
++ sdev->manage_system_start_stop = 1;
++ sdev->manage_runtime_start_stop = 1;
++ sdev->manage_shutdown = 1;
+ }
+
+ if (sdev->type == TYPE_ROM)
+diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
+index 2b8bfcd010f5f..7865438b36960 100644
+--- a/drivers/firmware/arm_ffa/bus.c
++++ b/drivers/firmware/arm_ffa/bus.c
+@@ -193,6 +193,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ dev->release = ffa_release_device;
+ dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
+
++ ffa_dev->id = id;
+ ffa_dev->vm_id = vm_id;
+ ffa_dev->ops = ops;
+ uuid_copy(&ffa_dev->uuid, uuid);
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index 121f4fc903cd5..7cd6b1564e801 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -587,17 +587,9 @@ static int ffa_partition_info_get(const char *uuid_str,
+ return 0;
+ }
+
+-static void _ffa_mode_32bit_set(struct ffa_device *dev)
+-{
+- dev->mode_32bit = true;
+-}
+-
+ static void ffa_mode_32bit_set(struct ffa_device *dev)
+ {
+- if (drv_info->version > FFA_VERSION_1_0)
+- return;
+-
+- _ffa_mode_32bit_set(dev);
++ dev->mode_32bit = true;
+ }
+
+ static int ffa_sync_send_receive(struct ffa_device *dev,
+@@ -706,7 +698,7 @@ static void ffa_setup_partitions(void)
+
+ if (drv_info->version > FFA_VERSION_1_0 &&
+ !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
+- _ffa_mode_32bit_set(ffa_dev);
++ ffa_mode_32bit_set(ffa_dev);
+ }
+ kfree(pbuf);
+ }
+diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
+index 135278ddaf627..79fb687bb90f9 100644
+--- a/drivers/firmware/efi/unaccepted_memory.c
++++ b/drivers/firmware/efi/unaccepted_memory.c
+@@ -100,7 +100,7 @@ retry:
+ * overlap on physical address level.
+ */
+ list_for_each_entry(entry, &accepting_list, list) {
+- if (entry->end < range.start)
++ if (entry->end <= range.start)
+ continue;
+ if (entry->start >= range.end)
+ continue;
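
The unaccepted-memory fix above is a half-open-interval off-by-one: with ranges meaning [start, end), a neighbour whose end equals range.start merely touches and must be skipped, which takes <= rather than <. The corrected overlap predicate in isolation:

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; }; /* [start, end) */

static bool overlaps(struct range a, struct range b)
{
        /* Disjoint iff one ends at or before the other starts. */
        return !(a.end <= b.start || b.end <= a.start);
}

int main(void)
{
        struct range x = { 0x1000, 0x2000 };
        struct range y = { 0x2000, 0x3000 }; /* adjacent, no overlap */

        printf("%d\n", overlaps(x, y)); /* 0 */
        return 0;
}
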
+diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
+index 06fe8aca870d7..69831f1d91e3f 100644
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -167,6 +167,12 @@ static enum qcom_scm_convention __get_convention(void)
+ if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
+ return qcom_scm_convention;
+
++ /*
++ * Per the "SMC calling convention specification", the 64-bit calling
++	 * convention can only be used when the client is 64-bit; otherwise
++	 * the system will encounter undefined behaviour.
++ */
++#if IS_ENABLED(CONFIG_ARM64)
+ /*
+ * Device isn't required as there is only one argument - no device
+ * needed to dma_map_single to secure world
+@@ -187,6 +193,7 @@ static enum qcom_scm_convention __get_convention(void)
+ forced = true;
+ goto found;
+ }
++#endif
+
+ probed_convention = SMC_CONVENTION_ARM_32;
+ ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
+index 51d062e0c3f12..c1590d3aa9cb7 100644
+--- a/drivers/firmware/tegra/bpmp.c
++++ b/drivers/firmware/tegra/bpmp.c
+@@ -313,6 +313,8 @@ static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
+ }
+
++static int __maybe_unused tegra_bpmp_resume(struct device *dev);
++
+ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg)
+ {
+@@ -325,6 +327,14 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
++ if (bpmp->suspended) {
++ /* Reset BPMP IPC channels during resume based on flags passed */
++ if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
++ tegra_bpmp_resume(bpmp->dev);
++ else
++ return -EAGAIN;
++ }
++
+ channel = bpmp->tx_channel;
+
+ spin_lock(&bpmp->atomic_tx_lock);
+@@ -364,6 +374,14 @@ int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
++ if (bpmp->suspended) {
++ /* Reset BPMP IPC channels during resume based on flags passed */
++ if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
++ tegra_bpmp_resume(bpmp->dev);
++ else
++ return -EAGAIN;
++ }
++
+ channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
+ msg->tx.size);
+ if (IS_ERR(channel))
+@@ -796,10 +814,21 @@ deinit:
+ return err;
+ }
+
++static int __maybe_unused tegra_bpmp_suspend(struct device *dev)
++{
++ struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
++
++ bpmp->suspended = true;
++
++ return 0;
++}
++
+ static int __maybe_unused tegra_bpmp_resume(struct device *dev)
+ {
+ struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
+
++ bpmp->suspended = false;
++
+ if (bpmp->soc->ops->resume)
+ return bpmp->soc->ops->resume(bpmp);
+ else
+@@ -807,6 +836,7 @@ static int __maybe_unused tegra_bpmp_resume(struct device *dev)
+ }
+
+ static const struct dev_pm_ops tegra_bpmp_pm_ops = {
++ .suspend_noirq = tegra_bpmp_suspend,
+ .resume_noirq = tegra_bpmp_resume,
+ };
+
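
Taken together, the bpmp hunks form a suspend gate: suspend_noirq sets
bpmp->suspended, resume_noirq clears it, and both transfer paths check
the flag first. A caller that passes TEGRA_BPMP_MESSAGE_RESET gets the
IPC channels re-initialized early via tegra_bpmp_resume(); any other
caller receives -EAGAIN and is expected to retry after resume.
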
+diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
+index 26a37f47f4ca5..66c3846c91476 100644
+--- a/drivers/firmware/ti_sci.c
++++ b/drivers/firmware/ti_sci.c
+@@ -190,19 +190,6 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
+ return 0;
+ }
+
+-/**
+- * ti_sci_debugfs_destroy() - clean up log debug file
+- * @pdev: platform device pointer
+- * @info: Pointer to SCI entity information
+- */
+-static void ti_sci_debugfs_destroy(struct platform_device *pdev,
+- struct ti_sci_info *info)
+-{
+- if (IS_ERR(info->debug_region))
+- return;
+-
+- debugfs_remove(info->d);
+-}
+ #else /* CONFIG_DEBUG_FS */
+ static inline int ti_sci_debugfs_create(struct platform_device *dev,
+ struct ti_sci_info *info)
+@@ -3449,43 +3436,12 @@ out:
+ return ret;
+ }
+
+-static int ti_sci_remove(struct platform_device *pdev)
+-{
+- struct ti_sci_info *info;
+- struct device *dev = &pdev->dev;
+- int ret = 0;
+-
+- of_platform_depopulate(dev);
+-
+- info = platform_get_drvdata(pdev);
+-
+- if (info->nb.notifier_call)
+- unregister_restart_handler(&info->nb);
+-
+- mutex_lock(&ti_sci_list_mutex);
+- if (info->users)
+- ret = -EBUSY;
+- else
+- list_del(&info->node);
+- mutex_unlock(&ti_sci_list_mutex);
+-
+- if (!ret) {
+- ti_sci_debugfs_destroy(pdev, info);
+-
+- /* Safe to free channels since no more users */
+- mbox_free_channel(info->chan_tx);
+- mbox_free_channel(info->chan_rx);
+- }
+-
+- return ret;
+-}
+-
+ static struct platform_driver ti_sci_driver = {
+ .probe = ti_sci_probe,
+- .remove = ti_sci_remove,
+ .driver = {
+ .name = "ti-sci",
+ .of_match_table = of_match_ptr(ti_sci_of_match),
++ .suppress_bind_attrs = true,
+ },
+ };
+ module_platform_driver(ti_sci_driver);
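
The ti_sci driver is made explicitly non-removable here: remove() and
the now-unused ti_sci_debugfs_destroy() are dropped, and
suppress_bind_attrs keeps the device from being unbound through sysfs,
closing the window that the old -EBUSY check in remove() only partially
covered.
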
+diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
+index 44bf1709a6488..a8e5ac95cf170 100644
+--- a/drivers/gpio/gpio-sim.c
++++ b/drivers/gpio/gpio-sim.c
+@@ -1438,10 +1438,10 @@ static const struct config_item_type gpio_sim_device_config_group_type = {
+ static struct config_group *
+ gpio_sim_config_make_device_group(struct config_group *group, const char *name)
+ {
+- struct gpio_sim_device *dev __free(kfree) = NULL;
+ int id;
+
+- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++ struct gpio_sim_device *dev __free(kfree) = kzalloc(sizeof(*dev),
++ GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
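
The gpio-sim change above is the scope-based cleanup idiom: a pointer
annotated __free(kfree) is freed automatically when it goes out of
scope, and the convention is to initialize it right at the declaration
instead of assigning it later. A hedged sketch of the pattern (struct
foo and the surrounding function are illustrative):

    /* kfree() runs automatically on every exit from this scope,
     * including the early error return below.
     */
    struct foo *obj __free(kfree) = kzalloc(sizeof(*obj), GFP_KERNEL);
    if (!obj)
            return ERR_PTR(-ENOMEM);
    /* ... populate obj ... */
    return_ptr(obj); /* transfer ownership out, suppressing the auto-free */
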
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 51e41676de0b8..5d04720107ef5 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -1655,6 +1655,26 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ .ignore_wake = "SYNA1202:00@16",
+ },
+ },
++ {
++ /*
++ * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
++ * a "dolby" button. At the ACPI level an _AEI event-handler
++ * is connected which sets an ACPI variable to 1 on both
++ * edges. This variable can be polled + cleared to 0 using
++ * WMI. But since the variable is set on both edges the WMI
++ * interface is pretty useless even when polling.
++ * So instead the x86-android-tablets code instantiates
++ * a gpio-keys platform device for it.
++ * Ignore the _AEI handler for the pin, so that it is not busy.
++ */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
++ },
++ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++ .ignore_interrupt = "INT33FC:00@3",
++ },
++ },
+ {} /* Terminating entry */
+ };
+
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index 531faabead0f4..d9525d95e818d 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -512,6 +512,10 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
+ #if IS_ENABLED(CONFIG_SND_SOC_CS42L56)
+ { "reset", "cirrus,gpio-nreset", "cirrus,cs42l56" },
+ #endif
++#if IS_ENABLED(CONFIG_SND_SOC_MT2701_CS42448)
++ { "i2s1-in-sel-gpio1", NULL, "mediatek,mt2701-cs42448-machine" },
++ { "i2s1-in-sel-gpio2", NULL, "mediatek,mt2701-cs42448-machine" },
++#endif
+ #if IS_ENABLED(CONFIG_SND_SOC_TLV320AIC3X)
+ { "reset", "gpio-reset", "ti,tlv320aic3x" },
+ { "reset", "gpio-reset", "ti,tlv320aic33" },
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+index 38ccec913f009..f3a09ecb76992 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+@@ -29,6 +29,7 @@
+ #include "amdgpu.h"
+ #include "atom.h"
+
++#include <linux/device.h>
+ #include <linux/pci.h>
+ #include <linux/slab.h>
+ #include <linux/acpi.h>
+@@ -287,6 +288,10 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
++ /* ATRM is for on-platform devices only */
++ if (dev_is_removable(&adev->pdev->dev))
++ return false;
++
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index b6298e901cbd4..9a53ca555e708 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -183,6 +183,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ }
+
+ rcu_read_unlock();
++ *result = NULL;
+ return -ENOENT;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index d93a8961274c6..f4fd0d5bd9b68 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1411,7 +1411,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ if (r == -ENOMEM)
+ DRM_ERROR("Not enough memory for command submission!\n");
+ else if (r != -ERESTARTSYS && r != -EAGAIN)
+- DRM_ERROR("Failed to process the buffer list %d!\n", r);
++ DRM_DEBUG("Failed to process the buffer list %d!\n", r);
+ goto error_fini;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index a4faea4fa0b59..05405da51e7a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -748,6 +748,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ ssize_t result = 0;
+ int r;
+
++ if (!adev->smc_rreg)
++ return -EPERM;
++
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+@@ -804,6 +807,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
+ ssize_t result = 0;
+ int r;
+
++ if (!adev->smc_wreg)
++ return -EPERM;
++
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 2b8356699f235..a164857bdb9f4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -43,6 +43,7 @@
+ #include <drm/drm_fb_helper.h>
+ #include <drm/drm_probe_helper.h>
+ #include <drm/amdgpu_drm.h>
++#include <linux/device.h>
+ #include <linux/vgaarb.h>
+ #include <linux/vga_switcheroo.h>
+ #include <linux/efi.h>
+@@ -2018,7 +2019,6 @@ out:
+ */
+ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ {
+- struct drm_device *dev = adev_to_drm(adev);
+ struct pci_dev *parent;
+ int i, r;
+ bool total;
+@@ -2089,7 +2089,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ (amdgpu_is_atpx_hybrid() ||
+ amdgpu_has_atpx_dgpu_power_cntl()) &&
+ ((adev->flags & AMD_IS_APU) == 0) &&
+- !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
++ !dev_is_removable(&adev->pdev->dev))
+ adev->flags |= AMD_IS_PX;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+@@ -2103,6 +2103,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
+ adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
++ if (!amdgpu_device_pcie_dynamic_switching_supported())
++ adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
+
+ total = true;
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+@@ -3901,7 +3903,7 @@ fence_driver_init:
+
+ px = amdgpu_device_supports_px(ddev);
+
+- if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
++ if (px || (!dev_is_removable(&adev->pdev->dev) &&
+ apple_gmux_detect(NULL, NULL)))
+ vga_switcheroo_register_client(adev->pdev,
+ &amdgpu_switcheroo_ops, px);
+@@ -4046,7 +4048,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+
+ px = amdgpu_device_supports_px(adev_to_drm(adev));
+
+- if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
++ if (px || (!dev_is_removable(&adev->pdev->dev) &&
+ apple_gmux_detect(NULL, NULL)))
+ vga_switcheroo_unregister_client(adev->pdev);
+
+@@ -5183,7 +5185,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ * Flush RAM to disk so that after reboot
+ * the user can read log and see why the system rebooted.
+ */
+- if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
++ if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
++ amdgpu_ras_get_context(adev)->reboot) {
+ DRM_WARN("Emergency reboot.");
+
+ ksys_sync_helper();
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index 7d5e7ad28ba82..68a901287264f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -93,6 +93,7 @@
+ MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
+
+ #define mmRCC_CONFIG_MEMSIZE 0xde3
++#define mmMP0_SMN_C2PMSG_33 0x16061
+ #define mmMM_INDEX 0x0
+ #define mmMM_INDEX_HI 0x6
+ #define mmMM_DATA 0x1
+@@ -231,8 +232,26 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
+ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ uint8_t *binary)
+ {
+- uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+- int ret = 0;
++ uint64_t vram_size;
++ u32 msg;
++ int i, ret = 0;
++
++ /* It can take up to a second for IFWI init to complete on some dGPUs,
++ * but generally it should be in the 60-100 ms range. Normally this starts
++ * as soon as the device gets power, so by the time the OS loads it has
++ * long since completed. However, when a card is hotplugged, e.g. via
++ * USB4, we need to wait for init to complete. Once the C2PMSG is
++ * updated, we can continue.
++ */
++ if (dev_is_removable(&adev->pdev->dev)) {
++ for (i = 0; i < 1000; i++) {
++ msg = RREG32(mmMP0_SMN_C2PMSG_33);
++ if (msg & 0x80000000)
++ break;
++ msleep(1);
++ }
++ }
++ vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+
+ if (vram_size) {
+ uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
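
The loop bound above matches the comment's worst case: 1000 polls with
msleep(1) between them is on the order of a second before the code gives
up waiting and reads mmRCC_CONFIG_MEMSIZE anyway.
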
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 363e6a2cad8c2..578aeba49ea8e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -340,14 +340,11 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
+ adev->have_disp_power_ref = true;
+ return ret;
+ }
+- /* if we have no active crtcs, then drop the power ref
+- * we got before
++ /* if we have no active crtcs, then go on to
++ * drop the power ref we got before
++ */
+- if (!active && adev->have_disp_power_ref) {
+- pm_runtime_put_autosuspend(dev->dev);
++ if (!active && adev->have_disp_power_ref)
+ adev->have_disp_power_ref = false;
+- }
+-
+ out:
+ /* drop the power reference we got coming in here */
+ pm_runtime_put_autosuspend(dev->dev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 81edf66dbea8b..2c35036e4ba25 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2195,6 +2195,8 @@ retry_init:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
++ pci_wake_from_d3(pdev, TRUE);
++
+ /*
+ * For runpm implemented via BACO, PMFW will handle the
+ * timing for BACO in and out:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 2382921710ece..ef4cb921781d7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -384,9 +384,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring = &kiq->ring;
+ u32 domain = AMDGPU_GEM_DOMAIN_GTT;
+
++#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
+ /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
+ domain |= AMDGPU_GEM_DOMAIN_VRAM;
++#endif
+
+ /* create MQD for KIQ */
+ if (!adev->enable_mes_kiq && !ring->mqd_obj) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+index 6c6184f0dbc17..508f02eb0cf8f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+@@ -28,7 +28,7 @@
+ #define AMDGPU_IH_MAX_NUM_IVS 32
+
+ #define IH_RING_SIZE (256 * 1024)
+-#define IH_SW_RING_SIZE (8 * 1024) /* enough for 256 CAM entries */
++#define IH_SW_RING_SIZE (16 * 1024) /* enough for 512 CAM entries */
+
+ struct amdgpu_device;
+ struct amdgpu_iv_entry;
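
Both the old and new comments imply a 32-byte software IV entry
(8 KiB / 256 = 16 KiB / 512 = 32), so doubling IH_SW_RING_SIZE doubles
the CAM entry capacity without changing the entry layout.
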
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+index b6015157763af..6aa75052309ff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+@@ -556,8 +556,20 @@ static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
+ mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
+ mqd_prop.hqd_active = false;
+
++ if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
++ p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
++ mutex_lock(&adev->srbm_mutex);
++ amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
++ }
++
+ mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
+
++ if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
++ p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
++ amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
++ mutex_unlock(&adev->srbm_mutex);
++ }
++
+ amdgpu_bo_unreserve(q->mqd_obj);
+ }
+
+@@ -993,9 +1005,13 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
+ switch (queue_type) {
+ case AMDGPU_RING_TYPE_GFX:
+ ring->funcs = adev->gfx.gfx_ring[0].funcs;
++ ring->me = adev->gfx.gfx_ring[0].me;
++ ring->pipe = adev->gfx.gfx_ring[0].pipe;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ ring->funcs = adev->gfx.compute_ring[0].funcs;
++ ring->me = adev->gfx.compute_ring[0].me;
++ ring->pipe = adev->gfx.compute_ring[0].pipe;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ ring->funcs = adev->sdma.instance[0].ring.funcs;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index 163445baa4fc8..6f6341f702789 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -1373,7 +1373,8 @@ static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
+ {
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+- sysfs_remove_file_from_group(&adev->dev->kobj,
++ if (adev->dev->kobj.sd)
++ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &con->badpages_attr.attr,
+ RAS_FS_NAME);
+ }
+@@ -1390,7 +1391,8 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
+ .attrs = attrs,
+ };
+
+- sysfs_remove_group(&adev->dev->kobj, &group);
++ if (adev->dev->kobj.sd)
++ sysfs_remove_group(&adev->dev->kobj, &group);
+
+ return 0;
+ }
+@@ -1437,7 +1439,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
+ if (!obj || !obj->attr_inuse)
+ return -EINVAL;
+
+- sysfs_remove_file_from_group(&adev->dev->kobj,
++ if (adev->dev->kobj.sd)
++ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &obj->sysfs_attr.attr,
+ RAS_FS_NAME);
+ obj->attr_inuse = 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+index 595d5e535aca6..9d82701d365bb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+@@ -214,6 +214,12 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
+ control->i2c_address = EEPROM_I2C_MADDR_0;
+ return true;
+ case IP_VERSION(13, 0, 0):
++ if (strnstr(atom_ctx->vbios_pn, "D707",
++ sizeof(atom_ctx->vbios_pn)))
++ control->i2c_address = EEPROM_I2C_MADDR_0;
++ else
++ control->i2c_address = EEPROM_I2C_MADDR_4;
++ return true;
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 10):
+ control->i2c_address = EEPROM_I2C_MADDR_4;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 36b55d2bd51a9..03b4bcfca1963 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -292,8 +292,15 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+ void *ptr;
+ int i, idx;
+
++ bool in_ras_intr = amdgpu_ras_intr_triggered();
++
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
++ /* err_event_athub will corrupt the VCPU buffer, so we need to
++ * restore fw data and clear the buffer in amdgpu_vcn_resume() */
++ if (in_ras_intr)
++ return 0;
++
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+index 7148a216ae2fe..db6fc0cb18eb8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+@@ -239,6 +239,8 @@ static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
+
+ for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+ mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
++ if (!mode)
++ continue;
+ drm_mode_probed_add(connector, mode);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 82f25996ff5ef..89c8e51cd3323 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1095,8 +1095,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
+ bo = gem_to_amdgpu_bo(gobj);
+ }
+ mem = bo->tbo.resource;
+- if (mem->mem_type == TTM_PL_TT ||
+- mem->mem_type == AMDGPU_PL_PREEMPT)
++ if (mem && (mem->mem_type == TTM_PL_TT ||
++ mem->mem_type == AMDGPU_PL_PREEMPT))
+ pages_addr = bo->tbo.ttm->dma_address;
+ }
+
+@@ -2125,7 +2125,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
+ * Returns:
+ * 0 for success, error for failure.
+ */
+-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
++int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
++ int32_t xcp_id)
+ {
+ struct amdgpu_bo *root_bo;
+ struct amdgpu_bo_vm *root;
+@@ -2144,6 +2145,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
+ INIT_LIST_HEAD(&vm->done);
+ INIT_LIST_HEAD(&vm->pt_freed);
+ INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
++ INIT_KFIFO(vm->faults);
+
+ r = amdgpu_vm_init_entities(adev, vm);
+ if (r)
+@@ -2178,34 +2180,33 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
+ false, &root, xcp_id);
+ if (r)
+ goto error_free_delayed;
+- root_bo = &root->bo;
++
++ root_bo = amdgpu_bo_ref(&root->bo);
+ r = amdgpu_bo_reserve(root_bo, true);
+- if (r)
+- goto error_free_root;
++ if (r) {
++ amdgpu_bo_unref(&root->shadow);
++ amdgpu_bo_unref(&root_bo);
++ goto error_free_delayed;
++ }
+
++ amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
+ r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
+ if (r)
+- goto error_unreserve;
+-
+- amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
++ goto error_free_root;
+
+ r = amdgpu_vm_pt_clear(adev, vm, root, false);
+ if (r)
+- goto error_unreserve;
++ goto error_free_root;
+
+ amdgpu_bo_unreserve(vm->root.bo);
+-
+- INIT_KFIFO(vm->faults);
++ amdgpu_bo_unref(&root_bo);
+
+ return 0;
+
+-error_unreserve:
+- amdgpu_bo_unreserve(vm->root.bo);
+-
+ error_free_root:
+- amdgpu_bo_unref(&root->shadow);
++ amdgpu_vm_pt_free_root(adev, vm);
++ amdgpu_bo_unreserve(vm->root.bo);
+ amdgpu_bo_unref(&root_bo);
+- vm->root.bo = NULL;
+
+ error_free_delayed:
+ dma_fence_put(vm->last_tlb_flush);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 9032d7a24d7cd..306252cd67fd7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -6457,11 +6457,11 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
+ nv_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+- memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
++ memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ } else {
+ /* restore mqd with the backup copy */
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+- memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
++ memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
+ /* reset the ring */
+ ring->wptr = 0;
+ *ring->wptr_cpu_addr = 0;
+@@ -6735,7 +6735,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
+ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
+ /* reset MQD to a clean status */
+ if (adev->gfx.kiq[0].mqd_backup)
+- memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
++ memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
+
+ /* reset ring buffer */
+ ring->wptr = 0;
+@@ -6758,7 +6758,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
+ mutex_unlock(&adev->srbm_mutex);
+
+ if (adev->gfx.kiq[0].mqd_backup)
+- memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
++ memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
+ }
+
+ return 0;
+@@ -6779,11 +6779,11 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
+ mutex_unlock(&adev->srbm_mutex);
+
+ if (adev->gfx.mec.mqd_backup[mqd_idx])
+- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
++ memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ } else {
+ /* restore MQD to a clean status */
+ if (adev->gfx.mec.mqd_backup[mqd_idx])
+- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
++ memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ /* reset ring buffer */
+ ring->wptr = 0;
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
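
The memcpy() to memcpy_fromio()/memcpy_toio() conversions in this file
(and the matching gfx_v11 hunks below) line up with the amdgpu_gfx.c
hunk above that lets the MQD live in AMDGPU_GEM_DOMAIN_VRAM on gfx10+:
the live MQD may now sit behind an IO-memory mapping, so backups copy
from IO memory and restores copy to it.
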
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 762d7a19f1be1..b346eb0a0db11 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -83,6 +83,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
+
++static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
++ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
++};
++
+ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
+ {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
+@@ -275,6 +279,10 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
+ default:
+ break;
+ }
++ soc15_program_register_sequence(adev,
++ golden_settings_gc_11_0,
++ (const u32)ARRAY_SIZE(golden_settings_gc_11_0));
++
+ }
+
+ static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
+@@ -390,7 +398,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ cpu_ptr = &adev->wb.wb[index];
+
+- r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
++ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
+@@ -3684,11 +3692,11 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+- memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
++ memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ } else {
+ /* restore mqd with the backup copy */
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+- memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
++ memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
+ /* reset the ring */
+ ring->wptr = 0;
+ *ring->wptr_cpu_addr = 0;
+@@ -3977,7 +3985,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
+ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
+ /* reset MQD to a clean status */
+ if (adev->gfx.kiq[0].mqd_backup)
+- memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
++ memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
+
+ /* reset ring buffer */
+ ring->wptr = 0;
+@@ -4000,7 +4008,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
+ mutex_unlock(&adev->srbm_mutex);
+
+ if (adev->gfx.kiq[0].mqd_backup)
+- memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
++ memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
+ }
+
+ return 0;
+@@ -4021,11 +4029,11 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
+ mutex_unlock(&adev->srbm_mutex);
+
+ if (adev->gfx.mec.mqd_backup[mqd_idx])
+- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
++ memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ } else {
+ /* restore MQD to a clean status */
+ if (adev->gfx.mec.mqd_backup[mqd_idx])
+- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
++ memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ /* reset ring buffer */
+ ring->wptr = 0;
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 885ebd703260f..1943beb135c4c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -883,8 +883,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+- r = amdgpu_ib_get(adev, NULL, 16,
+- AMDGPU_IB_POOL_DIRECT, &ib);
++
++ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r)
+ goto err1;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index fd61574a737cb..2e23d08b45f4a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1039,8 +1039,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+- r = amdgpu_ib_get(adev, NULL, 16,
+- AMDGPU_IB_POOL_DIRECT, &ib);
++
++ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r)
+ goto err1;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+index 18ce5fe45f6f8..e481ef73af6e5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+@@ -296,8 +296,8 @@ static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+- r = amdgpu_ib_get(adev, NULL, 16,
+- AMDGPU_IB_POOL_DIRECT, &ib);
++
++ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r)
+ goto err1;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+index 4038455d79984..ef368ca79a668 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+@@ -28,6 +28,7 @@
+ #include "nbio/nbio_2_3_offset.h"
+ #include "nbio/nbio_2_3_sh_mask.h"
+ #include <uapi/linux/kfd_ioctl.h>
++#include <linux/device.h>
+ #include <linux/pci.h>
+
+ #define smnPCIE_CONFIG_CNTL 0x11180044
+@@ -361,7 +362,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
+
+ data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+
+- if (pci_is_thunderbolt_attached(adev->pdev))
++ if (dev_is_removable(&adev->pdev->dev))
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ else
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+@@ -480,7 +481,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+- if (pci_is_thunderbolt_attached(adev->pdev))
++ if (dev_is_removable(&adev->pdev->dev))
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ else
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+index 469eed084976c..52d80f286b3dd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+@@ -59,6 +59,9 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin");
+ /* Read USB-PD from LFB */
+ #define GFX_CMD_USB_PD_USE_LFB 0x480
+
++/* Retry times for vmbx ready wait */
++#define PSP_VMBX_POLLING_LIMIT 20000
++
+ /* VBIOS gfl defines */
+ #define MBOX_READY_MASK 0x80000000
+ #define MBOX_STATUS_MASK 0x0000FFFF
+@@ -138,7 +141,7 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
+ struct amdgpu_device *adev = psp->adev;
+ int retry_loop, ret;
+
+- for (retry_loop = 0; retry_loop < 70; retry_loop++) {
++ for (retry_loop = 0; retry_loop < PSP_VMBX_POLLING_LIMIT; retry_loop++) {
+		/* Wait for bootloader to signify that it is
+		   ready by having bit 31 of C2PMSG_33 set to 1 */
+ ret = psp_wait_for(
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+index c7991e07b6be5..a7697ec8188e0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+@@ -268,7 +268,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
+ switch (encoding) {
+ case SQ_INTERRUPT_WORD_ENCODING_AUTO:
+- pr_debug(
++ pr_debug_ratelimited(
+ "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1,
+ SE_ID),
+@@ -284,7 +284,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
+ THREAD_TRACE_UTC_ERROR));
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_INST:
+- pr_debug("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
++ pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+@@ -310,7 +310,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
+ case SQ_INTERRUPT_WORD_ENCODING_ERROR:
+ sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0,
+ ERR_TYPE);
+- pr_warn("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
++ pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+index f933bd231fb9c..2a65792fd1162 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+@@ -150,7 +150,7 @@ enum SQ_INTERRUPT_ERROR_TYPE {
+
+ static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
+ {
+- pr_debug(
++ pr_debug_ratelimited(
+ "sq_intr: auto, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
+@@ -165,7 +165,7 @@ static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
+
+ static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
+ {
+- pr_debug(
++ pr_debug_ratelimited(
+ "sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SH_ID),
+@@ -177,7 +177,7 @@ static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
+
+ static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
+ {
+- pr_warn(
++ pr_warn_ratelimited(
+ "sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE),
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+index 830396b1c3b14..27cdaea405017 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+@@ -333,7 +333,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
+ encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
+ switch (encoding) {
+ case SQ_INTERRUPT_WORD_ENCODING_AUTO:
+- pr_debug(
++ pr_debug_ratelimited(
+ "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
+@@ -347,7 +347,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_INST:
+- pr_debug("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
++ pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
+@@ -366,7 +366,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_ERROR:
+ sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
+- pr_warn("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
++ pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index bb16b795d1bc2..63ce30ea68915 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -495,11 +495,11 @@ svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
+
+ /* We need a new svm_bo. Spin-loop to wait for concurrent
+ * svm_range_bo_release to finish removing this range from
+- * its range list. After this, it is safe to reuse the
+- * svm_bo pointer and svm_bo_list head.
++ * its range list and set prange->svm_bo to null. After this,
++ * it is safe to reuse the svm_bo pointer and svm_bo_list head.
+ */
+- while (!list_empty_careful(&prange->svm_bo_list))
+- ;
++ while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
++ cond_resched();
+
+ return false;
+ }
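
Two fixes share this hunk: the wait condition now also covers
prange->svm_bo itself (which svm_range_bo_release() clears last, per the
updated comment), and the empty spin body becomes cond_resched(),
turning a hard busy-wait into a cooperative one that yields the CPU
while the release finishes.
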
+@@ -628,8 +628,15 @@ create_bo_failed:
+
+ void svm_range_vram_node_free(struct svm_range *prange)
+ {
+- svm_range_bo_unref(prange->svm_bo);
+- prange->ttm_res = NULL;
++ /* serialize prange->svm_bo unref */
++ mutex_lock(&prange->lock);
++ /* prange->svm_bo has not been unreffed yet */
++ if (prange->ttm_res) {
++ prange->ttm_res = NULL;
++ mutex_unlock(&prange->lock);
++ svm_range_bo_unref(prange->svm_bo);
++ } else
++ mutex_unlock(&prange->lock);
+ }
+
+ struct kfd_node *
+@@ -760,7 +767,7 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
+ prange->flags &= ~attrs[i].value;
+ break;
+ case KFD_IOCTL_SVM_ATTR_GRANULARITY:
+- prange->granularity = attrs[i].value;
++ prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
+ break;
+ default:
+ WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
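
The min_t() clamp bounds granularity at 0x3F (63), presumably because
the value is used as a shift count elsewhere, where shifting by 64 or
more would be undefined behaviour.
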
+@@ -820,7 +827,7 @@ svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
+ }
+ }
+
+- return !prange->is_error_flag;
++ return true;
+ }
+
+ /**
+@@ -1662,73 +1669,66 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
+
+ start = prange->start << PAGE_SHIFT;
+ end = (prange->last + 1) << PAGE_SHIFT;
+- for (addr = start; addr < end && !r; ) {
++ for (addr = start; !r && addr < end; ) {
+ struct hmm_range *hmm_range;
+ struct vm_area_struct *vma;
+- unsigned long next;
++ unsigned long next = 0;
+ unsigned long offset;
+ unsigned long npages;
+ bool readonly;
+
+ vma = vma_lookup(mm, addr);
+- if (!vma) {
++ if (vma) {
++ readonly = !(vma->vm_flags & VM_WRITE);
++
++ next = min(vma->vm_end, end);
++ npages = (next - addr) >> PAGE_SHIFT;
++ WRITE_ONCE(p->svms.faulting_task, current);
++ r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
++ readonly, owner, NULL,
++ &hmm_range);
++ WRITE_ONCE(p->svms.faulting_task, NULL);
++ if (r) {
++ pr_debug("failed %d to get svm range pages\n", r);
++ if (r == -EBUSY)
++ r = -EAGAIN;
++ }
++ } else {
+ r = -EFAULT;
+- goto unreserve_out;
+- }
+- readonly = !(vma->vm_flags & VM_WRITE);
+-
+- next = min(vma->vm_end, end);
+- npages = (next - addr) >> PAGE_SHIFT;
+- WRITE_ONCE(p->svms.faulting_task, current);
+- r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
+- readonly, owner, NULL,
+- &hmm_range);
+- WRITE_ONCE(p->svms.faulting_task, NULL);
+- if (r) {
+- pr_debug("failed %d to get svm range pages\n", r);
+- if (r == -EBUSY)
+- r = -EAGAIN;
+- goto unreserve_out;
+ }
+
+- offset = (addr - start) >> PAGE_SHIFT;
+- r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
+- hmm_range->hmm_pfns);
+- if (r) {
+- pr_debug("failed %d to dma map range\n", r);
+- goto unreserve_out;
++ if (!r) {
++ offset = (addr - start) >> PAGE_SHIFT;
++ r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
++ hmm_range->hmm_pfns);
++ if (r)
++ pr_debug("failed %d to dma map range\n", r);
+ }
+
+ svm_range_lock(prange);
+- if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
++ if (!r && amdgpu_hmm_range_get_pages_done(hmm_range)) {
+ pr_debug("hmm update the range, need validate again\n");
+ r = -EAGAIN;
+- goto unlock_out;
+ }
+- if (!list_empty(&prange->child_list)) {
++
++ if (!r && !list_empty(&prange->child_list)) {
+ pr_debug("range split by unmap in parallel, validate again\n");
+ r = -EAGAIN;
+- goto unlock_out;
+ }
+
+- r = svm_range_map_to_gpus(prange, offset, npages, readonly,
+- ctx->bitmap, wait, flush_tlb);
++ if (!r)
++ r = svm_range_map_to_gpus(prange, offset, npages, readonly,
++ ctx->bitmap, wait, flush_tlb);
++
++ if (!r && next == end)
++ prange->mapped_to_gpu = true;
+
+-unlock_out:
+ svm_range_unlock(prange);
+
+ addr = next;
+ }
+
+- if (addr == end) {
+- prange->validated_once = true;
+- prange->mapped_to_gpu = true;
+- }
+-
+-unreserve_out:
+ svm_range_unreserve_bos(ctx);
+-
+- prange->is_error_flag = !!r;
+ if (!r)
+ prange->validate_timestamp = ktime_get_boottime();
+
+@@ -2097,7 +2097,8 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
+ next = interval_tree_iter_next(node, start, last);
+ next_start = min(node->last, last) + 1;
+
+- if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
++ if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
++ prange->mapped_to_gpu) {
+ /* nothing to do */
+ } else if (node->start < start || node->last > last) {
+ /* node intersects the update range and its attributes
+@@ -3507,7 +3508,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
+ struct svm_range *next;
+ bool update_mapping = false;
+ bool flush_tlb;
+- int r = 0;
++ int r, ret = 0;
+
+ pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
+ p->pasid, &p->svms, start, start + size - 1, size);
+@@ -3595,7 +3596,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
+ out_unlock_range:
+ mutex_unlock(&prange->migrate_mutex);
+ if (r)
+- break;
++ ret = r;
+ }
+
+ dynamic_svm_range_dump(svms);
+@@ -3608,7 +3609,7 @@ out:
+ pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
+ &p->svms, start, start + size - 1, r);
+
+- return r;
++ return ret ? ret : r;
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+index 9e668eeefb32d..25f7119057386 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+@@ -132,9 +132,7 @@ struct svm_range {
+ struct list_head child_list;
+ DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
+ DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
+- bool validated_once;
+ bool mapped_to_gpu;
+- bool is_error_flag;
+ };
+
+ static inline void svm_range_lock(struct svm_range *prange)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 868946dd7ef12..f5fdb61c821d0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1692,8 +1692,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+ dce_version_to_string(adev->dm.dc->ctx->dce_version));
+ } else {
+- DRM_INFO("Display Core v%s failed to initialize on %s\n", DC_VER,
+- dce_version_to_string(adev->dm.dc->ctx->dce_version));
++ DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+ goto error;
+ }
+
+@@ -2085,7 +2084,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ struct dmub_srv_create_params create_params;
+ struct dmub_srv_region_params region_params;
+ struct dmub_srv_region_info region_info;
+- struct dmub_srv_fb_params fb_params;
++ struct dmub_srv_memory_params memory_params;
+ struct dmub_srv_fb_info *fb_info;
+ struct dmub_srv *dmub_srv;
+ const struct dmcub_firmware_header_v1_0 *hdr;
+@@ -2185,6 +2184,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
++ region_params.is_mailbox_in_inbox = false;
+
+ status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+ &region_info);
+@@ -2208,10 +2208,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ return r;
+
+ /* Rebase the regions on the framebuffer address. */
+- memset(&fb_params, 0, sizeof(fb_params));
+- fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
+- fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
+- fb_params.region_info = &region_info;
++ memset(&memory_params, 0, sizeof(memory_params));
++ memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
++ memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
++ memory_params.region_info = &region_info;
+
+ adev->dm.dmub_fb_info =
+ kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+@@ -2223,7 +2223,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ return -ENOMEM;
+ }
+
+- status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
++ status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ return -EINVAL;
+@@ -6236,7 +6236,7 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ dm_new_state->underscan_enable = val;
+ ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+- dm_new_state->abm_level = val;
++ dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
+ ret = 0;
+ }
+
+@@ -6281,7 +6281,8 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ *val = dm_state->underscan_enable;
+ ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+- *val = dm_state->abm_level;
++ *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
++ dm_state->abm_level : 0;
+ ret = 0;
+ }
+
+@@ -6354,7 +6355,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+ state->pbn = 0;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+- state->abm_level = amdgpu_dm_abm_level;
++ state->abm_level = amdgpu_dm_abm_level ?:
++ ABM_LEVEL_IMMEDIATE_DISABLE;
+
+ __drm_atomic_helper_connector_reset(connector, &state->base);
+ }
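
The three abm_level hunks round-trip a sentinel through the GNU ?:
operator: a userspace value of 0 is stored as
ABM_LEVEL_IMMEDIATE_DISABLE, and reads translate the sentinel back to 0,
so userspace still sees 0 for "off" while DC receives an explicit
immediate-disable level rather than an ambiguous zero.
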
+@@ -7431,6 +7433,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ int i;
+ int result = -EIO;
+
++ if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
++ return result;
++
+ cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+
+ if (!cmd.payloads)
+@@ -9539,14 +9544,14 @@ static bool should_reset_plane(struct drm_atomic_state *state,
+ struct drm_plane *other;
+ struct drm_plane_state *old_other_state, *new_other_state;
+ struct drm_crtc_state *new_crtc_state;
++ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ int i;
+
+ /*
+- * TODO: Remove this hack once the checks below are sufficient
+- * enough to determine when we need to reset all the planes on
+- * the stream.
++ * TODO: Remove this hack for all asics once fast updates are
++ * proven to work fine on DCN3.2+.
+ */
+- if (state->allow_modeset)
++ if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
+ return true;
+
+ /* Exit early if we know that we're adding or removing the plane. */
+@@ -9892,16 +9897,27 @@ static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
+ }
+ }
+
++static void
++dm_get_plane_scale(struct drm_plane_state *plane_state,
++ int *out_plane_scale_w, int *out_plane_scale_h)
++{
++ int plane_src_w, plane_src_h;
++
++ dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
++ *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
++ *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
++}
++
+ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state)
+ {
+- struct drm_plane *cursor = crtc->cursor, *underlying;
++ struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
++ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *new_cursor_state, *new_underlying_state;
+ int i;
+ int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
+- int cursor_src_w, cursor_src_h;
+- int underlying_src_w, underlying_src_h;
++ bool any_relevant_change = false;
+
+ /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
+ * cursor per pipe but it's going to inherit the scaling and
+@@ -9909,13 +9925,50 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ * blending properties match the underlying planes'.
+ */
+
+- new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
+- if (!new_cursor_state || !new_cursor_state->fb)
++ /* If no plane was enabled or changed scaling, no need to check again */
++ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
++ int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
++
++ if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
++ continue;
++
++ if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
++ any_relevant_change = true;
++ break;
++ }
++
++ if (new_plane_state->fb == old_plane_state->fb &&
++ new_plane_state->crtc_w == old_plane_state->crtc_w &&
++ new_plane_state->crtc_h == old_plane_state->crtc_h)
++ continue;
++
++ dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
++ dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
++
++ if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
++ any_relevant_change = true;
++ break;
++ }
++ }
++
++ if (!any_relevant_change)
+ return 0;
+
+- dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
+- cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
+- cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
++ new_cursor_state = drm_atomic_get_plane_state(state, cursor);
++ if (IS_ERR(new_cursor_state))
++ return PTR_ERR(new_cursor_state);
++
++ if (!new_cursor_state->fb)
++ return 0;
++
++ dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
++
++ /* Need to check all enabled planes, even if this commit doesn't change
++ * their state
++ */
++ i = drm_atomic_add_affected_planes(state, crtc);
++ if (i)
++ return i;
+
+ for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
+ /* Narrow down to non-cursor planes on the same CRTC as the cursor */
+@@ -9926,10 +9979,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ if (!new_underlying_state->fb)
+ continue;
+
+- dm_get_oriented_plane_size(new_underlying_state,
+- &underlying_src_w, &underlying_src_h);
+- underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
+- underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
++ dm_get_plane_scale(new_underlying_state,
++ &underlying_scale_w, &underlying_scale_h);
+
+ if (cursor_scale_w != underlying_scale_w ||
+ cursor_scale_h != underlying_scale_h) {
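
dm_get_plane_scale() reports scale in permille of the source size
(crtc size * 1000 / src size); for example, a 1920-pixel-wide source
shown 960 pixels wide yields scale_w = 500. The cursor check then
reduces to requiring the cursor's permille pair to match that of every
underlying plane on the CRTC.
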
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 57230661132bd..28f5eb9ecbd3e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1598,31 +1598,31 @@ enum dc_status dm_dp_mst_is_port_support_mode(
+ unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
+ unsigned int max_compressed_bw_in_kbps = 0;
+ struct dc_dsc_bw_range bw_range = {0};
+- struct drm_dp_mst_topology_mgr *mst_mgr;
++ uint16_t full_pbn = aconnector->mst_output_port->full_pbn;
+
+ /*
+- * check if the mode could be supported if DSC pass-through is supported
+- * AND check if there enough bandwidth available to support the mode
+- * with DSC enabled.
++ * Consider the case with the depth of the mst topology tree is equal or less than 2
++ * A. When dsc bitstream can be transmitted along the entire path
++ * 1. dsc is possible between source and branch/leaf device (common dsc params is possible), AND
++ * 2. dsc passthrough supported at MST branch, or
++ * 3. dsc decoding supported at leaf MST device
++ * Use maximum dsc compression as bw constraint
++ * B. When dsc bitstream cannot be transmitted along the entire path
++ * Use native bw as bw constraint
+ */
+ if (is_dsc_common_config_possible(stream, &bw_range) &&
+- aconnector->mst_output_port->passthrough_aux) {
+- mst_mgr = aconnector->mst_output_port->mgr;
+- mutex_lock(&mst_mgr->lock);
+-
++ (aconnector->mst_output_port->passthrough_aux ||
++ aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
+ cur_link_settings = stream->link->verified_link_cap;
+
+ upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+- &cur_link_settings
+- );
+- down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
++ &cur_link_settings);
++ down_link_bw_in_kbps = kbps_from_pbn(full_pbn);
+
+ /* pick the bottleneck */
+ end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
+ down_link_bw_in_kbps);
+
+- mutex_unlock(&mst_mgr->lock);
+-
+ /*
+ * use the maximum dsc compression bandwidth as the required
+ * bandwidth for the mode
+@@ -1637,8 +1637,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
+ /* check if mode could be supported within full_pbn */
+ bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
+ pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
+-
+- if (pbn > aconnector->mst_output_port->full_pbn)
++ if (pbn > full_pbn)
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index d08e60dff46de..a1be93f6385c6 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -990,7 +990,8 @@ static bool dc_construct(struct dc *dc,
+ /* set i2c speed if not done by the respective dcnxxx__resource.c */
+ if (dc->caps.i2c_speed_in_khz_hdcp == 0)
+ dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
+-
++ if (dc->caps.max_optimizable_video_width == 0)
++ dc->caps.max_optimizable_video_width = 5120;
+ dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
+ if (!dc->clk_mgr)
+ goto fail;
+@@ -1069,53 +1070,6 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
+ }
+ }
+
+-static void phantom_pipe_blank(
+- struct dc *dc,
+- struct timing_generator *tg,
+- int width,
+- int height)
+-{
+- struct dce_hwseq *hws = dc->hwseq;
+- enum dc_color_space color_space;
+- struct tg_color black_color = {0};
+- struct output_pixel_processor *opp = NULL;
+- uint32_t num_opps, opp_id_src0, opp_id_src1;
+- uint32_t otg_active_width, otg_active_height;
+- uint32_t i;
+-
+- /* program opp dpg blank color */
+- color_space = COLOR_SPACE_SRGB;
+- color_space_to_black_color(dc, color_space, &black_color);
+-
+- otg_active_width = width;
+- otg_active_height = height;
+-
+- /* get the OPTC source */
+- tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+- ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
+-
+- for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+- if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
+- opp = dc->res_pool->opps[i];
+- break;
+- }
+- }
+-
+- if (opp && opp->funcs->opp_set_disp_pattern_generator)
+- opp->funcs->opp_set_disp_pattern_generator(
+- opp,
+- CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+- CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+- COLOR_DEPTH_UNDEFINED,
+- &black_color,
+- otg_active_width,
+- otg_active_height,
+- 0);
+-
+- if (tg->funcs->is_tg_enabled(tg))
+- hws->funcs.wait_for_blank_complete(opp);
+-}
+-
+ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+ {
+ if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
+@@ -1206,7 +1160,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+
+ main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
+ main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
+- phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
++ if (dc->hwss.blank_phantom)
++ dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
+ tg->funcs->enable_crtc(tg);
+ }
+ }
+@@ -1888,7 +1843,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+
+- if (dc->debug.enable_double_buffered_dsc_pg_support)
++ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
+ disable_dangling_plane(dc, context);
+@@ -1995,7 +1950,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ dc->hwss.optimize_bandwidth(dc, context);
+ }
+
+- if (dc->debug.enable_double_buffered_dsc_pg_support)
++ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, true);
+
+ if (dc->ctx->dce_version >= DCE_VERSION_MAX)
+@@ -2242,7 +2197,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
+
+ dc->hwss.optimize_bandwidth(dc, context);
+
+- if (dc->debug.enable_double_buffered_dsc_pg_support)
++ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, true);
+ }
+
+@@ -2488,6 +2443,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
+ }
+
+ static enum surface_update_type get_scaling_info_update_type(
++ const struct dc *dc,
+ const struct dc_surface_update *u)
+ {
+ union surface_update_flags *update_flags = &u->surface->update_flags;
+@@ -2520,6 +2476,12 @@ static enum surface_update_type get_scaling_info_update_type(
+ update_flags->bits.clock_change = 1;
+ }
+
++ if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
++ (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
++ u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
++ /* Changing clip size of a large surface may result in MPC slice count change */
++ update_flags->bits.bandwidth_change = 1;
++
+ if (u->scaling_info->src_rect.x != u->surface->src_rect.x
+ || u->scaling_info->src_rect.y != u->surface->src_rect.y
+ || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
+@@ -2557,7 +2519,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
+ type = get_plane_info_update_type(u);
+ elevate_update_type(&overall_type, type);
+
+- type = get_scaling_info_update_type(u);
++ type = get_scaling_info_update_type(dc, u);
+ elevate_update_type(&overall_type, type);
+
+ if (u->flip_addr) {
+@@ -3571,7 +3533,7 @@ static void commit_planes_for_stream(struct dc *dc,
+ if (get_seamless_boot_stream_count(context) == 0)
+ dc->hwss.prepare_bandwidth(dc, context);
+
+- if (dc->debug.enable_double_buffered_dsc_pg_support)
++ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
+ context_clock_trace(dc, context);
+@@ -4374,6 +4336,14 @@ bool dc_update_planes_and_stream(struct dc *dc,
+ update_type,
+ context);
+ } else {
++ if (!stream_update &&
++ dc->hwss.is_pipe_topology_transition_seamless &&
++ !dc->hwss.is_pipe_topology_transition_seamless(
++ dc, dc->current_state, context)) {
++
++ DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
++ BREAK_TO_DEBUGGER();
++ }
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+@@ -5284,3 +5254,24 @@ void dc_query_current_properties(struct dc *dc, struct dc_current_properties *pr
+ properties->cursor_size_limit = subvp_in_use ? 64 : dc->caps.max_cursor_size;
+ }
+
++/**
++ *****************************************************************************
++ * dc_set_edp_power() - DM controls eDP power to be ON/OFF
++ *
+ * Called when the DM wants to power the eDP panel on or off.
+ * Only acts on links that have the skip_implict_edp_power_control flag set.
++ *
++ *****************************************************************************
++ */
++void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
++ bool powerOn)
++{
++ if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
++ return;
++
++ if (edp_link->skip_implict_edp_power_control == false)
++ return;
++
++ edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
++}
++
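A hedged usage sketch for the new API, mirroring how a DM might drive it;
dm_example_edp_off() is hypothetical, while dc_get_edp_links() and
MAX_NUM_EDP are existing dc interfaces:

    static void dm_example_edp_off(struct dc *dc)
    {
        struct dc_link *edp_links[MAX_NUM_EDP];
        int edp_num, i;

        dc_get_edp_links(dc, edp_links, &edp_num);
        for (i = 0; i < edp_num; i++)
            dc_set_edp_power(dc, edp_links[i], false);
    }

Links that did not set skip_implict_edp_power_control are skipped inside
dc_set_edp_power(), so a loop like this is safe to run unconditionally.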
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index f7b51aca60200..8873acfe309c8 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -996,7 +996,7 @@ static void adjust_recout_for_visual_confirm(struct rect *recout,
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ int dpp_offset, base_offset;
+
+- if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
++ if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE || !pipe_ctx->plane_res.dpp)
+ return;
+
+ dpp_offset = pipe_ctx->stream->timing.v_addressable / VISUAL_CONFIRM_DPP_OFFSET_DENO;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index 01fe2d2fd2417..ebe571fcefe32 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -582,7 +582,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+- if (res_ctx->pipe_ctx[i].stream != stream)
++ if (res_ctx->pipe_ctx[i].stream != stream || !tg)
+ continue;
+
+ return tg->funcs->get_frame_count(tg);
+@@ -641,7 +641,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+- if (res_ctx->pipe_ctx[i].stream != stream)
++ if (res_ctx->pipe_ctx[i].stream != stream || !tg)
+ continue;
+
+ tg->funcs->get_scanoutpos(tg,
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 31e3183497a7f..3f33740e2f659 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -231,6 +231,11 @@ struct dc_caps {
+ uint32_t dmdata_alloc_size;
+ unsigned int max_cursor_size;
+ unsigned int max_video_width;
++ /*
++ * max video plane width that can safely be assumed to always be
++ * supported by a single DPP pipe.
++ */
++ unsigned int max_optimizable_video_width;
+ unsigned int min_horizontal_blanking_period;
+ int linear_pitch_alignment;
+ bool dcc_const_color;
+@@ -1533,7 +1538,6 @@ struct dc_link {
+ enum edp_revision edp_revision;
+ union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+
+- struct backlight_settings backlight_settings;
+ struct psr_settings psr_settings;
+
+ struct replay_settings replay_settings;
+@@ -1573,6 +1577,7 @@ struct dc_link {
+ struct phy_state phy_state;
+ // BW ALLOCATION USB4 ONLY
+ struct dc_dpia_bw_alloc dpia_bw_alloc_config;
++ bool skip_implict_edp_power_control;
+ };
+
+ /* Return an enumerated dc_link.
+@@ -1592,6 +1597,9 @@ void dc_get_edp_links(const struct dc *dc,
+ struct dc_link **edp_links,
+ int *edp_num);
+
++void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
++ bool powerOn);
++
+ /* The function initiates detection handshake over the given link. It first
+ * determines if there are display connections over the link. If so it initiates
+ * detection protocols supported by the connected receiver device. The function
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 3697ea1d14c1b..d5b3e3a32cc6d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -302,7 +302,6 @@ struct dc_stream_state {
+ bool vblank_synchronized;
+ bool fpo_in_use;
+ struct mall_stream_config mall_stream_config;
+- bool skip_edp_power_down;
+ };
+
+ #define ABM_LEVEL_IMMEDIATE_DISABLE 255
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index 445ad79001ce2..accffba5a6834 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -189,6 +189,7 @@ struct dc_panel_patch {
+ unsigned int disable_fams;
+ unsigned int skip_avmute;
+ unsigned int mst_start_top_delay;
++ unsigned int remove_sink_ext_caps;
+ };
+
+ struct dc_edid_caps {
+@@ -1002,10 +1003,6 @@ struct link_mst_stream_allocation_table {
+ struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+ };
+
+-struct backlight_settings {
+- uint32_t backlight_millinits;
+-};
+-
+ /* PSR feature flags */
+ struct psr_settings {
+ bool psr_feature_enabled; // PSR is supported by sink
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+index b87bfecb7755a..a8e79104b684e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+@@ -586,7 +586,8 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
+ if (state == PSR_STATE0)
+ break;
+ }
+- fsleep(500);
++ /* must *not* be fsleep - this can be called from high irq levels */
++ udelay(500);
+ }
+
+ /* assert if max retry hit */
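Context for this change: fsleep() may schedule and is therefore illegal in
atomic/IRQ context, while udelay() busy-waits. An illustrative poll loop
under that constraint (read_psr_state() and retry_count are placeholders,
not driver symbols):

    for (retries = 0; retries < retry_count; retries++) {
        if (read_psr_state() == PSR_STATE0)
            break;
        udelay(500);    /* busy-wait; never sleeps, safe at high irql */
    }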
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+index 0f24b6fbd2201..4704c9c85ee6f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+@@ -216,7 +216,8 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8
+ break;
+ }
+
+- fsleep(500);
++ /* must *not* be fsleep - this can be called from high irq levels */
++ udelay(500);
+ }
+
+ /* assert if max retry hit */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 2a6157555fd1e..9c78e42418f34 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1226,7 +1226,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
+ struct dce_hwseq *hws = link->dc->hwseq;
+
+ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+- if (!stream->skip_edp_power_down)
++ if (!link->skip_implict_edp_power_control)
+ hws->funcs.edp_backlight_control(link, false);
+ link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 9834b75f1837b..79befa17bb037 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -111,7 +111,8 @@ void dcn10_lock_all_pipes(struct dc *dc,
+ if (pipe_ctx->top_pipe ||
+ !pipe_ctx->stream ||
+ (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
+- !tg->funcs->is_tg_enabled(tg))
++ !tg->funcs->is_tg_enabled(tg) ||
++ pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
+ if (lock)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index aeadc587433fd..a2e1ca3b93e86 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1830,8 +1830,16 @@ void dcn20_program_front_end_for_ctx(
+ dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+
+- if (tg->funcs->enable_crtc)
++ if (tg->funcs->enable_crtc) {
++ if (dc->hwss.blank_phantom) {
++ int main_pipe_width, main_pipe_height;
++
++ main_pipe_width = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.width;
++ main_pipe_height = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.height;
++ dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
++ }
+ tg->funcs->enable_crtc(tg);
++ }
+ }
+ }
+ /* OTG blank before disabling all front ends */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+index 4d2820ffe4682..33a8626bda735 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+@@ -476,7 +476,8 @@ void dcn314_disable_link_output(struct dc_link *link,
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+- link->dc->hwss.edp_backlight_control)
++ link->dc->hwss.edp_backlight_control &&
++ !link->skip_implict_edp_power_control)
+ link->dc->hwss.edp_backlight_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 004beed9bd444..3e65e683db0ac 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -869,7 +869,7 @@ static const struct dc_plane_cap plane_cap = {
+ static const struct dc_debug_options debug_defaults_drv = {
+ .disable_z10 = false,
+ .enable_z9_disable_interface = true,
+- .minimum_z8_residency_time = 2000,
++ .minimum_z8_residency_time = 2100,
+ .psr_skip_crtc_disable = true,
+ .replay_skip_crtc_disabled = true,
+ .disable_dmcu = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index 680e7fa8d18ab..650e1598bddcb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -77,6 +77,9 @@ void dcn32_dsc_pg_control(
+ if (hws->ctx->dc->debug.disable_dsc_power_gate)
+ return;
+
++ if (!hws->ctx->dc->debug.enable_double_buffered_dsc_pg_support)
++ return;
++
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
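This refactor separates two concerns: call sites now gate on hook presence,
while the debug knob is honoured once, inside the DCN3.2 implementation. A
condensed sketch of the resulting pattern:

    /* caller: capability check */
    if (dc->hwss.update_dsc_pg)
        dc->hwss.update_dsc_pg(dc, context, false);

    /* inside dcn32_dsc_pg_control(): policy check */
    if (!hws->ctx->dc->debug.enable_double_buffered_dsc_pg_support)
        return;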
+@@ -214,7 +217,7 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
+ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
+ {
+ int i;
+- uint8_t num_ways = 0;
++ uint32_t num_ways = 0;
+ uint32_t mall_ss_size_bytes = 0;
+
+ mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
+@@ -244,7 +247,8 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
+ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ {
+ union dmub_rb_cmd cmd;
+- uint8_t ways, i;
++ uint8_t i;
++ uint32_t ways;
+ int j;
+ bool mall_ss_unsupported = false;
+ struct dc_plane_state *plane = NULL;
+@@ -304,7 +308,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
+ cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
+ cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
+- cmd.cab.cab_alloc_ways = ways;
++ cmd.cab.cab_alloc_ways = (uint8_t)ways;
+
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+
+@@ -482,8 +486,7 @@ bool dcn32_set_mcm_luts(
+ if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
+ lut_params = &plane_state->blend_tf->pwl;
+ else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
+- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+- plane_state->blend_tf,
++ cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf,
+ &dpp_base->regamma_params, false);
+ lut_params = &dpp_base->regamma_params;
+ }
+@@ -497,8 +500,7 @@ bool dcn32_set_mcm_luts(
+ else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ // TODO: dpp_base replace
+ ASSERT(false);
+- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+- plane_state->in_shaper_func,
++ cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func,
+ &dpp_base->shaper_params, true);
+ lut_params = &dpp_base->shaper_params;
+ }
+@@ -1573,3 +1575,101 @@ void dcn32_init_blank(
+ if (opp)
+ hws->funcs.wait_for_blank_complete(opp);
+ }
++
++void dcn32_blank_phantom(struct dc *dc,
++ struct timing_generator *tg,
++ int width,
++ int height)
++{
++ struct dce_hwseq *hws = dc->hwseq;
++ enum dc_color_space color_space;
++ struct tg_color black_color = {0};
++ struct output_pixel_processor *opp = NULL;
++ uint32_t num_opps, opp_id_src0, opp_id_src1;
++ uint32_t otg_active_width, otg_active_height;
++ uint32_t i;
++
++ /* program opp dpg blank color */
++ color_space = COLOR_SPACE_SRGB;
++ color_space_to_black_color(dc, color_space, &black_color);
++
++ otg_active_width = width;
++ otg_active_height = height;
++
++ /* get the OPTC source */
++ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
++ ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
++
++ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
++ if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
++ opp = dc->res_pool->opps[i];
++ break;
++ }
++ }
++
++ if (opp && opp->funcs->opp_set_disp_pattern_generator)
++ opp->funcs->opp_set_disp_pattern_generator(
++ opp,
++ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
++ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
++ COLOR_DEPTH_UNDEFINED,
++ &black_color,
++ otg_active_width,
++ otg_active_height,
++ 0);
++
++ if (tg->funcs->is_tg_enabled(tg))
++ hws->funcs.wait_for_blank_complete(opp);
++}
++
++bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
++ const struct dc_state *cur_ctx,
++ const struct dc_state *new_ctx)
++{
++ int i;
++ const struct pipe_ctx *cur_pipe, *new_pipe;
++ bool is_seamless = true;
++
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ cur_pipe = &cur_ctx->res_ctx.pipe_ctx[i];
++ new_pipe = &new_ctx->res_ctx.pipe_ctx[i];
++
++ if (resource_is_pipe_type(cur_pipe, FREE_PIPE) ||
++ resource_is_pipe_type(new_pipe, FREE_PIPE))
++ /* adding or removing free pipes is always seamless */
++ continue;
++ else if (resource_is_pipe_type(cur_pipe, OTG_MASTER)) {
++ if (resource_is_pipe_type(new_pipe, OTG_MASTER))
++ if (cur_pipe->stream->stream_id == new_pipe->stream->stream_id)
++ /* OTG master with the same stream is seamless */
++ continue;
++ } else if (resource_is_pipe_type(cur_pipe, OPP_HEAD)) {
++ if (resource_is_pipe_type(new_pipe, OPP_HEAD)) {
++ if (cur_pipe->stream_res.tg == new_pipe->stream_res.tg)
++ /*
++ * OPP heads sharing the same timing
++ * generator are seamless
++ */
++ continue;
++ }
++ } else if (resource_is_pipe_type(cur_pipe, DPP_PIPE)) {
++ if (resource_is_pipe_type(new_pipe, DPP_PIPE)) {
++ if (cur_pipe->stream_res.opp == new_pipe->stream_res.opp)
++ /*
++ * DPP pipes sharing the same OPP head
++ * are seamless
++ */
++ continue;
++ }
++ }
++
++ /*
++ * This pipe's transition doesn't fall under any seamless
++ * conditions
++ */
++ is_seamless = false;
++ break;
++ }
++
++ return is_seamless;
++}
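The rules above reduce to: a transition is seamless when the pipe stays
free, or keeps both its role and its association (stream for OTG masters,
timing generator for OPP heads, OPP for DPP pipes). A standalone
distillation with stand-in types, not the driver's:

    enum pipe_role { ROLE_FREE, ROLE_OTG_MASTER, ROLE_OPP_HEAD, ROLE_DPP };

    static bool transition_seamless(enum pipe_role cur, enum pipe_role new,
                                    bool same_association)
    {
        if (cur == ROLE_FREE || new == ROLE_FREE)
            return true;            /* adding/removing free pipes */
        return cur == new && same_association;
    }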
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+index 2d2628f31bed7..9992e40acd217 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+@@ -115,4 +115,13 @@ void dcn32_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg);
+
++void dcn32_blank_phantom(struct dc *dc,
++ struct timing_generator *tg,
++ int width,
++ int height);
++
++bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
++ const struct dc_state *cur_ctx,
++ const struct dc_state *new_ctx);
++
+ #endif /* __DC_HWSS_DCN32_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+index c7417147dff19..1edadff39a5ef 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+@@ -115,6 +115,8 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
+ .update_phantom_vp_position = dcn32_update_phantom_vp_position,
+ .update_dsc_pg = dcn32_update_dsc_pg,
+ .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
++ .blank_phantom = dcn32_blank_phantom,
++ .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
+ };
+
+ static const struct hwseq_private_funcs dcn32_private_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index 5805fb02af14e..f2de0c7584947 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -948,10 +948,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
+ {
+ int plane_count;
+ int i;
+- unsigned int min_dst_y_next_start_us;
+
+ plane_count = 0;
+- min_dst_y_next_start_us = 0;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ plane_count++;
+@@ -973,26 +971,15 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
+ else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ struct dc_link *link = context->streams[0]->sink->link;
+ struct dc_stream_status *stream_status = &context->stream_status[0];
+- struct dc_stream_state *current_stream = context->streams[0];
+ int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
+ bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
+ bool is_pwrseq0 = link->link_index == 0;
+- bool isFreesyncVideo;
+-
+- isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
+- isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+- if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
+- min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
+- break;
+- }
+- }
+
+ /* Don't support multi-plane configurations */
+ if (stream_status->plane_count > 1)
+ return DCN_ZSTATE_SUPPORT_DISALLOW;
+
+- if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
++ if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+ return DCN_ZSTATE_SUPPORT_ALLOW;
+ else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
+ return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index 711d4085b33b8..cf3b400c8619b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -1964,6 +1964,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
+ int i, pipe_idx, vlevel_temp = 0;
+ double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
+ double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
++ double dram_speed_from_validation = context->bw_ctx.dml.vba.DRAMSpeed;
+ double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
+ bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
+ dm_dram_clock_change_unsupported;
+@@ -2151,7 +2152,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
+ }
+
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+- min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
++ min_dram_speed_mts = dram_speed_from_validation;
+ min_dram_speed_mts_margin = 160;
+
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index 02ff99f7bec2b..66e680902c95c 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -388,6 +388,11 @@ struct hw_sequencer_funcs {
+ void (*z10_restore)(const struct dc *dc);
+ void (*z10_save_init)(struct dc *dc);
+
++ void (*blank_phantom)(struct dc *dc,
++ struct timing_generator *tg,
++ int width,
++ int height);
++
+ void (*update_visual_confirm_color)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ int mpcc_id);
+@@ -396,6 +401,9 @@ struct hw_sequencer_funcs {
+ struct dc_state *context,
+ struct pipe_ctx *phantom_pipe);
+ void (*apply_update_flags_for_phantom)(struct pipe_ctx *phantom_pipe);
++ bool (*is_pipe_topology_transition_seamless)(struct dc *dc,
++ const struct dc_state *cur_ctx,
++ const struct dc_state *new_ctx);
+
+ void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
+ void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
+index e3e8c76c17cfa..d7685368140ab 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
+@@ -295,6 +295,7 @@ struct link_service {
+ bool (*edp_receiver_ready_T9)(struct dc_link *link);
+ bool (*edp_receiver_ready_T7)(struct dc_link *link);
+ bool (*edp_power_alpm_dpcd_enable)(struct dc_link *link, bool enable);
++ void (*edp_set_panel_power)(struct dc_link *link, bool powerOn);
+
+
+ /*************************** DP CTS ************************************/
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+index c9b6676eaf53b..c7a9e286a5d4d 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+@@ -876,7 +876,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
+ (link->dpcd_sink_ext_caps.bits.oled == 1)) {
+ dpcd_set_source_specific_data(link);
+ msleep(post_oui_delay);
+- set_cached_brightness_aux(link);
++ set_default_brightness_aux(link);
+ }
+
+ return true;
+@@ -1085,6 +1085,9 @@ static bool detect_link_and_local_sink(struct dc_link *link,
+ if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ link->ctx->dc->debug.hdmi20_disable = true;
+
++ if (sink->edid_caps.panel_patch.remove_sink_ext_caps)
++ link->dpcd_sink_ext_caps.raw = 0;
++
+ if (dc_is_hdmi_signal(link->connector_signal))
+ read_scdc_caps(link->ddc, link->local_sink);
+
+@@ -1163,6 +1166,12 @@ static bool detect_link_and_local_sink(struct dc_link *link,
+ dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
+ // Override dc_panel_config if system has specific settings
+ dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
++
++ //sink only can use supported link rate table, we are foreced to enable it
++ if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)
++ link->panel_config.ilr.optimize_edp_link_rate = true;
++ if (edp_is_ilr_optimization_enabled(link))
++ link->reported_link_cap.link_rate = get_max_link_rate_from_ilr_table(link);
+ }
+
+ } else {
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+index 79aef205598b7..35d087cf1980f 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+@@ -1930,7 +1930,7 @@ static void disable_link_dp(struct dc_link *link,
+ dp_disable_link_phy(link, link_res, signal);
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+- if (!link->dc->config.edp_no_power_sequencing)
++ if (!link->skip_implict_edp_power_control)
+ link->dc->hwss.edp_power_control(link, false);
+ }
+
+@@ -2140,8 +2140,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
+ if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
+- set_cached_brightness_aux(link);
+-
++ set_default_brightness_aux(link);
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ msleep(bl_oled_enable_delay);
+ edp_backlight_enable_aux(link, true);
+@@ -2219,7 +2218,7 @@ static enum dc_status enable_link(
+ * link settings. Need to call disable first before enabling at
+ * new link settings.
+ */
+- if (link->link_status.link_active && !stream->skip_edp_power_down)
++ if (link->link_status.link_active)
+ disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+
+ switch (pipe_ctx->stream->signal) {
+@@ -2338,9 +2337,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
+ dc->hwss.disable_stream(pipe_ctx);
+ } else {
+ dc->hwss.disable_stream(pipe_ctx);
+- if (!pipe_ctx->stream->skip_edp_power_down) {
+- disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+- }
++ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ }
+
+ if (pipe_ctx->stream->timing.flags.DSC) {
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+index 0895742a31024..e406561c2c237 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+@@ -223,6 +223,7 @@ static void construct_link_service_edp_panel_control(struct link_service *link_s
+ link_srv->edp_receiver_ready_T9 = edp_receiver_ready_T9;
+ link_srv->edp_receiver_ready_T7 = edp_receiver_ready_T7;
+ link_srv->edp_power_alpm_dpcd_enable = edp_power_alpm_dpcd_enable;
++ link_srv->edp_set_panel_power = edp_set_panel_power;
+ }
+
+ /* link dp cts implements dp compliance test automation protocols and manual
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+index 237e0ff955f3c..db87aa7b5c90f 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+@@ -707,8 +707,7 @@ bool edp_decide_link_settings(struct dc_link *link,
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+- if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
+- link->dpcd_caps.edp_supported_link_rates_count == 0) {
++ if (!edp_is_ilr_optimization_enabled(link)) {
+ *link_setting = link->verified_link_cap;
+ return true;
+ }
+@@ -772,8 +771,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+- if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
+- link->dpcd_caps.edp_supported_link_rates_count == 0)) {
++ if (!edp_is_ilr_optimization_enabled(link)) {
+ /* for DSC enabled case, we search for minimum lane count */
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+@@ -1938,9 +1936,7 @@ void detect_edp_sink_caps(struct dc_link *link)
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
+- (link->panel_config.ilr.optimize_edp_link_rate ||
+- link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
++ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13) {
+ // Read DPCD 00010h - 0001Fh 16 bytes at one shot
+ core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
+ supported_link_rates, sizeof(supported_link_rates));
+@@ -1958,12 +1954,10 @@ void detect_edp_sink_caps(struct dc_link *link)
+ link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
+ link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
+ link->dpcd_caps.edp_supported_link_rates_count++;
+-
+- if (link->reported_link_cap.link_rate < link_rate)
+- link->reported_link_cap.link_rate = link_rate;
+ }
+ }
+ }
++
+ core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP,
+ &backlight_adj_cap, sizeof(backlight_adj_cap));
+
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+index b7abba55bc2fd..0050e0a06cbc2 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+@@ -73,7 +73,8 @@ void dp_disable_link_phy(struct dc_link *link,
+ {
+ struct dc *dc = link->ctx->dc;
+
+- if (!link->wa_flags.dp_keep_receiver_powered)
++ if (!link->wa_flags.dp_keep_receiver_powered &&
++ !link->skip_implict_edp_power_control)
+ dpcd_write_rx_power_ctrl(link, false);
+
+ dc->hwss.disable_link_output(link, link_res, signal);
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+index fd8f6f1981461..68096d12f52fd 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+@@ -115,7 +115,7 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq
+ lt_settings->cr_pattern_time = 16000;
+
+ /* Fixed VS/PE specific: Toggle link rate */
+- apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate);
++ apply_toggle_rate_wa = ((link->vendor_specific_lttpr_link_rate_wa == target_rate) || (link->vendor_specific_lttpr_link_rate_wa == 0));
+ target_rate = get_dpcd_link_rate(&lt_settings->link_settings);
+ toggle_rate = (target_rate == 0x6) ? 0xA : 0x6;
+
+@@ -271,7 +271,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
+ /* Vendor specific: Toggle link rate */
+ toggle_rate = (rate == 0x6) ? 0xA : 0x6;
+
+- if (link->vendor_specific_lttpr_link_rate_wa == rate) {
++ if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+@@ -617,7 +617,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
+ /* Vendor specific: Toggle link rate */
+ toggle_rate = (rate == 0x6) ? 0xA : 0x6;
+
+- if (link->vendor_specific_lttpr_link_rate_wa == rate) {
++ if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+index 98e715aa6d8e3..fe74d4252a510 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+@@ -33,6 +33,7 @@
+ #include "link_dp_capability.h"
+ #include "dm_helpers.h"
+ #include "dal_asic_id.h"
++#include "link_dp_phy.h"
+ #include "dce/dmub_psr.h"
+ #include "dc/dc_dmub_srv.h"
+ #include "dce/dmub_replay.h"
+@@ -167,7 +168,6 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
+ *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
+ *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
+
+- link->backlight_settings.backlight_millinits = backlight_millinits;
+
+ if (!link->dpcd_caps.panel_luminance_control) {
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+@@ -280,9 +280,9 @@ bool set_default_brightness_aux(struct dc_link *link)
+ if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
+ if (!read_default_bl_aux(link, &default_backlight))
+ default_backlight = 150000;
+- // if < 5 nits or > 5000, it might be wrong readback
+- if (default_backlight < 5000 || default_backlight > 5000000)
+- default_backlight = 150000; //
++ // if > 5000 nits, it might be a wrong readback
++ if (default_backlight > 5000000)
++ default_backlight = 150000;
+
+ return edp_set_backlight_level_nits(link, true,
+ default_backlight, 0);
+@@ -290,14 +290,23 @@ bool set_default_brightness_aux(struct dc_link *link)
+ return false;
+ }
+
+-bool set_cached_brightness_aux(struct dc_link *link)
++bool edp_is_ilr_optimization_enabled(struct dc_link *link)
+ {
+- if (link->backlight_settings.backlight_millinits)
+- return edp_set_backlight_level_nits(link, true,
+- link->backlight_settings.backlight_millinits, 0);
+- else
+- return set_default_brightness_aux(link);
+- return false;
++ if (link->dpcd_caps.edp_supported_link_rates_count == 0 || !link->panel_config.ilr.optimize_edp_link_rate)
++ return false;
++ return true;
++}
++
++enum dc_link_rate get_max_link_rate_from_ilr_table(struct dc_link *link)
++{
++ enum dc_link_rate link_rate = link->reported_link_cap.link_rate;
++
++ for (int i = 0; i < link->dpcd_caps.edp_supported_link_rates_count; i++) {
++ if (link_rate < link->dpcd_caps.edp_supported_link_rates[i])
++ link_rate = link->dpcd_caps.edp_supported_link_rates[i];
++ }
++
++ return link_rate;
+ }
+
+ bool edp_is_ilr_optimization_required(struct dc_link *link,
+@@ -311,8 +320,7 @@ bool edp_is_ilr_optimization_required(struct dc_link *link,
+
+ ASSERT(link || crtc_timing); // invalid input
+
+- if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
+- !link->panel_config.ilr.optimize_edp_link_rate)
++ if (!edp_is_ilr_optimization_enabled(link))
+ return false;
+
+
+@@ -362,6 +370,34 @@ void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
+ link->dc->hwss.edp_backlight_control(link, true);
+ }
+
++void edp_set_panel_power(struct dc_link *link, bool powerOn)
++{
++ if (powerOn) {
++ // 1. panel VDD on
++ if (!link->dc->config.edp_no_power_sequencing)
++ link->dc->hwss.edp_power_control(link, true);
++ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
++
++ // 2. panel BL on
++ if (link->dc->hwss.edp_backlight_control)
++ link->dc->hwss.edp_backlight_control(link, true);
++
++ // 3. Rx power on
++ dpcd_write_rx_power_ctrl(link, true);
++ } else {
++ // 3. Rx power off
++ dpcd_write_rx_power_ctrl(link, false);
++
++ // 2. panel BL off
++ if (link->dc->hwss.edp_backlight_control)
++ link->dc->hwss.edp_backlight_control(link, false);
++
++ // 1. panel VDD off
++ if (!link->dc->config.edp_no_power_sequencing)
++ link->dc->hwss.edp_power_control(link, false);
++ }
++}
++
+ bool edp_wait_for_t12(struct dc_link *link)
+ {
+ if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+index 0a5bbda8c739c..a034288ad75d4 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+@@ -30,7 +30,6 @@
+ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
+ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
+ bool set_default_brightness_aux(struct dc_link *link);
+-bool set_cached_brightness_aux(struct dc_link *link);
+ void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
+ int edp_get_backlight_level(const struct dc_link *link);
+ bool edp_get_backlight_level_nits(struct dc_link *link,
+@@ -64,9 +63,12 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
+ bool edp_wait_for_t12(struct dc_link *link);
+ bool edp_is_ilr_optimization_required(struct dc_link *link,
+ struct dc_crtc_timing *crtc_timing);
++bool edp_is_ilr_optimization_enabled(struct dc_link *link);
++enum dc_link_rate get_max_link_rate_from_ilr_table(struct dc_link *link);
+ bool edp_backlight_enable_aux(struct dc_link *link, bool enable);
+ void edp_add_delay_for_T9(struct dc_link *link);
+ bool edp_receiver_ready_T9(struct dc_link *link);
+ bool edp_receiver_ready_T7(struct dc_link *link);
+ bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable);
++void edp_set_panel_power(struct dc_link *link, bool powerOn);
+ #endif /* __DC_LINK_EDP_POWER_CONTROL_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+index 2d995c87fbb98..d3c4a9a577eea 100644
+--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
++++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+@@ -186,6 +186,7 @@ struct dmub_srv_region_params {
+ uint32_t vbios_size;
+ const uint8_t *fw_inst_const;
+ const uint8_t *fw_bss_data;
++ bool is_mailbox_in_inbox;
+ };
+
+ /**
+@@ -205,20 +206,25 @@ struct dmub_srv_region_params {
+ */
+ struct dmub_srv_region_info {
+ uint32_t fb_size;
++ uint32_t inbox_size;
+ uint8_t num_regions;
+ struct dmub_region regions[DMUB_WINDOW_TOTAL];
+ };
+
+ /**
+- * struct dmub_srv_fb_params - parameters used for driver fb setup
++ * struct dmub_srv_memory_params - parameters used for driver fb setup
+ * @region_info: region info calculated by dmub service
+- * @cpu_addr: base cpu address for the framebuffer
+- * @gpu_addr: base gpu virtual address for the framebuffer
++ * @cpu_fb_addr: base cpu address for the framebuffer
++ * @cpu_inbox_addr: base cpu address for the gart-backed inbox
++ * @gpu_fb_addr: base gpu virtual address for the framebuffer
++ * @gpu_inbox_addr: base gpu virtual address for the gart-backed inbox
+ */
+-struct dmub_srv_fb_params {
++struct dmub_srv_memory_params {
+ const struct dmub_srv_region_info *region_info;
+- void *cpu_addr;
+- uint64_t gpu_addr;
++ void *cpu_fb_addr;
++ void *cpu_inbox_addr;
++ uint64_t gpu_fb_addr;
++ uint64_t gpu_inbox_addr;
+ };
+
+ /**
+@@ -546,8 +552,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+- const struct dmub_srv_fb_params *params,
++enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
++ const struct dmub_srv_memory_params *params,
+ struct dmub_srv_fb_info *out);
+
+ /**
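A hypothetical DM-side setup sketch for the split layout, using the field
names introduced in struct dmub_srv_memory_params; the buffer variables are
placeholders for VRAM- and GART-backed allocations:

    struct dmub_srv_memory_params mem_params = { 0 };

    mem_params.region_info    = &region_info;
    mem_params.cpu_fb_addr    = fb_cpu_ptr;       /* VRAM windows */
    mem_params.gpu_fb_addr    = fb_gpu_addr;
    mem_params.cpu_inbox_addr = inbox_cpu_ptr;    /* GART-backed mailbox */
    mem_params.gpu_inbox_addr = inbox_gpu_addr;

    status = dmub_srv_calc_mem_info(dmub, &mem_params, &fb_info);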
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+index 93624ffe4eb82..6c45e216c709c 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+@@ -386,7 +386,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
+ uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+ uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
+-
++ uint32_t previous_top = 0;
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+@@ -411,8 +411,15 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ bios->base = dmub_align(stack->top, 256);
+ bios->top = bios->base + params->vbios_size;
+
+- mail->base = dmub_align(bios->top, 256);
+- mail->top = mail->base + DMUB_MAILBOX_SIZE;
++ if (params->is_mailbox_in_inbox) {
++ mail->base = 0;
++ mail->top = mail->base + DMUB_MAILBOX_SIZE;
++ previous_top = bios->top;
++ } else {
++ mail->base = dmub_align(bios->top, 256);
++ mail->top = mail->base + DMUB_MAILBOX_SIZE;
++ previous_top = mail->top;
++ }
+
+ fw_info = dmub_get_fw_meta_info(params);
+
+@@ -431,7 +438,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ dmub->fw_version = fw_info->fw_version;
+ }
+
+- trace_buff->base = dmub_align(mail->top, 256);
++ trace_buff->base = dmub_align(previous_top, 256);
+ trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
+
+ fw_state->base = dmub_align(trace_buff->top, 256);
+@@ -442,11 +449,14 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+
+ out->fb_size = dmub_align(scratch_mem->top, 4096);
+
++ if (params->is_mailbox_in_inbox)
++ out->inbox_size = dmub_align(mail->top, 4096);
++
+ return DMUB_STATUS_OK;
+ }
+
+-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+- const struct dmub_srv_fb_params *params,
++enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
++ const struct dmub_srv_memory_params *params,
+ struct dmub_srv_fb_info *out)
+ {
+ uint8_t *cpu_base;
+@@ -461,8 +471,8 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+ if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
+ return DMUB_STATUS_INVALID;
+
+- cpu_base = (uint8_t *)params->cpu_addr;
+- gpu_base = params->gpu_addr;
++ cpu_base = (uint8_t *)params->cpu_fb_addr;
++ gpu_base = params->gpu_fb_addr;
+
+ for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
+ const struct dmub_region *reg =
+@@ -470,6 +480,12 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+
+ out->fb[i].cpu_addr = cpu_base + reg->base;
+ out->fb[i].gpu_addr = gpu_base + reg->base;
++
++ if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
++ out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
++ out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
++ }
++
+ out->fb[i].size = reg->top - reg->base;
+ }
+
+@@ -658,9 +674,16 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
+- dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+- dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
+- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
++ uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
++ uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
++
++ if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
++ return DMUB_STATUS_HW_FAILURE;
++ } else {
++ dmub->inbox1_rb.rptr = rptr;
++ dmub->inbox1_rb.wrpt = wptr;
++ dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
++ }
+ }
+
+ return DMUB_STATUS_OK;
+@@ -694,6 +717,11 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
++ if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
++ dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
++ return DMUB_STATUS_HW_FAILURE;
++ }
++
+ if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+ return DMUB_STATUS_OK;
+
+@@ -969,6 +997,7 @@ enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t ti
+ ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
+ if (ack)
+ return DMUB_STATUS_OK;
++ udelay(1);
+ }
+ return DMUB_STATUS_TIMEOUT;
+ }
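The hardening added in this file follows one pattern: ring pointers read
back from hardware are validated against the ring capacity before use, so a
corrupted register read fails with DMUB_STATUS_HW_FAILURE instead of
indexing out of bounds. A minimal standalone equivalent of the check:

    static bool rb_pointers_valid(uint32_t rptr, uint32_t wptr,
                                  uint32_t capacity)
    {
        /* both pointers must stay within [0, capacity] */
        return rptr <= capacity && wptr <= capacity;
    }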
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
+index c92c4b83253f8..4bff1ef8a9a64 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
+@@ -6369,6 +6369,8 @@
+ #define regTCP_INVALIDATE_BASE_IDX 1
+ #define regTCP_STATUS 0x19a1
+ #define regTCP_STATUS_BASE_IDX 1
++#define regTCP_CNTL 0x19a2
++#define regTCP_CNTL_BASE_IDX 1
+ #define regTCP_CNTL2 0x19a3
+ #define regTCP_CNTL2_BASE_IDX 1
+ #define regTCP_DEBUG_INDEX 0x19a5
+diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
+index 0b6a057e0a4c4..5aac8d545bdc6 100644
+--- a/drivers/gpu/drm/amd/include/pptable.h
++++ b/drivers/gpu/drm/amd/include/pptable.h
+@@ -78,7 +78,7 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+ typedef struct _ATOM_PPLIB_STATE
+ {
+ UCHAR ucNonClockStateIndex;
+- UCHAR ucClockStateIndices[1]; // variable-sized
++ UCHAR ucClockStateIndices[]; // variable-sized
+ } ATOM_PPLIB_STATE;
+
+
+@@ -473,7 +473,7 @@ typedef struct _ATOM_PPLIB_STATE_V2
+ /**
+ * Driver will read the first ucNumDPMLevels in this array
+ */
+- UCHAR clockInfoIndex[1];
++ UCHAR clockInfoIndex[];
+ } ATOM_PPLIB_STATE_V2;
+
+ typedef struct _StateArray{
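Why the [1] -> [] change matters: with a true flexible array member,
sizeof() no longer counts a phantom element and fortified bounds checks see
the real extent of the trailing array. A standalone illustration (the
struct and helper are examples, not the pptable types):

    #include <stdlib.h>
    #include <stddef.h>

    struct state {
        unsigned char non_clock_index;
        unsigned char clock_state_indices[];   /* flexible array member */
    };

    static struct state *state_alloc(size_t n_indices)
    {
        /* userspace analogue of kzalloc(struct_size(...), GFP_KERNEL) */
        return calloc(1, offsetof(struct state, clock_state_indices)
                         + n_indices);
    }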
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 8bb2da13826f1..b4c9fedaa51de 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -734,7 +734,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+- if (count > 127)
++ if (count > 127 || count == 0)
+ return -EINVAL;
+
+ if (*buf == 's')
+@@ -754,7 +754,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ else
+ return -EINVAL;
+
+- memcpy(buf_cpy, buf, count+1);
++ memcpy(buf_cpy, buf, count);
++ buf_cpy[count] = 0;
+
+ tmp_str = buf_cpy;
+
+@@ -771,6 +772,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ return -EINVAL;
+ parameter_size++;
+
++ if (!tmp_str)
++ break;
++
+ while (isspace(*tmp_str))
+ tmp_str++;
+ }
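The sysfs fix above addresses two hazards: a zero-length write, and the old
memcpy of count+1 bytes from a buffer the kernel does not guarantee to be
NUL-terminated. The safe shape, as a short sketch of the function body:

    char buf_cpy[128];

    if (count == 0 || count > sizeof(buf_cpy) - 1)
        return -EINVAL;

    memcpy(buf_cpy, buf, count);    /* copy exactly what was written */
    buf_cpy[count] = '\0';          /* terminate explicitly */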
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
+index 7a31cfa5e7fb4..9fcad69a9f344 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
+@@ -164,7 +164,7 @@ typedef struct _ATOM_Tonga_State {
+ typedef struct _ATOM_Tonga_State_Array {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_State entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_State entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_State_Array;
+
+ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
+@@ -179,7 +179,7 @@ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
+ typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_MCLK_Dependency_Table;
+
+ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
+@@ -194,7 +194,7 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
+ typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_SCLK_Dependency_Table;
+
+ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+@@ -210,7 +210,7 @@ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+ typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Polaris_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Polaris_SCLK_Dependency_Table;
+
+ typedef struct _ATOM_Tonga_PCIE_Record {
+@@ -222,7 +222,7 @@ typedef struct _ATOM_Tonga_PCIE_Record {
+ typedef struct _ATOM_Tonga_PCIE_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_PCIE_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_PCIE_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_PCIE_Table;
+
+ typedef struct _ATOM_Polaris10_PCIE_Record {
+@@ -235,7 +235,7 @@ typedef struct _ATOM_Polaris10_PCIE_Record {
+ typedef struct _ATOM_Polaris10_PCIE_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Polaris10_PCIE_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Polaris10_PCIE_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Polaris10_PCIE_Table;
+
+
+@@ -252,7 +252,7 @@ typedef struct _ATOM_Tonga_MM_Dependency_Record {
+ typedef struct _ATOM_Tonga_MM_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_MM_Dependency_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_MM_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_MM_Dependency_Table;
+
+ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
+@@ -265,7 +265,7 @@ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
+ typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_Voltage_Lookup_Table;
+
+ typedef struct _ATOM_Tonga_Fan_Table {
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index 5a2371484a58c..11372fcc59c8f 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -1823,9 +1823,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+
+ data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
+ data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
+- data->pcie_dpm_key_disabled =
+- !amdgpu_device_pcie_dynamic_switching_supported() ||
+- !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
++ data->pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
+ /* need to set voltage control types before EVV patching */
+ data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
+ data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index f005a90c35af4..b47fd42414f46 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1232,7 +1232,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
+ {
+ struct smu_feature *feature = &smu->smu_feature;
+ struct amdgpu_device *adev = smu->adev;
+- uint32_t pcie_gen = 0, pcie_width = 0;
++ uint8_t pcie_gen = 0, pcie_width = 0;
+ uint64_t features_supported;
+ int ret = 0;
+
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+index 5a52098bcf166..72ed836328966 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+@@ -844,7 +844,7 @@ struct pptable_funcs {
+ * &pcie_gen_cap: Maximum allowed PCIe generation.
+ * &pcie_width_cap: Maximum allowed PCIe width.
+ */
+- int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
++ int (*update_pcie_parameters)(struct smu_context *smu, uint8_t pcie_gen_cap, uint8_t pcie_width_cap);
+
+ /**
+ * @i2c_init: Initialize i2c.
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+index 355c156d871af..cc02f979e9e98 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+@@ -296,8 +296,8 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ uint32_t pptable_id);
+
+ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+- uint32_t pcie_gen_cap,
+- uint32_t pcie_width_cap);
++ uint8_t pcie_gen_cap,
++ uint8_t pcie_width_cap);
+
+ #endif
+ #endif
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+index 18487ae10bcff..c564f6e191f84 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+@@ -2376,8 +2376,8 @@ static int navi10_get_power_limit(struct smu_context *smu,
+ }
+
+ static int navi10_update_pcie_parameters(struct smu_context *smu,
+- uint32_t pcie_gen_cap,
+- uint32_t pcie_width_cap)
++ uint8_t pcie_gen_cap,
++ uint8_t pcie_width_cap)
+ {
+ struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index da2860da60188..a7f4f82d23b4b 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2085,14 +2085,14 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
+ #define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+- uint32_t pcie_gen_cap,
+- uint32_t pcie_width_cap)
++ uint8_t pcie_gen_cap,
++ uint8_t pcie_width_cap)
+ {
+ struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+ uint8_t *table_member1, *table_member2;
+- uint32_t min_gen_speed, max_gen_speed;
+- uint32_t min_lane_width, max_lane_width;
++ uint8_t min_gen_speed, max_gen_speed;
++ uint8_t min_lane_width, max_lane_width;
+ uint32_t smu_pcie_arg;
+ int ret, i;
+
+@@ -2108,7 +2108,7 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+ min_lane_width = min_lane_width > max_lane_width ?
+ max_lane_width : min_lane_width;
+
+- if (!amdgpu_device_pcie_dynamic_switching_supported()) {
++ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ pcie_table->pcie_gen[0] = max_gen_speed;
+ pcie_table->pcie_lane[0] = max_lane_width;
+ } else {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+index cc3169400c9b0..08fff9600bd29 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+@@ -257,8 +257,11 @@ static int aldebaran_tables_init(struct smu_context *smu)
+ }
+
+ smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+- if (!smu_table->ecc_table)
++ if (!smu_table->ecc_table) {
++ kfree(smu_table->metrics_table);
++ kfree(smu_table->gpu_metrics_table);
+ return -ENOMEM;
++ }
+
+ return 0;
+ }
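The leak fix frees the earlier allocations inline when the last one fails.
The same unwind is often written with goto labels in kernel code; a hedged
sketch of that alternative shape (names are placeholders):

    a = kzalloc(size_a, GFP_KERNEL);
    if (!a)
        return -ENOMEM;

    b = kzalloc(size_b, GFP_KERNEL);
    if (!b)
        goto free_a;

    c = kzalloc(size_c, GFP_KERNEL);
    if (!c)
        goto free_b;

    return 0;

    free_b:
        kfree(b);
    free_a:
        kfree(a);
        return -ENOMEM;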
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 0232adb95df3a..5355f621388bb 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -2420,8 +2420,8 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
+ }
+
+ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+- uint32_t pcie_gen_cap,
+- uint32_t pcie_width_cap)
++ uint8_t pcie_gen_cap,
++ uint8_t pcie_width_cap)
+ {
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_pcie_table *pcie_table =
+@@ -2430,7 +2430,10 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+ uint32_t smu_pcie_arg;
+ int ret, i;
+
+- if (!amdgpu_device_pcie_dynamic_switching_supported()) {
++ if (!num_of_levels)
++ return 0;
++
++ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index 3903a47669e43..4022dd44ebb2b 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -352,12 +352,12 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
+ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
+- if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO ||
+- powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
++ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO) {
+ smu_baco->platform_support = true;
+
+- if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
+- smu_baco->maco_support = true;
++ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
++ smu_baco->maco_support = true;
++ }
+
+ /*
+ * We are in the transition to a new OD mechanism.
+@@ -2163,38 +2163,10 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
+ }
+ }
+
+- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
+- (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
+- ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
+- ret = smu_cmn_update_table(smu,
+- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+- WORKLOAD_PPLIB_COMPUTE_BIT,
+- (void *)(&activity_monitor_external),
+- false);
+- if (ret) {
+- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+- return ret;
+- }
+-
+- ret = smu_cmn_update_table(smu,
+- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+- WORKLOAD_PPLIB_CUSTOM_BIT,
+- (void *)(&activity_monitor_external),
+- true);
+- if (ret) {
+- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+- return ret;
+- }
+-
+- workload_type = smu_cmn_to_asic_specific_index(smu,
+- CMN2ASIC_MAPPING_WORKLOAD,
+- PP_SMC_POWER_PROFILE_CUSTOM);
+- } else {
+- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+- workload_type = smu_cmn_to_asic_specific_index(smu,
++ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
++ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ smu->power_profile_mode);
+- }
+
+ if (workload_type < 0)
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+index de80e191a92c4..24d6811438c5c 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+@@ -1968,8 +1968,10 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
+
+ metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
+- if (ret)
++ if (ret) {
++ kfree(metrics);
+ return ret;
++ }
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
+
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index 94ef5b4d116d7..51ae41cb43ea0 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -341,12 +341,13 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
+ if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
+- if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO ||
+- powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
++ if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) {
+ smu_baco->platform_support = true;
+
+- if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
+- smu_baco->maco_support = true;
++ if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
++ && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
++ smu_baco->maco_support = true;
++ }
+
+ #if 0
+ if (!overdrive_lowerlimits->FeatureCtrlMask ||
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+index 4618687a8f4d6..f3e744172673c 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+@@ -1223,7 +1223,7 @@ int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
+ return 0;
+ }
+
+-static void
++static int
+ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ struct komeda_pipeline_state *new)
+ {
+@@ -1243,8 +1243,12 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ c = komeda_pipeline_get_component(pipe, id);
+ c_st = komeda_component_get_state_and_set_user(c,
+ drm_st, NULL, new->crtc);
++ if (PTR_ERR(c_st) == -EDEADLK)
++ return -EDEADLK;
+ WARN_ON(IS_ERR(c_st));
+ }
++
++ return 0;
+ }
+
+ /* release unclaimed pipeline resource */
+@@ -1266,9 +1270,8 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
+ if (WARN_ON(IS_ERR_OR_NULL(st)))
+ return -EINVAL;
+
+- komeda_pipeline_unbound_components(pipe, st);
++ return komeda_pipeline_unbound_components(pipe, st);
+
+- return 0;
+ }
+
+ /* Since standalone disabled components must be disabled separately and in the
+diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+index d207b03f8357c..78122b35a0cbb 100644
+--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
++++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+@@ -358,11 +358,18 @@ static void aspeed_gfx_remove(struct platform_device *pdev)
+ sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
+ drm_dev_unregister(drm);
+ aspeed_gfx_unload(drm);
++ drm_atomic_helper_shutdown(drm);
++}
++
++static void aspeed_gfx_shutdown(struct platform_device *pdev)
++{
++ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+ }
+
+ static struct platform_driver aspeed_gfx_platform_driver = {
+ .probe = aspeed_gfx_probe,
+ .remove_new = aspeed_gfx_remove,
++ .shutdown = aspeed_gfx_shutdown,
+ .driver = {
+ .name = "aspeed_gfx",
+ .of_match_table = aspeed_gfx_match,
+diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
+index 848a9f1403e89..f7053f2972bb9 100644
+--- a/drivers/gpu/drm/ast/ast_drv.h
++++ b/drivers/gpu/drm/ast/ast_drv.h
+@@ -172,6 +172,17 @@ to_ast_sil164_connector(struct drm_connector *connector)
+ return container_of(connector, struct ast_sil164_connector, base);
+ }
+
++struct ast_bmc_connector {
++ struct drm_connector base;
++ struct drm_connector *physical_connector;
++};
++
++static inline struct ast_bmc_connector *
++to_ast_bmc_connector(struct drm_connector *connector)
++{
++ return container_of(connector, struct ast_bmc_connector, base);
++}
++
+ /*
+ * Device
+ */
+@@ -216,7 +227,7 @@ struct ast_device {
+ } astdp;
+ struct {
+ struct drm_encoder encoder;
+- struct drm_connector connector;
++ struct ast_bmc_connector bmc_connector;
+ } bmc;
+ } output;
+
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index 32f04ec6c386f..3de0f457fff6a 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -1767,6 +1767,30 @@ static const struct drm_encoder_funcs ast_bmc_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+ };
+
++static int ast_bmc_connector_helper_detect_ctx(struct drm_connector *connector,
++ struct drm_modeset_acquire_ctx *ctx,
++ bool force)
++{
++ struct ast_bmc_connector *bmc_connector = to_ast_bmc_connector(connector);
++ struct drm_connector *physical_connector = bmc_connector->physical_connector;
++
++ /*
++ * Most user-space compositors cannot handle more than one connected
++ * connector per CRTC. Hence, we only mark the BMC as connected if the
++ * physical connector is disconnected. If the physical connector's status
++ * is connected or unknown, the BMC remains disconnected. This has no
++ * effect on the output of the BMC.
++ *
++ * FIXME: Remove this logic once user-space compositors can handle more
++ * than one connector per CRTC. The BMC should always be connected.
++ */
++
++ if (physical_connector && physical_connector->status == connector_status_disconnected)
++ return connector_status_connected;
++
++ return connector_status_disconnected;
++}
++
+ static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
+ {
+ return drm_add_modes_noedid(connector, 4096, 4096);
+@@ -1774,6 +1798,7 @@ static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
+
+ static const struct drm_connector_helper_funcs ast_bmc_connector_helper_funcs = {
+ .get_modes = ast_bmc_connector_helper_get_modes,
++ .detect_ctx = ast_bmc_connector_helper_detect_ctx,
+ };
+
+ static const struct drm_connector_funcs ast_bmc_connector_funcs = {
+@@ -1784,12 +1809,33 @@ static const struct drm_connector_funcs ast_bmc_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ };
+
+-static int ast_bmc_output_init(struct ast_device *ast)
++static int ast_bmc_connector_init(struct drm_device *dev,
++ struct ast_bmc_connector *bmc_connector,
++ struct drm_connector *physical_connector)
++{
++ struct drm_connector *connector = &bmc_connector->base;
++ int ret;
++
++ ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
++ DRM_MODE_CONNECTOR_VIRTUAL);
++ if (ret)
++ return ret;
++
++ drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
++
++ bmc_connector->physical_connector = physical_connector;
++
++ return 0;
++}
++
++static int ast_bmc_output_init(struct ast_device *ast,
++ struct drm_connector *physical_connector)
+ {
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
+ struct drm_encoder *encoder = &ast->output.bmc.encoder;
+- struct drm_connector *connector = &ast->output.bmc.connector;
++ struct ast_bmc_connector *bmc_connector = &ast->output.bmc.bmc_connector;
++ struct drm_connector *connector = &bmc_connector->base;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder,
+@@ -1799,13 +1845,10 @@ static int ast_bmc_output_init(struct ast_device *ast)
+ return ret;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+- ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
+- DRM_MODE_CONNECTOR_VIRTUAL);
++ ret = ast_bmc_connector_init(dev, bmc_connector, physical_connector);
+ if (ret)
+ return ret;
+
+- drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
+-
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
+@@ -1864,6 +1907,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
+ int ast_mode_config_init(struct ast_device *ast)
+ {
+ struct drm_device *dev = &ast->base;
++ struct drm_connector *physical_connector = NULL;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+@@ -1904,23 +1948,27 @@ int ast_mode_config_init(struct ast_device *ast)
+ ret = ast_vga_output_init(ast);
+ if (ret)
+ return ret;
++ physical_connector = &ast->output.vga.vga_connector.base;
+ }
+ if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
+ ret = ast_sil164_output_init(ast);
+ if (ret)
+ return ret;
++ physical_connector = &ast->output.sil164.sil164_connector.base;
+ }
+ if (ast->tx_chip_types & AST_TX_DP501_BIT) {
+ ret = ast_dp501_output_init(ast);
+ if (ret)
+ return ret;
++ physical_connector = &ast->output.dp501.connector;
+ }
+ if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
+ ret = ast_astdp_output_init(ast);
+ if (ret)
+ return ret;
++ physical_connector = &ast->output.astdp.connector;
+ }
+- ret = ast_bmc_output_init(ast);
++ ret = ast_bmc_output_init(ast, physical_connector);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
+index 44a660a4bdbfc..ba82a1142adf7 100644
+--- a/drivers/gpu/drm/bridge/Kconfig
++++ b/drivers/gpu/drm/bridge/Kconfig
+@@ -181,6 +181,7 @@ config DRM_NWL_MIPI_DSI
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
++ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ select MFD_SYSCON
+ select MULTIPLEXER
+@@ -227,6 +228,7 @@ config DRM_SAMSUNG_DSIM
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
++ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ The Samsung MIPI DSIM bridge controller driver.
+diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
+index ec35215a20034..cced81633ddcd 100644
+--- a/drivers/gpu/drm/bridge/cadence/Kconfig
++++ b/drivers/gpu/drm/bridge/cadence/Kconfig
+@@ -4,6 +4,7 @@ config DRM_CDNS_DSI
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
++ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ depends on OF
+ help
+diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
+index 466641c77fe91..8f5846b76d594 100644
+--- a/drivers/gpu/drm/bridge/ite-it66121.c
++++ b/drivers/gpu/drm/bridge/ite-it66121.c
+@@ -884,14 +884,14 @@ static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge,
+ mutex_lock(&ctx->lock);
+ ret = it66121_preamble_ddc(ctx);
+ if (ret) {
+- edid = ERR_PTR(ret);
++ edid = NULL;
+ goto out_unlock;
+ }
+
+ ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
+ IT66121_DDC_HEADER_EDID);
+ if (ret) {
+- edid = ERR_PTR(ret);
++ edid = NULL;
+ goto out_unlock;
+ }
+
+@@ -1447,10 +1447,14 @@ static int it66121_audio_get_eld(struct device *dev, void *data,
+ struct it66121_ctx *ctx = dev_get_drvdata(dev);
+
+ mutex_lock(&ctx->lock);
+-
+- memcpy(buf, ctx->connector->eld,
+- min(sizeof(ctx->connector->eld), len));
+-
++ if (!ctx->connector) {
++ /* Pass an empty ELD if the connector is not available */
++ dev_dbg(dev, "No connector present, passing empty ELD data");
++ memset(buf, 0, len);
++ } else {
++ memcpy(buf, ctx->connector->eld,
++ min(sizeof(ctx->connector->eld), len));
++ }
+ mutex_unlock(&ctx->lock);
+
+ return 0;
+diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+index 4eaea67fb71c2..03532efb893bb 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+@@ -45,7 +45,6 @@ struct lt8912 {
+
+ u8 data_lanes;
+ bool is_power_on;
+- bool is_attached;
+ };
+
+ static int lt8912_write_init_config(struct lt8912 *lt)
+@@ -559,6 +558,13 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ struct lt8912 *lt = bridge_to_lt8912(bridge);
+ int ret;
+
++ ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge,
++ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
++ if (ret < 0) {
++ dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret);
++ return ret;
++ }
++
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ ret = lt8912_bridge_connector_init(bridge);
+ if (ret) {
+@@ -575,8 +581,6 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ if (ret)
+ goto error;
+
+- lt->is_attached = true;
+-
+ return 0;
+
+ error:
+@@ -588,15 +592,10 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge)
+ {
+ struct lt8912 *lt = bridge_to_lt8912(bridge);
+
+- if (lt->is_attached) {
+- lt8912_hard_power_off(lt);
+-
+- if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD)
+- drm_bridge_hpd_disable(lt->hdmi_port);
++ lt8912_hard_power_off(lt);
+
+- drm_connector_unregister(&lt->connector);
+- drm_connector_cleanup(&lt->connector);
+- }
++ if (lt->connector.dev && lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD)
++ drm_bridge_hpd_disable(lt->hdmi_port);
+ }
+
+ static enum drm_connector_status
+@@ -750,7 +749,6 @@ static void lt8912_remove(struct i2c_client *client)
+ {
+ struct lt8912 *lt = i2c_get_clientdata(client);
+
+- lt8912_bridge_detach(&lt->bridge);
+ drm_bridge_remove(&lt->bridge);
+ lt8912_free_i2c(lt);
+ lt8912_put_dt(lt);
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+index 22c84d29c2bc5..6f33bb0dd32aa 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+@@ -929,9 +929,9 @@ retry:
+ init_waitqueue_head(&lt9611uxc->wq);
+ INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
+
+- ret = devm_request_threaded_irq(dev, client->irq, NULL,
+- lt9611uxc_irq_thread_handler,
+- IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
++ ret = request_threaded_irq(client->irq, NULL,
++ lt9611uxc_irq_thread_handler,
++ IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ goto err_disable_regulators;
+@@ -967,6 +967,8 @@ retry:
+ return lt9611uxc_audio_init(dev, lt9611uxc);
+
+ err_remove_bridge:
++ free_irq(client->irq, lt9611uxc);
++ cancel_work_sync(&lt9611uxc->work);
+ drm_bridge_remove(&lt9611uxc->bridge);
+
+ err_disable_regulators:
+@@ -983,7 +985,7 @@ static void lt9611uxc_remove(struct i2c_client *client)
+ {
+ struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
+
+- disable_irq(client->irq);
++ free_irq(client->irq, lt9611uxc);
+ cancel_work_sync(&lt9611uxc->work);
+ lt9611uxc_audio_exit(lt9611uxc);
+ drm_bridge_remove(&lt9611uxc->bridge);
+diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
+index cf777bdb25d2a..19bdb32dbc9aa 100644
+--- a/drivers/gpu/drm/bridge/samsung-dsim.c
++++ b/drivers/gpu/drm/bridge/samsung-dsim.c
+@@ -385,7 +385,7 @@ static const unsigned int imx8mm_dsim_reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 500,
+ [STOP_STATE_CNT] = 0xf,
+- [PHYCTRL_ULPS_EXIT] = 0,
++ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf),
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06),
+@@ -413,6 +413,7 @@ static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
+ .m_min = 41,
+ .m_max = 125,
+ .min_freq = 500,
++ .has_broken_fifoctrl_emptyhdr = 1,
+ };
+
+ static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
+@@ -429,6 +430,7 @@ static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
+ .m_min = 41,
+ .m_max = 125,
+ .min_freq = 500,
++ .has_broken_fifoctrl_emptyhdr = 1,
+ };
+
+ static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
+@@ -1010,8 +1012,20 @@ static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
+ do {
+ u32 reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG);
+
+- if (reg & DSIM_SFR_HEADER_EMPTY)
+- return 0;
++ if (!dsi->driver_data->has_broken_fifoctrl_emptyhdr) {
++ if (reg & DSIM_SFR_HEADER_EMPTY)
++ return 0;
++ } else {
++ if (!(reg & DSIM_SFR_HEADER_FULL)) {
++ /*
++ * Wait a little bit, so the pending data can
++ * actually leave the FIFO to avoid overflow.
++ */
++ if (!cond_resched())
++ usleep_range(950, 1050);
++ return 0;
++ }
++ }
+
+ if (!cond_resched())
+ usleep_range(950, 1050);
+diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
+index 819a4b6ec2a07..6eed5c4232956 100644
+--- a/drivers/gpu/drm/bridge/tc358768.c
++++ b/drivers/gpu/drm/bridge/tc358768.c
+@@ -15,6 +15,7 @@
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/slab.h>
++#include <linux/units.h>
+
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_drv.h>
+@@ -216,6 +217,10 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
+ u32 tmp, orig;
+
+ tc358768_read(priv, reg, &orig);
++
++ if (priv->error)
++ return;
++
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ if (tmp != orig)
+@@ -600,7 +605,7 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
+
+ dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n",
+ clk_get_rate(priv->refclk), fbd, prd, frs);
+- dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n",
++ dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, HSByteClk %u\n",
+ priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4);
+ dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n",
+ tc358768_pll_to_pclk(priv, priv->dsiclk * 2),
+@@ -623,15 +628,14 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
+ return tc358768_clear_error(priv);
+ }
+
+-#define TC358768_PRECISION 1000
+-static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk)
++static u32 tc358768_ns_to_cnt(u32 ns, u32 period_ps)
+ {
+- return (ns * TC358768_PRECISION + period_nsk) / period_nsk;
++ return DIV_ROUND_UP(ns * 1000, period_ps);
+ }
+
+-static u32 tc358768_to_ns(u32 nsk)
++static u32 tc358768_ps_to_ns(u32 ps)
+ {
+- return (nsk / TC358768_PRECISION);
++ return ps / 1000;
+ }
+
+ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+@@ -642,13 +646,15 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ u32 val, val2, lptxcnt, hact, data_type;
+ s32 raw_val;
+ const struct drm_display_mode *mode;
+- u32 dsibclk_nsk, dsiclk_nsk, ui_nsk;
+- u32 dsiclk, dsibclk, video_start;
++ u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
++ u32 dsiclk, hsbyteclk, video_start;
+ const u32 internal_delay = 40;
+ int ret, i;
++ struct videomode vm;
++ struct device *dev = priv->dev;
+
+ if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+- dev_warn_once(priv->dev, "Non-continuous mode unimplemented, falling back to continuous\n");
++ dev_warn_once(dev, "Non-continuous mode unimplemented, falling back to continuous\n");
+ mode_flags &= ~MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ }
+
+@@ -656,7 +662,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+
+ ret = tc358768_sw_reset(priv);
+ if (ret) {
+- dev_err(priv->dev, "Software reset failed: %d\n", ret);
++ dev_err(dev, "Software reset failed: %d\n", ret);
+ tc358768_hw_disable(priv);
+ return;
+ }
+@@ -664,45 +670,47 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ mode = &bridge->encoder->crtc->state->adjusted_mode;
+ ret = tc358768_setup_pll(priv, mode);
+ if (ret) {
+- dev_err(priv->dev, "PLL setup failed: %d\n", ret);
++ dev_err(dev, "PLL setup failed: %d\n", ret);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
++ drm_display_mode_to_videomode(mode, &vm);
++
+ dsiclk = priv->dsiclk;
+- dsibclk = dsiclk / 4;
++ hsbyteclk = dsiclk / 4;
+
+ /* Data Format Control Register */
+ val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */
+ switch (dsi_dev->format) {
+ case MIPI_DSI_FMT_RGB888:
+ val |= (0x3 << 4);
+- hact = mode->hdisplay * 3;
+- video_start = (mode->htotal - mode->hsync_start) * 3;
++ hact = vm.hactive * 3;
++ video_start = (vm.hsync_len + vm.hback_porch) * 3;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ val |= (0x4 << 4);
+- hact = mode->hdisplay * 3;
+- video_start = (mode->htotal - mode->hsync_start) * 3;
++ hact = vm.hactive * 3;
++ video_start = (vm.hsync_len + vm.hback_porch) * 3;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ break;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ val |= (0x4 << 4) | BIT(3);
+- hact = mode->hdisplay * 18 / 8;
+- video_start = (mode->htotal - mode->hsync_start) * 18 / 8;
++ hact = vm.hactive * 18 / 8;
++ video_start = (vm.hsync_len + vm.hback_porch) * 18 / 8;
+ data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ break;
+
+ case MIPI_DSI_FMT_RGB565:
+ val |= (0x5 << 4);
+- hact = mode->hdisplay * 2;
+- video_start = (mode->htotal - mode->hsync_start) * 2;
++ hact = vm.hactive * 2;
++ video_start = (vm.hsync_len + vm.hback_porch) * 2;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ break;
+ default:
+- dev_err(priv->dev, "Invalid data format (%u)\n",
++ dev_err(dev, "Invalid data format (%u)\n",
+ dsi_dev->format);
+ tc358768_hw_disable(priv);
+ return;
+@@ -722,67 +730,67 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000);
+
+ /* DSI Timings */
+- dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION,
+- dsibclk);
+- dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk);
+- ui_nsk = dsiclk_nsk / 2;
+- dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk);
+- dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk);
+- dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk);
++ hsbyteclk_ps = (u32)div_u64(PICO, hsbyteclk);
++ dsiclk_ps = (u32)div_u64(PICO, dsiclk);
++ ui_ps = dsiclk_ps / 2;
++ dev_dbg(dev, "dsiclk: %u ps, ui %u ps, hsbyteclk %u ps\n", dsiclk_ps,
++ ui_ps, hsbyteclk_ps);
+
+ /* LP11 > 100us for D-PHY Rx Init */
+- val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1;
+- dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val);
++ val = tc358768_ns_to_cnt(100 * 1000, hsbyteclk_ps) - 1;
++ dev_dbg(dev, "LINEINITCNT: %u\n", val);
+ tc358768_write(priv, TC358768_LINEINITCNT, val);
+
+ /* LPTimeCnt > 50ns */
+- val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1;
++ val = tc358768_ns_to_cnt(50, hsbyteclk_ps) - 1;
+ lptxcnt = val;
+- dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val);
++ dev_dbg(dev, "LPTXTIMECNT: %u\n", val);
+ tc358768_write(priv, TC358768_LPTXTIMECNT, val);
+
+ /* 38ns < TCLK_PREPARE < 95ns */
+- val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
++ val = tc358768_ns_to_cnt(65, hsbyteclk_ps) - 1;
++ dev_dbg(dev, "TCLK_PREPARECNT %u\n", val);
+ /* TCLK_PREPARE + TCLK_ZERO > 300ns */
+- val2 = tc358768_ns_to_cnt(300 - tc358768_to_ns(2 * ui_nsk),
+- dsibclk_nsk) - 2;
++ val2 = tc358768_ns_to_cnt(300 - tc358768_ps_to_ns(2 * ui_ps),
++ hsbyteclk_ps) - 2;
++ dev_dbg(dev, "TCLK_ZEROCNT %u\n", val2);
+ val |= val2 << 8;
+- dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
+
+ /* TCLK_TRAIL > 60ns AND TEOT <= 105 ns + 12*UI */
+- raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(2 * ui_nsk), dsibclk_nsk) - 5;
++ raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(2 * ui_ps), hsbyteclk_ps) - 5;
+ val = clamp(raw_val, 0, 127);
+- dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
++ dev_dbg(dev, "TCLK_TRAILCNT: %u\n", val);
+ tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
+
+ /* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
+- val = 50 + tc358768_to_ns(4 * ui_nsk);
+- val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
++ val = 50 + tc358768_ps_to_ns(4 * ui_ps);
++ val = tc358768_ns_to_cnt(val, hsbyteclk_ps) - 1;
++ dev_dbg(dev, "THS_PREPARECNT %u\n", val);
+ /* THS_PREPARE + THS_ZERO > 145ns + 10*UI */
+- raw_val = tc358768_ns_to_cnt(145 - tc358768_to_ns(3 * ui_nsk), dsibclk_nsk) - 10;
++ raw_val = tc358768_ns_to_cnt(145 - tc358768_ps_to_ns(3 * ui_ps), hsbyteclk_ps) - 10;
+ val2 = clamp(raw_val, 0, 127);
++ dev_dbg(dev, "THS_ZEROCNT %u\n", val2);
+ val |= val2 << 8;
+- dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_THS_HEADERCNT, val);
+
+ /* TWAKEUP > 1ms in lptxcnt steps */
+- val = tc358768_ns_to_cnt(1020000, dsibclk_nsk);
++ val = tc358768_ns_to_cnt(1020000, hsbyteclk_ps);
+ val = val / (lptxcnt + 1) - 1;
+- dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val);
++ dev_dbg(dev, "TWAKEUP: %u\n", val);
+ tc358768_write(priv, TC358768_TWAKEUP, val);
+
+ /* TCLK_POSTCNT > 60ns + 52*UI */
+- val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk),
+- dsibclk_nsk) - 3;
+- dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
++ val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(52 * ui_ps),
++ hsbyteclk_ps) - 3;
++ dev_dbg(dev, "TCLK_POSTCNT: %u\n", val);
+ tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
+
+ /* max(60ns + 4*UI, 8*UI) < THS_TRAILCNT < 105ns + 12*UI */
+- raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(18 * ui_nsk),
+- dsibclk_nsk) - 4;
++ raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(18 * ui_ps),
++ hsbyteclk_ps) - 4;
+ val = clamp(raw_val, 0, 15);
+- dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
++ dev_dbg(dev, "THS_TRAILCNT: %u\n", val);
+ tc358768_write(priv, TC358768_THS_TRAILCNT, val);
+
+ val = BIT(0);
+@@ -790,16 +798,17 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ val |= BIT(i + 1);
+ tc358768_write(priv, TC358768_HSTXVREGEN, val);
+
+- if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+- tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);
++ tc358768_write(priv, TC358768_TXOPTIONCNTRL,
++ (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ? 0 : BIT(0));
+
+ /* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
+- val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
+- val = tc358768_ns_to_cnt(val, dsibclk_nsk) / 4 - 1;
+- val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
+- dsibclk_nsk) - 2;
++ val = tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps * 4);
++ val = tc358768_ns_to_cnt(val, hsbyteclk_ps) / 4 - 1;
++ dev_dbg(dev, "TXTAGOCNT: %u\n", val);
++ val2 = tc358768_ns_to_cnt(tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps),
++ hsbyteclk_ps) - 2;
++ dev_dbg(dev, "RXTASURECNT: %u\n", val2);
+ val = val << 16 | val2;
+- dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
+ tc358768_write(priv, TC358768_BTACNTRL1, val);
+
+ /* START[0] */
+@@ -810,43 +819,43 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ tc358768_write(priv, TC358768_DSI_EVENT, 0);
+
+ /* vact */
+- tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
++ tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
+
+ /* vsw */
+- tc358768_write(priv, TC358768_DSI_VSW,
+- mode->vsync_end - mode->vsync_start);
++ tc358768_write(priv, TC358768_DSI_VSW, vm.vsync_len);
++
+ /* vbp */
+- tc358768_write(priv, TC358768_DSI_VBPR,
+- mode->vtotal - mode->vsync_end);
++ tc358768_write(priv, TC358768_DSI_VBPR, vm.vback_porch);
+
+ /* hsw * byteclk * ndl / pclk */
+- val = (u32)div_u64((mode->hsync_end - mode->hsync_start) *
+- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+- mode->clock * 1000);
++ val = (u32)div_u64(vm.hsync_len *
++ (u64)hsbyteclk * priv->dsi_lanes,
++ vm.pixelclock);
+ tc358768_write(priv, TC358768_DSI_HSW, val);
+
+ /* hbp * byteclk * ndl / pclk */
+- val = (u32)div_u64((mode->htotal - mode->hsync_end) *
+- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+- mode->clock * 1000);
++ val = (u32)div_u64(vm.hback_porch *
++ (u64)hsbyteclk * priv->dsi_lanes,
++ vm.pixelclock);
+ tc358768_write(priv, TC358768_DSI_HBPR, val);
+ } else {
+ /* Set event mode */
+ tc358768_write(priv, TC358768_DSI_EVENT, 1);
+
+ /* vact */
+- tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
++ tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
+
+ /* vsw (+ vbp) */
+ tc358768_write(priv, TC358768_DSI_VSW,
+- mode->vtotal - mode->vsync_start);
++ vm.vsync_len + vm.vback_porch);
++
+ /* vbp (not used in event mode) */
+ tc358768_write(priv, TC358768_DSI_VBPR, 0);
+
+ /* (hsw + hbp) * byteclk * ndl / pclk */
+- val = (u32)div_u64((mode->htotal - mode->hsync_start) *
+- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+- mode->clock * 1000);
++ val = (u32)div_u64((vm.hsync_len + vm.hback_porch) *
++ (u64)hsbyteclk * priv->dsi_lanes,
++ vm.pixelclock);
+ tc358768_write(priv, TC358768_DSI_HSW, val);
+
+ /* hbp (not used in event mode) */
+@@ -857,11 +866,12 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ tc358768_write(priv, TC358768_DSI_HACT, hact);
+
+ /* VSYNC polarity */
+- if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
+- tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5));
++ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5),
++ (mode->flags & DRM_MODE_FLAG_PVSYNC) ? BIT(5) : 0);
++
+ /* HSYNC polarity */
+- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+- tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0));
++ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0),
++ (mode->flags & DRM_MODE_FLAG_PHSYNC) ? BIT(0) : 0);
+
+ /* Start DSI Tx */
+ tc358768_write(priv, TC358768_DSI_START, 0x1);
+@@ -891,7 +901,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+
+ ret = tc358768_clear_error(priv);
+ if (ret) {
+- dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret);
++ dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
+ tc358768_bridge_disable(bridge);
+ tc358768_bridge_post_disable(bridge);
+ }
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 4b71040ae5be5..b3e1b288fc0c2 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -3499,11 +3499,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
+ mode->vsync_end = mode->vsync_start + vsync_pulse_width;
+ mode->vtotal = mode->vdisplay + vblank;
+
+- /* Some EDIDs have bogus h/vtotal values */
+- if (mode->hsync_end > mode->htotal)
+- mode->htotal = mode->hsync_end + 1;
+- if (mode->vsync_end > mode->vtotal)
+- mode->vtotal = mode->vsync_end + 1;
++ /* Some EDIDs have bogus h/vsync_end values */
++ if (mode->hsync_end > mode->htotal) {
++ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] reducing hsync_end %d->%d\n",
++ connector->base.id, connector->name,
++ mode->hsync_end, mode->htotal);
++ mode->hsync_end = mode->htotal;
++ }
++ if (mode->vsync_end > mode->vtotal) {
++ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] reducing vsync_end %d->%d\n",
++ connector->base.id, connector->name,
++ mode->vsync_end, mode->vtotal);
++ mode->vsync_end = mode->vtotal;
++ }
+
+ drm_mode_do_interlace_quirk(mode, pt);
+
+diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
+index 150fe15550680..94375c6a54256 100644
+--- a/drivers/gpu/drm/drm_lease.c
++++ b/drivers/gpu/drm/drm_lease.c
+@@ -510,8 +510,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
+ /* Handle leased objects, if any */
+ idr_init(&leases);
+ if (object_count != 0) {
+- object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
+- array_size(object_count, sizeof(__u32)));
++ object_ids = memdup_array_user(u64_to_user_ptr(cl->object_ids),
++ object_count, sizeof(__u32));
+ if (IS_ERR(object_ids)) {
+ ret = PTR_ERR(object_ids);
+ idr_destroy(&leases);
+diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
+index f7003d1ec5ef1..01da6789d0440 100644
+--- a/drivers/gpu/drm/drm_syncobj.c
++++ b/drivers/gpu/drm/drm_syncobj.c
+@@ -1069,7 +1069,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ fence = drm_syncobj_fence_get(syncobjs[i]);
+ if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
+ dma_fence_put(fence);
+- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
++ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
++ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
+ continue;
+ } else {
+ timeout = -EINVAL;
+diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
+index f7f709df99b49..70d9adafa2333 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.h
++++ b/drivers/gpu/drm/gma500/psb_drv.h
+@@ -424,6 +424,7 @@ struct drm_psb_private {
+ uint32_t pipestat[PSB_NUM_PIPE];
+
+ spinlock_t irqmask_lock;
++ bool irq_enabled;
+
+ /* Power */
+ bool pm_initialized;
+diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
+index 343c51250207d..7bbb79b0497d8 100644
+--- a/drivers/gpu/drm/gma500/psb_irq.c
++++ b/drivers/gpu/drm/gma500/psb_irq.c
+@@ -327,6 +327,8 @@ int gma_irq_install(struct drm_device *dev)
+
+ gma_irq_postinstall(dev);
+
++ dev_priv->irq_enabled = true;
++
+ return 0;
+ }
+
+@@ -337,6 +339,9 @@ void gma_irq_uninstall(struct drm_device *dev)
+ unsigned long irqflags;
+ unsigned int i;
+
++ if (!dev_priv->irq_enabled)
++ return;
++
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ if (dev_priv->ops->hotplug_enable)
+diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
+index 2fb030b1ff1de..f99cf8037bd68 100644
+--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
+@@ -2688,6 +2688,18 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
+ for_each_pipe(dev_priv, pipe)
+ min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
+
++ /*
++ * Avoid glk_force_audio_cdclk() causing excessive screen
++ * blinking when multiple pipes are active by making sure
++ * CDCLK frequency is always high enough for audio. With a
++ * single active pipe we can always change CDCLK frequency
++ * by changing the cd2x divider (see glk_cdclk_table[]) and
++ * thus a full modeset won't be needed then.
++ */
++ if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes &&
++ !is_power_of_2(cdclk_state->active_pipes))
++ min_cdclk = max(2 * 96000, min_cdclk);
++
+ if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) {
+ drm_dbg_kms(&dev_priv->drm,
+ "required cdclk (%d kHz) exceeds max (%d kHz)\n",
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index e0e4cb5292846..119a4de7fe6f7 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -430,7 +430,7 @@ static int mtl_max_source_rate(struct intel_dp *intel_dp)
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+ if (intel_is_c10phy(i915, phy))
+- return intel_dp_is_edp(intel_dp) ? 675000 : 810000;
++ return 810000;
+
+ return 2000000;
+ }
+@@ -5517,8 +5517,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ * (eg. Acer Chromebook C710), so we'll check it only if multiple
+ * ports are attempting to use the same AUX CH, according to VBT.
+ */
+- if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) &&
+- !intel_digital_port_connected(encoder)) {
++ if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) {
+ /*
+ * If this fails, presume the DPCD answer came
+ * from some other port using the same AUX CH.
+@@ -5526,10 +5525,27 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ * FIXME maybe cleaner to check this before the
+ * DPCD read? Would need sort out the VDD handling...
+ */
+- drm_info(&dev_priv->drm,
+- "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
+- encoder->base.base.id, encoder->base.name);
+- goto out_vdd_off;
++ if (!intel_digital_port_connected(encoder)) {
++ drm_info(&dev_priv->drm,
++ "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
++ encoder->base.base.id, encoder->base.name);
++ goto out_vdd_off;
++ }
++
++ /*
++ * Unfortunately even the HPD based detection fails on
++ * e.g. Asus B360M-A (CFL+CNP), so as a last resort fall
++ * back to checking for a VGA branch device. Only do this
++ * on known affected platforms to minimize false positives.
++ */
++ if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
++ (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
++ DP_DWN_STRM_PORT_TYPE_ANALOG) {
++ drm_info(&dev_priv->drm,
++ "[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
++ encoder->base.base.id, encoder->base.name);
++ goto out_vdd_off;
++ }
+ }
+
+ mutex_lock(&dev_priv->drm.mode_config.mutex);
+diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
+index 3ebf41859043e..cdf2455440bea 100644
+--- a/drivers/gpu/drm/i915/display/intel_tc.c
++++ b/drivers/gpu/drm/i915/display/intel_tc.c
+@@ -58,7 +58,7 @@ struct intel_tc_port {
+ struct delayed_work link_reset_work;
+ int link_refcount;
+ bool legacy_port:1;
+- char port_name[8];
++ const char *port_name;
+ enum tc_port_mode mode;
+ enum tc_port_mode init_mode;
+ enum phy_fia phy_fia;
+@@ -1841,8 +1841,12 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
+ else
+ tc->phy_ops = &icl_tc_phy_ops;
+
+- snprintf(tc->port_name, sizeof(tc->port_name),
+- "%c/TC#%d", port_name(port), tc_port + 1);
++ tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
++ tc_port + 1);
++ if (!tc->port_name) {
++ kfree(tc);
++ return -ENOMEM;
++ }
+
+ mutex_init(&tc->lock);
+ /* TODO: Combine the two works */
+@@ -1863,6 +1867,7 @@ void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
+ {
+ intel_tc_port_suspend(dig_port);
+
++ kfree(dig_port->tc->port_name);
+ kfree(dig_port->tc);
+ dig_port->tc = NULL;
+ }
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+index 9a9ff84c90d7e..e38f06a6e56eb 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -844,6 +844,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
+ if (idx >= pc->num_user_engines)
+ return -EINVAL;
+
++ idx = array_index_nospec(idx, pc->num_user_engines);
+ pe = &pc->user_engines[idx];
+
+ /* Only render engine supports RPCS configuration. */
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
+index dcedff41a825f..d304e0a948f0d 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
+@@ -42,12 +42,15 @@ void intel_engine_add_user(struct intel_engine_cs *engine)
+ (struct llist_head *)&engine->i915->uabi_engines);
+ }
+
+-static const u8 uabi_classes[] = {
++#define I915_NO_UABI_CLASS ((u16)(-1))
++
++static const u16 uabi_classes[] = {
+ [RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
+ [COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
+ [VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
+ [VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
+ [COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
++ [OTHER_CLASS] = I915_NO_UABI_CLASS, /* Not exposed to users, no uabi class. */
+ };
+
+ static int engine_cmp(void *priv, const struct list_head *A,
+@@ -202,6 +205,7 @@ static void engine_rename(struct intel_engine_cs *engine, const char *name, u16
+
+ void intel_engines_driver_register(struct drm_i915_private *i915)
+ {
++ u16 name_instance, other_instance = 0;
+ struct legacy_ring ring = {};
+ struct list_head *it, *next;
+ struct rb_node **p, *prev;
+@@ -219,27 +223,28 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
+ if (intel_gt_has_unrecoverable_error(engine->gt))
+ continue; /* ignore incomplete engines */
+
+- /*
+- * We don't want to expose the GSC engine to the users, but we
+- * still rename it so it is easier to identify in the debug logs
+- */
+- if (engine->id == GSC0) {
+- engine_rename(engine, "gsc", 0);
+- continue;
+- }
+-
+ GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
+ engine->uabi_class = uabi_classes[engine->class];
++ if (engine->uabi_class == I915_NO_UABI_CLASS) {
++ name_instance = other_instance++;
++ } else {
++ GEM_BUG_ON(engine->uabi_class >=
++ ARRAY_SIZE(i915->engine_uabi_class_count));
++ name_instance =
++ i915->engine_uabi_class_count[engine->uabi_class]++;
++ }
++ engine->uabi_instance = name_instance;
+
+- GEM_BUG_ON(engine->uabi_class >=
+- ARRAY_SIZE(i915->engine_uabi_class_count));
+- engine->uabi_instance =
+- i915->engine_uabi_class_count[engine->uabi_class]++;
+-
+- /* Replace the internal name with the final user facing name */
++ /*
++ * Replace the internal name with the final user and log facing
++ * name.
++ */
+ engine_rename(engine,
+ intel_engine_class_repr(engine->class),
+- engine->uabi_instance);
++ name_instance);
++
++ if (engine->uabi_class == I915_NO_UABI_CLASS)
++ continue;
+
+ rb_link_node(&engine->uabi_node, prev, p);
+ rb_insert_color(&engine->uabi_node, &i915->uabi_engines);
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+index da21f2786b5d7..b20d8fe8aa95d 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+@@ -190,6 +190,21 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
+ spin_unlock_irq(&uncore->lock);
+ }
+
++static bool needs_wc_ggtt_mapping(struct drm_i915_private *i915)
++{
++ /*
++ * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
++ * will be dropped. For WC mappings in general we have 64 byte burst
++ * writes when the WC buffer is flushed, so we can't use it, but have to
++ * resort to an uncached mapping. The WC issue is easily caught by the
++ * readback check when writing GTT PTE entries.
++ */
++ if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
++ return true;
++
++ return false;
++}
++
+ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+ {
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+@@ -197,8 +212,12 @@ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+ /*
+ * Note that as an uncached mmio write, this will flush the
+ * WCB of the writes into the GGTT before it triggers the invalidate.
++ *
++ * Only perform this when GGTT is mapped as WC, see ggtt_probe_common().
+ */
+- intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
++ if (needs_wc_ggtt_mapping(ggtt->vm.i915))
++ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6,
++ GFX_FLSH_CNTL_EN);
+ }
+
+ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
+@@ -902,17 +921,11 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
+ GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
+ phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
+
+- /*
+- * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
+- * will be dropped. For WC mappings in general we have 64 byte burst
+- * writes when the WC buffer is flushed, so we can't use it, but have to
+- * resort to an uncached mapping. The WC issue is easily caught by the
+- * readback check when writing GTT PTE entries.
+- */
+- if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
+- ggtt->gsm = ioremap(phys_addr, size);
+- else
++ if (needs_wc_ggtt_mapping(i915))
+ ggtt->gsm = ioremap_wc(phys_addr, size);
++ else
++ ggtt->gsm = ioremap(phys_addr, size);
++
+ if (!ggtt->gsm) {
+ drm_err(&i915->drm, "Failed to map the ggtt page table\n");
+ return -ENOMEM;
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
+index 449f0b7fc8434..95631e8f39e7b 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt.c
+@@ -967,8 +967,6 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
+
+ err:
+ i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
+- intel_gt_release_all(i915);
+-
+ return ret;
+ }
+
+@@ -987,15 +985,6 @@ int intel_gt_tiles_init(struct drm_i915_private *i915)
+ return 0;
+ }
+
+-void intel_gt_release_all(struct drm_i915_private *i915)
+-{
+- struct intel_gt *gt;
+- unsigned int id;
+-
+- for_each_gt(gt, i915, id)
+- i915->gt[id] = NULL;
+-}
+-
+ void intel_gt_info_print(const struct intel_gt_info *info,
+ struct drm_printer *p)
+ {
+diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
+index 58bb1c55294c9..ccdc1afbf11b5 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
++++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
+@@ -584,19 +584,23 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6)
+
+ static void rc6_res_reg_init(struct intel_rc6 *rc6)
+ {
+- memset(rc6->res_reg, INVALID_MMIO_REG.reg, sizeof(rc6->res_reg));
++ i915_reg_t res_reg[INTEL_RC6_RES_MAX] = {
++ [0 ... INTEL_RC6_RES_MAX - 1] = INVALID_MMIO_REG,
++ };
+
+ switch (rc6_to_gt(rc6)->type) {
+ case GT_MEDIA:
+- rc6->res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6;
++ res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6;
+ break;
+ default:
+- rc6->res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED;
+- rc6->res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6;
+- rc6->res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p;
+- rc6->res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp;
++ res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED;
++ res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6;
++ res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p;
++ res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp;
+ break;
+ }
++
++ memcpy(rc6->res_reg, res_reg, sizeof(res_reg));
+ }
+
+ void intel_rc6_init(struct intel_rc6 *rc6)
+diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
+index ec4d26b3c17cc..8dc5f85b7747b 100644
+--- a/drivers/gpu/drm/i915/i915_driver.c
++++ b/drivers/gpu/drm/i915/i915_driver.c
+@@ -777,7 +777,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ ret = i915_driver_mmio_probe(i915);
+ if (ret < 0)
+- goto out_tiles_cleanup;
++ goto out_runtime_pm_put;
+
+ ret = i915_driver_hw_probe(i915);
+ if (ret < 0)
+@@ -837,8 +837,6 @@ out_cleanup_hw:
+ i915_ggtt_driver_late_release(i915);
+ out_cleanup_mmio:
+ i915_driver_mmio_release(i915);
+-out_tiles_cleanup:
+- intel_gt_release_all(i915);
+ out_runtime_pm_put:
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ i915_driver_late_release(i915);
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index 59e1e21df2710..109135fcfca28 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -4286,11 +4286,8 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
+ u32 known_open_flags;
+ int ret;
+
+- if (!perf->i915) {
+- drm_dbg(&perf->i915->drm,
+- "i915 perf interface not available for this system\n");
++ if (!perf->i915)
+ return -ENOTSUPP;
+- }
+
+ known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
+ I915_PERF_FLAG_FD_NONBLOCK |
+@@ -4666,11 +4663,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
+ struct i915_oa_reg *regs;
+ int err, id;
+
+- if (!perf->i915) {
+- drm_dbg(&perf->i915->drm,
+- "i915 perf interface not available for this system\n");
++ if (!perf->i915)
+ return -ENOTSUPP;
+- }
+
+ if (!perf->metrics_kobj) {
+ drm_dbg(&perf->i915->drm,
+@@ -4832,11 +4826,8 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
+ struct i915_oa_config *oa_config;
+ int ret;
+
+- if (!perf->i915) {
+- drm_dbg(&perf->i915->drm,
+- "i915 perf interface not available for this system\n");
++ if (!perf->i915)
+ return -ENOTSUPP;
+- }
+
+ if (i915_perf_stream_paranoid && !perfmon_capable()) {
+ drm_dbg(&perf->i915->drm,
+diff --git a/drivers/gpu/drm/loongson/lsdc_pixpll.c b/drivers/gpu/drm/loongson/lsdc_pixpll.c
+index 04c15b4697e21..2609a2256da4b 100644
+--- a/drivers/gpu/drm/loongson/lsdc_pixpll.c
++++ b/drivers/gpu/drm/loongson/lsdc_pixpll.c
+@@ -120,12 +120,14 @@ static int lsdc_pixel_pll_setup(struct lsdc_pixpll * const this)
+ struct lsdc_pixpll_parms *pparms;
+
+ this->mmio = ioremap(this->reg_base, this->reg_size);
+- if (IS_ERR_OR_NULL(this->mmio))
++ if (!this->mmio)
+ return -ENOMEM;
+
+ pparms = kzalloc(sizeof(*pparms), GFP_KERNEL);
+- if (IS_ERR_OR_NULL(pparms))
++ if (!pparms) {
++ iounmap(this->mmio);
+ return -ENOMEM;
++ }
+
+ pparms->ref_clock = LSDC_PLL_REF_CLK_KHZ;
+
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index 2cb47f6637568..0e285df6577ea 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -2034,7 +2034,6 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
+ bool enabled = mtk_dp->enabled;
+ struct edid *new_edid = NULL;
+ struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg;
+- struct cea_sad *sads;
+
+ if (!enabled) {
+ drm_atomic_bridge_chain_pre_enable(bridge, connector->state->state);
+@@ -2049,11 +2048,16 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
+ */
+ if (mtk_dp_parse_capabilities(mtk_dp)) {
+ drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
++ kfree(new_edid);
+ new_edid = NULL;
+ }
+
+ if (new_edid) {
++ struct cea_sad *sads;
++
+ audio_caps->sad_count = drm_edid_to_sad(new_edid, &sads);
++ kfree(sads);
++
+ audio_caps->detect_monitor = drm_detect_monitor_audio(new_edid);
+ }
+
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index b6fa4ad2f94dc..0a511d7688a3a 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -408,6 +408,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
+ unsigned int local_layer;
+
+ plane_state = to_mtk_plane_state(plane->state);
++
++ /* should not enable the layer before the CRTC is enabled */
++ plane_state->pending.enable = false;
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ if (comp)
+ mtk_ddp_comp_layer_config(comp, local_layer,
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 93552d76b6e77..2d6a979afe8f9 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -288,6 +288,7 @@ static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = {
+ static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = {
+ .main_path = mt8188_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt8188_mtk_ddp_main),
++ .mmsys_dev_num = 1,
+ };
+
+ static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+index 0e0a41b2f57f0..4f2e3feabc0f8 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+@@ -121,7 +121,14 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ int ret;
+
+ args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+- args->size = args->pitch * args->height;
++
++ /*
++ * Multiplying two variables of different types, for example
++ * args->size = args->pitch * args->height;
++ * may cause an unintentional integer overflow (flagged by Coverity).
++ */
++ args->size = args->pitch;
++ args->size *= args->height;
+
+ mtk_gem = mtk_drm_gem_create(dev, args->size, false);
+ if (IS_ERR(mtk_gem))
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+index db2f70ae060d6..ddc9355b06d51 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+@@ -141,6 +141,7 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+ dma_addr_t addr;
+ dma_addr_t hdr_addr = 0;
+ unsigned int hdr_pitch = 0;
++ int offset;
+
+ gem = fb->obj[0];
+ mtk_gem = to_mtk_gem_obj(gem);
+@@ -150,8 +151,15 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+ modifier = fb->modifier;
+
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+- addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
+- addr += (new_state->src.y1 >> 16) * pitch;
++ /*
++ * Doing arithmetic on a dma_addr_t with a multiplier of a different type,
++ * for example: addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
++ * may cause an unintentional integer overflow (flagged by Coverity).
++ */
++ offset = (new_state->src.x1 >> 16) * fb->format->cpp[0];
++ addr += offset;
++ offset = (new_state->src.y1 >> 16) * pitch;
++ addr += offset;
+ } else {
+ int width_in_blocks = ALIGN(fb->width, AFBC_DATA_BLOCK_WIDTH)
+ / AFBC_DATA_BLOCK_WIDTH;
+@@ -159,21 +167,34 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+ / AFBC_DATA_BLOCK_HEIGHT;
+ int x_offset_in_blocks = (new_state->src.x1 >> 16) / AFBC_DATA_BLOCK_WIDTH;
+ int y_offset_in_blocks = (new_state->src.y1 >> 16) / AFBC_DATA_BLOCK_HEIGHT;
+- int hdr_size;
++ int hdr_size, hdr_offset;
+
+ hdr_pitch = width_in_blocks * AFBC_HEADER_BLOCK_SIZE;
+ pitch = width_in_blocks * AFBC_DATA_BLOCK_WIDTH *
+ AFBC_DATA_BLOCK_HEIGHT * fb->format->cpp[0];
+
+ hdr_size = ALIGN(hdr_pitch * height_in_blocks, AFBC_HEADER_ALIGNMENT);
++ hdr_offset = hdr_pitch * y_offset_in_blocks +
++ AFBC_HEADER_BLOCK_SIZE * x_offset_in_blocks;
++
++ /*
++ * Doing arithmetic on a dma_addr_t with a multiplier of a different type,
++ * for example: addr += hdr_pitch * y_offset_in_blocks;
++ * may cause an unintentional integer overflow (flagged by Coverity).
++ */
++ hdr_addr = addr + hdr_offset;
+
+- hdr_addr = addr + hdr_pitch * y_offset_in_blocks +
+- AFBC_HEADER_BLOCK_SIZE * x_offset_in_blocks;
+ /* The data plane is offset by 1 additional block. */
+- addr = addr + hdr_size +
+- pitch * y_offset_in_blocks +
+- AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT *
+- fb->format->cpp[0] * (x_offset_in_blocks + 1);
++ offset = pitch * y_offset_in_blocks +
++ AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT *
++ fb->format->cpp[0] * (x_offset_in_blocks + 1);
++
++ /*
++ * Doing arithmetic on a dma_addr_t with a multiplier of a different type,
++ * for example: addr += pitch * y_offset_in_blocks;
++ * may cause an unintentional integer overflow (flagged by Coverity).
++ */
++ addr = addr + hdr_size + offset;
+ }
+
+ mtk_plane_state->pending.enable = true;
+@@ -206,9 +227,9 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_h = new_state->src_h;
+ plane->state->src_w = new_state->src_w;
+- swap(plane->state->fb, new_state->fb);
+
+ mtk_plane_update_new_state(new_state, new_plane_state);
++ swap(plane->state->fb, new_state->fb);
+ wmb(); /* Make sure the above parameters are set before update */
+ new_plane_state->pending.async_dirty = true;
+ mtk_drm_crtc_async_update(new_state->crtc, plane, state);
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index d8bfc2cce54dc..290f328c6a421 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -407,7 +407,7 @@ static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
+ if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ tmp_reg |= HSTX_CKLP_EN;
+
+- if (!(dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
++ if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ tmp_reg |= DIS_EOT;
+
+ writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
+@@ -484,7 +484,7 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
+ timing->da_hs_zero + timing->da_hs_exit + 3;
+
+ delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
+- delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 2 : 0;
++ delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 0 : 2;
+
+ horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
+ horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
+diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
+index abddf37f0ea11..2fb18b782b053 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
++++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
+@@ -10,6 +10,7 @@
+ #include <linux/pci.h>
+
+ #include <drm/drm_aperture.h>
++#include <drm/drm_atomic_helper.h>
+ #include <drm/drm_drv.h>
+ #include <drm/drm_fbdev_generic.h>
+ #include <drm/drm_file.h>
+@@ -278,6 +279,12 @@ static void mgag200_pci_remove(struct pci_dev *pdev)
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_dev_unregister(dev);
++ drm_atomic_helper_shutdown(dev);
++}
++
++static void mgag200_pci_shutdown(struct pci_dev *pdev)
++{
++ drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
+ }
+
+ static struct pci_driver mgag200_pci_driver = {
+@@ -285,6 +292,7 @@ static struct pci_driver mgag200_pci_driver = {
+ .id_table = mgag200_pciidlist,
+ .probe = mgag200_pci_probe,
+ .remove = mgag200_pci_remove,
++ .shutdown = mgag200_pci_shutdown,
+ };
+
+ drm_module_pci_driver_if_modeset(mgag200_pci_driver, mgag200_modeset);
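
This is one of several hunks in this patch wiring up drm_atomic_helper_shutdown(); the same shape recurs for pl111, stm, tilcdc, tve200 and vboxvideo below. The generic pattern, sketched for a hypothetical PCI DRM driver (example_* names are illustrative):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <linux/pci.h>

/* Sketch only: quiesce the display pipeline both on driver removal
 * and on system shutdown/kexec, after the device is unregistered.
 */
static void example_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *drm = pci_get_drvdata(pdev);

	drm_dev_unregister(drm);
	drm_atomic_helper_shutdown(drm);
}

static void example_pci_shutdown(struct pci_dev *pdev)
{
	drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
}

Calling the helper from .shutdown matters because .remove is not invoked on reboot or kexec, so planes and CRTCs would otherwise stay lit with stale scanout addresses.
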
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index d4e85e24002fb..522ca7fe67625 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -2237,7 +2237,7 @@ static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *i
+ DRM_DEV_ERROR(dev,
+ "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n",
+ speedbin);
+- return UINT_MAX;
++ supp_hw = BIT(0); /* Default */
+ }
+
+ ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
+index 575e7c56219ff..f2d9d34ed50f9 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
+@@ -331,7 +331,7 @@ static const struct adreno_info gpulist[] = {
+ ),
+ }, {
+ .machine = "qcom,sm6375",
+- .chip_ids = ADRENO_CHIP_IDS(0x06010900),
++ .chip_ids = ADRENO_CHIP_IDS(0x06010901),
+ .family = ADRENO_6XX_GEN1,
+ .revn = 619,
+ .fw = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+index 58f5e25679b15..ff9adb8000acd 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+@@ -419,6 +419,7 @@ static const struct dpu_perf_cfg sc8280xp_perf_data = {
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
++ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc8180x_qos_linear),
+ .entries = sc8180x_qos_linear
+diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
+index 42d52510ffd4a..86a8e06c7a60f 100644
+--- a/drivers/gpu/drm/msm/dp/dp_panel.c
++++ b/drivers/gpu/drm/msm/dp/dp_panel.c
+@@ -289,26 +289,9 @@ int dp_panel_get_modes(struct dp_panel *dp_panel,
+
+ static u8 dp_panel_get_edid_checksum(struct edid *edid)
+ {
+- struct edid *last_block;
+- u8 *raw_edid;
+- bool is_edid_corrupt = false;
++ edid += edid->extensions;
+
+- if (!edid) {
+- DRM_ERROR("invalid edid input\n");
+- return 0;
+- }
+-
+- raw_edid = (u8 *)edid;
+- raw_edid += (edid->extensions * EDID_LENGTH);
+- last_block = (struct edid *)raw_edid;
+-
+- /* block type extension */
+- drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
+- if (!is_edid_corrupt)
+- return last_block->checksum;
+-
+- DRM_ERROR("Invalid block, no checksum\n");
+- return 0;
++ return edid->checksum;
+ }
+
+ void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
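
The simplified helper relies on struct edid describing exactly one 128-byte EDID block, so edid + edid->extensions is block-sized pointer arithmetic that lands on the last block. A minimal sketch of the equivalence (the helper name is illustrative):

#include <drm/drm_edid.h>

/* edid + n advances n * sizeof(struct edid) == n * EDID_LENGTH bytes,
 * so this reads the checksum byte of the final extension block, the
 * same address the removed raw_edid arithmetic computed.
 */
static u8 last_block_checksum(const struct edid *edid)
{
	const struct edid *last = edid + edid->extensions;

	return last->checksum;
}

Note the rewritten helper also returns the stored checksum unconditionally rather than validating the block first.
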
+diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
+index baab79ab6e745..32f965bacdc30 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi.c
++++ b/drivers/gpu/drm/msm/dsi/dsi.c
+@@ -126,6 +126,7 @@ static void dsi_unbind(struct device *dev, struct device *master,
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+
++ msm_dsi_tx_buf_free(msm_dsi->host);
+ priv->dsi[msm_dsi->id] = NULL;
+ }
+
+diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
+index bd3763a5d7234..3b46617a59f20 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi.h
++++ b/drivers/gpu/drm/msm/dsi/dsi.h
+@@ -125,6 +125,7 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
+ void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
+ void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
+ void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
++void msm_dsi_tx_buf_free(struct mipi_dsi_host *mipi_host);
+ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 3d6fb708dc223..470866896b9b8 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -147,6 +147,7 @@ struct msm_dsi_host {
+
+ /* DSI 6G TX buffer*/
+ struct drm_gem_object *tx_gem_obj;
++ struct msm_gem_address_space *aspace;
+
+ /* DSI v2 TX buffer */
+ void *tx_buf;
+@@ -1111,8 +1112,10 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
+ uint64_t iova;
+ u8 *data;
+
++ msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace);
++
+ data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
+- priv->kms->aspace,
++ msm_host->aspace,
+ &msm_host->tx_gem_obj, &iova);
+
+ if (IS_ERR(data)) {
+@@ -1141,10 +1144,10 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
+ return 0;
+ }
+
+-static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
++void msm_dsi_tx_buf_free(struct mipi_dsi_host *host)
+ {
++ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct drm_device *dev = msm_host->dev;
+- struct msm_drm_private *priv;
+
+ /*
+ * This is possible if we're tearing down before we've had a chance to
+@@ -1155,11 +1158,11 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
+ if (!dev)
+ return;
+
+- priv = dev->dev_private;
+ if (msm_host->tx_gem_obj) {
+- msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+- drm_gem_object_put(msm_host->tx_gem_obj);
++ msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace);
++ msm_gem_address_space_put(msm_host->aspace);
+ msm_host->tx_gem_obj = NULL;
++ msm_host->aspace = NULL;
+ }
+
+ if (msm_host->tx_buf)
+@@ -1945,7 +1948,6 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ DBG("");
+- dsi_tx_buf_free(msm_host);
+ if (msm_host->workqueue) {
+ destroy_workqueue(msm_host->workqueue);
+ msm_host->workqueue = NULL;
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+index 3b1ed02f644d2..89a6344bc8653 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+@@ -918,7 +918,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
+ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ if (phy->cphy_mode) {
+ vreg_ctrl_0 = 0x45;
+- vreg_ctrl_1 = 0x45;
++ vreg_ctrl_1 = 0x41;
+ glbl_rescode_top_ctrl = 0x00;
+ glbl_rescode_bot_ctrl = 0x00;
+ } else {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index 0f3bd187ede67..280d1d9a559ba 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -318,8 +318,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
+ (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+ continue;
+
+- if (pi < 0)
+- pi = i;
++ /* pick the last one as it will be smallest. */
++ pi = i;
++
+ /* Stop once the buffer is larger than the current page size. */
+ if (*size >= 1ULL << vmm->page[i].shift)
+ break;
+diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
+index abb0788843c60..503ecea72c5ea 100644
+--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
++++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
+@@ -267,6 +267,8 @@ static int versatile_panel_get_modes(struct drm_panel *panel,
+ connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
+
+ mode = drm_mode_duplicate(connector->dev, &vpanel->panel_type->mode);
++ if (!mode)
++ return -ENOMEM;
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+index c9087f474cbc5..29e63cdfb8954 100644
+--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
++++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+@@ -2049,6 +2049,7 @@ static const struct panel_desc auo_b101uan08_3_desc = {
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = auo_b101uan08_3_init_cmd,
++ .lp11_before_reset = true,
+ };
+
+ static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
+@@ -2103,14 +2104,15 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = starry_qfh032011_53g_init_cmd,
++ .lp11_before_reset = true,
+ };
+
+ static const struct drm_display_mode starry_himax83102_j02_default_mode = {
+- .clock = 161600,
++ .clock = 162850,
+ .hdisplay = 1200,
+- .hsync_start = 1200 + 40,
+- .hsync_end = 1200 + 40 + 20,
+- .htotal = 1200 + 40 + 20 + 40,
++ .hsync_start = 1200 + 50,
++ .hsync_end = 1200 + 50 + 20,
++ .htotal = 1200 + 50 + 20 + 50,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 116,
+ .vsync_end = 1920 + 116 + 8,
+diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+index 9632b9e95b715..c4a804c5d6aac 100644
+--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
++++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+@@ -1266,9 +1266,9 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
+ return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n");
+
+ pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info);
+- if (!pinfo->dsi[1]) {
++ if (IS_ERR(pinfo->dsi[1])) {
+ dev_err(dev, "cannot get secondary DSI device\n");
+- return -ENODEV;
++ return PTR_ERR(pinfo->dsi[1]);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index dd7928d9570f7..6e46e55d29a9a 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2326,13 +2326,13 @@ static const struct panel_desc innolux_g070y2_t02 = {
+ static const struct display_timing innolux_g101ice_l01_timing = {
+ .pixelclock = { 60400000, 71100000, 74700000 },
+ .hactive = { 1280, 1280, 1280 },
+- .hfront_porch = { 41, 80, 100 },
+- .hback_porch = { 40, 79, 99 },
+- .hsync_len = { 1, 1, 1 },
++ .hfront_porch = { 30, 60, 70 },
++ .hback_porch = { 30, 60, 70 },
++ .hsync_len = { 22, 40, 60 },
+ .vactive = { 800, 800, 800 },
+- .vfront_porch = { 5, 11, 14 },
+- .vback_porch = { 4, 11, 14 },
+- .vsync_len = { 1, 1, 1 },
++ .vfront_porch = { 3, 8, 14 },
++ .vback_porch = { 3, 8, 14 },
++ .vsync_len = { 4, 7, 12 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+ };
+
+@@ -2349,6 +2349,7 @@ static const struct panel_desc innolux_g101ice_l01 = {
+ .disable = 200,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
++ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+index 6a39456395350..7bb723d445ade 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+@@ -506,29 +506,30 @@ static int st7703_prepare(struct drm_panel *panel)
+ return 0;
+
+ dev_dbg(ctx->dev, "Resetting the panel\n");
+- ret = regulator_enable(ctx->vcc);
++ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
++
++ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+- dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
++ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+ return ret;
+ }
+- ret = regulator_enable(ctx->iovcc);
++
++ ret = regulator_enable(ctx->vcc);
+ if (ret < 0) {
+- dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+- goto disable_vcc;
++ dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
++ regulator_disable(ctx->iovcc);
++ return ret;
+ }
+
+- gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+- usleep_range(20, 40);
++ /* Give power supplies time to stabilize before deasserting reset. */
++ usleep_range(10000, 20000);
++
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+- msleep(20);
++ usleep_range(15000, 20000);
+
+ ctx->prepared = true;
+
+ return 0;
+-
+-disable_vcc:
+- regulator_disable(ctx->vcc);
+- return ret;
+ }
+
+ static const u32 mantix_bus_formats[] = {
+diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+index 845304435e235..f6a212e542cb9 100644
+--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
++++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+@@ -379,6 +379,8 @@ static int tpg110_get_modes(struct drm_panel *panel,
+ connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
+
+ mode = drm_mode_duplicate(connector->dev, &tpg->panel_mode->mode);
++ if (!mode)
++ return -ENOMEM;
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
+index ba3b5b5f0cdfe..02e6b74d50166 100644
+--- a/drivers/gpu/drm/pl111/pl111_drv.c
++++ b/drivers/gpu/drm/pl111/pl111_drv.c
+@@ -323,12 +323,18 @@ static void pl111_amba_remove(struct amba_device *amba_dev)
+ struct pl111_drm_dev_private *priv = drm->dev_private;
+
+ drm_dev_unregister(drm);
++ drm_atomic_helper_shutdown(drm);
+ if (priv->panel)
+ drm_panel_bridge_remove(priv->bridge);
+ drm_dev_put(drm);
+ of_reserved_mem_device_release(dev);
+ }
+
++static void pl111_amba_shutdown(struct amba_device *amba_dev)
++{
++ drm_atomic_helper_shutdown(amba_get_drvdata(amba_dev));
++}
++
+ /*
+ * This early variant lacks the 565 and 444 pixel formats.
+ */
+@@ -431,6 +437,7 @@ static struct amba_driver pl111_amba_driver __maybe_unused = {
+ },
+ .probe = pl111_amba_probe,
+ .remove = pl111_amba_remove,
++ .shutdown = pl111_amba_shutdown,
+ .id_table = pl111_id_table,
+ };
+
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 6492a70e3c396..404b0483bb7cb 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -1229,6 +1229,9 @@ int qxl_destroy_monitors_object(struct qxl_device *qdev)
+ if (!qdev->monitors_config_bo)
+ return 0;
+
++ kfree(qdev->dumb_heads);
++ qdev->dumb_heads = NULL;
++
+ qdev->monitors_config = NULL;
+ qdev->ram_header->monitors_config = 0;
+
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 4f06356d9ce2e..f0ae087be914e 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4821,14 +4821,15 @@ restart_ih:
+ break;
+ case 44: /* hdmi */
+ afmt_idx = src_data;
+- if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
+- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+-
+ if (afmt_idx > 5) {
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ src_id, src_data);
+ break;
+ }
++
++ if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
+ afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1);
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 8afb03bbce298..3d3d2109dfebc 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -2215,10 +2215,6 @@ int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+-int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index d2f02c3dfce29..b84b58926106a 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1119,6 +1119,8 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
+ else {
+ /* only 800x600 is supported right now on pre-avivo chips */
+ tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
++ if (!tv_mode)
++ return 0;
+ tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, tv_mode);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
+index fa531493b1113..7bf08164140ef 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.c
++++ b/drivers/gpu/drm/radeon/radeon_drv.c
+@@ -555,8 +555,6 @@ static const struct drm_ioctl_desc radeon_ioctls_kms[] = {
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+- DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
+- DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
+index 358d19242f4ba..3fec3acdaf284 100644
+--- a/drivers/gpu/drm/radeon/radeon_gem.c
++++ b/drivers/gpu/drm/radeon/radeon_gem.c
+@@ -311,22 +311,6 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+ return 0;
+ }
+
+-int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp)
+-{
+- /* TODO: implement */
+- DRM_ERROR("unimplemented %s\n", __func__);
+- return -EOPNOTSUPP;
+-}
+-
+-int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp)
+-{
+- /* TODO: implement */
+- DRM_ERROR("unimplemented %s\n", __func__);
+- return -EOPNOTSUPP;
+-}
+-
+ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+ {
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index a29fbafce3936..3793863c210eb 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -1177,6 +1177,7 @@ static int cdn_dp_probe(struct platform_device *pdev)
+ struct cdn_dp_device *dp;
+ struct extcon_dev *extcon;
+ struct phy *phy;
++ int ret;
+ int i;
+
+ dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
+@@ -1217,9 +1218,19 @@ static int cdn_dp_probe(struct platform_device *pdev)
+ mutex_init(&dp->lock);
+ dev_set_drvdata(dev, dp);
+
+- cdn_dp_audio_codec_init(dp, dev);
++ ret = cdn_dp_audio_codec_init(dp, dev);
++ if (ret)
++ return ret;
++
++ ret = component_add(dev, &cdn_dp_component_ops);
++ if (ret)
++ goto err_audio_deinit;
+
+- return component_add(dev, &cdn_dp_component_ops);
++ return 0;
++
++err_audio_deinit:
++ platform_device_unregister(dp->audio_pdev);
++ return ret;
+ }
+
+ static void cdn_dp_remove(struct platform_device *pdev)
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+index b8f8b45ebf594..93ed841f5dcea 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+@@ -40,7 +40,7 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
+
+ ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
+ prot);
+- if (ret < rk_obj->base.size) {
++ if (ret < (ssize_t)rk_obj->base.size) {
+ DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
+ ret, rk_obj->base.size);
+ ret = -ENOMEM;
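
The added (ssize_t) cast fixes a signed/unsigned comparison: iommu_map_sgtable() can return a negative error, but comparing it against an unsigned size_t promotes the error to a huge unsigned value, so the error branch is never taken. Reduced to a runnable example:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	ssize_t ret = -12;	/* e.g. a -ENOMEM-style error */
	size_t size = 4096;

	if (ret < size)		/* ret converts to ~SIZE_MAX: false */
		puts("caught (never reached)");

	if (ret < (ssize_t)size)	/* signed compare, as the patch does */
		puts("caught with cast");
	return 0;
}

Only the second branch fires; -Wsign-compare flags the first one at build time.
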
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index 14320bc73e5bf..4b338cb89d32d 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -247,14 +247,22 @@ static inline void vop_cfg_done(struct vop *vop)
+ VOP_REG_SET(vop, common, cfg_done, 1);
+ }
+
+-static bool has_rb_swapped(uint32_t format)
++static bool has_rb_swapped(uint32_t version, uint32_t format)
+ {
+ switch (format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+- case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_BGR565:
+ return true;
++	/*
++	 * The full framework (IP version 3.x) only needs rb swapped for RGB888,
++	 * and the little framework (IP version 2.x) only needs it for BGR888;
++	 * check for 3.x so an unknown vop version also rb swaps only BGR888.
++	 */
++ case DRM_FORMAT_RGB888:
++ return VOP_MAJOR(version) == 3;
++ case DRM_FORMAT_BGR888:
++ return VOP_MAJOR(version) != 3;
+ default:
+ return false;
+ }
+@@ -1013,7 +1021,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
+ VOP_WIN_SET(vop, win, dsp_info, dsp_info);
+ VOP_WIN_SET(vop, win, dsp_st, dsp_st);
+
+- rb_swap = has_rb_swapped(fb->format->format);
++ rb_swap = has_rb_swapped(vop->data->version, fb->format->format);
+ VOP_WIN_SET(vop, win, rb_swap, rb_swap);
+
+ /*
+@@ -1614,7 +1622,8 @@ static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+- rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
++ rockchip_state = kmemdup(to_rockchip_crtc_state(crtc->state),
++ sizeof(*rockchip_state), GFP_KERNEL);
+ if (!rockchip_state)
+ return NULL;
+
+@@ -1639,7 +1648,10 @@ static void vop_crtc_reset(struct drm_crtc *crtc)
+ if (crtc->state)
+ vop_crtc_destroy_state(crtc, crtc->state);
+
+- __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
++ if (crtc_state)
++ __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
++ else
++ __drm_atomic_helper_crtc_reset(crtc, NULL);
+ }
+
+ #ifdef CONFIG_DRM_ANALOGIX_DP
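
This hunk and the vop2 one below converge on the common pattern for subclassed CRTC state: duplicate by kmemdup() of the current state so driver-private fields survive, and reset by destroying the old state before installing a zeroed one. A condensed sketch, with driver_crtc_state and the destroy callback as stand-in names:

#include <drm/drm_atomic_helper.h>
#include <linux/slab.h>

struct driver_crtc_state {
	struct drm_crtc_state base;
	/* driver-private fields ... */
};

/* Stand-in for the driver's .atomic_destroy_state callback. */
static void driver_crtc_destroy_state(struct drm_crtc *crtc,
				      struct drm_crtc_state *state);

static void driver_crtc_reset(struct drm_crtc *crtc)
{
	struct driver_crtc_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (crtc->state)
		driver_crtc_destroy_state(crtc, crtc->state);

	/* The helper tolerates NULL and simply installs no state. */
	__drm_atomic_helper_crtc_reset(crtc, s ? &s->base : NULL);
}

The kmemdup() in duplicate_state is the substantive fix here: the previous kzalloc() silently dropped the private fields on every atomic commit.
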
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 583df4d22f7e9..c306806aa3dea 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -2079,30 +2079,15 @@ static const struct drm_crtc_helper_funcs vop2_crtc_helper_funcs = {
+ .atomic_disable = vop2_crtc_atomic_disable,
+ };
+
+-static void vop2_crtc_reset(struct drm_crtc *crtc)
+-{
+- struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
+-
+- if (crtc->state) {
+- __drm_atomic_helper_crtc_destroy_state(crtc->state);
+- kfree(vcstate);
+- }
+-
+- vcstate = kzalloc(sizeof(*vcstate), GFP_KERNEL);
+- if (!vcstate)
+- return;
+-
+- crtc->state = &vcstate->base;
+- crtc->state->crtc = crtc;
+-}
+-
+ static struct drm_crtc_state *vop2_crtc_duplicate_state(struct drm_crtc *crtc)
+ {
+- struct rockchip_crtc_state *vcstate, *old_vcstate;
++ struct rockchip_crtc_state *vcstate;
+
+- old_vcstate = to_rockchip_crtc_state(crtc->state);
++ if (WARN_ON(!crtc->state))
++ return NULL;
+
+- vcstate = kmemdup(old_vcstate, sizeof(*old_vcstate), GFP_KERNEL);
++ vcstate = kmemdup(to_rockchip_crtc_state(crtc->state),
++ sizeof(*vcstate), GFP_KERNEL);
+ if (!vcstate)
+ return NULL;
+
+@@ -2120,6 +2105,20 @@ static void vop2_crtc_destroy_state(struct drm_crtc *crtc,
+ kfree(vcstate);
+ }
+
++static void vop2_crtc_reset(struct drm_crtc *crtc)
++{
++ struct rockchip_crtc_state *vcstate =
++ kzalloc(sizeof(*vcstate), GFP_KERNEL);
++
++ if (crtc->state)
++ vop2_crtc_destroy_state(crtc, crtc->state);
++
++ if (vcstate)
++ __drm_atomic_helper_crtc_reset(crtc, &vcstate->base);
++ else
++ __drm_atomic_helper_crtc_reset(crtc, NULL);
++}
++
+ static const struct drm_crtc_funcs vop2_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
+index 5a80b228d18ca..78272b1f9d5b1 100644
+--- a/drivers/gpu/drm/solomon/ssd130x.c
++++ b/drivers/gpu/drm/solomon/ssd130x.c
+@@ -553,14 +553,45 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x,
+ struct ssd130x_plane_state *ssd130x_state)
+ {
+- struct drm_rect fullscreen = {
+- .x1 = 0,
+- .x2 = ssd130x->width,
+- .y1 = 0,
+- .y2 = ssd130x->height,
+- };
+-
+- ssd130x_update_rect(ssd130x, ssd130x_state, &fullscreen);
++ unsigned int page_height = ssd130x->device_info->page_height;
++ unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height);
++ u8 *data_array = ssd130x_state->data_array;
++ unsigned int width = ssd130x->width;
++ int ret, i;
++
++ if (!ssd130x->page_address_mode) {
++ memset(data_array, 0, width * pages);
++
++ /* Set address range for horizontal addressing mode */
++ ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset, width);
++ if (ret < 0)
++ return;
++
++ ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset, pages);
++ if (ret < 0)
++ return;
++
++ /* Write out update in one go if we aren't using page addressing mode */
++ ssd130x_write_data(ssd130x, data_array, width * pages);
++ } else {
++ /*
++ * In page addressing mode, the start address needs to be reset,
++ * and each page then needs to be written out separately.
++ */
++ memset(data_array, 0, width);
++
++ for (i = 0; i < pages; i++) {
++ ret = ssd130x_set_page_pos(ssd130x,
++ ssd130x->page_offset + i,
++ ssd130x->col_offset);
++ if (ret < 0)
++ return;
++
++ ret = ssd130x_write_data(ssd130x, data_array, width);
++ if (ret < 0)
++ return;
++ }
++ }
+ }
+
+ static int ssd130x_fb_blit_rect(struct drm_plane_state *state,
+diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
+index c68c831136c9b..e8523abef27a5 100644
+--- a/drivers/gpu/drm/stm/drv.c
++++ b/drivers/gpu/drm/stm/drv.c
+@@ -114,6 +114,7 @@ static void drv_unload(struct drm_device *ddev)
+ DRM_DEBUG("%s\n", __func__);
+
+ drm_kms_helper_poll_fini(ddev);
++ drm_atomic_helper_shutdown(ddev);
+ ltdc_unload(ddev);
+ }
+
+@@ -225,6 +226,11 @@ static void stm_drm_platform_remove(struct platform_device *pdev)
+ drm_dev_put(ddev);
+ }
+
++static void stm_drm_platform_shutdown(struct platform_device *pdev)
++{
++ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
++}
++
+ static const struct of_device_id drv_dt_ids[] = {
+ { .compatible = "st,stm32-ltdc"},
+ { /* end node */ },
+@@ -234,6 +240,7 @@ MODULE_DEVICE_TABLE(of, drv_dt_ids);
+ static struct platform_driver stm_drm_platform_driver = {
+ .probe = stm_drm_platform_probe,
+ .remove_new = stm_drm_platform_remove,
++ .shutdown = stm_drm_platform_shutdown,
+ .driver = {
+ .name = "stm32-display",
+ .of_match_table = drv_dt_ids,
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+index fe56beea3e93f..8ebd7134ee21b 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+@@ -175,6 +175,7 @@ static void tilcdc_fini(struct drm_device *dev)
+ drm_dev_unregister(dev);
+
+ drm_kms_helper_poll_fini(dev);
++ drm_atomic_helper_shutdown(dev);
+ tilcdc_irq_uninstall(dev);
+ drm_mode_config_cleanup(dev);
+
+@@ -389,6 +390,7 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
+
+ init_failed:
+ tilcdc_fini(ddev);
++ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+ }
+@@ -537,7 +539,8 @@ static void tilcdc_unbind(struct device *dev)
+ if (!ddev->dev_private)
+ return;
+
+- tilcdc_fini(dev_get_drvdata(dev));
++ tilcdc_fini(ddev);
++ dev_set_drvdata(dev, NULL);
+ }
+
+ static const struct component_master_ops tilcdc_comp_ops = {
+@@ -582,6 +585,11 @@ static int tilcdc_pdev_remove(struct platform_device *pdev)
+ return 0;
+ }
+
++static void tilcdc_pdev_shutdown(struct platform_device *pdev)
++{
++ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
++}
++
+ static const struct of_device_id tilcdc_of_match[] = {
+ { .compatible = "ti,am33xx-tilcdc", },
+ { .compatible = "ti,da850-tilcdc", },
+@@ -592,6 +600,7 @@ MODULE_DEVICE_TABLE(of, tilcdc_of_match);
+ static struct platform_driver tilcdc_platform_driver = {
+ .probe = tilcdc_pdev_probe,
+ .remove = tilcdc_pdev_remove,
++ .shutdown = tilcdc_pdev_shutdown,
+ .driver = {
+ .name = "tilcdc",
+ .pm = pm_sleep_ptr(&tilcdc_pm_ops),
+diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
+index 0bb56d0635366..acce210e25547 100644
+--- a/drivers/gpu/drm/tve200/tve200_drv.c
++++ b/drivers/gpu/drm/tve200/tve200_drv.c
+@@ -242,6 +242,7 @@ static void tve200_remove(struct platform_device *pdev)
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+
+ drm_dev_unregister(drm);
++ drm_atomic_helper_shutdown(drm);
+ if (priv->panel)
+ drm_panel_bridge_remove(priv->bridge);
+ drm_mode_config_cleanup(drm);
+@@ -249,6 +250,11 @@ static void tve200_remove(struct platform_device *pdev)
+ drm_dev_put(drm);
+ }
+
++static void tve200_shutdown(struct platform_device *pdev)
++{
++ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
++}
++
+ static const struct of_device_id tve200_of_match[] = {
+ {
+ .compatible = "faraday,tve200",
+@@ -263,6 +269,7 @@ static struct platform_driver tve200_driver = {
+ },
+ .probe = tve200_probe,
+ .remove_new = tve200_remove,
++ .shutdown = tve200_shutdown,
+ };
+ drm_module_platform_driver(tve200_driver);
+
+diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
+index 4fee15c97c341..047b958123341 100644
+--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
++++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
+@@ -12,6 +12,7 @@
+ #include <linux/vt_kern.h>
+
+ #include <drm/drm_aperture.h>
++#include <drm/drm_atomic_helper.h>
+ #include <drm/drm_drv.h>
+ #include <drm/drm_fbdev_generic.h>
+ #include <drm/drm_file.h>
+@@ -97,11 +98,19 @@ static void vbox_pci_remove(struct pci_dev *pdev)
+ struct vbox_private *vbox = pci_get_drvdata(pdev);
+
+ drm_dev_unregister(&vbox->ddev);
++ drm_atomic_helper_shutdown(&vbox->ddev);
+ vbox_irq_fini(vbox);
+ vbox_mode_fini(vbox);
+ vbox_hw_fini(vbox);
+ }
+
++static void vbox_pci_shutdown(struct pci_dev *pdev)
++{
++ struct vbox_private *vbox = pci_get_drvdata(pdev);
++
++ drm_atomic_helper_shutdown(&vbox->ddev);
++}
++
+ static int vbox_pm_suspend(struct device *dev)
+ {
+ struct vbox_private *vbox = dev_get_drvdata(dev);
+@@ -165,6 +174,7 @@ static struct pci_driver vbox_pci_driver = {
+ .id_table = pciidlist,
+ .probe = vbox_pci_probe,
+ .remove = vbox_pci_remove,
++ .shutdown = vbox_pci_shutdown,
+ .driver.pm = pm_sleep_ptr(&vbox_pm_ops),
+ };
+
+diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
+index 5d12d7beef0eb..ade3309ae042f 100644
+--- a/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
++++ b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
+@@ -26,7 +26,7 @@ struct vc4_dummy_crtc *vc4_mock_pv(struct kunit *test,
+ struct vc4_crtc *vc4_crtc;
+ int ret;
+
+- dummy_crtc = kunit_kzalloc(test, sizeof(*dummy_crtc), GFP_KERNEL);
++ dummy_crtc = drmm_kzalloc(drm, sizeof(*dummy_crtc), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, dummy_crtc);
+
+ vc4_crtc = &dummy_crtc->crtc;
+diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
+index 6e11fcc9ef45e..e70d7c3076acf 100644
+--- a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
++++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
+@@ -32,7 +32,7 @@ struct vc4_dummy_output *vc4_dummy_output(struct kunit *test,
+ struct drm_encoder *enc;
+ int ret;
+
+- dummy_output = kunit_kzalloc(test, sizeof(*dummy_output), GFP_KERNEL);
++ dummy_output = drmm_kzalloc(drm, sizeof(*dummy_output), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_output);
+ dummy_output->encoder.type = vc4_encoder_type;
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 3829be282ff00..17463aeeef28f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -774,9 +774,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ sizeof(metadata->mip_levels));
+ metadata->num_sizes = num_sizes;
+ metadata->sizes =
+- memdup_user((struct drm_vmw_size __user *)(unsigned long)
++ memdup_array_user((struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr,
+- sizeof(*metadata->sizes) * metadata->num_sizes);
++ metadata->num_sizes, sizeof(*metadata->sizes));
+ if (IS_ERR(metadata->sizes)) {
+ ret = PTR_ERR(metadata->sizes);
+ goto out_no_sizes;
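
memdup_array_user() exists for exactly this call shape: it multiplies the element count and size with overflow checking before allocating, where the open-coded sizeof(*p) * n product can wrap and under-allocate. The idiom in isolation (struct item and the caller are illustrative):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

struct item { u32 a, b; };

static int consume_items(const void __user *uptr, size_t n)
{
	struct item *items = memdup_array_user(uptr, n, sizeof(*items));

	/* Fails with ERR_PTR(-EOVERFLOW) if n * sizeof(*items) wraps,
	 * instead of allocating a short buffer and copying past it.
	 */
	if (IS_ERR(items))
		return PTR_ERR(items);

	/* ... use items[0..n-1] ... */
	kfree(items);
	return 0;
}
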
+diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c
+index a3f336edd991b..955c971c528d4 100644
+--- a/drivers/gpu/host1x/context.c
++++ b/drivers/gpu/host1x/context.c
+@@ -34,10 +34,10 @@ int host1x_memory_context_list_init(struct host1x *host1x)
+ if (err < 0)
+ return 0;
+
+- cdl->devs = kcalloc(err, sizeof(*cdl->devs), GFP_KERNEL);
++ cdl->len = err / 4;
++ cdl->devs = kcalloc(cdl->len, sizeof(*cdl->devs), GFP_KERNEL);
+ if (!cdl->devs)
+ return -ENOMEM;
+- cdl->len = err / 4;
+
+ for (i = 0; i < cdl->len; i++) {
+ ctx = &cdl->devs[i];
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 8992e3c1e7698..e0181218ad857 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -702,15 +702,22 @@ static void hid_close_report(struct hid_device *device)
+ * Free a device structure, all reports, and all fields.
+ */
+
+-static void hid_device_release(struct device *dev)
++void hiddev_free(struct kref *ref)
+ {
+- struct hid_device *hid = to_hid_device(dev);
++ struct hid_device *hid = container_of(ref, struct hid_device, ref);
+
+ hid_close_report(hid);
+ kfree(hid->dev_rdesc);
+ kfree(hid);
+ }
+
++static void hid_device_release(struct device *dev)
++{
++ struct hid_device *hid = to_hid_device(dev);
++
++ kref_put(&hid->ref, hiddev_free);
++}
++
+ /*
+ * Fetch a report description item from the data stream. We support long
+ * items, though they are not used yet.
+@@ -2846,6 +2853,7 @@ struct hid_device *hid_allocate_device(void)
+ spin_lock_init(&hdev->debug_list_lock);
+ sema_init(&hdev->driver_input_lock, 1);
+ mutex_init(&hdev->ll_open_lock);
++ kref_init(&hdev->ref);
+
+ hid_bpf_device_init(hdev);
+
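
With the kref in place, an open debugfs events file (see the hid-debug hunk below) can hold the hid_device alive past driver removal: the device release path drops one reference and each reader holds its own. The canonical shape of that lifetime pattern, with example_* as illustrative names:

#include <linux/kref.h>
#include <linux/slab.h>

struct example_dev {
	struct kref ref;
	/* ... payload ... */
};

static void example_free(struct kref *ref)
{
	struct example_dev *d = container_of(ref, struct example_dev, ref);

	kfree(d);
}

/* Each long-lived user (an open file, a sysfs node) takes a ref... */
static void example_open(struct example_dev *d)
{
	kref_get(&d->ref);
}

/* ...and the object is freed only when the last holder drops it. */
static void example_close(struct example_dev *d)
{
	kref_put(&d->ref, example_free);
}
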
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 54c33a24f8442..20a0d1315d90f 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -1151,8 +1151,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+- INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+-
+ if (!dev->gpio_poll) {
+ dev->gpio_poll = true;
+ schedule_delayed_work(&dev->gpio_poll_worker, 0);
+@@ -1168,7 +1166,11 @@ static void cp2112_gpio_irq_shutdown(struct irq_data *d)
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ cp2112_gpio_irq_mask(d);
+- cancel_delayed_work_sync(&dev->gpio_poll_worker);
++
++ if (!dev->irq_mask) {
++ dev->gpio_poll = false;
++ cancel_delayed_work_sync(&dev->gpio_poll_worker);
++ }
+ }
+
+ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
+@@ -1307,6 +1309,8 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ girq->handler = handle_simple_irq;
+ girq->threaded = true;
+
++ INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
++
+ ret = gpiochip_add_data(&dev->gc, dev);
+ if (ret < 0) {
+ hid_err(hdev, "error registering gpio chip\n");
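
The removed line re-ran INIT_DELAYED_WORK() from the irq_startup hook, re-initializing a work item that may already be queued; work items must be initialized exactly once before they are scheduled or cancelled. The safe shape, sketched with illustrative names:

#include <linux/workqueue.h>

struct example_dev {
	struct delayed_work poll_work;
	bool polling;
};

/* Initialize once, at probe/registration time. */
static void example_init(struct example_dev *dev,
			 void (*fn)(struct work_struct *))
{
	INIT_DELAYED_WORK(&dev->poll_work, fn);
}

/* Start/stop freely afterwards; never re-INIT a live work item. */
static void example_start_polling(struct example_dev *dev)
{
	if (!dev->polling) {
		dev->polling = true;
		schedule_delayed_work(&dev->poll_work, 0);
	}
}
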
+diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
+index e7ef1ea107c9e..7dd83ec74f8a9 100644
+--- a/drivers/hid/hid-debug.c
++++ b/drivers/hid/hid-debug.c
+@@ -1135,6 +1135,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
+ goto out;
+ }
+ list->hdev = (struct hid_device *) inode->i_private;
++ kref_get(&list->hdev->ref);
+ file->private_data = list;
+ mutex_init(&list->read_mutex);
+
+@@ -1227,6 +1228,8 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
+ list_del(&list->node);
+ spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
+ kfifo_free(&list->hid_debug_fifo);
++
++ kref_put(&list->hdev->ref, hiddev_free);
+ kfree(list);
+
+ return 0;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index e4d2dfd5d2536..d10ccfa17e168 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -366,6 +366,7 @@
+
+ #define USB_VENDOR_ID_DELL 0x413c
+ #define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
++#define USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W 0x4503
+
+ #define USB_VENDOR_ID_DELORME 0x1163
+ #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
+@@ -868,7 +869,6 @@
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f
+-#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2 0xc547
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
+ #define USB_DEVICE_ID_SPACETRAVELLER 0xc623
+ #define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
+diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
+index 44763c0da4441..7c1b33be9d134 100644
+--- a/drivers/hid/hid-lenovo.c
++++ b/drivers/hid/hid-lenovo.c
+@@ -51,7 +51,12 @@ struct lenovo_drvdata {
+ int select_right;
+ int sensitivity;
+ int press_speed;
+- u8 middlebutton_state; /* 0:Up, 1:Down (undecided), 2:Scrolling */
++ /* 0: Up
++ * 1: Down (undecided)
++ * 2: Scrolling
++ * 3: Patched firmware, disable workaround
++ */
++ u8 middlebutton_state;
+ bool fn_lock;
+ };
+
+@@ -521,6 +526,19 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev)
+ int ret;
+ struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+
++ /*
++ * Tell the keyboard a driver understands it, and turn F7, F9, F11 into
++ * regular keys
++ */
++ ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
++ if (ret)
++ hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
++
++ /* Switch middle button to native mode */
++ ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
++ if (ret)
++ hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
++
+ ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock);
+ if (ret)
+ hid_err(hdev, "Fn-lock setting failed: %d\n", ret);
+@@ -668,31 +686,48 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
+ {
+ struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+
+- /* "wheel" scroll events */
+- if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
+- usage->code == REL_HWHEEL)) {
+- /* Scroll events disable middle-click event */
+- cptkbd_data->middlebutton_state = 2;
+- return 0;
+- }
++ if (cptkbd_data->middlebutton_state != 3) {
++		/* REL_X and REL_Y events while the middle button is pressed
++		 * are only possible on patched, bug-free firmware, so set
++		 * middlebutton_state to 3 to never apply the workaround
++		 * anymore.
++		 */
++ if (cptkbd_data->middlebutton_state == 1 &&
++ usage->type == EV_REL &&
++ (usage->code == REL_X || usage->code == REL_Y)) {
++ cptkbd_data->middlebutton_state = 3;
++ /* send middle button press which was hold before */
++ input_event(field->hidinput->input,
++ EV_KEY, BTN_MIDDLE, 1);
++ input_sync(field->hidinput->input);
++ }
++
++ /* "wheel" scroll events */
++ if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
++ usage->code == REL_HWHEEL)) {
++ /* Scroll events disable middle-click event */
++ cptkbd_data->middlebutton_state = 2;
++ return 0;
++ }
+
+- /* Middle click events */
+- if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
+- if (value == 1) {
+- cptkbd_data->middlebutton_state = 1;
+- } else if (value == 0) {
+- if (cptkbd_data->middlebutton_state == 1) {
+- /* No scrolling inbetween, send middle-click */
+- input_event(field->hidinput->input,
+- EV_KEY, BTN_MIDDLE, 1);
+- input_sync(field->hidinput->input);
+- input_event(field->hidinput->input,
+- EV_KEY, BTN_MIDDLE, 0);
+- input_sync(field->hidinput->input);
++ /* Middle click events */
++ if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
++ if (value == 1) {
++ cptkbd_data->middlebutton_state = 1;
++ } else if (value == 0) {
++ if (cptkbd_data->middlebutton_state == 1) {
++ /* No scrolling inbetween, send middle-click */
++ input_event(field->hidinput->input,
++ EV_KEY, BTN_MIDDLE, 1);
++ input_sync(field->hidinput->input);
++ input_event(field->hidinput->input,
++ EV_KEY, BTN_MIDDLE, 0);
++ input_sync(field->hidinput->input);
++ }
++ cptkbd_data->middlebutton_state = 0;
+ }
+- cptkbd_data->middlebutton_state = 0;
++ return 1;
+ }
+- return 1;
+ }
+
+ if (usage->type == EV_KEY && usage->code == KEY_FN_ESC && value == 1) {
+@@ -1126,22 +1161,6 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev)
+ }
+ hid_set_drvdata(hdev, cptkbd_data);
+
+- /*
+- * Tell the keyboard a driver understands it, and turn F7, F9, F11 into
+- * regular keys (Compact only)
+- */
+- if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD ||
+- hdev->product == USB_DEVICE_ID_LENOVO_CBTKBD) {
+- ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
+- if (ret)
+- hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
+- }
+-
+- /* Switch middle button to native mode */
+- ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
+- if (ret)
+- hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
+-
+ /* Set keyboard settings to known state */
+ cptkbd_data->middlebutton_state = 0;
+ cptkbd_data->fn_lock = true;
+@@ -1264,6 +1283,24 @@ err:
+ return ret;
+ }
+
++#ifdef CONFIG_PM
++static int lenovo_reset_resume(struct hid_device *hdev)
++{
++ switch (hdev->product) {
++ case USB_DEVICE_ID_LENOVO_CUSBKBD:
++ case USB_DEVICE_ID_LENOVO_TPIIUSBKBD:
++ if (hdev->type == HID_TYPE_USBMOUSE)
++ lenovo_features_set_cptkbd(hdev);
++
++ break;
++ default:
++ break;
++ }
++
++ return 0;
++}
++#endif
++
+ static void lenovo_remove_tpkbd(struct hid_device *hdev)
+ {
+ struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
+@@ -1380,6 +1417,9 @@ static struct hid_driver lenovo_driver = {
+ .raw_event = lenovo_raw_event,
+ .event = lenovo_event,
+ .report_fixup = lenovo_report_fixup,
++#ifdef CONFIG_PM
++ .reset_resume = lenovo_reset_resume,
++#endif
+ };
+ module_hid_driver(lenovo_driver);
+
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 8afe3be683ba2..e6a8b6d8eab70 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -1695,12 +1695,11 @@ static int logi_dj_raw_event(struct hid_device *hdev,
+ }
+ /*
+ * Mouse-only receivers send unnumbered mouse data. The 27 MHz
+- * receiver uses 6 byte packets, the nano receiver 8 bytes,
+- * the lightspeed receiver (Pro X Superlight) 13 bytes.
++ * receiver uses 6 byte packets, the nano receiver 8 bytes.
+ */
+ if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
+- size <= 13){
+- u8 mouse_report[14];
++ size <= 8) {
++ u8 mouse_report[9];
+
+ /* Prepend report id */
+ mouse_report[0] = REPORT_TYPE_MOUSE;
+@@ -1984,10 +1983,6 @@ static const struct hid_device_id logi_dj_receivers[] = {
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1),
+ .driver_data = recvr_type_gaming_hidpp},
+- { /* Logitech lightspeed receiver (0xc547) */
+- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+- USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2),
+- .driver_data = recvr_type_gaming_hidpp},
+
+ { /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index a209d51bd2476..7bf12ca0eb4a9 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -1835,15 +1835,14 @@ static int hidpp_battery_get_property(struct power_supply *psy,
+ /* -------------------------------------------------------------------------- */
+ #define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b
+
+-static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp)
++static int hidpp_get_wireless_feature_index(struct hidpp_device *hidpp, u8 *feature_index)
+ {
+ u8 feature_type;
+ int ret;
+
+ ret = hidpp_root_get_feature(hidpp,
+ HIDPP_PAGE_WIRELESS_DEVICE_STATUS,
+- &hidpp->wireless_feature_index,
+- &feature_type);
++ feature_index, &feature_type);
+
+ return ret;
+ }
+@@ -4249,6 +4248,13 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
+ }
+ }
+
++ if (hidpp->protocol_major >= 2) {
++ u8 feature_index;
++
++ if (!hidpp_get_wireless_feature_index(hidpp, &feature_index))
++ hidpp->wireless_feature_index = feature_index;
++ }
++
+ if (hidpp->name == hdev->name && hidpp->protocol_major >= 2) {
+ name = hidpp_get_device_name(hidpp);
+ if (name) {
+@@ -4394,7 +4400,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ bool connected;
+ unsigned int connect_mask = HID_CONNECT_DEFAULT;
+ struct hidpp_ff_private_data data;
+- bool will_restart = false;
+
+ /* report_fixup needs drvdata to be set before we call hid_parse */
+ hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
+@@ -4445,10 +4450,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ return ret;
+ }
+
+- if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT ||
+- hidpp->quirks & HIDPP_QUIRK_UNIFYING)
+- will_restart = true;
+-
+ INIT_WORK(&hidpp->work, delayed_work_cb);
+ mutex_init(&hidpp->send_mutex);
+ init_waitqueue_head(&hidpp->wait);
+@@ -4460,10 +4461,12 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ hdev->name);
+
+ /*
+- * Plain USB connections need to actually call start and open
+- * on the transport driver to allow incoming data.
++ * First call hid_hw_start(hdev, 0) to allow IO without connecting any
++ * hid subdrivers (hid-input, hidraw). This allows retrieving the dev's
++	 * name and serial number and storing these in hdev->name and hdev->uniq,
++ * before the hid-input and hidraw drivers expose these to userspace.
+ */
+- ret = hid_hw_start(hdev, will_restart ? 0 : connect_mask);
++ ret = hid_hw_start(hdev, 0);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ goto hid_hw_start_fail;
+@@ -4496,15 +4499,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ hidpp_overwrite_name(hdev);
+ }
+
+- if (connected && hidpp->protocol_major >= 2) {
+- ret = hidpp_set_wireless_feature_index(hidpp);
+- if (ret == -ENOENT)
+- hidpp->wireless_feature_index = 0;
+- else if (ret)
+- goto hid_hw_init_fail;
+- ret = 0;
+- }
+-
+ if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
+ ret = wtp_get_config(hidpp);
+ if (ret)
+@@ -4518,21 +4512,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ schedule_work(&hidpp->work);
+ flush_work(&hidpp->work);
+
+- if (will_restart) {
+- /* Reset the HID node state */
+- hid_device_io_stop(hdev);
+- hid_hw_close(hdev);
+- hid_hw_stop(hdev);
+-
+- if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
+- connect_mask &= ~HID_CONNECT_HIDINPUT;
++ if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
++ connect_mask &= ~HID_CONNECT_HIDINPUT;
+
+- /* Now export the actual inputs and hidraw nodes to the world */
+- ret = hid_hw_start(hdev, connect_mask);
+- if (ret) {
+- hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
+- goto hid_hw_start_fail;
+- }
++ /* Now export the actual inputs and hidraw nodes to the world */
++ ret = hid_connect(hdev, connect_mask);
++ if (ret) {
++ hid_err(hdev, "%s:hid_connect returned error %d\n", __func__, ret);
++ goto hid_hw_init_fail;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+@@ -4543,6 +4530,11 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ ret);
+ }
+
++ /*
++ * This relies on logi_dj_ll_close() being a no-op so that DJ connection
++ * events will still be received.
++ */
++ hid_hw_close(hdev);
+ return ret;
+
+ hid_hw_init_fail:
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 3983b4f282f8f..5a48fcaa32f00 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -66,6 +66,7 @@ static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
+diff --git a/drivers/hid/hid-uclogic-core-test.c b/drivers/hid/hid-uclogic-core-test.c
+index 2bb916226a389..cb274cde3ad23 100644
+--- a/drivers/hid/hid-uclogic-core-test.c
++++ b/drivers/hid/hid-uclogic-core-test.c
+@@ -56,6 +56,11 @@ static struct uclogic_raw_event_hook_test test_events[] = {
+ },
+ };
+
++static void fake_work(struct work_struct *work)
++{
++	/* Intentionally empty: a no-op handler so INIT_WORK() is valid. */
++}
++
+ static void hid_test_uclogic_exec_event_hook_test(struct kunit *test)
+ {
+ struct uclogic_params p = {0, };
+@@ -77,6 +82,8 @@ static void hid_test_uclogic_exec_event_hook_test(struct kunit *test)
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filter->event);
+ memcpy(filter->event, &hook_events[n].event[0], filter->size);
+
++ INIT_WORK(&filter->work, fake_work);
++
+ list_add_tail(&filter->list, &p.event_hooks->list);
+ }
+
+diff --git a/drivers/hid/hid-uclogic-params-test.c b/drivers/hid/hid-uclogic-params-test.c
+index 678f50cbb160b..a30121419a292 100644
+--- a/drivers/hid/hid-uclogic-params-test.c
++++ b/drivers/hid/hid-uclogic-params-test.c
+@@ -174,12 +174,26 @@ static void hid_test_uclogic_parse_ugee_v2_desc(struct kunit *test)
+ KUNIT_EXPECT_EQ(test, params->frame_type, frame_type);
+ }
+
++struct fake_device {
++ unsigned long quirks;
++};
++
+ static void hid_test_uclogic_params_cleanup_event_hooks(struct kunit *test)
+ {
+ int res, n;
++ struct hid_device *hdev;
++ struct fake_device *fake_dev;
+ struct uclogic_params p = {0, };
+
+- res = uclogic_params_ugee_v2_init_event_hooks(NULL, &p);
++ hdev = kunit_kzalloc(test, sizeof(struct hid_device), GFP_KERNEL);
++ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hdev);
++
++ fake_dev = kunit_kzalloc(test, sizeof(struct fake_device), GFP_KERNEL);
++ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fake_dev);
++
++ hid_set_drvdata(hdev, fake_dev);
++
++ res = uclogic_params_ugee_v2_init_event_hooks(hdev, &p);
+ KUNIT_ASSERT_EQ(test, res, 0);
+
+ /* Check that the function can be called repeatedly */
+diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
+index ba37a5efbf820..ab2edff018eb6 100644
+--- a/drivers/hte/hte-tegra194-test.c
++++ b/drivers/hte/hte-tegra194-test.c
+@@ -153,8 +153,10 @@ static int tegra_hte_test_probe(struct platform_device *pdev)
+ }
+
+ cnt = of_hte_req_count(hte.pdev);
+- if (cnt < 0)
++ if (cnt < 0) {
++ ret = cnt;
+ goto free_irq;
++ }
+
+ dev_info(&pdev->dev, "Total requested lines:%d\n", cnt);
+
+diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c
+index 5fd136baf1cd3..19b9bf3d75ef9 100644
+--- a/drivers/hwmon/axi-fan-control.c
++++ b/drivers/hwmon/axi-fan-control.c
+@@ -496,6 +496,21 @@ static int axi_fan_control_probe(struct platform_device *pdev)
+ return -ENODEV;
+ }
+
++ ret = axi_fan_control_init(ctl, pdev->dev.of_node);
++ if (ret) {
++ dev_err(&pdev->dev, "Failed to initialize device\n");
++ return ret;
++ }
++
++ ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev,
++ name,
++ ctl,
++ &axi_chip_info,
++ axi_fan_control_groups);
++
++ if (IS_ERR(ctl->hdev))
++ return PTR_ERR(ctl->hdev);
++
+ ctl->irq = platform_get_irq(pdev, 0);
+ if (ctl->irq < 0)
+ return ctl->irq;
+@@ -509,19 +524,7 @@ static int axi_fan_control_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- ret = axi_fan_control_init(ctl, pdev->dev.of_node);
+- if (ret) {
+- dev_err(&pdev->dev, "Failed to initialize device\n");
+- return ret;
+- }
+-
+- ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev,
+- name,
+- ctl,
+- &axi_chip_info,
+- axi_fan_control_groups);
+-
+- return PTR_ERR_OR_ZERO(ctl->hdev);
++ return 0;
+ }
+
+ static struct platform_driver axi_fan_control_driver = {
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index eba94f68585a8..ba82d1e79c131 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -42,7 +42,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+ #define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
+ #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
+ #define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
+-#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
++#define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */
+ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
+ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
+ #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index b5b81bd83bb15..d928eb8ae5a37 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -1614,17 +1614,21 @@ struct nct6775_data *nct6775_update_device(struct device *dev)
+ data->fan_div[i]);
+
+ if (data->has_fan_min & BIT(i)) {
+- err = nct6775_read_value(data, data->REG_FAN_MIN[i], &reg);
++ u16 tmp;
++
++ err = nct6775_read_value(data, data->REG_FAN_MIN[i], &tmp);
+ if (err)
+ goto out;
+- data->fan_min[i] = reg;
++ data->fan_min[i] = tmp;
+ }
+
+ if (data->REG_FAN_PULSES[i]) {
+- err = nct6775_read_value(data, data->REG_FAN_PULSES[i], &reg);
++ u16 tmp;
++
++ err = nct6775_read_value(data, data->REG_FAN_PULSES[i], &tmp);
+ if (err)
+ goto out;
+- data->fan_pulses[i] = (reg >> data->FAN_PULSE_SHIFT[i]) & 0x03;
++ data->fan_pulses[i] = (tmp >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+ }
+
+ err = nct6775_select_fan_div(dev, data, i, reg);
+diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
+index 26ba506331007..b9bb469e2d8fe 100644
+--- a/drivers/hwmon/pmbus/mp2975.c
++++ b/drivers/hwmon/pmbus/mp2975.c
+@@ -297,6 +297,11 @@ static int mp2973_read_word_data(struct i2c_client *client, int page,
+ int ret;
+
+ switch (reg) {
++ case PMBUS_STATUS_WORD:
++ /* MP2973 & MP2971 return PGOOD instead of PB_STATUS_POWER_GOOD_N. */
++ ret = pmbus_read_word_data(client, page, phase, reg);
++ ret ^= PB_STATUS_POWER_GOOD_N;
++ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ ret = mp2975_read_word_helper(client, page, phase, reg,
+ GENMASK(7, 0));
+@@ -380,11 +385,6 @@ static int mp2975_read_word_data(struct i2c_client *client, int page,
+ int ret;
+
+ switch (reg) {
+- case PMBUS_STATUS_WORD:
+- /* MP2973 & MP2971 return PGOOD instead of PB_STATUS_POWER_GOOD_N. */
+- ret = pmbus_read_word_data(client, page, phase, reg);
+- ret ^= PB_STATUS_POWER_GOOD_N;
+- break;
+ case PMBUS_OT_FAULT_LIMIT:
+ ret = mp2975_read_word_helper(client, page, phase, reg,
+ GENMASK(7, 0));
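The two mp2975 hunks move the PGOOD polarity fix from the MP2975 handler into
mp2973_read_word_data(), where it belongs: only the MP2973/MP2971 report an
active-high PGOOD where PMBus defines PB_STATUS_POWER_GOOD_N as active-low. A
sketch of the normalization (PB_STATUS_POWER_GOOD_N is the real PMBus flag;
the surrounding code is illustrative only):

    int ret = pmbus_read_word_data(client, page, phase, PMBUS_STATUS_WORD);

    if (ret >= 0)
        ret ^= PB_STATUS_POWER_GOOD_N;  /* flip PGOOD to POWER_GOOD_N polarity */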
+diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
+index 1bbda3b05532e..bf408e35e2c32 100644
+--- a/drivers/hwmon/sch5627.c
++++ b/drivers/hwmon/sch5627.c
+@@ -6,6 +6,7 @@
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
++#include <linux/bits.h>
+ #include <linux/module.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/init.h>
+@@ -32,6 +33,10 @@
+ #define SCH5627_REG_PRIMARY_ID 0x3f
+ #define SCH5627_REG_CTRL 0x40
+
++#define SCH5627_CTRL_START BIT(0)
++#define SCH5627_CTRL_LOCK BIT(1)
++#define SCH5627_CTRL_VBAT BIT(4)
++
+ #define SCH5627_NO_TEMPS 8
+ #define SCH5627_NO_FANS 4
+ #define SCH5627_NO_IN 5
+@@ -147,7 +152,8 @@ static int sch5627_update_in(struct sch5627_data *data)
+
+ /* Trigger a Vbat voltage measurement every 5 minutes */
+ if (time_after(jiffies, data->last_battery + 300 * HZ)) {
+- sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, data->control | 0x10);
++ sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL,
++ data->control | SCH5627_CTRL_VBAT);
+ data->last_battery = jiffies;
+ }
+
+@@ -226,6 +232,14 @@ static int reg_to_rpm(u16 reg)
+ static umode_t sch5627_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+ {
++ const struct sch5627_data *data = drvdata;
++
++ /* Once the lock bit is set, the virtual registers become read-only
++ * until the next power cycle.
++ */
++ if (data->control & SCH5627_CTRL_LOCK)
++ return 0444;
++
+ if (type == hwmon_pwm && attr == hwmon_pwm_auto_channels_temp)
+ return 0644;
+
+@@ -483,14 +497,13 @@ static int sch5627_probe(struct platform_device *pdev)
+ return val;
+
+ data->control = val;
+- if (!(data->control & 0x01)) {
++ if (!(data->control & SCH5627_CTRL_START)) {
+ pr_err("hardware monitoring not enabled\n");
+ return -ENODEV;
+ }
+ /* Trigger a Vbat voltage measurement, so that we get a valid reading
+ the first time we read Vbat */
+- sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL,
+- data->control | 0x10);
++ sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, data->control | SCH5627_CTRL_VBAT);
+ data->last_battery = jiffies;
+
+ /*
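Beyond replacing the 0x01/0x10 magic masks with named BIT() constants, the
sch5627 change uses the new SCH5627_CTRL_LOCK bit to demote every attribute to
read-only once the register file is locked. A sketch of how a hwmon
is_visible callback expresses such a rule (demo_data and its `locked' flag
are hypothetical):

    static umode_t demo_is_visible(const void *drvdata,
                                   enum hwmon_sensor_types type,
                                   u32 attr, int channel)
    {
        const struct demo_data *data = drvdata;

        if (data->locked)
            return 0444;    /* everything read-only until power cycle */

        return (type == hwmon_pwm) ? 0644 : 0444;
    }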
+diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
+index de3a0886c2f72..ac1f725807155 100644
+--- a/drivers/hwmon/sch56xx-common.c
++++ b/drivers/hwmon/sch56xx-common.c
+@@ -7,10 +7,8 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #include <linux/module.h>
+-#include <linux/mod_devicetable.h>
+ #include <linux/init.h>
+ #include <linux/platform_device.h>
+-#include <linux/dmi.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/acpi.h>
+@@ -21,10 +19,7 @@
+ #include <linux/slab.h>
+ #include "sch56xx-common.h"
+
+-static bool ignore_dmi;
+-module_param(ignore_dmi, bool, 0);
+-MODULE_PARM_DESC(ignore_dmi, "Omit DMI check for supported devices (default=0)");
+-
++/* Insmod parameters */
+ static bool nowayout = WATCHDOG_NOWAYOUT;
+ module_param(nowayout, bool, 0);
+ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+@@ -523,66 +518,11 @@ static int __init sch56xx_device_add(int address, const char *name)
+ return PTR_ERR_OR_ZERO(sch56xx_pdev);
+ }
+
+-static const struct dmi_system_id sch56xx_dmi_override_table[] __initconst = {
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS W380"),
+- },
+- },
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO P710"),
+- },
+- },
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO E9900"),
+- },
+- },
+- { }
+-};
+-
+-/* For autoloading only */
+-static const struct dmi_system_id sch56xx_dmi_table[] __initconst = {
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+- },
+- },
+- { }
+-};
+-MODULE_DEVICE_TABLE(dmi, sch56xx_dmi_table);
+-
+ static int __init sch56xx_init(void)
+ {
+- const char *name = NULL;
+ int address;
++ const char *name = NULL;
+
+- if (!ignore_dmi) {
+- if (!dmi_check_system(sch56xx_dmi_table))
+- return -ENODEV;
+-
+- if (!dmi_check_system(sch56xx_dmi_override_table)) {
+- /*
+- * Some machines like the Esprimo P720 and Esprimo C700 have
+- * onboard devices named " Antiope"/" Theseus" instead of
+- * "Antiope"/"Theseus", so we need to check for both.
+- */
+- if (!dmi_find_device(DMI_DEV_TYPE_OTHER, "Antiope", NULL) &&
+- !dmi_find_device(DMI_DEV_TYPE_OTHER, " Antiope", NULL) &&
+- !dmi_find_device(DMI_DEV_TYPE_OTHER, "Theseus", NULL) &&
+- !dmi_find_device(DMI_DEV_TYPE_OTHER, " Theseus", NULL))
+- return -ENODEV;
+- }
+- }
+-
+- /*
+- * Some devices like the Esprimo C700 have both onboard devices,
+- * so we still have to check manually
+- */
+ address = sch56xx_find(0x4e, &name);
+ if (address < 0)
+ address = sch56xx_find(0x2e, &name);
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 6644eebedaf3b..97d27e01a6ee2 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -158,6 +158,7 @@ config I2C_I801
+ Alder Lake (PCH)
+ Raptor Lake (PCH)
+ Meteor Lake (SOC and PCH)
++ Birch Stream (SOC)
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-i801.
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index 51aab662050b1..e905734c26a04 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -316,26 +316,44 @@ static void bcm_iproc_i2c_slave_init(
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+ }
+
+-static void bcm_iproc_i2c_check_slave_status(
+- struct bcm_iproc_i2c_dev *iproc_i2c)
++static bool bcm_iproc_i2c_check_slave_status
++ (struct bcm_iproc_i2c_dev *iproc_i2c, u32 status)
+ {
+ u32 val;
++ bool recover = false;
+
+- val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
+- /* status is valid only when START_BUSY is cleared after it was set */
+- if (val & BIT(S_CMD_START_BUSY_SHIFT))
+- return;
++ /* check slave transmit status only if slave is transmitting */
++ if (!iproc_i2c->slave_rx_only) {
++ val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
++ /* status is valid only when START_BUSY is cleared */
++ if (!(val & BIT(S_CMD_START_BUSY_SHIFT))) {
++ val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
++ if (val == S_CMD_STATUS_TIMEOUT ||
++ val == S_CMD_STATUS_MASTER_ABORT) {
++ dev_warn(iproc_i2c->device,
++ (val == S_CMD_STATUS_TIMEOUT) ?
++ "slave random stretch time timeout\n" :
++ "Master aborted read transaction\n");
++ recover = true;
++ }
++ }
++ }
++
++ /* RX_EVENT is not valid when START_BUSY is set */
++ if ((status & BIT(IS_S_RX_EVENT_SHIFT)) &&
++ (status & BIT(IS_S_START_BUSY_SHIFT))) {
++ dev_warn(iproc_i2c->device, "Slave aborted read transaction\n");
++ recover = true;
++ }
+
+- val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
+- if (val == S_CMD_STATUS_TIMEOUT || val == S_CMD_STATUS_MASTER_ABORT) {
+- dev_err(iproc_i2c->device, (val == S_CMD_STATUS_TIMEOUT) ?
+- "slave random stretch time timeout\n" :
+- "Master aborted read transaction\n");
++ if (recover) {
+ /* re-initialize i2c for recovery */
+ bcm_iproc_i2c_enable_disable(iproc_i2c, false);
+ bcm_iproc_i2c_slave_init(iproc_i2c, true);
+ bcm_iproc_i2c_enable_disable(iproc_i2c, true);
+ }
++
++ return recover;
+ }
+
+ static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c)
+@@ -420,48 +438,6 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 val;
+ u8 value;
+
+- /*
+- * Slave events in case of master-write, master-write-read and,
+- * master-read
+- *
+- * Master-write : only IS_S_RX_EVENT_SHIFT event
+- * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+- * events
+- * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+- * events or only IS_S_RD_EVENT_SHIFT
+- *
+- * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
+- * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
+- * full. This can happen if Master issues write requests of more than
+- * 64 bytes.
+- */
+- if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
+- status & BIT(IS_S_RD_EVENT_SHIFT) ||
+- status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
+- /* disable slave interrupts */
+- val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+- val &= ~iproc_i2c->slave_int_mask;
+- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+-
+- if (status & BIT(IS_S_RD_EVENT_SHIFT))
+- /* Master-write-read request */
+- iproc_i2c->slave_rx_only = false;
+- else
+- /* Master-write request only */
+- iproc_i2c->slave_rx_only = true;
+-
+- /* schedule tasklet to read data later */
+- tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
+-
+- /*
+- * clear only IS_S_RX_EVENT_SHIFT and
+- * IS_S_RX_FIFO_FULL_SHIFT interrupt.
+- */
+- val = BIT(IS_S_RX_EVENT_SHIFT);
+- if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT))
+- val |= BIT(IS_S_RX_FIFO_FULL_SHIFT);
+- iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
+- }
+
+ if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
+ iproc_i2c->tx_underrun++;
+@@ -493,8 +469,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ * less than PKT_LENGTH bytes were output on the SMBUS
+ */
+ iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
+- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
+- iproc_i2c->slave_int_mask);
++ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
++ val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
++ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+
+ /* End of SMBUS for Master Read */
+ val = BIT(S_TX_WR_STATUS_SHIFT);
+@@ -515,9 +492,49 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ BIT(IS_S_START_BUSY_SHIFT));
+ }
+
+- /* check slave transmit status only if slave is transmitting */
+- if (!iproc_i2c->slave_rx_only)
+- bcm_iproc_i2c_check_slave_status(iproc_i2c);
++ /* if the controller has been reset, immediately return from the ISR */
++ if (bcm_iproc_i2c_check_slave_status(iproc_i2c, status))
++ return true;
++
++ /*
++ * Slave events in case of master-write, master-write-read and,
++ * master-read
++ *
++ * Master-write : only IS_S_RX_EVENT_SHIFT event
++ * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++ * events
++ * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++ * events or only IS_S_RD_EVENT_SHIFT
++ *
++ * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
++ * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
++ * full. This can happen if Master issues write requests of more than
++ * 64 bytes.
++ */
++ if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
++ status & BIT(IS_S_RD_EVENT_SHIFT) ||
++ status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
++ /* disable slave interrupts */
++ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
++ val &= ~iproc_i2c->slave_int_mask;
++ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
++
++ if (status & BIT(IS_S_RD_EVENT_SHIFT))
++ /* Master-write-read request */
++ iproc_i2c->slave_rx_only = false;
++ else
++ /* Master-write request only */
++ iproc_i2c->slave_rx_only = true;
++
++ /* schedule tasklet to read data later */
++ tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
++
++ /* clear IS_S_RX_FIFO_FULL_SHIFT interrupt */
++ if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
++ val = BIT(IS_S_RX_FIFO_FULL_SHIFT);
++ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
++ }
++ }
+
+ return true;
+ }
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index ca1035e010c72..85dbd0eb5392c 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -519,10 +519,16 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
+
+ /*
+ * Because we don't know the buffer length in the
+- * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
+- * the transaction here.
++ * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
++ * transaction here. Also disable the TX_EMPTY IRQ
++ * while waiting for the data length byte to avoid a
++ * flood of bogus interrupts.
+ */
+- if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
++ if (flags & I2C_M_RECV_LEN) {
++ dev->status |= STATUS_WRITE_IN_PROGRESS;
++ intr_mask &= ~DW_IC_INTR_TX_EMPTY;
++ break;
++ } else if (buf_len > 0) {
+ /* more bytes to be written */
+ dev->status |= STATUS_WRITE_IN_PROGRESS;
+ break;
+@@ -558,6 +564,13 @@ i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
+ msgs[dev->msg_read_idx].len = len;
+ msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
+
++ /*
++ * The buffer length has been received, so re-enable the
++ * TX_EMPTY interrupt to resume the SMBus transaction.
++ */
++ regmap_update_bits(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_TX_EMPTY,
++ DW_IC_INTR_TX_EMPTY);
++
+ return len;
+ }
+
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 1d855258a45dc..a87e3c15e5fc6 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -79,6 +79,7 @@
+ * Meteor Lake-P (SOC) 0x7e22 32 hard yes yes yes
+ * Meteor Lake SoC-S (SOC) 0xae22 32 hard yes yes yes
+ * Meteor Lake PCH-S (PCH) 0x7f23 32 hard yes yes yes
++ * Birch Stream (SOC) 0x5796 32 hard yes yes yes
+ *
+ * Features supported by this driver:
+ * Software PEC no
+@@ -231,6 +232,7 @@
+ #define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3
++#define PCI_DEVICE_ID_INTEL_BIRCH_STREAM_SMBUS 0x5796
+ #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
+ #define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS 0x7a23
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
+@@ -679,15 +681,11 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
+ return result ? priv->status : -ETIMEDOUT;
+ }
+
+- for (i = 1; i <= len; i++) {
+- if (i == len && read_write == I2C_SMBUS_READ)
+- smbcmd |= SMBHSTCNT_LAST_BYTE;
+- outb_p(smbcmd, SMBHSTCNT(priv));
+-
+- if (i == 1)
+- outb_p(inb(SMBHSTCNT(priv)) | SMBHSTCNT_START,
+- SMBHSTCNT(priv));
++ if (len == 1 && read_write == I2C_SMBUS_READ)
++ smbcmd |= SMBHSTCNT_LAST_BYTE;
++ outb_p(smbcmd | SMBHSTCNT_START, SMBHSTCNT(priv));
+
++ for (i = 1; i <= len; i++) {
+ status = i801_wait_byte_done(priv);
+ if (status)
+ return status;
+@@ -710,9 +708,12 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
+ data->block[0] = len;
+ }
+
+- /* Retrieve/store value in SMBBLKDAT */
+- if (read_write == I2C_SMBUS_READ)
++ if (read_write == I2C_SMBUS_READ) {
+ data->block[i] = inb_p(SMBBLKDAT(priv));
++ if (i == len - 1)
++ outb_p(smbcmd | SMBHSTCNT_LAST_BYTE, SMBHSTCNT(priv));
++ }
++
+ if (read_write == I2C_SMBUS_WRITE && i+1 <= len)
+ outb_p(data->block[i+1], SMBBLKDAT(priv));
+
+@@ -1044,6 +1045,7 @@ static const struct pci_device_id i801_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
++ { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { 0, }
+ };
+
+diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
+index 29be05af826b0..3bd406470940f 100644
+--- a/drivers/i2c/busses/i2c-pxa.c
++++ b/drivers/i2c/busses/i2c-pxa.c
+@@ -264,6 +264,9 @@ struct pxa_i2c {
+ u32 hs_mask;
+
+ struct i2c_bus_recovery_info recovery;
++ struct pinctrl *pinctrl;
++ struct pinctrl_state *pinctrl_default;
++ struct pinctrl_state *pinctrl_recovery;
+ };
+
+ #define _IBMR(i2c) ((i2c)->reg_ibmr)
+@@ -1300,12 +1303,13 @@ static void i2c_pxa_prepare_recovery(struct i2c_adapter *adap)
+ */
+ gpiod_set_value(i2c->recovery.scl_gpiod, ibmr & IBMR_SCLS);
+ gpiod_set_value(i2c->recovery.sda_gpiod, ibmr & IBMR_SDAS);
++
++ WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery));
+ }
+
+ static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
+ {
+ struct pxa_i2c *i2c = adap->algo_data;
+- struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+ u32 isr;
+
+ /*
+@@ -1319,7 +1323,7 @@ static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
+ i2c_pxa_do_reset(i2c);
+ }
+
+- WARN_ON(pinctrl_select_state(bri->pinctrl, bri->pins_default));
++ WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default));
+
+ dev_dbg(&i2c->adap.dev, "recovery: IBMR 0x%08x ISR 0x%08x\n",
+ readl(_IBMR(i2c)), readl(_ISR(i2c)));
+@@ -1341,20 +1345,76 @@ static int i2c_pxa_init_recovery(struct pxa_i2c *i2c)
+ if (IS_ENABLED(CONFIG_I2C_PXA_SLAVE))
+ return 0;
+
+- bri->pinctrl = devm_pinctrl_get(dev);
+- if (PTR_ERR(bri->pinctrl) == -ENODEV) {
+- bri->pinctrl = NULL;
++ i2c->pinctrl = devm_pinctrl_get(dev);
++ if (PTR_ERR(i2c->pinctrl) == -ENODEV)
++ i2c->pinctrl = NULL;
++ if (IS_ERR(i2c->pinctrl))
++ return PTR_ERR(i2c->pinctrl);
++
++ if (!i2c->pinctrl)
++ return 0;
++
++ i2c->pinctrl_default = pinctrl_lookup_state(i2c->pinctrl,
++ PINCTRL_STATE_DEFAULT);
++ i2c->pinctrl_recovery = pinctrl_lookup_state(i2c->pinctrl, "recovery");
++
++ if (IS_ERR(i2c->pinctrl_default) || IS_ERR(i2c->pinctrl_recovery)) {
++ dev_info(dev, "missing pinmux recovery information: %ld %ld\n",
++ PTR_ERR(i2c->pinctrl_default),
++ PTR_ERR(i2c->pinctrl_recovery));
++ return 0;
++ }
++
++ /*
++ * Claiming GPIOs can influence the pinmux state, and may glitch the
++ * I2C bus. Do this carefully.
++ */
++ bri->scl_gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
++ if (bri->scl_gpiod == ERR_PTR(-EPROBE_DEFER))
++ return -EPROBE_DEFER;
++ if (IS_ERR(bri->scl_gpiod)) {
++ dev_info(dev, "missing scl gpio recovery information: %pe\n",
++ bri->scl_gpiod);
++ return 0;
++ }
++
++ /*
++ * We have SCL. Pull SCL low and wait a bit so that SDA glitches
++ * have no effect.
++ */
++ gpiod_direction_output(bri->scl_gpiod, 0);
++ udelay(10);
++ bri->sda_gpiod = devm_gpiod_get(dev, "sda", GPIOD_OUT_HIGH_OPEN_DRAIN);
++
++ /* Wait a bit in case of a SDA glitch, and then release SCL. */
++ udelay(10);
++ gpiod_direction_output(bri->scl_gpiod, 1);
++
++ if (bri->sda_gpiod == ERR_PTR(-EPROBE_DEFER))
++ return -EPROBE_DEFER;
++
++ if (IS_ERR(bri->sda_gpiod)) {
++ dev_info(dev, "missing sda gpio recovery information: %pe\n",
++ bri->sda_gpiod);
+ return 0;
+ }
+- if (IS_ERR(bri->pinctrl))
+- return PTR_ERR(bri->pinctrl);
+
+ bri->prepare_recovery = i2c_pxa_prepare_recovery;
+ bri->unprepare_recovery = i2c_pxa_unprepare_recovery;
++ bri->recover_bus = i2c_generic_scl_recovery;
+
+ i2c->adap.bus_recovery_info = bri;
+
+- return 0;
++ /*
++ * Claiming GPIOs can change the pinmux state, which confuses the
++ * pinctrl since pinctrl's idea of the current setting is unaffected
++ * by the pinmux change caused by claiming the GPIO. Work around that
++ * by switching pinctrl to the GPIO state here. We do it this way to
++ * avoid glitching the I2C bus.
++ */
++ pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery);
++
++ return pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default);
+ }
+
+ static int i2c_pxa_probe(struct platform_device *dev)
+diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
+index fa6020dced595..85e035e7a1d75 100644
+--- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
++++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
+@@ -201,6 +201,11 @@ static int p2wi_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
++ if (clk_freq == 0) {
++ dev_err(dev, "clock-frequency is set to 0 in DT\n");
++ return -EINVAL;
++ }
++
+ if (of_get_child_count(np) > 1) {
+ dev_err(dev, "P2WI only supports one slave device\n");
+ return -EINVAL;
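The added zero check matters because the P2WI probe goes on to derive a bus
clock divider from clock-frequency; a DT that sets it to 0 would otherwise
divide by zero in the kernel. A hedged sketch of the guarded computation (the
divider formula here is illustrative, not the driver's exact one):

    if (clk_freq == 0)
        return -EINVAL;     /* would divide by zero below */

    clk_div = DIV_ROUND_UP(parent_rate, clk_freq);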
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index 60746652fd525..7f30bcceebaed 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -931,8 +931,9 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
+ struct i2c_client *
+ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
+ {
+- struct i2c_client *client;
+- int status;
++ struct i2c_client *client;
++ bool need_put = false;
++ int status;
+
+ client = kzalloc(sizeof *client, GFP_KERNEL);
+ if (!client)
+@@ -970,7 +971,6 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+ client->dev.fwnode = info->fwnode;
+
+ device_enable_async_suspend(&client->dev);
+- i2c_dev_set_name(adap, client, info);
+
+ if (info->swnode) {
+ status = device_add_software_node(&client->dev, info->swnode);
+@@ -982,6 +982,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+ }
+ }
+
++ i2c_dev_set_name(adap, client, info);
+ status = device_register(&client->dev);
+ if (status)
+ goto out_remove_swnode;
+@@ -993,6 +994,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+
+ out_remove_swnode:
+ device_remove_software_node(&client->dev);
++ need_put = true;
+ out_err_put_of_node:
+ of_node_put(info->of_node);
+ out_err:
+@@ -1000,7 +1002,10 @@ out_err:
+ "Failed to register i2c client %s at 0x%02x (%d)\n",
+ client->name, client->addr, status);
+ out_err_silent:
+- kfree(client);
++ if (need_put)
++ put_device(&client->dev);
++ else
++ kfree(client);
+ return ERR_PTR(status);
+ }
+ EXPORT_SYMBOL_GPL(i2c_new_client_device);
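The need_put flag in i2c_new_client_device() encodes a device-model rule:
before device_register() the client is plain kzalloc() memory and kfree() is
correct, but device_register() calls device_initialize() internally, and from
then on the embedded kobject owns the object, so only put_device() (which
ends in the release() callback) may free it. A minimal sketch with a
hypothetical struct foo whose release() does the kfree():

    foo = kzalloc(sizeof(*foo), GFP_KERNEL);
    if (!foo)
        return -ENOMEM;

    if (early_setup_failed) {
        kfree(foo);             /* not yet initialized: plain memory */
        return -EIO;
    }

    ret = device_register(&foo->dev);
    if (ret) {
        put_device(&foo->dev);  /* initialized: release() frees foo */
        return ret;
    }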
+diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
+index 1247e6e6e9751..05b8b8dfa9bdd 100644
+--- a/drivers/i2c/i2c-core.h
++++ b/drivers/i2c/i2c-core.h
+@@ -29,7 +29,7 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
+ */
+ static inline bool i2c_in_atomic_xfer_mode(void)
+ {
+- return system_state > SYSTEM_RUNNING && irqs_disabled();
++ return system_state > SYSTEM_RUNNING && !preemptible();
+ }
+
+ static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap)
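The i2c-core.h change widens the "atomic transfer" test: preemptible() is
false whenever preemption is disabled or interrupts are off, so the new check
also catches late-shutdown callers that hold a spinlock with interrupts still
enabled, where a sleeping transfer would be illegal. On kernels built with
CONFIG_PREEMPT_COUNT, preemptible() is essentially:

    #define preemptible()   (preempt_count() == 0 && !irqs_disabled())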
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index a01b59e3599b5..7d337380a05d9 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -450,8 +450,8 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
+ return -EINVAL;
+
+- rdwr_pa = memdup_user(rdwr_arg.msgs,
+- rdwr_arg.nmsgs * sizeof(struct i2c_msg));
++ rdwr_pa = memdup_array_user(rdwr_arg.msgs,
++ rdwr_arg.nmsgs, sizeof(struct i2c_msg));
+ if (IS_ERR(rdwr_pa))
+ return PTR_ERR(rdwr_pa);
+
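memdup_array_user() folds the nmsgs * sizeof(struct i2c_msg) multiplication
into an overflow check, so an absurd count from userspace cannot wrap the
allocation size (here it is hardening, since nmsgs is already bounded by
I2C_RDWR_IOCTL_MAX_MSGS a few lines up). It is essentially this sketch, not
necessarily the verbatim kernel implementation:

    static inline void *memdup_array_user(const void __user *src,
                                          size_t n, size_t size)
    {
        size_t nbytes;

        if (check_mul_overflow(n, size, &nbytes))
            return ERR_PTR(-EOVERFLOW);

        return memdup_user(src, nbytes);
    }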
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index 87283e4a46076..0e9ff5500a777 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -1525,9 +1525,11 @@ i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
+ desc->dev->dev.of_node = desc->boardinfo->of_node;
+
+ ret = device_register(&desc->dev->dev);
+- if (ret)
++ if (ret) {
+ dev_err(&master->dev,
+ "Failed to add I3C device (err = %d)\n", ret);
++ put_device(&desc->dev->dev);
++ }
+ }
+ }
+
+diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
+index 49551db71bc96..8f1fda3c7ac52 100644
+--- a/drivers/i3c/master/i3c-master-cdns.c
++++ b/drivers/i3c/master/i3c-master-cdns.c
+@@ -191,7 +191,7 @@
+ #define SLV_STATUS1_HJ_DIS BIT(18)
+ #define SLV_STATUS1_MR_DIS BIT(17)
+ #define SLV_STATUS1_PROT_ERR BIT(16)
+-#define SLV_STATUS1_DA(x) (((s) & GENMASK(15, 9)) >> 9)
++#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
+ #define SLV_STATUS1_HAS_DA BIT(8)
+ #define SLV_STATUS1_DDR_RX_FULL BIT(7)
+ #define SLV_STATUS1_DDR_TX_FULL BIT(6)
+@@ -1623,13 +1623,13 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
+ /* Device ID0 is reserved to describe this master. */
+ master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
+ master->free_rr_slots = GENMASK(master->maxdevs, 1);
++ master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
++ master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
+
+ val = readl(master->regs + CONF_STATUS1);
+ master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
+ master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
+ master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
+- master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
+- master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
+
+ spin_lock_init(&master->ibi.lock);
+ master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
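The SLV_STATUS1_DA() fix above is classic macro hygiene: the parameter was
named x while the body referenced s, so the macro only compiled when some
variable named s happened to be in scope, and then silently read that variable
instead of its argument:

    #define BAD_DA(x)   (((s) & GENMASK(15, 9)) >> 9)   /* uses caller's 's'! */
    #define GOOD_DA(s)  (((s) & GENMASK(15, 9)) >> 9)   /* uses the argument */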
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+index 97bb49ff5b53b..47b9b4d4ed3fc 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+@@ -64,15 +64,17 @@ static int hci_dat_v1_init(struct i3c_hci *hci)
+ return -EOPNOTSUPP;
+ }
+
+- /* use a bitmap for faster free slot search */
+- hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
+- if (!hci->DAT_data)
+- return -ENOMEM;
+-
+- /* clear them */
+- for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
+- dat_w0_write(dat_idx, 0);
+- dat_w1_write(dat_idx, 0);
++ if (!hci->DAT_data) {
++ /* use a bitmap for faster free slot search */
++ hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
++ if (!hci->DAT_data)
++ return -ENOMEM;
++
++ /* clear them */
++ for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
++ dat_w0_write(dat_idx, 0);
++ dat_w1_write(dat_idx, 0);
++ }
+ }
+
+ return 0;
+@@ -87,7 +89,13 @@ static void hci_dat_v1_cleanup(struct i3c_hci *hci)
+ static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
+ {
+ unsigned int dat_idx;
++ int ret;
+
++ if (!hci->DAT_data) {
++ ret = hci_dat_v1_init(hci);
++ if (ret)
++ return ret;
++ }
+ dat_idx = find_first_zero_bit(hci->DAT_data, hci->DAT_entries);
+ if (dat_idx >= hci->DAT_entries)
+ return -ENOENT;
+@@ -103,7 +111,8 @@ static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
+ {
+ dat_w0_write(dat_idx, 0);
+ dat_w1_write(dat_idx, 0);
+- __clear_bit(dat_idx, hci->DAT_data);
++ if (hci->DAT_data)
++ __clear_bit(dat_idx, hci->DAT_data);
+ }
+
+ static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
+index 2990ac9eaade7..71b5dbe45c45c 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
+@@ -734,7 +734,7 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
+ unsigned int i;
+ bool handled = false;
+
+- for (i = 0; mask && i < 8; i++) {
++ for (i = 0; mask && i < rings->total; i++) {
+ struct hci_rh_data *rh;
+ u32 status;
+
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index 8f8295acdadb3..c395e52294140 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -93,6 +93,7 @@
+ #define SVC_I3C_MINTMASKED 0x098
+ #define SVC_I3C_MERRWARN 0x09C
+ #define SVC_I3C_MERRWARN_NACK BIT(2)
++#define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
+ #define SVC_I3C_MDMACTRL 0x0A0
+ #define SVC_I3C_MDATACTRL 0x0AC
+ #define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
+@@ -175,6 +176,7 @@ struct svc_i3c_regs_save {
+ * @ibi.slots: Available IBI slots
+ * @ibi.tbq_slot: To be queued IBI slot
+ * @ibi.lock: IBI lock
+ * @lock: Transfer lock, serializes transfers against the IBI work thread and controller callbacks
+ */
+ struct svc_i3c_master {
+ struct i3c_master_controller base;
+@@ -203,6 +205,7 @@ struct svc_i3c_master {
+ /* Prevent races within IBI handlers */
+ spinlock_t lock;
+ } ibi;
++ struct mutex lock;
+ };
+
+ /**
+@@ -225,6 +228,14 @@ static bool svc_i3c_master_error(struct svc_i3c_master *master)
+ if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
+ merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
+ writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
++
++ /* Ignore timeout error */
++ if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
++ dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
++ mstatus, merrwarn);
++ return false;
++ }
++
+ dev_err(master->dev,
+ "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
+ mstatus, merrwarn);
+@@ -331,6 +342,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ struct i3c_ibi_slot *slot;
+ unsigned int count;
+ u32 mdatactrl;
++ int ret, val;
+ u8 *buf;
+
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+@@ -340,6 +352,13 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ slot->len = 0;
+ buf = slot->data;
+
++ ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
++ SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
++ if (ret) {
++ dev_err(master->dev, "Timeout when polling for COMPLETE\n");
++ return ret;
++ }
++
+ while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
+ slot->len < SVC_I3C_FIFO_SIZE) {
+ mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
+@@ -384,6 +403,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ u32 status, val;
+ int ret;
+
++ mutex_lock(&master->lock);
+ /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
+ writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
+ SVC_I3C_MCTRL_IBIRESP_AUTO,
+@@ -394,6 +414,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
+ if (ret) {
+ dev_err(master->dev, "Timeout when polling for IBIWON\n");
++ svc_i3c_master_emit_stop(master);
+ goto reenable_ibis;
+ }
+
+@@ -460,12 +481,13 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+
+ reenable_ibis:
+ svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
++ mutex_unlock(&master->lock);
+ }
+
+ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
+ {
+ struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
+- u32 active = readl(master->regs + SVC_I3C_MINTMASKED);
++ u32 active = readl(master->regs + SVC_I3C_MSTATUS);
+
+ if (!SVC_I3C_MSTATUS_SLVSTART(active))
+ return IRQ_NONE;
+@@ -1007,6 +1029,9 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ u32 reg;
+ int ret;
+
++ /* clear the SVC_I3C_MINT_IBIWON w1c (write-1-to-clear) bit */
++ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
++
+ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ xfer_type |
+ SVC_I3C_MCTRL_IBIRESP_NACK |
+@@ -1025,6 +1050,23 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ goto emit_stop;
+ }
+
++ /*
++ * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
++ * with I3C Target Address.
++ *
++ * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
++ * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
++ * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
++ * a Hot-Join Request has been made.
++ *
++ * If the IBIWON check is missed, wrong data will be returned. When IBIWON is set, return a
++ * failure and let the events above be handled first.
++ */
++ if (SVC_I3C_MSTATUS_IBIWON(reg)) {
++ ret = -ENXIO;
++ goto emit_stop;
++ }
++
+ if (rnw)
+ ret = svc_i3c_master_read(master, in, xfer_len);
+ else
+@@ -1204,9 +1246,11 @@ static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
+ cmd->read_len = 0;
+ cmd->continued = false;
+
++ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
++ mutex_unlock(&master->lock);
+
+ ret = xfer->ret;
+ kfree(buf);
+@@ -1250,9 +1294,11 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
+ cmd->read_len = read_len;
+ cmd->continued = false;
+
++ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
++ mutex_unlock(&master->lock);
+
+ if (cmd->read_len != xfer_len)
+ ccc->dests[0].payload.len = cmd->read_len;
+@@ -1309,9 +1355,11 @@ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ cmd->continued = (i + 1) < nxfers;
+ }
+
++ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
++ mutex_unlock(&master->lock);
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+@@ -1347,9 +1395,11 @@ static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ cmd->continued = (i + 1 < nxfers);
+ }
+
++ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
++ mutex_unlock(&master->lock);
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+@@ -1540,6 +1590,8 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
+
+ INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
+ INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
++ mutex_init(&master->lock);
++
+ ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
+ IRQF_NO_SUSPEND, "svc-i3c-irq", master);
+ if (ret)
+diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
+index 2f082006550fd..bbd5bdd732f01 100644
+--- a/drivers/iio/adc/stm32-adc-core.c
++++ b/drivers/iio/adc/stm32-adc-core.c
+@@ -708,6 +708,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
+ struct stm32_adc_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
++ const struct of_device_id *of_id;
++
+ struct resource *res;
+ u32 max_rate;
+ int ret;
+@@ -720,8 +722,11 @@ static int stm32_adc_probe(struct platform_device *pdev)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, &priv->common);
+
+- priv->cfg = (const struct stm32_adc_priv_cfg *)
+- of_match_device(dev->driver->of_match_table, dev)->data;
++ of_id = of_match_device(dev->driver->of_match_table, dev);
++ if (!of_id)
++ return -ENODEV;
++
++ priv->cfg = (const struct stm32_adc_priv_cfg *)of_id->data;
+ priv->nb_adc_max = priv->cfg->num_adcs;
+ spin_lock_init(&priv->common.lock);
+
+diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
+index 85e289700c3c5..4abf80f75ef5d 100644
+--- a/drivers/iio/frequency/adf4350.c
++++ b/drivers/iio/frequency/adf4350.c
+@@ -33,7 +33,6 @@ enum {
+
+ struct adf4350_state {
+ struct spi_device *spi;
+- struct regulator *reg;
+ struct gpio_desc *lock_detect_gpiod;
+ struct adf4350_platform_data *pdata;
+ struct clk *clk;
+@@ -469,6 +468,15 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
+ return pdata;
+ }
+
++static void adf4350_power_down(void *data)
++{
++ struct iio_dev *indio_dev = data;
++ struct adf4350_state *st = iio_priv(indio_dev);
++
++ st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
++ adf4350_sync_config(st);
++}
++
+ static int adf4350_probe(struct spi_device *spi)
+ {
+ struct adf4350_platform_data *pdata;
+@@ -491,31 +499,21 @@ static int adf4350_probe(struct spi_device *spi)
+ }
+
+ if (!pdata->clkin) {
+- clk = devm_clk_get(&spi->dev, "clkin");
++ clk = devm_clk_get_enabled(&spi->dev, "clkin");
+ if (IS_ERR(clk))
+- return -EPROBE_DEFER;
+-
+- ret = clk_prepare_enable(clk);
+- if (ret < 0)
+- return ret;
++ return PTR_ERR(clk);
+ }
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+- if (indio_dev == NULL) {
+- ret = -ENOMEM;
+- goto error_disable_clk;
+- }
++ if (indio_dev == NULL)
++ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+- st->reg = devm_regulator_get(&spi->dev, "vcc");
+- if (!IS_ERR(st->reg)) {
+- ret = regulator_enable(st->reg);
+- if (ret)
+- goto error_disable_clk;
+- }
++ ret = devm_regulator_get_enable(&spi->dev, "vcc");
++ if (ret)
++ return ret;
+
+- spi_set_drvdata(spi, indio_dev);
+ st->spi = spi;
+ st->pdata = pdata;
+
+@@ -544,47 +542,21 @@ static int adf4350_probe(struct spi_device *spi)
+
+ st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
+ GPIOD_IN);
+- if (IS_ERR(st->lock_detect_gpiod)) {
+- ret = PTR_ERR(st->lock_detect_gpiod);
+- goto error_disable_reg;
+- }
++ if (IS_ERR(st->lock_detect_gpiod))
++ return PTR_ERR(st->lock_detect_gpiod);
+
+ if (pdata->power_up_frequency) {
+ ret = adf4350_set_freq(st, pdata->power_up_frequency);
+ if (ret)
+- goto error_disable_reg;
++ return ret;
+ }
+
+- ret = iio_device_register(indio_dev);
++ ret = devm_add_action_or_reset(&spi->dev, adf4350_power_down, indio_dev);
+ if (ret)
+- goto error_disable_reg;
+-
+- return 0;
+-
+-error_disable_reg:
+- if (!IS_ERR(st->reg))
+- regulator_disable(st->reg);
+-error_disable_clk:
+- clk_disable_unprepare(clk);
+-
+- return ret;
+-}
+-
+-static void adf4350_remove(struct spi_device *spi)
+-{
+- struct iio_dev *indio_dev = spi_get_drvdata(spi);
+- struct adf4350_state *st = iio_priv(indio_dev);
+- struct regulator *reg = st->reg;
+-
+- st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
+- adf4350_sync_config(st);
+-
+- iio_device_unregister(indio_dev);
+-
+- clk_disable_unprepare(st->clk);
++ return dev_err_probe(&spi->dev, ret,
++ "Failed to add action to managed power down\n");
+
+- if (!IS_ERR(reg))
+- regulator_disable(reg);
++ return devm_iio_device_register(&spi->dev, indio_dev);
+ }
+
+ static const struct of_device_id adf4350_of_match[] = {
+@@ -607,7 +579,6 @@ static struct spi_driver adf4350_driver = {
+ .of_match_table = adf4350_of_match,
+ },
+ .probe = adf4350_probe,
+- .remove = adf4350_remove,
+ .id_table = adf4350_id,
+ };
+ module_spi_driver(adf4350_driver);
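The adf4350 conversion leans entirely on managed helpers so every error path
unwinds itself: devm_clk_get_enabled() ties the clock enable to the device
lifetime, devm_regulator_get_enable() does the same for the supply, and
devm_add_action_or_reset() registers the custom power-down (running it
immediately if registration fails), which is what lets remove() disappear. A
sketch of the action pattern with hypothetical names:

    static void demo_power_down(void *data)
    {
        struct demo_state *st = data;

        st->reg2 |= DEMO_POWER_DOWN_EN; /* hypothetical register bit */
        demo_sync_config(st);           /* hypothetical writeback helper */
    }

    ret = devm_add_action_or_reset(dev, demo_power_down, st);
    if (ret)
        return ret; /* demo_power_down() has already run */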
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index a666847bd7143..010718738d04c 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -804,7 +804,7 @@ static int alloc_port_data(struct ib_device *device)
+ * empty slots at the beginning.
+ */
+ pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
+- rdma_end_port(device) + 1),
++ size_add(rdma_end_port(device), 1)),
+ GFP_KERNEL);
+ if (!pdata_rcu)
+ return -ENOMEM;
+diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
+index 59179cfc20ef9..8175dde60b0a8 100644
+--- a/drivers/infiniband/core/sa_query.c
++++ b/drivers/infiniband/core/sa_query.c
+@@ -2159,7 +2159,9 @@ static int ib_sa_add_one(struct ib_device *device)
+ s = rdma_start_port(device);
+ e = rdma_end_port(device);
+
+- sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
++ sa_dev = kzalloc(struct_size(sa_dev, port,
++ size_add(size_sub(e, s), 1)),
++ GFP_KERNEL);
+ if (!sa_dev)
+ return -ENOMEM;
+
+diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
+index ee59d73915689..ec5efdc166601 100644
+--- a/drivers/infiniband/core/sysfs.c
++++ b/drivers/infiniband/core/sysfs.c
+@@ -903,7 +903,7 @@ alloc_hw_stats_device(struct ib_device *ibdev)
+ * Two extra attribute elements here, one for the lifespan entry and
+ * one to NULL terminate the list for the sysfs core code
+ */
+- data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
++ data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
+ GFP_KERNEL);
+ if (!data)
+ goto err_free_stats;
+@@ -1009,7 +1009,7 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
+ * Two extra attribute elements here, one for the lifespan entry and
+ * one to NULL terminate the list for the sysfs core code
+ */
+- data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
++ data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
+ GFP_KERNEL);
+ if (!data)
+ goto err_free_stats;
+@@ -1140,7 +1140,7 @@ static int setup_gid_attrs(struct ib_port *port,
+ int ret;
+
+ gid_attr_group = kzalloc(struct_size(gid_attr_group, attrs_list,
+- attr->gid_tbl_len * 2),
++ size_mul(attr->gid_tbl_len, 2)),
+ GFP_KERNEL);
+ if (!gid_attr_group)
+ return -ENOMEM;
+@@ -1205,8 +1205,8 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
+ int ret;
+
+ p = kvzalloc(struct_size(p, attrs_list,
+- attr->gid_tbl_len + attr->pkey_tbl_len),
+- GFP_KERNEL);
++ size_add(attr->gid_tbl_len, attr->pkey_tbl_len)),
++ GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ p->ibdev = device;
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index 7e5c33aad1619..f5feca7fa9b9c 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -1378,7 +1378,9 @@ static int ib_umad_add_one(struct ib_device *device)
+ s = rdma_start_port(device);
+ e = rdma_end_port(device);
+
+- umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL);
++ umad_dev = kzalloc(struct_size(umad_dev, ports,
++ size_add(size_sub(e, s), 1)),
++ GFP_KERNEL);
+ if (!umad_dev)
+ return -ENOMEM;
+
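The four RDMA hunks above replace open-coded arithmetic inside struct_size()
with the saturating helpers from <linux/overflow.h>: size_add(), size_sub()
and size_mul() clamp to SIZE_MAX on overflow, and an allocation of SIZE_MAX
fails cleanly instead of returning a buffer shorter than the code believes.
A compact sketch:

    #include <linux/overflow.h>

    size_t n = size_add(size_mul(nmemb, elem_size), 1); /* saturates at SIZE_MAX */
    void *p = kzalloc(n, GFP_KERNEL);                   /* SIZE_MAX -> NULL, never short */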
+diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
+index 7741a1d69097c..2b5d264f41e51 100644
+--- a/drivers/infiniband/hw/hfi1/efivar.c
++++ b/drivers/infiniband/hw/hfi1/efivar.c
+@@ -112,7 +112,7 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
+ unsigned long *size, void **return_data)
+ {
+ char prefix_name[64];
+- char name[64];
++ char name[128];
+ int result;
+
+ /* create a common prefix */
+diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
+index 08732e1ac9662..c132a9c073bff 100644
+--- a/drivers/infiniband/hw/hfi1/pcie.c
++++ b/drivers/infiniband/hw/hfi1/pcie.c
+@@ -3,6 +3,7 @@
+ * Copyright(c) 2015 - 2019 Intel Corporation.
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/pci.h>
+ #include <linux/io.h>
+ #include <linux/delay.h>
+@@ -210,12 +211,6 @@ static u32 extract_speed(u16 linkstat)
+ return speed;
+ }
+
+-/* return the PCIe link speed from the given link status */
+-static u32 extract_width(u16 linkstat)
+-{
+- return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
+-}
+-
+ /* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */
+ static void update_lbus_info(struct hfi1_devdata *dd)
+ {
+@@ -228,7 +223,7 @@ static void update_lbus_info(struct hfi1_devdata *dd)
+ return;
+ }
+
+- dd->lbus_width = extract_width(linkstat);
++ dd->lbus_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat);
+ dd->lbus_speed = extract_speed(linkstat);
+ snprintf(dd->lbus_info, sizeof(dd->lbus_info),
+ "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
+index e77fcc74f15c4..3df032ddda189 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
++++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
+@@ -33,7 +33,9 @@
+ #include <linux/pci.h>
+ #include <rdma/ib_addr.h>
+ #include <rdma/ib_cache.h>
++#include "hnae3.h"
+ #include "hns_roce_device.h"
++#include "hns_roce_hw_v2.h"
+
+ static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
+ {
+@@ -57,6 +59,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
+ struct hns_roce_ah *ah = to_hr_ah(ibah);
+ int ret = 0;
++ u32 max_sl;
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
+ return -EOPNOTSUPP;
+@@ -70,9 +73,17 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ ah->av.hop_limit = grh->hop_limit;
+ ah->av.flowlabel = grh->flow_label;
+ ah->av.udp_sport = get_ah_udp_sport(ah_attr);
+- ah->av.sl = rdma_ah_get_sl(ah_attr);
+ ah->av.tclass = get_tclass(grh);
+
++ ah->av.sl = rdma_ah_get_sl(ah_attr);
++ max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
++ if (unlikely(ah->av.sl > max_sl)) {
++ ibdev_err_ratelimited(&hr_dev->ib_dev,
++ "failed to set sl, sl (%u) shouldn't be larger than %u.\n",
++ ah->av.sl, max_sl);
++ return -EINVAL;
++ }
++
+ memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
+ memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index d82daff2d9bd5..58d14f1562b9a 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -270,7 +270,7 @@ static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
+ struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
+ int mtu = ib_mtu_enum_to_int(qp->path_mtu);
+
+- if (len > qp->max_inline_data || len > mtu) {
++ if (mtu < 0 || len > qp->max_inline_data || len > mtu) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
+ len, qp->max_inline_data, mtu);
+@@ -4725,6 +4725,9 @@ static int check_cong_type(struct ib_qp *ibqp,
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+
++ if (ibqp->qp_type == IB_QPT_UD)
++ hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
++
+ /* different congestion types match different configurations */
+ switch (hr_dev->caps.cong_type) {
+ case CONG_TYPE_DCQCN:
+@@ -4821,22 +4824,32 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ const struct ib_gid_attr *gid_attr = NULL;
++ u8 sl = rdma_ah_get_sl(&attr->ah_attr);
+ int is_roce_protocol;
+ u16 vlan_id = 0xffff;
+ bool is_udp = false;
++ u32 max_sl;
+ u8 ib_port;
+ u8 hr_port;
+ int ret;
+
++ max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
++ if (unlikely(sl > max_sl)) {
++ ibdev_err_ratelimited(ibdev,
++ "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
++ sl, max_sl);
++ return -EINVAL;
++ }
++
+ /*
+ * If free_mr_en of qp is set, it means that this qp comes from
+ * free mr. This qp will perform the loopback operation.
+ * In the loopback scenario, only sl needs to be set.
+ */
+ if (hr_qp->free_mr_en) {
+- hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr));
++ hr_reg_write(context, QPC_SL, sl);
+ hr_reg_clear(qpc_mask, QPC_SL);
+- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
++ hr_qp->sl = sl;
+ return 0;
+ }
+
+@@ -4903,14 +4916,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+ memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
+
+- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+- if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
+- ibdev_err(ibdev,
+- "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
+- hr_qp->sl, MAX_SERVICE_LEVEL);
+- return -EINVAL;
+- }
+-
++ hr_qp->sl = sl;
+ hr_reg_write(context, QPC_SL, hr_qp->sl);
+ hr_reg_clear(qpc_mask, QPC_SL);
+
+@@ -5804,7 +5810,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+- ibdev_warn(ibdev, "send queue drained.\n");
++ ibdev_dbg(ibdev, "send queue drained.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n",
+@@ -5819,10 +5825,10 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
+ irq_work->queue_num, irq_work->sub_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+- ibdev_warn(ibdev, "SRQ limit reach.\n");
++ ibdev_dbg(ibdev, "SRQ limit reach.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+- ibdev_warn(ibdev, "SRQ last wqe reach.\n");
++ ibdev_dbg(ibdev, "SRQ last wqe reach.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ ibdev_err(ibdev, "SRQ catas error.\n");
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index d9d546cdef525..4a9cd4d21bc99 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -547,17 +547,12 @@ static struct rdma_hw_stats *hns_roce_alloc_hw_port_stats(
+ struct ib_device *device, u32 port_num)
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(device);
+- u32 port = port_num - 1;
+
+- if (port > hr_dev->caps.num_ports) {
++ if (port_num > hr_dev->caps.num_ports) {
+ ibdev_err(device, "invalid port num.\n");
+ return NULL;
+ }
+
+- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 ||
+- hr_dev->is_vf)
+- return NULL;
+-
+ return rdma_alloc_hw_stats_struct(hns_roce_port_stats_descs,
+ ARRAY_SIZE(hns_roce_port_stats_descs),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+@@ -577,10 +572,6 @@ static int hns_roce_get_hw_stats(struct ib_device *device,
+ if (port > hr_dev->caps.num_ports)
+ return -EINVAL;
+
+- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 ||
+- hr_dev->is_vf)
+- return -EOPNOTSUPP;
+-
+ ret = hr_dev->hw->query_hw_counter(hr_dev, stats->value, port,
+ &num_counters);
+ if (ret) {
+@@ -634,8 +625,6 @@ static const struct ib_device_ops hns_roce_dev_ops = {
+ .query_pkey = hns_roce_query_pkey,
+ .query_port = hns_roce_query_port,
+ .reg_user_mr = hns_roce_reg_user_mr,
+- .alloc_hw_port_stats = hns_roce_alloc_hw_port_stats,
+- .get_hw_stats = hns_roce_get_hw_stats,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
+@@ -644,6 +633,11 @@ static const struct ib_device_ops hns_roce_dev_ops = {
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
+ };
+
++static const struct ib_device_ops hns_roce_dev_hw_stats_ops = {
++ .alloc_hw_port_stats = hns_roce_alloc_hw_port_stats,
++ .get_hw_stats = hns_roce_get_hw_stats,
++};
++
+ static const struct ib_device_ops hns_roce_dev_mr_ops = {
+ .rereg_user_mr = hns_roce_rereg_user_mr,
+ };
+@@ -720,6 +714,10 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops);
+
++ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09 &&
++ !hr_dev->is_vf)
++ ib_set_device_ops(ib_dev, &hns_roce_dev_hw_stats_ops);
++
+ ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
+ ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
+ ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index cdc1c6de43a17..828b58534aa97 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -1064,7 +1064,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ {
+ struct hns_roce_ib_create_qp_resp resp = {};
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+- struct hns_roce_ib_create_qp ucmd;
++ struct hns_roce_ib_create_qp ucmd = {};
+ int ret;
+
+ mutex_init(&hr_qp->mutex);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 555629b798b95..5d963abb7e609 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -4071,10 +4071,8 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
+ return ret;
+
+ ret = mlx5_mkey_cache_init(dev);
+- if (ret) {
++ if (ret)
+ mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
+- mlx5r_umr_resource_cleanup(dev);
+- }
+ return ret;
+ }
+
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 78b96bfb4e6ac..2340baaba8e67 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -4045,6 +4045,30 @@ static unsigned int get_tx_affinity(struct ib_qp *qp,
+ return tx_affinity;
+ }
+
++static int __mlx5_ib_qp_set_raw_qp_counter(struct mlx5_ib_qp *qp, u32 set_id,
++ struct mlx5_core_dev *mdev)
++{
++ struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
++ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
++ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {};
++ void *rqc;
++
++ if (!qp->rq.wqe_cnt)
++ return 0;
++
++ MLX5_SET(modify_rq_in, in, rq_state, rq->state);
++ MLX5_SET(modify_rq_in, in, uid, to_mpd(qp->ibqp.pd)->uid);
++
++ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
++ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
++
++ MLX5_SET64(modify_rq_in, in, modify_bitmask,
++ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
++ MLX5_SET(rqc, rqc, counter_set_id, set_id);
++
++ return mlx5_core_modify_rq(mdev, rq->base.mqp.qpn, in);
++}
++
+ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
+ struct rdma_counter *counter)
+ {
+@@ -4060,6 +4084,9 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
+ else
+ set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
+
++ if (mqp->type == IB_QPT_RAW_PACKET)
++ return __mlx5_ib_qp_set_raw_qp_counter(mqp, set_id, dev->mdev);
++
+ base = &mqp->trans_qp.base;
+ MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP);
+ MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn);
+diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
+index f2e093b0b9982..1b45b1d3077de 100644
+--- a/drivers/input/rmi4/rmi_bus.c
++++ b/drivers/input/rmi4/rmi_bus.c
+@@ -277,11 +277,11 @@ void rmi_unregister_function(struct rmi_function *fn)
+
+ device_del(&fn->dev);
+ of_node_put(fn->dev.of_node);
+- put_device(&fn->dev);
+
+ for (i = 0; i < fn->num_of_irqs; i++)
+ irq_dispose_mapping(fn->irq[i]);
+
++ put_device(&fn->dev);
+ }
+
+ /**
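The rmi_bus reorder fixes a use-after-free: put_device() may drop the last
reference and free fn, so it must come after the final access to fn->irq[]
and fn->num_of_irqs. The general rule, sketched with a hypothetical
refcounted object whose release() kfree()s it:

    device_del(&obj->dev);

    for (i = 0; i < obj->nirqs; i++)    /* safe: we still hold a reference */
        irq_dispose_mapping(obj->irq[i]);

    put_device(&obj->dev);              /* may free obj: last access above */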
+diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
+index 2c16917ba1fda..e76356f91125f 100644
+--- a/drivers/interconnect/qcom/icc-rpm.c
++++ b/drivers/interconnect/qcom/icc-rpm.c
+@@ -497,7 +497,7 @@ regmap_done:
+
+ ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks);
+ if (ret)
+- return ret;
++ goto err_disable_unprepare_clk;
+
+ provider = &qp->provider;
+ provider->dev = dev;
+@@ -512,13 +512,15 @@ regmap_done:
+ /* If this fails, bus accesses will crash the platform! */
+ ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
+ if (ret)
+- return ret;
++ goto err_disable_unprepare_clk;
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
++ clk_bulk_disable_unprepare(qp->num_intf_clks,
++ qp->intf_clks);
+ ret = PTR_ERR(node);
+ goto err_remove_nodes;
+ }
+@@ -534,8 +536,11 @@ regmap_done:
+ if (qnodes[i]->qos.ap_owned &&
+ qnodes[i]->qos.qos_mode != NOC_QOS_MODE_INVALID) {
+ ret = qcom_icc_qos_set(node);
+- if (ret)
+- return ret;
++ if (ret) {
++ clk_bulk_disable_unprepare(qp->num_intf_clks,
++ qp->intf_clks);
++ goto err_remove_nodes;
++ }
+ }
+
+ data->nodes[i] = node;
+@@ -563,6 +568,7 @@ err_deregister_provider:
+ icc_provider_deregister(provider);
+ err_remove_nodes:
+ icc_nodes_remove(provider);
++err_disable_unprepare_clk:
+ clk_disable_unprepare(qp->bus_clk);
+
+ return ret;
+diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
+index dc321bb86d0be..e97478bbc2825 100644
+--- a/drivers/interconnect/qcom/osm-l3.c
++++ b/drivers/interconnect/qcom/osm-l3.c
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
++#include <linux/args.h>
+ #include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/interconnect-provider.h>
+@@ -78,7 +79,7 @@ enum {
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+- .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
++ .num_links = COUNT_ARGS(__VA_ARGS__), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+diff --git a/drivers/interconnect/qcom/qdu1000.c b/drivers/interconnect/qcom/qdu1000.c
+index bf800dd7d4ba1..a7392eb73d4a9 100644
+--- a/drivers/interconnect/qcom/qdu1000.c
++++ b/drivers/interconnect/qcom/qdu1000.c
+@@ -769,6 +769,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+ };
+diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
+index d94ab9b39f3db..af2be15438403 100644
+--- a/drivers/interconnect/qcom/sc7180.c
++++ b/drivers/interconnect/qcom/sc7180.c
+@@ -1238,6 +1238,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
+index 6592839b4d94b..a626dbc719995 100644
+--- a/drivers/interconnect/qcom/sc7280.c
++++ b/drivers/interconnect/qcom/sc7280.c
+@@ -1285,6 +1285,7 @@ static struct qcom_icc_node srvc_snoc = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+ };
+diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
+index 0fb4898dabcfe..bdd3471d4ac89 100644
+--- a/drivers/interconnect/qcom/sc8180x.c
++++ b/drivers/interconnect/qcom/sc8180x.c
+@@ -1345,6 +1345,7 @@ static struct qcom_icc_node slv_qup_core_2 = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &slv_ebi }
+ };
+diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
+index b82c5493cbb56..0270f6c64481a 100644
+--- a/drivers/interconnect/qcom/sc8280xp.c
++++ b/drivers/interconnect/qcom/sc8280xp.c
+@@ -1712,6 +1712,7 @@ static struct qcom_icc_node srvc_snoc = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+ };
+diff --git a/drivers/interconnect/qcom/sdm670.c b/drivers/interconnect/qcom/sdm670.c
+index 540a2108b77c1..907e1ff4ff817 100644
+--- a/drivers/interconnect/qcom/sdm670.c
++++ b/drivers/interconnect/qcom/sdm670.c
+@@ -1047,6 +1047,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
+index b9243c0aa626c..855802be93fea 100644
+--- a/drivers/interconnect/qcom/sdm845.c
++++ b/drivers/interconnect/qcom/sdm845.c
+@@ -1265,6 +1265,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+diff --git a/drivers/interconnect/qcom/sm6350.c b/drivers/interconnect/qcom/sm6350.c
+index 49aed492e9b80..f41d7e19ba269 100644
+--- a/drivers/interconnect/qcom/sm6350.c
++++ b/drivers/interconnect/qcom/sm6350.c
+@@ -1164,6 +1164,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
+index c7c9cf7f746b0..edfe824cad353 100644
+--- a/drivers/interconnect/qcom/sm8150.c
++++ b/drivers/interconnect/qcom/sm8150.c
+@@ -1282,6 +1282,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
+index d4a4ecef11f01..661dc18d99dba 100644
+--- a/drivers/interconnect/qcom/sm8250.c
++++ b/drivers/interconnect/qcom/sm8250.c
+@@ -1397,6 +1397,7 @@ static struct qcom_icc_node qup2_core_slave = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
+index bdf75839e6d17..562322d4fc3c4 100644
+--- a/drivers/interconnect/qcom/sm8350.c
++++ b/drivers/interconnect/qcom/sm8350.c
+@@ -1356,6 +1356,7 @@ static struct qcom_icc_node qns_mem_noc_sf_disp = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index a3414afe11b07..23cb80d62a9ab 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -1522,6 +1522,15 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ {
+ struct qi_desc desc;
+
++ /*
++ * VT-d spec, section 4.3:
++ *
++ * Software is recommended to not submit any Device-TLB invalidation
++ * requests while address remapping hardware is disabled.
++ */
++ if (!(iommu->gcmd & DMA_GCMD_TE))
++ return;
++
+ if (mask) {
+ addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+ desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+@@ -1587,6 +1596,15 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
+ struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
+
++ /*
++ * VT-d spec, section 4.3:
++ *
++ * Software is recommended to not submit any Device-TLB invalidation
++ * requests while address remapping hardware is disabled.
++ */
++ if (!(iommu->gcmd & DMA_GCMD_TE))
++ return;
++
+ desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
+ QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
+ QI_DEV_IOTLB_PFSID(pfsid);
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 3685ba90ec88e..4c3707384bd92 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -2487,7 +2487,8 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
+ return ret;
+ }
+
+- iommu_enable_pci_caps(info);
++ if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
++ iommu_enable_pci_caps(info);
+
+ return 0;
+ }
+@@ -3922,8 +3923,8 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
+ */
+ static void domain_context_clear(struct device_domain_info *info)
+ {
+- if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
+- return;
++ if (!dev_is_pci(info->dev))
++ domain_context_clear_one(info, info->bus, info->devfn);
+
+ pci_for_each_dma_alias(to_pci_dev(info->dev),
+ &domain_context_clear_one_cb, info);
+@@ -4928,7 +4929,7 @@ static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
+ ver = (dev->device >> 8) & 0xff;
+ if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
+ ver != 0x4e && ver != 0x8a && ver != 0x98 &&
+- ver != 0x9a && ver != 0xa7)
++ ver != 0x9a && ver != 0xa7 && ver != 0x7d)
+ return;
+
+ if (risky_device(dev))
+diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
+index 50a481c895b86..ac12f76c1212a 100644
+--- a/drivers/iommu/intel/svm.c
++++ b/drivers/iommu/intel/svm.c
+@@ -216,6 +216,27 @@ static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
+ rcu_read_unlock();
+ }
+
++static void intel_flush_svm_all(struct intel_svm *svm)
++{
++ struct device_domain_info *info;
++ struct intel_svm_dev *sdev;
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(sdev, &svm->devs, list) {
++ info = dev_iommu_priv_get(sdev->dev);
++
++ qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
++ if (info->ats_enabled) {
++ qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
++ svm->pasid, sdev->qdep,
++ 0, 64 - VTD_PAGE_SHIFT);
++ quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
++ svm->pasid, sdev->qdep);
++ }
++ }
++ rcu_read_unlock();
++}
++
+ /* Pages have been freed at this point */
+ static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+@@ -223,6 +244,11 @@ static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
+ {
+ struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+
++ if (start == 0 && end == -1UL) {
++ intel_flush_svm_all(svm);
++ return;
++ }
++
+ intel_flush_svm_range(svm, start,
+ (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
+ }
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index c146378c7d032..3a67e636287a7 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -479,11 +479,12 @@ static void iommu_deinit_device(struct device *dev)
+ dev_iommu_free(dev);
+ }
+
++DEFINE_MUTEX(iommu_probe_device_lock);
++
+ static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
+ {
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+ struct iommu_group *group;
+- static DEFINE_MUTEX(iommu_probe_device_lock);
+ struct group_device *gdev;
+ int ret;
+
+@@ -496,17 +497,15 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
+ * probably be able to use device_lock() here to minimise the scope,
+ * but for now enforcing a simple global ordering is fine.
+ */
+- mutex_lock(&iommu_probe_device_lock);
++ lockdep_assert_held(&iommu_probe_device_lock);
+
+ /* Device is probed already if in a group */
+- if (dev->iommu_group) {
+- ret = 0;
+- goto out_unlock;
+- }
++ if (dev->iommu_group)
++ return 0;
+
+ ret = iommu_init_device(dev, ops);
+ if (ret)
+- goto out_unlock;
++ return ret;
+
+ group = dev->iommu_group;
+ gdev = iommu_group_alloc_device(group, dev);
+@@ -542,7 +541,6 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
+ list_add_tail(&group->entry, group_list);
+ }
+ mutex_unlock(&group->mutex);
+- mutex_unlock(&iommu_probe_device_lock);
+
+ if (dev_is_pci(dev))
+ iommu_dma_set_pci_32bit_workaround(dev);
+@@ -556,8 +554,6 @@ err_put_group:
+ iommu_deinit_device(dev);
+ mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+-out_unlock:
+- mutex_unlock(&iommu_probe_device_lock);
+
+ return ret;
+ }
+@@ -567,7 +563,9 @@ int iommu_probe_device(struct device *dev)
+ const struct iommu_ops *ops;
+ int ret;
+
++ mutex_lock(&iommu_probe_device_lock);
+ ret = __iommu_probe_device(dev, NULL);
++ mutex_unlock(&iommu_probe_device_lock);
+ if (ret)
+ return ret;
+
+@@ -1783,7 +1781,9 @@ static int probe_iommu_group(struct device *dev, void *data)
+ struct list_head *group_list = data;
+ int ret;
+
++ mutex_lock(&iommu_probe_device_lock);
+ ret = __iommu_probe_device(dev, group_list);
++ mutex_unlock(&iommu_probe_device_lock);
+ if (ret == -ENODEV)
+ ret = 0;
+
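
Note: the iommu.c change hoists iommu_probe_device_lock out of __iommu_probe_device() so each caller — iommu_probe_device(), probe_iommu_group(), and later of_iommu_configure() — takes it explicitly, while the helper documents the requirement with lockdep_assert_held(). The general shape of that refactor, with hypothetical names:

	/* Sketch: callers own the lock, the helper only asserts it. */
	static DEFINE_MUTEX(probe_lock);

	static int __probe_one(struct device *dev)
	{
		lockdep_assert_held(&probe_lock);	/* catches unlocked callers */
		/* ... work that relies on external serialisation ... */
		return 0;
	}

	int probe_one(struct device *dev)
	{
		int ret;

		mutex_lock(&probe_lock);
		ret = __probe_one(dev);
		mutex_unlock(&probe_lock);
		return ret;
	}
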
+diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
+index 3a598182b7619..117a39ae2e4aa 100644
+--- a/drivers/iommu/iommufd/io_pagetable.c
++++ b/drivers/iommu/iommufd/io_pagetable.c
+@@ -221,6 +221,18 @@ static int iopt_insert_area(struct io_pagetable *iopt, struct iopt_area *area,
+ return 0;
+ }
+
++static struct iopt_area *iopt_area_alloc(void)
++{
++ struct iopt_area *area;
++
++ area = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
++ if (!area)
++ return NULL;
++ RB_CLEAR_NODE(&area->node.rb);
++ RB_CLEAR_NODE(&area->pages_node.rb);
++ return area;
++}
++
+ static int iopt_alloc_area_pages(struct io_pagetable *iopt,
+ struct list_head *pages_list,
+ unsigned long length, unsigned long *dst_iova,
+@@ -231,7 +243,7 @@ static int iopt_alloc_area_pages(struct io_pagetable *iopt,
+ int rc = 0;
+
+ list_for_each_entry(elm, pages_list, next) {
+- elm->area = kzalloc(sizeof(*elm->area), GFP_KERNEL_ACCOUNT);
++ elm->area = iopt_area_alloc();
+ if (!elm->area)
+ return -ENOMEM;
+ }
+@@ -1005,11 +1017,11 @@ static int iopt_area_split(struct iopt_area *area, unsigned long iova)
+ iopt_area_start_byte(area, new_start) & (alignment - 1))
+ return -EINVAL;
+
+- lhs = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
++ lhs = iopt_area_alloc();
+ if (!lhs)
+ return -ENOMEM;
+
+- rhs = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
++ rhs = iopt_area_alloc();
+ if (!rhs) {
+ rc = -ENOMEM;
+ goto err_free_lhs;
+@@ -1048,6 +1060,16 @@ static int iopt_area_split(struct iopt_area *area, unsigned long iova)
+ if (WARN_ON(rc))
+ goto err_remove_lhs;
+
++ /*
++ * If the original area has filled a domain, domains_itree has to be
++ * updated.
++ */
++ if (area->storage_domain) {
++ interval_tree_remove(&area->pages_node, &pages->domains_itree);
++ interval_tree_insert(&lhs->pages_node, &pages->domains_itree);
++ interval_tree_insert(&rhs->pages_node, &pages->domains_itree);
++ }
++
+ lhs->storage_domain = area->storage_domain;
+ lhs->pages = area->pages;
+ rhs->storage_domain = area->storage_domain;
+diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
+index 8d9aa297c117e..528f356238b34 100644
+--- a/drivers/iommu/iommufd/pages.c
++++ b/drivers/iommu/iommufd/pages.c
+@@ -1507,6 +1507,8 @@ void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages)
+ area, domain, iopt_area_index(area),
+ iopt_area_last_index(area));
+
++ if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
++ WARN_ON(RB_EMPTY_NODE(&area->pages_node.rb));
+ interval_tree_remove(&area->pages_node, &pages->domains_itree);
+ iopt_area_unfill_domain(area, pages, area->storage_domain);
+ area->storage_domain = NULL;
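
Note: the two iommufd hunks work together: iopt_area_alloc() initialises both rb nodes with RB_CLEAR_NODE() so the new WARN_ON(RB_EMPTY_NODE(...)) check above is meaningful. A kzalloc()ed node does not count as empty, because emptiness is encoded as a self-pointing parent. For reference, from <linux/rbtree.h>:

	#define RB_EMPTY_NODE(node)  \
		((node)->__rb_parent_color == (unsigned long)(node))
	#define RB_CLEAR_NODE(node)  \
		((node)->__rb_parent_color = (unsigned long)(node))
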
+diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
+index 157b286e36bf3..35ba090f3b5e2 100644
+--- a/drivers/iommu/of_iommu.c
++++ b/drivers/iommu/of_iommu.c
+@@ -112,16 +112,20 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
+ const u32 *id)
+ {
+ const struct iommu_ops *ops = NULL;
+- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
++ struct iommu_fwspec *fwspec;
+ int err = NO_IOMMU;
+
+ if (!master_np)
+ return NULL;
+
++ /* Serialise to make dev->iommu stable under our potential fwspec */
++ mutex_lock(&iommu_probe_device_lock);
++ fwspec = dev_iommu_fwspec_get(dev);
+ if (fwspec) {
+- if (fwspec->ops)
++ if (fwspec->ops) {
++ mutex_unlock(&iommu_probe_device_lock);
+ return fwspec->ops;
+-
++ }
+ /* In the deferred case, start again from scratch */
+ iommu_fwspec_free(dev);
+ }
+@@ -155,6 +159,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
+ fwspec = dev_iommu_fwspec_get(dev);
+ ops = fwspec->ops;
+ }
++ mutex_unlock(&iommu_probe_device_lock);
++
+ /*
+ * If we have reason to believe the IOMMU driver missed the initial
+ * probe for dev, replay it to get things in order.
+@@ -191,7 +197,7 @@ iommu_resv_region_get_type(struct device *dev,
+ if (start == phys->start && end == phys->end)
+ return IOMMU_RESV_DIRECT;
+
+- dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", &phys,
++ dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", phys,
+ &start, &end);
+ return IOMMU_RESV_RESERVED;
+ }
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index a8c89df1a9978..9a7a74239eabb 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -2379,12 +2379,12 @@ retry_baser:
+ break;
+ }
+
++ if (!shr)
++ gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
++
+ its_write_baser(its, baser, val);
+ tmp = baser->val;
+
+- if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
+- tmp &= ~GITS_BASER_SHAREABILITY_MASK;
+-
+ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+ /*
+ * Shareability didn't stick. Just use
+@@ -2394,10 +2394,9 @@ retry_baser:
+ * non-cacheable as well.
+ */
+ shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+- if (!shr) {
++ if (!shr)
+ cache = GITS_BASER_nC;
+- gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
+- }
++
+ goto retry_baser;
+ }
+
+@@ -2609,6 +2608,11 @@ static int its_alloc_tables(struct its_node *its)
+ /* erratum 24313: ignore memory access type */
+ cache = GITS_BASER_nCnB;
+
++ if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) {
++ cache = GITS_BASER_nC;
++ shr = 0;
++ }
++
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ struct its_baser *baser = its->tables + i;
+ u64 val = its_read_baser(its, baser);
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index e1484905b7bdb..5b7bc4fd9517c 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -532,17 +532,18 @@ done:
+ }
+
+ /*
+- * We can have multiple PLIC instances so setup cpuhp state only
+- * when context handler for current/boot CPU is present.
++ * We can have multiple PLIC instances, so set up the cpuhp state
++ * and register syscore operations only when the context handler
++ * for the current/boot CPU is present.

+ */
+ handler = this_cpu_ptr(&plic_handlers);
+ if (handler->present && !plic_cpuhp_setup_done) {
+ cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ "irqchip/sifive/plic:starting",
+ plic_starting_cpu, plic_dying_cpu);
++ register_syscore_ops(&plic_irq_syscore_ops);
+ plic_cpuhp_setup_done = true;
+ }
+- register_syscore_ops(&plic_irq_syscore_ops);
+
+ pr_info("%pOFP: mapped %d interrupts with %d handlers for"
+ " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
+diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
+index 974b84f6bd6af..ba1be15cfd8ea 100644
+--- a/drivers/leds/led-class.c
++++ b/drivers/leds/led-class.c
+@@ -75,19 +75,6 @@ static ssize_t max_brightness_show(struct device *dev,
+ }
+ static DEVICE_ATTR_RO(max_brightness);
+
+-static ssize_t color_show(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- const char *color_text = "invalid";
+- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+-
+- if (led_cdev->color < LED_COLOR_ID_MAX)
+- color_text = led_colors[led_cdev->color];
+-
+- return sysfs_emit(buf, "%s\n", color_text);
+-}
+-static DEVICE_ATTR_RO(color);
+-
+ #ifdef CONFIG_LEDS_TRIGGERS
+ static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
+ static struct bin_attribute *led_trigger_bin_attrs[] = {
+@@ -102,7 +89,6 @@ static const struct attribute_group led_trigger_group = {
+ static struct attribute *led_class_attrs[] = {
+ &dev_attr_brightness.attr,
+ &dev_attr_max_brightness.attr,
+- &dev_attr_color.attr,
+ NULL,
+ };
+
+diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
+index 419b710984ab6..2b3bf1353b707 100644
+--- a/drivers/leds/leds-pwm.c
++++ b/drivers/leds/leds-pwm.c
+@@ -53,7 +53,7 @@ static int led_pwm_set(struct led_classdev *led_cdev,
+ duty = led_dat->pwmstate.period - duty;
+
+ led_dat->pwmstate.duty_cycle = duty;
+- led_dat->pwmstate.enabled = duty > 0;
++ led_dat->pwmstate.enabled = true;
+ return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate);
+ }
+
+diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
+index b8a95a917cfa4..b13a547e72c49 100644
+--- a/drivers/leds/leds-turris-omnia.c
++++ b/drivers/leds/leds-turris-omnia.c
+@@ -2,7 +2,7 @@
+ /*
+ * CZ.NIC's Turris Omnia LEDs driver
+ *
+- * 2020 by Marek Behún <kabel@kernel.org>
++ * 2020, 2023 by Marek Behún <kabel@kernel.org>
+ */
+
+ #include <linux/i2c.h>
+@@ -41,6 +41,37 @@ struct omnia_leds {
+ struct omnia_led leds[];
+ };
+
++static int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd, u8 val)
++{
++ u8 buf[2] = { cmd, val };
++
++ return i2c_master_send(client, buf, sizeof(buf));
++}
++
++static int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd)
++{
++ struct i2c_msg msgs[2];
++ u8 reply;
++ int ret;
++
++ msgs[0].addr = client->addr;
++ msgs[0].flags = 0;
++ msgs[0].len = 1;
++ msgs[0].buf = &cmd;
++ msgs[1].addr = client->addr;
++ msgs[1].flags = I2C_M_RD;
++ msgs[1].len = 1;
++ msgs[1].buf = &reply;
++
++ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
++ if (likely(ret == ARRAY_SIZE(msgs)))
++ return reply;
++ else if (ret < 0)
++ return ret;
++ else
++ return -EIO;
++}
++
+ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
+ enum led_brightness brightness)
+ {
+@@ -64,7 +95,7 @@ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
+ if (buf[2] || buf[3] || buf[4])
+ state |= CMD_LED_STATE_ON;
+
+- ret = i2c_smbus_write_byte_data(leds->client, CMD_LED_STATE, state);
++ ret = omnia_cmd_write_u8(leds->client, CMD_LED_STATE, state);
+ if (ret >= 0 && (state & CMD_LED_STATE_ON))
+ ret = i2c_master_send(leds->client, buf, 5);
+
+@@ -114,9 +145,9 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
+ cdev->brightness_set_blocking = omnia_led_brightness_set_blocking;
+
+ /* put the LED into software mode */
+- ret = i2c_smbus_write_byte_data(client, CMD_LED_MODE,
+- CMD_LED_MODE_LED(led->reg) |
+- CMD_LED_MODE_USER);
++ ret = omnia_cmd_write_u8(client, CMD_LED_MODE,
++ CMD_LED_MODE_LED(led->reg) |
++ CMD_LED_MODE_USER);
+ if (ret < 0) {
+ dev_err(dev, "Cannot set LED %pOF to software mode: %i\n", np,
+ ret);
+@@ -124,8 +155,8 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
+ }
+
+ /* disable the LED */
+- ret = i2c_smbus_write_byte_data(client, CMD_LED_STATE,
+- CMD_LED_STATE_LED(led->reg));
++ ret = omnia_cmd_write_u8(client, CMD_LED_STATE,
++ CMD_LED_STATE_LED(led->reg));
+ if (ret < 0) {
+ dev_err(dev, "Cannot set LED %pOF brightness: %i\n", np, ret);
+ return ret;
+@@ -158,7 +189,7 @@ static ssize_t brightness_show(struct device *dev, struct device_attribute *a,
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+
+- ret = i2c_smbus_read_byte_data(client, CMD_LED_GET_BRIGHTNESS);
++ ret = omnia_cmd_read_u8(client, CMD_LED_GET_BRIGHTNESS);
+
+ if (ret < 0)
+ return ret;
+@@ -179,8 +210,7 @@ static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
+ if (brightness > 100)
+ return -EINVAL;
+
+- ret = i2c_smbus_write_byte_data(client, CMD_LED_SET_BRIGHTNESS,
+- (u8)brightness);
++ ret = omnia_cmd_write_u8(client, CMD_LED_SET_BRIGHTNESS, brightness);
+
+ return ret < 0 ? ret : count;
+ }
+@@ -237,8 +267,8 @@ static void omnia_leds_remove(struct i2c_client *client)
+ u8 buf[5];
+
+ /* put all LEDs into default (HW triggered) mode */
+- i2c_smbus_write_byte_data(client, CMD_LED_MODE,
+- CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
++ omnia_cmd_write_u8(client, CMD_LED_MODE,
++ CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
+
+ /* set all LEDs color to [255, 255, 255] */
+ buf[0] = CMD_LED_COLOR;
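
Note: the new omnia_cmd_write_u8()/omnia_cmd_read_u8() helpers replace the i2c_smbus_* byte-data calls with raw i2c_master_send()/i2c_transfer(), presumably so the driver no longer depends on SMBus emulation in the underlying controller. A usage sketch of the helpers introduced above; the brightness value is illustrative:

	int omnia_example(struct i2c_client *client)
	{
		int ret;

		ret = omnia_cmd_write_u8(client, CMD_LED_SET_BRIGHTNESS, 50);
		if (ret < 0)
			return ret;

		ret = omnia_cmd_read_u8(client, CMD_LED_GET_BRIGHTNESS);
		if (ret < 0)
			return ret;	/* negative errno from the transfer */

		return ret;		/* otherwise the brightness byte */
	}
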
+diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
+index 8af4f9bb9cde8..05848a2fecff6 100644
+--- a/drivers/leds/trigger/ledtrig-cpu.c
++++ b/drivers/leds/trigger/ledtrig-cpu.c
+@@ -130,7 +130,7 @@ static int ledtrig_prepare_down_cpu(unsigned int cpu)
+
+ static int __init ledtrig_cpu_init(void)
+ {
+- int cpu;
++ unsigned int cpu;
+ int ret;
+
+ /* Supports up to 9999 cpu cores */
+@@ -152,7 +152,7 @@ static int __init ledtrig_cpu_init(void)
+ if (cpu >= 8)
+ continue;
+
+- snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
++ snprintf(trig->name, MAX_NAME_LEN, "cpu%u", cpu);
+
+ led_trigger_register_simple(trig->name, &trig->_trig);
+ }
+diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
+index 58f3352539e8e..e358e77e4b38f 100644
+--- a/drivers/leds/trigger/ledtrig-netdev.c
++++ b/drivers/leds/trigger/ledtrig-netdev.c
+@@ -221,6 +221,9 @@ static ssize_t device_name_show(struct device *dev,
+ static int set_device_name(struct led_netdev_data *trigger_data,
+ const char *name, size_t size)
+ {
++ if (size >= IFNAMSIZ)
++ return -EINVAL;
++
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ mutex_lock(&trigger_data->lock);
+@@ -263,9 +266,6 @@ static ssize_t device_name_store(struct device *dev,
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
+ int ret;
+
+- if (size >= IFNAMSIZ)
+- return -EINVAL;
+-
+ ret = set_device_name(trigger_data, buf, size);
+
+ if (ret < 0)
+diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
+index 0cac5bead84fa..d4eec09009809 100644
+--- a/drivers/mcb/mcb-core.c
++++ b/drivers/mcb/mcb-core.c
+@@ -246,6 +246,7 @@ int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
+ return 0;
+
+ out:
++ put_device(&dev->dev);
+
+ return ret;
+ }
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index 656b6b71c7682..1ae37e693de04 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -106,7 +106,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
+ return 0;
+
+ err:
+- put_device(&mdev->dev);
++ mcb_free_dev(mdev);
+
+ return ret;
+ }
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index fd121a61f17cc..3084c57248f69 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1363,7 +1363,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ memset(new_nodes, 0, sizeof(new_nodes));
+ closure_init_stack(&cl);
+
+- while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
++ while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
+ keys += r[nodes++].keys;
+
+ blocks = btree_default_blocks(b->c) * 2 / 3;
+@@ -1510,7 +1510,7 @@ out_nocoalesce:
+ bch_keylist_free(&keylist);
+
+ for (i = 0; i < nodes; i++)
+- if (!IS_ERR(new_nodes[i])) {
++ if (!IS_ERR_OR_NULL(new_nodes[i])) {
+ btree_node_free(new_nodes[i]);
+ rw_unlock(true, new_nodes[i]);
+ }
+@@ -1527,6 +1527,8 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
+ return 0;
+
+ n = btree_node_alloc_replacement(replace, NULL);
++ if (IS_ERR(n))
++ return 0;
+
+ /* recheck reserve after allocating replacement node */
+ if (btree_check_reserve(b, NULL)) {
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index 0e2c1880f60b2..18ac98dc89223 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -1103,7 +1103,7 @@ SHOW(__bch_cache)
+ sum += INITIAL_PRIO - cached[i];
+
+ if (n)
+- do_div(sum, n);
++ sum = div64_u64(sum, n);
+
+ for (i = 0; i < ARRAY_SIZE(q); i++)
+ q[i] = INITIAL_PRIO - cached[n * (i + 1) /
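
Note on the sysfs fix above: do_div(x, y) updates x in place but takes a 32-bit divisor, so a 64-bit n was silently truncated; div64_u64() divides two full 64-bit values. In brief:

	/*
	 * do_div(x, y)    - x is u64, y must fit in u32; x is updated in
	 *                   place and the remainder is returned.
	 * div64_u64(x, y) - both operands are u64; the quotient is returned.
	 */
	static u64 avg_of(u64 sum, u64 n)
	{
		return n ? div64_u64(sum, n) : 0;
	}
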
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 24c049067f61a..d4432b3a6f96e 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -977,24 +977,35 @@ static int bch_btre_dirty_init_thread_nr(void)
+ void bch_sectors_dirty_init(struct bcache_device *d)
+ {
+ int i;
++ struct btree *b = NULL;
+ struct bkey *k = NULL;
+ struct btree_iter iter;
+ struct sectors_dirty_init op;
+ struct cache_set *c = d->c;
+ struct bch_dirty_init_state state;
+
++retry_lock:
++ b = c->root;
++ rw_lock(0, b, b->level);
++ if (b != c->root) {
++ rw_unlock(0, b);
++ goto retry_lock;
++ }
++
+ /* Just count root keys if no leaf node */
+- rw_lock(0, c->root, c->root->level);
+ if (c->root->level == 0) {
+ bch_btree_op_init(&op.op, -1);
+ op.inode = d->id;
+ op.count = 0;
+
+ for_each_key_filter(&c->root->keys,
+- k, &iter, bch_ptr_invalid)
++ k, &iter, bch_ptr_invalid) {
++ if (KEY_INODE(k) != op.inode)
++ continue;
+ sectors_dirty_init_fn(&op.op, c->root, k);
++ }
+
+- rw_unlock(0, c->root);
++ rw_unlock(0, b);
+ return;
+ }
+
+@@ -1014,23 +1025,24 @@ void bch_sectors_dirty_init(struct bcache_device *d)
+ if (atomic_read(&state.enough))
+ break;
+
++ atomic_inc(&state.started);
+ state.infos[i].state = &state;
+ state.infos[i].thread =
+ kthread_run(bch_dirty_init_thread, &state.infos[i],
+ "bch_dirtcnt[%d]", i);
+ if (IS_ERR(state.infos[i].thread)) {
+ pr_err("fails to run thread bch_dirty_init[%d]\n", i);
++ atomic_dec(&state.started);
+ for (--i; i >= 0; i--)
+ kthread_stop(state.infos[i].thread);
+ goto out;
+ }
+- atomic_inc(&state.started);
+ }
+
+ out:
+ /* Must wait for all threads to stop. */
+ wait_event(state.wait, atomic_read(&state.started) == 0);
+- rw_unlock(0, c->root);
++ rw_unlock(0, b);
+ }
+
+ void bch_cached_dev_writeback_init(struct cached_dev *dc)
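
Note: bch_sectors_dirty_init() now pins the root node with a retry loop, because c->root can change while the lock is being awaited; the function must also unlock through the pointer it locked (b), not through a fresh read of c->root. The idiom in isolation, using the hunk's own names:

	struct btree *b;

	retry_lock:
		b = c->root;
		rw_lock(0, b, b->level);
		if (b != c->root) {		/* root changed while we slept */
			rw_unlock(0, b);
			goto retry_lock;
		}
		/*
		 * 'b' is guaranteed to stay the root while the lock is held,
		 * and must later be unlocked via 'b'.
		 */
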
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index bc309e41d074a..486e1180cc3a3 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -254,7 +254,7 @@ enum evict_result {
+
+ typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
+
+-static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context)
++static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
+ {
+ unsigned long tested = 0;
+ struct list_head *h = lru->cursor;
+@@ -295,7 +295,8 @@ static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *con
+
+ h = h->next;
+
+- cond_resched();
++ if (!no_sleep)
++ cond_resched();
+ }
+
+ return NULL;
+@@ -382,7 +383,10 @@ struct dm_buffer {
+ */
+
+ struct buffer_tree {
+- struct rw_semaphore lock;
++ union {
++ struct rw_semaphore lock;
++ rwlock_t spinlock;
++ } u;
+ struct rb_root root;
+ } ____cacheline_aligned_in_smp;
+
+@@ -393,9 +397,12 @@ struct dm_buffer_cache {
+ * on the locks.
+ */
+ unsigned int num_locks;
++ bool no_sleep;
+ struct buffer_tree trees[];
+ };
+
++static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
++
+ static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
+ {
+ return dm_hash_locks_index(block, num_locks);
+@@ -403,22 +410,34 @@ static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
+
+ static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
+ {
+- down_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
++ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++ read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++ else
++ down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+
+ static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
+ {
+- up_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
++ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++ read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++ else
++ up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+
+ static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
+ {
+- down_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
++ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++ write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++ else
++ down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+
+ static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
+ {
+- up_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
++ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++ write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++ else
++ up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+
+ /*
+@@ -442,18 +461,32 @@ static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool
+
+ static void __lh_lock(struct lock_history *lh, unsigned int index)
+ {
+- if (lh->write)
+- down_write(&lh->cache->trees[index].lock);
+- else
+- down_read(&lh->cache->trees[index].lock);
++ if (lh->write) {
++ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++ write_lock_bh(&lh->cache->trees[index].u.spinlock);
++ else
++ down_write(&lh->cache->trees[index].u.lock);
++ } else {
++ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++ read_lock_bh(&lh->cache->trees[index].u.spinlock);
++ else
++ down_read(&lh->cache->trees[index].u.lock);
++ }
+ }
+
+ static void __lh_unlock(struct lock_history *lh, unsigned int index)
+ {
+- if (lh->write)
+- up_write(&lh->cache->trees[index].lock);
+- else
+- up_read(&lh->cache->trees[index].lock);
++ if (lh->write) {
++ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++ write_unlock_bh(&lh->cache->trees[index].u.spinlock);
++ else
++ up_write(&lh->cache->trees[index].u.lock);
++ } else {
++ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++ read_unlock_bh(&lh->cache->trees[index].u.spinlock);
++ else
++ up_read(&lh->cache->trees[index].u.lock);
++ }
+ }
+
+ /*
+@@ -502,14 +535,18 @@ static struct dm_buffer *list_to_buffer(struct list_head *l)
+ return le_to_buffer(le);
+ }
+
+-static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks)
++static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
+ {
+ unsigned int i;
+
+ bc->num_locks = num_locks;
++ bc->no_sleep = no_sleep;
+
+ for (i = 0; i < bc->num_locks; i++) {
+- init_rwsem(&bc->trees[i].lock);
++ if (no_sleep)
++ rwlock_init(&bc->trees[i].u.spinlock);
++ else
++ init_rwsem(&bc->trees[i].u.lock);
+ bc->trees[i].root = RB_ROOT;
+ }
+
+@@ -648,7 +685,7 @@ static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode
+ struct lru_entry *le;
+ struct dm_buffer *b;
+
+- le = lru_evict(&bc->lru[list_mode], __evict_pred, &w);
++ le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
+ if (!le)
+ return NULL;
+
+@@ -702,7 +739,7 @@ static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_
+ struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
+
+ while (true) {
+- le = lru_evict(&bc->lru[old_mode], __evict_pred, &w);
++ le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
+ if (!le)
+ break;
+
+@@ -915,10 +952,11 @@ static void cache_remove_range(struct dm_buffer_cache *bc,
+ {
+ unsigned int i;
+
++ BUG_ON(bc->no_sleep);
+ for (i = 0; i < bc->num_locks; i++) {
+- down_write(&bc->trees[i].lock);
++ down_write(&bc->trees[i].u.lock);
+ __remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
+- up_write(&bc->trees[i].lock);
++ up_write(&bc->trees[i].u.lock);
+ }
+ }
+
+@@ -979,8 +1017,6 @@ struct dm_bufio_client {
+ struct dm_buffer_cache cache; /* must be last member */
+ };
+
+-static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
+-
+ /*----------------------------------------------------------------*/
+
+ #define dm_bufio_in_request() (!!current->bio_list)
+@@ -1871,7 +1907,8 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
+ if (need_submit)
+ submit_io(b, REQ_OP_READ, read_endio);
+
+- wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
++ if (nf != NF_GET) /* we already tested this condition above */
++ wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
+
+ if (b->read_error) {
+ int error = blk_status_to_errno(b->read_error);
+@@ -2421,7 +2458,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
+ r = -ENOMEM;
+ goto bad_client;
+ }
+- cache_init(&c->cache, num_locks);
++ cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
+
+ c->bdev = bdev;
+ c->block_size = block_size;
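
Note: the dm-bufio change lets DM_BUFIO_CLIENT_NO_SLEEP clients run under BH-safe rwlocks instead of rw_semaphores. The lock flavour is fixed once in cache_init(), recorded in a no_sleep flag, and a static key keeps the extra branch cheap for the common sleepable case. Condensed to its essentials:

	static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);

	struct buffer_tree {
		union {
			struct rw_semaphore lock;	/* sleepable clients */
			rwlock_t spinlock;		/* NO_SLEEP clients */
		} u;
		struct rb_root root;
	};

	static void tree_read_lock(struct buffer_tree *t, bool no_sleep)
	{
		if (static_branch_unlikely(&no_sleep_enabled) && no_sleep)
			read_lock_bh(&t->u.spinlock);
		else
			down_read(&t->u.lock);
	}
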
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 5315fd261c23b..cef9353370b20 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1699,11 +1699,17 @@ retry:
+ order = min(order, remaining_order);
+
+ while (order > 0) {
++ if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) +
++ (1 << order) > dm_crypt_pages_per_client))
++ goto decrease_order;
+ pages = alloc_pages(gfp_mask
+ | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP,
+ order);
+- if (likely(pages != NULL))
++ if (likely(pages != NULL)) {
++ percpu_counter_add(&cc->n_allocated_pages, 1 << order);
+ goto have_pages;
++ }
++decrease_order:
+ order--;
+ }
+
+@@ -1741,10 +1747,13 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
+
+ if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
+ bio_for_each_folio_all(fi, clone) {
+- if (folio_test_large(fi.folio))
++ if (folio_test_large(fi.folio)) {
++ percpu_counter_sub(&cc->n_allocated_pages,
++ 1 << folio_order(fi.folio));
+ folio_put(fi.folio);
+- else
++ } else {
+ mempool_free(&fi.folio->page, &cc->page_pool);
++ }
+ }
+ }
+ }
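
Note: the dm-crypt hunks bound high-order page allocations by the existing n_allocated_pages per-CPU counter: check the budget before each attempt, charge the counter on success, and credit large folios back on free. The resulting loop, restructured slightly for illustration:

	while (order > 0) {
		if (percpu_counter_read_positive(&cc->n_allocated_pages) +
		    (1 << order) > dm_crypt_pages_per_client) {
			order--;			/* over budget, try smaller */
			continue;
		}
		pages = alloc_pages(gfp_mask | __GFP_NOMEMALLOC | __GFP_NORETRY |
				    __GFP_NOWARN | __GFP_COMP, order);
		if (pages) {
			percpu_counter_add(&cc->n_allocated_pages, 1 << order);
			break;				/* charged and done */
		}
		order--;				/* allocation failed, shrink */
	}
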
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index 7433525e59856..3726fae3006e3 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -31,7 +31,7 @@ struct delay_c {
+ struct workqueue_struct *kdelayd_wq;
+ struct work_struct flush_expired_bios;
+ struct list_head delayed_bios;
+- atomic_t may_delay;
++ bool may_delay;
+
+ struct delay_class read;
+ struct delay_class write;
+@@ -192,7 +192,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
+ INIT_LIST_HEAD(&dc->delayed_bios);
+ mutex_init(&dc->timer_lock);
+- atomic_set(&dc->may_delay, 1);
++ dc->may_delay = true;
+ dc->argc = argc;
+
+ ret = delay_class_ctr(ti, &dc->read, argv);
+@@ -247,7 +247,7 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
+ struct dm_delay_info *delayed;
+ unsigned long expires = 0;
+
+- if (!c->delay || !atomic_read(&dc->may_delay))
++ if (!c->delay)
+ return DM_MAPIO_REMAPPED;
+
+ delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
+@@ -256,6 +256,10 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
+ delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);
+
+ mutex_lock(&delayed_bios_lock);
++ if (unlikely(!dc->may_delay)) {
++ mutex_unlock(&delayed_bios_lock);
++ return DM_MAPIO_REMAPPED;
++ }
+ c->ops++;
+ list_add_tail(&delayed->list, &dc->delayed_bios);
+ mutex_unlock(&delayed_bios_lock);
+@@ -269,7 +273,10 @@ static void delay_presuspend(struct dm_target *ti)
+ {
+ struct delay_c *dc = ti->private;
+
+- atomic_set(&dc->may_delay, 0);
++ mutex_lock(&delayed_bios_lock);
++ dc->may_delay = false;
++ mutex_unlock(&delayed_bios_lock);
++
+ del_timer_sync(&dc->delay_timer);
+ flush_bios(flush_delayed_bios(dc, 1));
+ }
+@@ -278,7 +285,7 @@ static void delay_resume(struct dm_target *ti)
+ {
+ struct delay_c *dc = ti->private;
+
+- atomic_set(&dc->may_delay, 1);
++ dc->may_delay = true;
+ }
+
+ static int delay_map(struct dm_target *ti, struct bio *bio)
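
Note: the dm-delay fix turns may_delay from an atomic_t read outside any lock into a plain bool accessed only under delayed_bios_lock, the mutex that also guards the list, so no bio can be queued after presuspend drains it. The pattern, reduced to one place:

	/*
	 * Test the gate flag and update the list it guards under one lock;
	 * otherwise an entry can slip in after teardown has flushed the list.
	 */
	mutex_lock(&delayed_bios_lock);
	if (unlikely(!dc->may_delay)) {
		mutex_unlock(&delayed_bios_lock);
		return DM_MAPIO_REMAPPED;	/* pass the bio through directly */
	}
	list_add_tail(&delayed->list, &dc->delayed_bios);
	mutex_unlock(&delayed_bios_lock);
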
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 3ef9f018da60c..b475200d8586a 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -24,7 +24,8 @@ bool verity_fec_is_enabled(struct dm_verity *v)
+ */
+ static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
+ {
+- return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
++ return (struct dm_verity_fec_io *)
++ ((char *)io + io->v->ti->per_io_data_size - sizeof(struct dm_verity_fec_io));
+ }
+
+ /*
+@@ -185,7 +186,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
+ {
+ if (unlikely(verity_hash(v, verity_io_hash_req(v, io),
+ data, 1 << v->data_dev_block_bits,
+- verity_io_real_digest(v, io))))
++ verity_io_real_digest(v, io), true)))
+ return 0;
+
+ return memcmp(verity_io_real_digest(v, io), want_digest,
+@@ -386,7 +387,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
+ /* Always re-validate the corrected block against the expected hash */
+ r = verity_hash(v, verity_io_hash_req(v, io), fio->output,
+ 1 << v->data_dev_block_bits,
+- verity_io_real_digest(v, io));
++ verity_io_real_digest(v, io), true);
+ if (unlikely(r < 0))
+ return r;
+
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 26adcfea03022..14e58ae705218 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -135,20 +135,21 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
+ * Wrapper for crypto_ahash_init, which handles verity salting.
+ */
+ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
+- struct crypto_wait *wait)
++ struct crypto_wait *wait, bool may_sleep)
+ {
+ int r;
+
+ ahash_request_set_tfm(req, v->tfm);
+- ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+- CRYPTO_TFM_REQ_MAY_BACKLOG,
+- crypto_req_done, (void *)wait);
++ ahash_request_set_callback(req,
++ may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
++ crypto_req_done, (void *)wait);
+ crypto_init_wait(wait);
+
+ r = crypto_wait_req(crypto_ahash_init(req), wait);
+
+ if (unlikely(r < 0)) {
+- DMERR("crypto_ahash_init failed: %d", r);
++ if (r != -ENOMEM)
++ DMERR("crypto_ahash_init failed: %d", r);
+ return r;
+ }
+
+@@ -179,12 +180,12 @@ out:
+ }
+
+ int verity_hash(struct dm_verity *v, struct ahash_request *req,
+- const u8 *data, size_t len, u8 *digest)
++ const u8 *data, size_t len, u8 *digest, bool may_sleep)
+ {
+ int r;
+ struct crypto_wait wait;
+
+- r = verity_hash_init(v, req, &wait);
++ r = verity_hash_init(v, req, &wait, may_sleep);
+ if (unlikely(r < 0))
+ goto out;
+
+@@ -322,7 +323,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
+
+ r = verity_hash(v, verity_io_hash_req(v, io),
+ data, 1 << v->hash_dev_block_bits,
+- verity_io_real_digest(v, io));
++ verity_io_real_digest(v, io), !io->in_tasklet);
+ if (unlikely(r < 0))
+ goto release_ret_r;
+
+@@ -556,7 +557,7 @@ static int verity_verify_io(struct dm_verity_io *io)
+ continue;
+ }
+
+- r = verity_hash_init(v, req, &wait);
++ r = verity_hash_init(v, req, &wait, !io->in_tasklet);
+ if (unlikely(r < 0))
+ return r;
+
+@@ -641,7 +642,6 @@ static void verity_work(struct work_struct *w)
+
+ io->in_tasklet = false;
+
+- verity_fec_init_io(io);
+ verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
+ }
+
+@@ -652,7 +652,7 @@ static void verity_tasklet(unsigned long data)
+
+ io->in_tasklet = true;
+ err = verity_verify_io(io);
+- if (err == -EAGAIN) {
++ if (err == -EAGAIN || err == -ENOMEM) {
+ /* fallback to retrying with work-queue */
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+@@ -667,7 +667,9 @@ static void verity_end_io(struct bio *bio)
+ struct dm_verity_io *io = bio->bi_private;
+
+ if (bio->bi_status &&
+- (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
++ (!verity_fec_is_enabled(io->v) ||
++ verity_is_system_shutting_down() ||
++ (bio->bi_opf & REQ_RAHEAD))) {
+ verity_finish_io(io, bio->bi_status);
+ return;
+ }
+@@ -791,6 +793,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
+ bio->bi_private = io;
+ io->iter = bio->bi_iter;
+
++ verity_fec_init_io(io);
++
+ verity_submit_prefetch(v, io);
+
+ submit_bio_noacct(bio);
+@@ -1033,7 +1037,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
+ goto out;
+
+ r = verity_hash(v, req, zero_data, 1 << v->data_dev_block_bits,
+- v->zero_digest);
++ v->zero_digest, true);
+
+ out:
+ kfree(req);
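
Note: dm-verity now threads a may_sleep flag down to verity_hash_init(): tasklet callers pass no crypto request flags so the hash layer can neither sleep nor backlog, -ENOMEM is expected (not logged) in that mode, and the tasklet falls back to the workqueue on -EAGAIN or -ENOMEM. The key call in isolation:

	/* Atomic contexts must not let the crypto layer sleep or backlog. */
	ahash_request_set_callback(req,
		may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
		crypto_req_done, (void *)wait);
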
+diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
+index 2f555b4203679..f9d522c870e61 100644
+--- a/drivers/md/dm-verity.h
++++ b/drivers/md/dm-verity.h
+@@ -115,12 +115,6 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
+ return (u8 *)(io + 1) + v->ahash_reqsize + v->digest_size;
+ }
+
+-static inline u8 *verity_io_digest_end(struct dm_verity *v,
+- struct dm_verity_io *io)
+-{
+- return verity_io_want_digest(v, io) + v->digest_size;
+-}
+-
+ extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ struct bvec_iter *iter,
+ int (*process)(struct dm_verity *v,
+@@ -128,7 +122,7 @@ extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ u8 *data, size_t len));
+
+ extern int verity_hash(struct dm_verity *v, struct ahash_request *req,
+- const u8 *data, size_t len, u8 *digest);
++ const u8 *data, size_t len, u8 *digest, bool may_sleep);
+
+ extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
+ sector_t block, u8 *digest, bool *is_zero);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index a104a025084dc..2748b0b424cfe 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -449,7 +449,7 @@ void mddev_suspend(struct mddev *mddev)
+ set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
+ percpu_ref_kill(&mddev->active_io);
+
+- if (mddev->pers->prepare_suspend)
++ if (mddev->pers && mddev->pers->prepare_suspend)
+ mddev->pers->prepare_suspend(mddev);
+
+ wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io));
+@@ -8669,7 +8669,8 @@ static void md_end_clone_io(struct bio *bio)
+ struct bio *orig_bio = md_io_clone->orig_bio;
+ struct mddev *mddev = md_io_clone->mddev;
+
+- orig_bio->bi_status = bio->bi_status;
++ if (bio->bi_status && !orig_bio->bi_status)
++ orig_bio->bi_status = bio->bi_status;
+
+ if (md_io_clone->start_time)
+ bio_end_io_acct(orig_bio, md_io_clone->start_time);
+diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile
+index 26d2bc7783944..a51e98ab4958d 100644
+--- a/drivers/media/cec/platform/Makefile
++++ b/drivers/media/cec/platform/Makefile
+@@ -6,7 +6,7 @@
+ # Please keep it in alphabetic order
+ obj-$(CONFIG_CEC_CROS_EC) += cros-ec/
+ obj-$(CONFIG_CEC_GPIO) += cec-gpio/
+-obj-$(CONFIG_CEC_MESON_AO) += meson/
++obj-y += meson/
+ obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
+ obj-$(CONFIG_CEC_SECO) += seco/
+ obj-$(CONFIG_CEC_STI) += sti/
+diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
+index 74ff833ff48ca..53b443be5a59e 100644
+--- a/drivers/media/i2c/Kconfig
++++ b/drivers/media/i2c/Kconfig
+@@ -99,6 +99,7 @@ config VIDEO_IMX214
+
+ config VIDEO_IMX219
+ tristate "Sony IMX219 sensor support"
++ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor driver for the Sony
+ IMX219 camera.
+diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
+index 49e0d9a095302..6f8fbd82e21c8 100644
+--- a/drivers/media/i2c/ccs/ccs-core.c
++++ b/drivers/media/i2c/ccs/ccs-core.c
+@@ -3097,7 +3097,7 @@ static int ccs_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+ try_fmt->code = sensor->internal_csi_format->code;
+ try_fmt->field = V4L2_FIELD_NONE;
+
+- if (ssd != sensor->pixel_array)
++ if (ssd == sensor->pixel_array)
+ continue;
+
+ try_comp = v4l2_subdev_get_try_compose(sd, fh->state, i);
+diff --git a/drivers/media/i2c/ccs/ccs-quirk.h b/drivers/media/i2c/ccs/ccs-quirk.h
+index 5838fcda92fd4..0b1a64958d714 100644
+--- a/drivers/media/i2c/ccs/ccs-quirk.h
++++ b/drivers/media/i2c/ccs/ccs-quirk.h
+@@ -32,12 +32,10 @@ struct ccs_sensor;
+ * @reg: Pointer to the register to access
+ * @value: Register value, set by the caller on write, or
+ * by the quirk on read
+- *
+- * @flags: Quirk flags
+- *
+ * @return: 0 on success, -ENOIOCTLCMD if no register
+ * access may be done by the caller (default read
+ * value is zero), else negative error code on error
++ * @flags: Quirk flags
+ */
+ struct ccs_quirk {
+ int (*limits)(struct ccs_sensor *sensor);
+diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
+index ec53abe2e84e5..3afa3f79c8a26 100644
+--- a/drivers/media/i2c/imx219.c
++++ b/drivers/media/i2c/imx219.c
+@@ -21,40 +21,56 @@
+ #include <linux/module.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/regulator/consumer.h>
++
++#include <media/v4l2-cci.h>
+ #include <media/v4l2-ctrls.h>
+ #include <media/v4l2-device.h>
+ #include <media/v4l2-event.h>
+ #include <media/v4l2-fwnode.h>
+ #include <media/v4l2-mediabus.h>
+-#include <asm/unaligned.h>
+
+-#define IMX219_REG_VALUE_08BIT 1
+-#define IMX219_REG_VALUE_16BIT 2
++/* Chip ID */
++#define IMX219_REG_CHIP_ID CCI_REG16(0x0000)
++#define IMX219_CHIP_ID 0x0219
+
+-#define IMX219_REG_MODE_SELECT 0x0100
++#define IMX219_REG_MODE_SELECT CCI_REG8(0x0100)
+ #define IMX219_MODE_STANDBY 0x00
+ #define IMX219_MODE_STREAMING 0x01
+
+-/* Chip ID */
+-#define IMX219_REG_CHIP_ID 0x0000
+-#define IMX219_CHIP_ID 0x0219
++#define IMX219_REG_CSI_LANE_MODE CCI_REG8(0x0114)
++#define IMX219_CSI_2_LANE_MODE 0x01
++#define IMX219_CSI_4_LANE_MODE 0x03
+
+-/* External clock frequency is 24.0M */
+-#define IMX219_XCLK_FREQ 24000000
++#define IMX219_REG_DPHY_CTRL CCI_REG8(0x0128)
++#define IMX219_DPHY_CTRL_TIMING_AUTO 0
++#define IMX219_DPHY_CTRL_TIMING_MANUAL 1
+
+-/* Pixel rate is fixed for all the modes */
+-#define IMX219_PIXEL_RATE 182400000
+-#define IMX219_PIXEL_RATE_4LANE 280800000
++#define IMX219_REG_EXCK_FREQ CCI_REG16(0x012a)
++#define IMX219_EXCK_FREQ(n) ((n) * 256) /* n expressed in MHz */
+
+-#define IMX219_DEFAULT_LINK_FREQ 456000000
+-#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000
++/* Analog gain control */
++#define IMX219_REG_ANALOG_GAIN CCI_REG8(0x0157)
++#define IMX219_ANA_GAIN_MIN 0
++#define IMX219_ANA_GAIN_MAX 232
++#define IMX219_ANA_GAIN_STEP 1
++#define IMX219_ANA_GAIN_DEFAULT 0x0
+
+-#define IMX219_REG_CSI_LANE_MODE 0x0114
+-#define IMX219_CSI_2_LANE_MODE 0x01
+-#define IMX219_CSI_4_LANE_MODE 0x03
++/* Digital gain control */
++#define IMX219_REG_DIGITAL_GAIN CCI_REG16(0x0158)
++#define IMX219_DGTL_GAIN_MIN 0x0100
++#define IMX219_DGTL_GAIN_MAX 0x0fff
++#define IMX219_DGTL_GAIN_DEFAULT 0x0100
++#define IMX219_DGTL_GAIN_STEP 1
++
++/* Exposure control */
++#define IMX219_REG_EXPOSURE CCI_REG16(0x015a)
++#define IMX219_EXPOSURE_MIN 4
++#define IMX219_EXPOSURE_STEP 1
++#define IMX219_EXPOSURE_DEFAULT 0x640
++#define IMX219_EXPOSURE_MAX 65535
+
+ /* V_TIMING internal */
+-#define IMX219_REG_VTS 0x0160
++#define IMX219_REG_VTS CCI_REG16(0x0160)
+ #define IMX219_VTS_15FPS 0x0dc6
+ #define IMX219_VTS_30FPS_1080P 0x06e3
+ #define IMX219_VTS_30FPS_BINNED 0x06e3
+@@ -72,37 +88,37 @@
+ /* HBLANK control - read only */
+ #define IMX219_PPL_DEFAULT 3448
+
+-/* Exposure control */
+-#define IMX219_REG_EXPOSURE 0x015a
+-#define IMX219_EXPOSURE_MIN 4
+-#define IMX219_EXPOSURE_STEP 1
+-#define IMX219_EXPOSURE_DEFAULT 0x640
+-#define IMX219_EXPOSURE_MAX 65535
+-
+-/* Analog gain control */
+-#define IMX219_REG_ANALOG_GAIN 0x0157
+-#define IMX219_ANA_GAIN_MIN 0
+-#define IMX219_ANA_GAIN_MAX 232
+-#define IMX219_ANA_GAIN_STEP 1
+-#define IMX219_ANA_GAIN_DEFAULT 0x0
+-
+-/* Digital gain control */
+-#define IMX219_REG_DIGITAL_GAIN 0x0158
+-#define IMX219_DGTL_GAIN_MIN 0x0100
+-#define IMX219_DGTL_GAIN_MAX 0x0fff
+-#define IMX219_DGTL_GAIN_DEFAULT 0x0100
+-#define IMX219_DGTL_GAIN_STEP 1
+-
+-#define IMX219_REG_ORIENTATION 0x0172
++#define IMX219_REG_LINE_LENGTH_A CCI_REG16(0x0162)
++#define IMX219_REG_X_ADD_STA_A CCI_REG16(0x0164)
++#define IMX219_REG_X_ADD_END_A CCI_REG16(0x0166)
++#define IMX219_REG_Y_ADD_STA_A CCI_REG16(0x0168)
++#define IMX219_REG_Y_ADD_END_A CCI_REG16(0x016a)
++#define IMX219_REG_X_OUTPUT_SIZE CCI_REG16(0x016c)
++#define IMX219_REG_Y_OUTPUT_SIZE CCI_REG16(0x016e)
++#define IMX219_REG_X_ODD_INC_A CCI_REG8(0x0170)
++#define IMX219_REG_Y_ODD_INC_A CCI_REG8(0x0171)
++#define IMX219_REG_ORIENTATION CCI_REG8(0x0172)
+
+ /* Binning Mode */
+-#define IMX219_REG_BINNING_MODE 0x0174
++#define IMX219_REG_BINNING_MODE CCI_REG16(0x0174)
+ #define IMX219_BINNING_NONE 0x0000
+ #define IMX219_BINNING_2X2 0x0101
+ #define IMX219_BINNING_2X2_ANALOG 0x0303
+
++#define IMX219_REG_CSI_DATA_FORMAT_A CCI_REG16(0x018c)
++
++/* PLL Settings */
++#define IMX219_REG_VTPXCK_DIV CCI_REG8(0x0301)
++#define IMX219_REG_VTSYCK_DIV CCI_REG8(0x0303)
++#define IMX219_REG_PREPLLCK_VT_DIV CCI_REG8(0x0304)
++#define IMX219_REG_PREPLLCK_OP_DIV CCI_REG8(0x0305)
++#define IMX219_REG_PLL_VT_MPY CCI_REG16(0x0306)
++#define IMX219_REG_OPPXCK_DIV CCI_REG8(0x0309)
++#define IMX219_REG_OPSYCK_DIV CCI_REG8(0x030b)
++#define IMX219_REG_PLL_OP_MPY CCI_REG16(0x030c)
++
+ /* Test Pattern Control */
+-#define IMX219_REG_TEST_PATTERN 0x0600
++#define IMX219_REG_TEST_PATTERN CCI_REG16(0x0600)
+ #define IMX219_TEST_PATTERN_DISABLE 0
+ #define IMX219_TEST_PATTERN_SOLID_COLOR 1
+ #define IMX219_TEST_PATTERN_COLOR_BARS 2
+@@ -110,10 +126,10 @@
+ #define IMX219_TEST_PATTERN_PN9 4
+
+ /* Test pattern colour components */
+-#define IMX219_REG_TESTP_RED 0x0602
+-#define IMX219_REG_TESTP_GREENR 0x0604
+-#define IMX219_REG_TESTP_BLUE 0x0606
+-#define IMX219_REG_TESTP_GREENB 0x0608
++#define IMX219_REG_TESTP_RED CCI_REG16(0x0602)
++#define IMX219_REG_TESTP_GREENR CCI_REG16(0x0604)
++#define IMX219_REG_TESTP_BLUE CCI_REG16(0x0606)
++#define IMX219_REG_TESTP_GREENB CCI_REG16(0x0608)
+ #define IMX219_TESTP_COLOUR_MIN 0
+ #define IMX219_TESTP_COLOUR_MAX 0x03ff
+ #define IMX219_TESTP_COLOUR_STEP 1
+@@ -122,6 +138,19 @@
+ #define IMX219_TESTP_BLUE_DEFAULT 0
+ #define IMX219_TESTP_GREENB_DEFAULT 0
+
++#define IMX219_REG_TP_WINDOW_WIDTH CCI_REG16(0x0624)
++#define IMX219_REG_TP_WINDOW_HEIGHT CCI_REG16(0x0626)
++
++/* External clock frequency is 24.0M */
++#define IMX219_XCLK_FREQ 24000000
++
++/* Pixel rate is fixed for all the modes */
++#define IMX219_PIXEL_RATE 182400000
++#define IMX219_PIXEL_RATE_4LANE 280800000
++
++#define IMX219_DEFAULT_LINK_FREQ 456000000
++#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000
++
+ /* IMX219 native and active pixel array size. */
+ #define IMX219_NATIVE_WIDTH 3296U
+ #define IMX219_NATIVE_HEIGHT 2480U
+@@ -130,14 +159,9 @@
+ #define IMX219_PIXEL_ARRAY_WIDTH 3280U
+ #define IMX219_PIXEL_ARRAY_HEIGHT 2464U
+
+-struct imx219_reg {
+- u16 address;
+- u8 val;
+-};
+-
+ struct imx219_reg_list {
+ unsigned int num_of_regs;
+- const struct imx219_reg *regs;
++ const struct cci_reg_sequence *regs;
+ };
+
+ /* Mode : resolution and related config&values */
+@@ -160,53 +184,48 @@ struct imx219_mode {
+ bool binning;
+ };
+
+-static const struct imx219_reg imx219_common_regs[] = {
+- {0x0100, 0x00}, /* Mode Select */
++static const struct cci_reg_sequence imx219_common_regs[] = {
++ { IMX219_REG_MODE_SELECT, 0x00 }, /* Mode Select */
+
+ /* To Access Addresses 3000-5fff, send the following commands */
+- {0x30eb, 0x0c},
+- {0x30eb, 0x05},
+- {0x300a, 0xff},
+- {0x300b, 0xff},
+- {0x30eb, 0x05},
+- {0x30eb, 0x09},
++ { CCI_REG8(0x30eb), 0x0c },
++ { CCI_REG8(0x30eb), 0x05 },
++ { CCI_REG8(0x300a), 0xff },
++ { CCI_REG8(0x300b), 0xff },
++ { CCI_REG8(0x30eb), 0x05 },
++ { CCI_REG8(0x30eb), 0x09 },
+
+ /* PLL Clock Table */
+- {0x0301, 0x05}, /* VTPXCK_DIV */
+- {0x0303, 0x01}, /* VTSYSCK_DIV */
+- {0x0304, 0x03}, /* PREPLLCK_VT_DIV 0x03 = AUTO set */
+- {0x0305, 0x03}, /* PREPLLCK_OP_DIV 0x03 = AUTO set */
+- {0x0306, 0x00}, /* PLL_VT_MPY */
+- {0x0307, 0x39},
+- {0x030b, 0x01}, /* OP_SYS_CLK_DIV */
+- {0x030c, 0x00}, /* PLL_OP_MPY */
+- {0x030d, 0x72},
++ { IMX219_REG_VTPXCK_DIV, 5 },
++ { IMX219_REG_VTSYCK_DIV, 1 },
++ { IMX219_REG_PREPLLCK_VT_DIV, 3 }, /* 0x03 = AUTO set */
++ { IMX219_REG_PREPLLCK_OP_DIV, 3 }, /* 0x03 = AUTO set */
++ { IMX219_REG_PLL_VT_MPY, 57 },
++ { IMX219_REG_OPSYCK_DIV, 1 },
++ { IMX219_REG_PLL_OP_MPY, 114 },
+
+ /* Undocumented registers */
+- {0x455e, 0x00},
+- {0x471e, 0x4b},
+- {0x4767, 0x0f},
+- {0x4750, 0x14},
+- {0x4540, 0x00},
+- {0x47b4, 0x14},
+- {0x4713, 0x30},
+- {0x478b, 0x10},
+- {0x478f, 0x10},
+- {0x4793, 0x10},
+- {0x4797, 0x0e},
+- {0x479b, 0x0e},
++ { CCI_REG8(0x455e), 0x00 },
++ { CCI_REG8(0x471e), 0x4b },
++ { CCI_REG8(0x4767), 0x0f },
++ { CCI_REG8(0x4750), 0x14 },
++ { CCI_REG8(0x4540), 0x00 },
++ { CCI_REG8(0x47b4), 0x14 },
++ { CCI_REG8(0x4713), 0x30 },
++ { CCI_REG8(0x478b), 0x10 },
++ { CCI_REG8(0x478f), 0x10 },
++ { CCI_REG8(0x4793), 0x10 },
++ { CCI_REG8(0x4797), 0x0e },
++ { CCI_REG8(0x479b), 0x0e },
+
+ /* Frame Bank Register Group "A" */
+- {0x0162, 0x0d}, /* Line_Length_A */
+- {0x0163, 0x78},
+- {0x0170, 0x01}, /* X_ODD_INC_A */
+- {0x0171, 0x01}, /* Y_ODD_INC_A */
++ { IMX219_REG_LINE_LENGTH_A, 3448 },
++ { IMX219_REG_X_ODD_INC_A, 1 },
++ { IMX219_REG_Y_ODD_INC_A, 1 },
+
+ /* Output setup registers */
+- {0x0114, 0x01}, /* CSI 2-Lane Mode */
+- {0x0128, 0x00}, /* DPHY Auto Mode */
+- {0x012a, 0x18}, /* EXCK_Freq */
+- {0x012b, 0x00},
++ { IMX219_REG_DPHY_CTRL, IMX219_DPHY_CTRL_TIMING_AUTO },
++ { IMX219_REG_EXCK_FREQ, IMX219_EXCK_FREQ(IMX219_XCLK_FREQ / 1000000) },
+ };
+
+ /*
+@@ -214,92 +233,58 @@ static const struct imx219_reg imx219_common_regs[] = {
+ * driver.
+ * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7.
+ */
+-static const struct imx219_reg mode_3280x2464_regs[] = {
+- {0x0164, 0x00},
+- {0x0165, 0x00},
+- {0x0166, 0x0c},
+- {0x0167, 0xcf},
+- {0x0168, 0x00},
+- {0x0169, 0x00},
+- {0x016a, 0x09},
+- {0x016b, 0x9f},
+- {0x016c, 0x0c},
+- {0x016d, 0xd0},
+- {0x016e, 0x09},
+- {0x016f, 0xa0},
+- {0x0624, 0x0c},
+- {0x0625, 0xd0},
+- {0x0626, 0x09},
+- {0x0627, 0xa0},
++static const struct cci_reg_sequence mode_3280x2464_regs[] = {
++ { IMX219_REG_X_ADD_STA_A, 0 },
++ { IMX219_REG_X_ADD_END_A, 3279 },
++ { IMX219_REG_Y_ADD_STA_A, 0 },
++ { IMX219_REG_Y_ADD_END_A, 2463 },
++ { IMX219_REG_X_OUTPUT_SIZE, 3280 },
++ { IMX219_REG_Y_OUTPUT_SIZE, 2464 },
++ { IMX219_REG_TP_WINDOW_WIDTH, 3280 },
++ { IMX219_REG_TP_WINDOW_HEIGHT, 2464 },
+ };
+
+-static const struct imx219_reg mode_1920_1080_regs[] = {
+- {0x0164, 0x02},
+- {0x0165, 0xa8},
+- {0x0166, 0x0a},
+- {0x0167, 0x27},
+- {0x0168, 0x02},
+- {0x0169, 0xb4},
+- {0x016a, 0x06},
+- {0x016b, 0xeb},
+- {0x016c, 0x07},
+- {0x016d, 0x80},
+- {0x016e, 0x04},
+- {0x016f, 0x38},
+- {0x0624, 0x07},
+- {0x0625, 0x80},
+- {0x0626, 0x04},
+- {0x0627, 0x38},
++static const struct cci_reg_sequence mode_1920_1080_regs[] = {
++ { IMX219_REG_X_ADD_STA_A, 680 },
++ { IMX219_REG_X_ADD_END_A, 2599 },
++ { IMX219_REG_Y_ADD_STA_A, 692 },
++ { IMX219_REG_Y_ADD_END_A, 1771 },
++ { IMX219_REG_X_OUTPUT_SIZE, 1920 },
++ { IMX219_REG_Y_OUTPUT_SIZE, 1080 },
++ { IMX219_REG_TP_WINDOW_WIDTH, 1920 },
++ { IMX219_REG_TP_WINDOW_HEIGHT, 1080 },
+ };
+
+-static const struct imx219_reg mode_1640_1232_regs[] = {
+- {0x0164, 0x00},
+- {0x0165, 0x00},
+- {0x0166, 0x0c},
+- {0x0167, 0xcf},
+- {0x0168, 0x00},
+- {0x0169, 0x00},
+- {0x016a, 0x09},
+- {0x016b, 0x9f},
+- {0x016c, 0x06},
+- {0x016d, 0x68},
+- {0x016e, 0x04},
+- {0x016f, 0xd0},
+- {0x0624, 0x06},
+- {0x0625, 0x68},
+- {0x0626, 0x04},
+- {0x0627, 0xd0},
++static const struct cci_reg_sequence mode_1640_1232_regs[] = {
++ { IMX219_REG_X_ADD_STA_A, 0 },
++ { IMX219_REG_X_ADD_END_A, 3279 },
++ { IMX219_REG_Y_ADD_STA_A, 0 },
++ { IMX219_REG_Y_ADD_END_A, 2463 },
++ { IMX219_REG_X_OUTPUT_SIZE, 1640 },
++ { IMX219_REG_Y_OUTPUT_SIZE, 1232 },
++ { IMX219_REG_TP_WINDOW_WIDTH, 1640 },
++ { IMX219_REG_TP_WINDOW_HEIGHT, 1232 },
+ };
+
+-static const struct imx219_reg mode_640_480_regs[] = {
+- {0x0164, 0x03},
+- {0x0165, 0xe8},
+- {0x0166, 0x08},
+- {0x0167, 0xe7},
+- {0x0168, 0x02},
+- {0x0169, 0xf0},
+- {0x016a, 0x06},
+- {0x016b, 0xaf},
+- {0x016c, 0x02},
+- {0x016d, 0x80},
+- {0x016e, 0x01},
+- {0x016f, 0xe0},
+- {0x0624, 0x06},
+- {0x0625, 0x68},
+- {0x0626, 0x04},
+- {0x0627, 0xd0},
++static const struct cci_reg_sequence mode_640_480_regs[] = {
++ { IMX219_REG_X_ADD_STA_A, 1000 },
++ { IMX219_REG_X_ADD_END_A, 2279 },
++ { IMX219_REG_Y_ADD_STA_A, 752 },
++ { IMX219_REG_Y_ADD_END_A, 1711 },
++ { IMX219_REG_X_OUTPUT_SIZE, 640 },
++ { IMX219_REG_Y_OUTPUT_SIZE, 480 },
++ { IMX219_REG_TP_WINDOW_WIDTH, 1640 },
++ { IMX219_REG_TP_WINDOW_HEIGHT, 1232 },
+ };
+
+-static const struct imx219_reg raw8_framefmt_regs[] = {
+- {0x018c, 0x08},
+- {0x018d, 0x08},
+- {0x0309, 0x08},
++static const struct cci_reg_sequence raw8_framefmt_regs[] = {
++ { IMX219_REG_CSI_DATA_FORMAT_A, 0x0808 },
++ { IMX219_REG_OPPXCK_DIV, 8 },
+ };
+
+-static const struct imx219_reg raw10_framefmt_regs[] = {
+- {0x018c, 0x0a},
+- {0x018d, 0x0a},
+- {0x0309, 0x0a},
++static const struct cci_reg_sequence raw10_framefmt_regs[] = {
++ { IMX219_REG_CSI_DATA_FORMAT_A, 0x0a0a },
++ { IMX219_REG_OPPXCK_DIV, 10 },
+ };
+
+ static const s64 imx219_link_freq_menu[] = {
+@@ -460,6 +445,7 @@ struct imx219 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
++ struct regmap *regmap;
+ struct clk *xclk; /* system clock to IMX219 */
+ u32 xclk_freq;
+
+@@ -491,78 +477,6 @@ static inline struct imx219 *to_imx219(struct v4l2_subdev *_sd)
+ return container_of(_sd, struct imx219, sd);
+ }
+
+-/* Read registers up to 2 at a time */
+-static int imx219_read_reg(struct imx219 *imx219, u16 reg, u32 len, u32 *val)
+-{
+- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+- struct i2c_msg msgs[2];
+- u8 addr_buf[2] = { reg >> 8, reg & 0xff };
+- u8 data_buf[4] = { 0, };
+- int ret;
+-
+- if (len > 4)
+- return -EINVAL;
+-
+- /* Write register address */
+- msgs[0].addr = client->addr;
+- msgs[0].flags = 0;
+- msgs[0].len = ARRAY_SIZE(addr_buf);
+- msgs[0].buf = addr_buf;
+-
+- /* Read data from register */
+- msgs[1].addr = client->addr;
+- msgs[1].flags = I2C_M_RD;
+- msgs[1].len = len;
+- msgs[1].buf = &data_buf[4 - len];
+-
+- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+- if (ret != ARRAY_SIZE(msgs))
+- return -EIO;
+-
+- *val = get_unaligned_be32(data_buf);
+-
+- return 0;
+-}
+-
+-/* Write registers up to 2 at a time */
+-static int imx219_write_reg(struct imx219 *imx219, u16 reg, u32 len, u32 val)
+-{
+- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+- u8 buf[6];
+-
+- if (len > 4)
+- return -EINVAL;
+-
+- put_unaligned_be16(reg, buf);
+- put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
+- if (i2c_master_send(client, buf, len + 2) != len + 2)
+- return -EIO;
+-
+- return 0;
+-}
+-
+-/* Write a list of registers */
+-static int imx219_write_regs(struct imx219 *imx219,
+- const struct imx219_reg *regs, u32 len)
+-{
+- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+- unsigned int i;
+- int ret;
+-
+- for (i = 0; i < len; i++) {
+- ret = imx219_write_reg(imx219, regs[i].address, 1, regs[i].val);
+- if (ret) {
+- dev_err_ratelimited(&client->dev,
+- "Failed to write reg 0x%4.4x. error = %d\n",
+- regs[i].address, ret);
+-
+- return ret;
+- }
+- }
+-
+- return 0;
+-}
+-
+ /* Get bayer order based on flip setting. */
+ static u32 imx219_get_format_code(struct imx219 *imx219, u32 code)
+ {
+@@ -586,7 +500,7 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
+ struct imx219 *imx219 =
+ container_of(ctrl->handler, struct imx219, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+- int ret;
++ int ret = 0;
+
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ int exposure_max, exposure_def;
+@@ -610,48 +524,45 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+- ret = imx219_write_reg(imx219, IMX219_REG_ANALOG_GAIN,
+- IMX219_REG_VALUE_08BIT, ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_ANALOG_GAIN,
++ ctrl->val, &ret);
+ break;
+ case V4L2_CID_EXPOSURE:
+- ret = imx219_write_reg(imx219, IMX219_REG_EXPOSURE,
+- IMX219_REG_VALUE_16BIT, ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_EXPOSURE,
++ ctrl->val, &ret);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+- ret = imx219_write_reg(imx219, IMX219_REG_DIGITAL_GAIN,
+- IMX219_REG_VALUE_16BIT, ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_DIGITAL_GAIN,
++ ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+- ret = imx219_write_reg(imx219, IMX219_REG_TEST_PATTERN,
+- IMX219_REG_VALUE_16BIT,
+- imx219_test_pattern_val[ctrl->val]);
++ cci_write(imx219->regmap, IMX219_REG_TEST_PATTERN,
++ imx219_test_pattern_val[ctrl->val], &ret);
+ break;
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+- ret = imx219_write_reg(imx219, IMX219_REG_ORIENTATION, 1,
+- imx219->hflip->val |
+- imx219->vflip->val << 1);
++ cci_write(imx219->regmap, IMX219_REG_ORIENTATION,
++ imx219->hflip->val | imx219->vflip->val << 1, &ret);
+ break;
+ case V4L2_CID_VBLANK:
+- ret = imx219_write_reg(imx219, IMX219_REG_VTS,
+- IMX219_REG_VALUE_16BIT,
+- imx219->mode->height + ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_VTS,
++ imx219->mode->height + ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_RED:
+- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_RED,
+- IMX219_REG_VALUE_16BIT, ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_TESTP_RED,
++ ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_GREENR:
+- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENR,
+- IMX219_REG_VALUE_16BIT, ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_TESTP_GREENR,
++ ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_BLUE:
+- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_BLUE,
+- IMX219_REG_VALUE_16BIT, ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_TESTP_BLUE,
++ ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_GREENB:
+- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENB,
+- IMX219_REG_VALUE_16BIT, ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_TESTP_GREENB,
++ ctrl->val, &ret);
+ break;
+ default:
+ dev_info(&client->dev,
+@@ -802,15 +713,15 @@ static int imx219_set_framefmt(struct imx219 *imx219,
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+- return imx219_write_regs(imx219, raw8_framefmt_regs,
+- ARRAY_SIZE(raw8_framefmt_regs));
++ return cci_multi_reg_write(imx219->regmap, raw8_framefmt_regs,
++ ARRAY_SIZE(raw8_framefmt_regs), NULL);
+
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+- return imx219_write_regs(imx219, raw10_framefmt_regs,
+- ARRAY_SIZE(raw10_framefmt_regs));
++ return cci_multi_reg_write(imx219->regmap, raw10_framefmt_regs,
++ ARRAY_SIZE(raw10_framefmt_regs), NULL);
+ }
+
+ return -EINVAL;
+@@ -819,28 +730,24 @@ static int imx219_set_framefmt(struct imx219 *imx219,
+ static int imx219_set_binning(struct imx219 *imx219,
+ const struct v4l2_mbus_framefmt *format)
+ {
+- if (!imx219->mode->binning) {
+- return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
+- IMX219_REG_VALUE_16BIT,
+- IMX219_BINNING_NONE);
+- }
++ if (!imx219->mode->binning)
++ return cci_write(imx219->regmap, IMX219_REG_BINNING_MODE,
++ IMX219_BINNING_NONE, NULL);
+
+ switch (format->code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+- return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
+- IMX219_REG_VALUE_16BIT,
+- IMX219_BINNING_2X2_ANALOG);
++ return cci_write(imx219->regmap, IMX219_REG_BINNING_MODE,
++ IMX219_BINNING_2X2_ANALOG, NULL);
+
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+- return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
+- IMX219_REG_VALUE_16BIT,
+- IMX219_BINNING_2X2);
++ return cci_write(imx219->regmap, IMX219_REG_BINNING_MODE,
++ IMX219_BINNING_2X2, NULL);
+ }
+
+ return -EINVAL;
+@@ -879,9 +786,9 @@ static int imx219_get_selection(struct v4l2_subdev *sd,
+
+ static int imx219_configure_lanes(struct imx219 *imx219)
+ {
+- return imx219_write_reg(imx219, IMX219_REG_CSI_LANE_MODE,
+- IMX219_REG_VALUE_08BIT, (imx219->lanes == 2) ?
+- IMX219_CSI_2_LANE_MODE : IMX219_CSI_4_LANE_MODE);
++ return cci_write(imx219->regmap, IMX219_REG_CSI_LANE_MODE,
++ imx219->lanes == 2 ? IMX219_CSI_2_LANE_MODE :
++ IMX219_CSI_4_LANE_MODE, NULL);
+ };
+
+ static int imx219_start_streaming(struct imx219 *imx219,
+@@ -897,7 +804,8 @@ static int imx219_start_streaming(struct imx219 *imx219,
+ return ret;
+
+ /* Send all registers that are common to all modes */
+- ret = imx219_write_regs(imx219, imx219_common_regs, ARRAY_SIZE(imx219_common_regs));
++ ret = cci_multi_reg_write(imx219->regmap, imx219_common_regs,
++ ARRAY_SIZE(imx219_common_regs), NULL);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to send mfg header\n", __func__);
+ goto err_rpm_put;
+@@ -912,7 +820,8 @@ static int imx219_start_streaming(struct imx219 *imx219,
+
+ /* Apply default values of current mode */
+ reg_list = &imx219->mode->reg_list;
+- ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
++ ret = cci_multi_reg_write(imx219->regmap, reg_list->regs,
++ reg_list->num_of_regs, NULL);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ goto err_rpm_put;
+@@ -939,8 +848,8 @@ static int imx219_start_streaming(struct imx219 *imx219,
+ goto err_rpm_put;
+
+ /* set stream on register */
+- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
+- IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
++ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
++ IMX219_MODE_STREAMING, NULL);
+ if (ret)
+ goto err_rpm_put;
+
+@@ -961,8 +870,8 @@ static void imx219_stop_streaming(struct imx219 *imx219)
+ int ret;
+
+ /* set stream off register */
+- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
+- IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
++ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
++ IMX219_MODE_STANDBY, NULL);
+ if (ret)
+ dev_err(&client->dev, "%s failed to set stream\n", __func__);
+
+@@ -1101,10 +1010,9 @@ static int imx219_identify_module(struct imx219 *imx219)
+ {
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ int ret;
+- u32 val;
++ u64 val;
+
+- ret = imx219_read_reg(imx219, IMX219_REG_CHIP_ID,
+- IMX219_REG_VALUE_16BIT, &val);
++ ret = cci_read(imx219->regmap, IMX219_REG_CHIP_ID, &val, NULL);
+ if (ret) {
+ dev_err(&client->dev, "failed to read chip id %x\n",
+ IMX219_CHIP_ID);
+@@ -1112,7 +1020,7 @@ static int imx219_identify_module(struct imx219 *imx219)
+ }
+
+ if (val != IMX219_CHIP_ID) {
+- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
++ dev_err(&client->dev, "chip id mismatch: %x!=%llx\n",
+ IMX219_CHIP_ID, val);
+ return -EIO;
+ }
+@@ -1336,6 +1244,13 @@ static int imx219_probe(struct i2c_client *client)
+ if (imx219_check_hwcfg(dev, imx219))
+ return -EINVAL;
+
++ imx219->regmap = devm_cci_regmap_init_i2c(client, 16);
++ if (IS_ERR(imx219->regmap)) {
++ ret = PTR_ERR(imx219->regmap);
++ dev_err(dev, "failed to initialize CCI: %d\n", ret);
++ return ret;
++ }
++
+ /* Get system clock (xclk) */
+ imx219->xclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(imx219->xclk)) {
+@@ -1379,17 +1294,19 @@ static int imx219_probe(struct i2c_client *client)
+ * streaming is started, so upon power up switch the modes to:
+ * streaming -> standby
+ */
+- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
+- IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
++ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
++ IMX219_MODE_STREAMING, NULL);
+ if (ret < 0)
+ goto error_power_off;
++
+ usleep_range(100, 110);
+
+ /* put sensor back to standby mode */
+- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
+- IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
++ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
++ IMX219_MODE_STANDBY, NULL);
+ if (ret < 0)
+ goto error_power_off;
++
+ usleep_range(100, 110);
+
+ ret = imx219_init_controls(imx219);
+diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
+index be84ff1e2b170..fc1cf196ef015 100644
+--- a/drivers/media/i2c/max9286.c
++++ b/drivers/media/i2c/max9286.c
+@@ -1449,7 +1449,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
+
+ i2c_mux_mask |= BIT(id);
+ }
+- of_node_put(node);
+ of_node_put(i2c_mux);
+
+ /* Parse the endpoints */
+@@ -1513,7 +1512,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
+ priv->source_mask |= BIT(ep.port);
+ priv->nsources++;
+ }
+- of_node_put(node);
+
+ of_property_read_u32(dev->of_node, "maxim,bus-width", &priv->bus_width);
+ switch (priv->bus_width) {
+diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
+index dbc642c5995b6..8ebdb32dd3dbc 100644
+--- a/drivers/media/i2c/ov13b10.c
++++ b/drivers/media/i2c/ov13b10.c
+@@ -1501,7 +1501,7 @@ static int ov13b10_probe(struct i2c_client *client)
+
+ full_power = acpi_dev_state_d0(&client->dev);
+ if (full_power) {
+- ov13b10_power_on(&client->dev);
++ ret = ov13b10_power_on(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to power on\n");
+ return ret;
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 5fe85aa2d2ec4..40532f7bcabea 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -2850,12 +2850,22 @@ static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
+ return 0;
+ }
+
++static void __v4l2_ctrl_vblank_update(struct ov5640_dev *sensor, u32 vblank)
++{
++ const struct ov5640_mode_info *mode = sensor->current_mode;
++
++ __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
++ OV5640_MAX_VTS - mode->height, 1, vblank);
++
++ __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
++}
++
+ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ {
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+ enum ov5640_pixel_rate_id pixel_rate_id = mode->pixel_rate;
+ struct v4l2_mbus_framefmt *fmt = &sensor->fmt;
+- const struct ov5640_timings *timings;
++ const struct ov5640_timings *timings = ov5640_timings(sensor, mode);
+ s32 exposure_val, exposure_max;
+ unsigned int hblank;
+ unsigned int i = 0;
+@@ -2874,6 +2884,8 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ ov5640_calc_pixel_rate(sensor));
+
++ __v4l2_ctrl_vblank_update(sensor, timings->vblank_def);
++
+ return 0;
+ }
+
+@@ -2916,15 +2928,12 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate, pixel_rate);
+ __v4l2_ctrl_s_ctrl(sensor->ctrls.link_freq, i);
+
+- timings = ov5640_timings(sensor, mode);
+ hblank = timings->htot - mode->width;
+ __v4l2_ctrl_modify_range(sensor->ctrls.hblank,
+ hblank, hblank, 1, hblank);
+
+ vblank = timings->vblank_def;
+- __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
+- OV5640_MAX_VTS - mode->height, 1, vblank);
+- __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
++ __v4l2_ctrl_vblank_update(sensor, vblank);
+
+ exposure_max = timings->crop.height + vblank - 4;
+ exposure_val = clamp_t(s32, sensor->ctrls.exposure->val,
+@@ -3919,7 +3928,7 @@ static int ov5640_probe(struct i2c_client *client)
+ ret = ov5640_sensor_resume(dev);
+ if (ret) {
+ dev_err(dev, "failed to power on\n");
+- goto entity_cleanup;
++ goto free_ctrls;
+ }
+
+ pm_runtime_set_active(dev);
+@@ -3944,8 +3953,9 @@ static int ov5640_probe(struct i2c_client *client)
+ err_pm_runtime:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+- v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ ov5640_sensor_suspend(dev);
++free_ctrls:
++ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ entity_cleanup:
+ media_entity_cleanup(&sensor->sd.entity);
+ mutex_destroy(&sensor->lock);
+diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
+index aa708a0e5eac6..09a193bb87df3 100644
+--- a/drivers/media/pci/bt8xx/bttv-driver.c
++++ b/drivers/media/pci/bt8xx/bttv-driver.c
+@@ -3474,6 +3474,7 @@ static void bttv_remove(struct pci_dev *pci_dev)
+
+ /* free resources */
+ free_irq(btv->c.pci->irq,btv);
++ del_timer_sync(&btv->timeout);
+ iounmap(btv->bt848_mmio);
+ release_mem_region(pci_resource_start(btv->c.pci,0),
+ pci_resource_len(btv->c.pci,0));
+diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
+index 74edcc76d12f4..6e1a0614e6d06 100644
+--- a/drivers/media/pci/cobalt/cobalt-driver.c
++++ b/drivers/media/pci/cobalt/cobalt-driver.c
+@@ -8,6 +8,7 @@
+ * All rights reserved.
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+ #include <media/i2c/adv7604.h>
+ #include <media/i2c/adv7842.h>
+@@ -210,17 +211,17 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
+ pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &stat);
+ cobalt_info("PCIe link capability 0x%08x: %s per lane and %u lanes\n",
+ capa, get_link_speed(capa),
+- (capa & PCI_EXP_LNKCAP_MLW) >> 4);
++ FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
+ cobalt_info("PCIe link control 0x%04x\n", ctrl);
+ cobalt_info("PCIe link status 0x%04x: %s per lane and %u lanes\n",
+ stat, get_link_speed(stat),
+- (stat & PCI_EXP_LNKSTA_NLW) >> 4);
++ FIELD_GET(PCI_EXP_LNKSTA_NLW, stat));
+
+ /* Bus */
+ pcie_capability_read_dword(pci_bus_dev, PCI_EXP_LNKCAP, &capa);
+ cobalt_info("PCIe bus link capability 0x%08x: %s per lane and %u lanes\n",
+ capa, get_link_speed(capa),
+- (capa & PCI_EXP_LNKCAP_MLW) >> 4);
++ FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
+
+ /* Slot */
+ pcie_capability_read_dword(pci_dev, PCI_EXP_SLTCAP, &capa);
+@@ -239,7 +240,7 @@ static unsigned pcie_link_get_lanes(struct cobalt *cobalt)
+ if (!pci_is_pcie(pci_dev))
+ return 0;
+ pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &link);
+- return (link & PCI_EXP_LNKSTA_NLW) >> 4;
++ return FIELD_GET(PCI_EXP_LNKSTA_NLW, link);
+ }
+
+ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
+@@ -250,7 +251,7 @@ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
+ if (!pci_is_pcie(pci_dev))
+ return 0;
+ pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &link);
+- return (link & PCI_EXP_LNKCAP_MLW) >> 4;
++ return FIELD_GET(PCI_EXP_LNKCAP_MLW, link);
+ }
+
+ static void msi_config_show(struct cobalt *cobalt, struct pci_dev *pci_dev)
+diff --git a/drivers/media/platform/amphion/vpu_defs.h b/drivers/media/platform/amphion/vpu_defs.h
+index 667637eedb5d4..7320852668d64 100644
+--- a/drivers/media/platform/amphion/vpu_defs.h
++++ b/drivers/media/platform/amphion/vpu_defs.h
+@@ -71,6 +71,7 @@ enum {
+ VPU_MSG_ID_TIMESTAMP_INFO,
+ VPU_MSG_ID_FIRMWARE_XCPT,
+ VPU_MSG_ID_PIC_SKIPPED,
++ VPU_MSG_ID_DBG_MSG,
+ };
+
+ enum VPU_ENC_MEMORY_RESOURSE {
+diff --git a/drivers/media/platform/amphion/vpu_helpers.c b/drivers/media/platform/amphion/vpu_helpers.c
+index af3b336e5dc32..d12310af9ebce 100644
+--- a/drivers/media/platform/amphion/vpu_helpers.c
++++ b/drivers/media/platform/amphion/vpu_helpers.c
+@@ -489,6 +489,7 @@ const char *vpu_id_name(u32 id)
+ case VPU_MSG_ID_UNSUPPORTED: return "unsupported";
+ case VPU_MSG_ID_FIRMWARE_XCPT: return "exception";
+ case VPU_MSG_ID_PIC_SKIPPED: return "skipped";
++ case VPU_MSG_ID_DBG_MSG: return "debug msg";
+ }
+ return "<unknown>";
+ }
+diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
+index f771661980c01..d3425de7bccd3 100644
+--- a/drivers/media/platform/amphion/vpu_malone.c
++++ b/drivers/media/platform/amphion/vpu_malone.c
+@@ -745,6 +745,7 @@ static struct vpu_pair malone_msgs[] = {
+ {VPU_MSG_ID_UNSUPPORTED, VID_API_EVENT_UNSUPPORTED_STREAM},
+ {VPU_MSG_ID_FIRMWARE_XCPT, VID_API_EVENT_FIRMWARE_XCPT},
+ {VPU_MSG_ID_PIC_SKIPPED, VID_API_EVENT_PIC_SKIPPED},
++ {VPU_MSG_ID_DBG_MSG, VID_API_EVENT_DBG_MSG_DEC},
+ };
+
+ static void vpu_malone_pack_fs_alloc(struct vpu_rpc_event *pkt,
+diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c
+index d0ead051f7d18..b74a407a19f22 100644
+--- a/drivers/media/platform/amphion/vpu_msgs.c
++++ b/drivers/media/platform/amphion/vpu_msgs.c
+@@ -23,6 +23,7 @@
+ struct vpu_msg_handler {
+ u32 id;
+ void (*done)(struct vpu_inst *inst, struct vpu_rpc_event *pkt);
++ u32 is_str;
+ };
+
+ static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+@@ -154,7 +155,7 @@ static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event
+ {
+ char *str = (char *)pkt->data;
+
+- if (strlen(str))
++ if (*str)
+ dev_err(inst->dev, "instance %d firmware error : %s\n", inst->id, str);
+ else
+ dev_err(inst->dev, "instance %d is unsupported stream\n", inst->id);
+@@ -180,6 +181,21 @@ static void vpu_session_handle_pic_skipped(struct vpu_inst *inst, struct vpu_rpc
+ vpu_inst_unlock(inst);
+ }
+
++static void vpu_session_handle_dbg_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
++{
++ char *str = (char *)pkt->data;
++
++ if (*str)
++ dev_info(inst->dev, "instance %d firmware dbg msg : %s\n", inst->id, str);
++}
++
++static void vpu_terminate_string_msg(struct vpu_rpc_event *pkt)
++{
++ if (pkt->hdr.num == ARRAY_SIZE(pkt->data))
++ pkt->hdr.num--;
++ pkt->data[pkt->hdr.num] = 0;
++}
++
+ static struct vpu_msg_handler handlers[] = {
+ {VPU_MSG_ID_START_DONE, vpu_session_handle_start_done},
+ {VPU_MSG_ID_STOP_DONE, vpu_session_handle_stop_done},
+@@ -193,9 +209,10 @@ static struct vpu_msg_handler handlers[] = {
+ {VPU_MSG_ID_PIC_DECODED, vpu_session_handle_pic_decoded},
+ {VPU_MSG_ID_DEC_DONE, vpu_session_handle_pic_done},
+ {VPU_MSG_ID_PIC_EOS, vpu_session_handle_eos},
+- {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error},
+- {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt},
++ {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error, true},
++ {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt, true},
+ {VPU_MSG_ID_PIC_SKIPPED, vpu_session_handle_pic_skipped},
++ {VPU_MSG_ID_DBG_MSG, vpu_session_handle_dbg_msg, true},
+ };
+
+ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *msg)
+@@ -219,8 +236,12 @@ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *m
+ }
+ }
+
+- if (handler && handler->done)
+- handler->done(inst, msg);
++ if (handler) {
++ if (handler->is_str)
++ vpu_terminate_string_msg(msg);
++ if (handler->done)
++ handler->done(inst, msg);
++ }
+
+ vpu_response_cmd(inst, msg_id, 1);
+
+diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
+index 0d879d71d8185..9231ee7e9b3a9 100644
+--- a/drivers/media/platform/cadence/cdns-csi2rx.c
++++ b/drivers/media/platform/cadence/cdns-csi2rx.c
+@@ -479,8 +479,10 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
+ asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
+ struct v4l2_async_connection);
+ of_node_put(ep);
+- if (IS_ERR(asd))
++ if (IS_ERR(asd)) {
++ v4l2_async_nf_cleanup(&csi2rx->notifier);
+ return PTR_ERR(asd);
++ }
+
+ csi2rx->notifier.ops = &csi2rx_notifier_ops;
+
+@@ -543,6 +545,7 @@ static int csi2rx_probe(struct platform_device *pdev)
+ return 0;
+
+ err_cleanup:
++ v4l2_async_nf_unregister(&csi2rx->notifier);
+ v4l2_async_nf_cleanup(&csi2rx->notifier);
+ err_free_priv:
+ kfree(csi2rx);
+@@ -553,6 +556,8 @@ static void csi2rx_remove(struct platform_device *pdev)
+ {
+ struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);
+
++ v4l2_async_nf_unregister(&csi2rx->notifier);
++ v4l2_async_nf_cleanup(&csi2rx->notifier);
+ v4l2_async_unregister_subdev(&csi2rx->subdev);
+ kfree(csi2rx);
+ }
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+index 2bbc48c7402ca..f8fa3b841ccfb 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+@@ -127,6 +127,7 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base)
+ u32 img_stride;
+ u32 mem_stride;
+ u32 i, enc_quality;
++ u32 nr_enc_quality = ARRAY_SIZE(mtk_jpeg_enc_quality);
+
+ value = width << 16 | height;
+ writel(value, base + JPEG_ENC_IMG_SIZE);
+@@ -157,8 +158,8 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base)
+ writel(img_stride, base + JPEG_ENC_IMG_STRIDE);
+ writel(mem_stride, base + JPEG_ENC_STRIDE);
+
+- enc_quality = mtk_jpeg_enc_quality[0].hardware_value;
+- for (i = 0; i < ARRAY_SIZE(mtk_jpeg_enc_quality); i++) {
++ enc_quality = mtk_jpeg_enc_quality[nr_enc_quality - 1].hardware_value;
++ for (i = 0; i < nr_enc_quality; i++) {
+ if (ctx->enc_quality <= mtk_jpeg_enc_quality[i].quality_param) {
+ enc_quality = mtk_jpeg_enc_quality[i].hardware_value;
+ break;
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+index 3177592490bee..6adac857a4779 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+@@ -261,11 +261,11 @@ static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose;
+ u32 out = 0;
+
++ ctx = &path->comps[index];
+ if (CFG_CHECK(MT8183, p_id))
+ out = CFG_COMP(MT8183, ctx->param, outputs[0]);
+
+ compose = path->composes[out];
+- ctx = &path->comps[index];
+ ret = call_op(ctx, config_frame, cmd, compose);
+ if (ret)
+ return ret;
+diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
+index 908602031fd0e..9ce34a3b5ee67 100644
+--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
++++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
+@@ -47,20 +47,32 @@ EXPORT_SYMBOL(mtk_vcodec_write_vdecsys);
+
+ int mtk_vcodec_mem_alloc(void *priv, struct mtk_vcodec_mem *mem)
+ {
++ enum mtk_instance_type inst_type = *((unsigned int *)priv);
++ struct platform_device *plat_dev;
+ unsigned long size = mem->size;
+- struct mtk_vcodec_dec_ctx *ctx = priv;
+- struct device *dev = &ctx->dev->plat_dev->dev;
++ int id;
+
+- mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
++ if (inst_type == MTK_INST_ENCODER) {
++ struct mtk_vcodec_enc_ctx *enc_ctx = priv;
++
++ plat_dev = enc_ctx->dev->plat_dev;
++ id = enc_ctx->id;
++ } else {
++ struct mtk_vcodec_dec_ctx *dec_ctx = priv;
++
++ plat_dev = dec_ctx->dev->plat_dev;
++ id = dec_ctx->id;
++ }
++
++ mem->va = dma_alloc_coherent(&plat_dev->dev, size, &mem->dma_addr, GFP_KERNEL);
+ if (!mem->va) {
+- mtk_v4l2_vdec_err(ctx, "%s dma_alloc size=%ld failed!", dev_name(dev), size);
++ mtk_v4l2_err(plat_dev, "%s dma_alloc size=%ld failed!",
++ dev_name(&plat_dev->dev), size);
+ return -ENOMEM;
+ }
+
+- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - va = %p", ctx->id, mem->va);
+- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - dma = 0x%lx", ctx->id,
+- (unsigned long)mem->dma_addr);
+- mtk_v4l2_vdec_dbg(3, ctx, "[%d] size = 0x%lx", ctx->id, size);
++ mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%lx", id, mem->va,
++ (unsigned long)mem->dma_addr, size);
+
+ return 0;
+ }
+@@ -68,21 +80,33 @@ EXPORT_SYMBOL(mtk_vcodec_mem_alloc);
+
+ void mtk_vcodec_mem_free(void *priv, struct mtk_vcodec_mem *mem)
+ {
++ enum mtk_instance_type inst_type = *((unsigned int *)priv);
++ struct platform_device *plat_dev;
+ unsigned long size = mem->size;
+- struct mtk_vcodec_dec_ctx *ctx = priv;
+- struct device *dev = &ctx->dev->plat_dev->dev;
++ int id;
++
++ if (inst_type == MTK_INST_ENCODER) {
++ struct mtk_vcodec_enc_ctx *enc_ctx = priv;
++
++ plat_dev = enc_ctx->dev->plat_dev;
++ id = enc_ctx->id;
++ } else {
++ struct mtk_vcodec_dec_ctx *dec_ctx = priv;
++
++ plat_dev = dec_ctx->dev->plat_dev;
++ id = dec_ctx->id;
++ }
+
+ if (!mem->va) {
+- mtk_v4l2_vdec_err(ctx, "%s dma_free size=%ld failed!", dev_name(dev), size);
++ mtk_v4l2_err(plat_dev, "%s dma_free size=%ld failed!",
++ dev_name(&plat_dev->dev), size);
+ return;
+ }
+
+- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - va = %p", ctx->id, mem->va);
+- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - dma = 0x%lx", ctx->id,
+- (unsigned long)mem->dma_addr);
+- mtk_v4l2_vdec_dbg(3, ctx, "[%d] size = 0x%lx", ctx->id, size);
++ mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%lx", id, mem->va,
++ (unsigned long)mem->dma_addr, size);
+
+- dma_free_coherent(dev, size, mem->va, mem->dma_addr);
++ dma_free_coherent(&plat_dev->dev, size, mem->va, mem->dma_addr);
+ mem->va = NULL;
+ mem->dma_addr = 0;
+ mem->size = 0;
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
+index ae6290d28f8e9..84ad1cc6ad171 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
+@@ -154,6 +154,11 @@ int vpu_enc_init(struct venc_vpu_inst *vpu)
+ return -EINVAL;
+ }
+
++ if (IS_ERR_OR_NULL(vpu->vsi)) {
++ mtk_venc_err(vpu->ctx, "invalid venc vsi");
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+index b7a720198ce57..0c8b204535ffc 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+@@ -1322,6 +1322,20 @@ static bool mxc_jpeg_compare_format(const struct mxc_jpeg_fmt *fmt1,
+ return false;
+ }
+
++static void mxc_jpeg_set_last_buffer(struct mxc_jpeg_ctx *ctx)
++{
++ struct vb2_v4l2_buffer *next_dst_buf;
++
++ next_dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
++ if (!next_dst_buf) {
++ ctx->fh.m2m_ctx->is_draining = true;
++ ctx->fh.m2m_ctx->next_buf_last = true;
++ return;
++ }
++
++ v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, next_dst_buf);
++}
++
+ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
+ struct mxc_jpeg_src_buf *jpeg_src_buf)
+ {
+@@ -1334,7 +1348,8 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
+ q_data_cap = mxc_jpeg_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (mxc_jpeg_compare_format(q_data_cap->fmt, jpeg_src_buf->fmt))
+ jpeg_src_buf->fmt = q_data_cap->fmt;
+- if (q_data_cap->fmt != jpeg_src_buf->fmt ||
++ if (ctx->need_initial_source_change_evt ||
++ q_data_cap->fmt != jpeg_src_buf->fmt ||
+ q_data_cap->w != jpeg_src_buf->w ||
+ q_data_cap->h != jpeg_src_buf->h) {
+ dev_dbg(dev, "Detected jpeg res=(%dx%d)->(%dx%d), pixfmt=%c%c%c%c\n",
+@@ -1378,6 +1393,9 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
+ mxc_jpeg_sizeimage(q_data_cap);
+ notify_src_chg(ctx);
+ ctx->source_change = 1;
++ ctx->need_initial_source_change_evt = false;
++ if (vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx)))
++ mxc_jpeg_set_last_buffer(ctx);
+ }
+
+ return ctx->source_change ? true : false;
+@@ -1595,6 +1613,9 @@ static int mxc_jpeg_queue_setup(struct vb2_queue *q,
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = mxc_jpeg_get_plane_size(q_data, i);
+
++ if (V4L2_TYPE_IS_OUTPUT(q->type))
++ ctx->need_initial_source_change_evt = true;
++
+ return 0;
+ }
+
+@@ -1638,8 +1659,13 @@ static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+
+- if (V4L2_TYPE_IS_OUTPUT(q->type) || !ctx->source_change)
+- v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
++ v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
++	/* If V4L2_DEC_CMD_STOP is sent before the source change is triggered,
++ * restore the is_draining flag
++ */
++ if (V4L2_TYPE_IS_CAPTURE(q->type) && ctx->source_change && ctx->fh.m2m_ctx->last_src_buf)
++ ctx->fh.m2m_ctx->is_draining = true;
++
+ if (V4L2_TYPE_IS_OUTPUT(q->type) &&
+ v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) {
+ notify_eos(ctx);
+@@ -1916,7 +1942,7 @@ static int mxc_jpeg_buf_prepare(struct vb2_buffer *vb)
+ return -EINVAL;
+ for (i = 0; i < q_data->fmt->mem_planes; i++) {
+ sizeimage = mxc_jpeg_get_plane_size(q_data, i);
+- if (vb2_plane_size(vb, i) < sizeimage) {
++ if (!ctx->source_change && vb2_plane_size(vb, i) < sizeimage) {
+ dev_err(dev, "plane %d too small (%lu < %lu)",
+ i, vb2_plane_size(vb, i), sizeimage);
+ return -EINVAL;
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+index d80e94cc9d992..dc4afeeff5b65 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+@@ -99,6 +99,7 @@ struct mxc_jpeg_ctx {
+ enum mxc_jpeg_enc_state enc_state;
+ int slot;
+ unsigned int source_change;
++ bool need_initial_source_change_evt;
+ bool header_parsed;
+ struct v4l2_ctrl_handler ctrl_handler;
+ u8 jpeg_quality;
+diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen2.c b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+index 0f8ac29d038db..23acc387be5f0 100644
+--- a/drivers/media/platform/qcom/camss/camss-csid-gen2.c
++++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+@@ -355,9 +355,6 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
+ u8 dt_id = vc;
+
+ if (tg->enabled) {
+- /* Config Test Generator */
+- vc = 0xa;
+-
+ /* configure one DT, infinite frames */
+ val = vc << TPG_VC_CFG0_VC_NUM;
+ val |= INTELEAVING_MODE_ONE_SHOT << TPG_VC_CFG0_LINE_INTERLEAVING_MODE;
+@@ -370,14 +367,14 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
+
+ writel_relaxed(0x12345678, csid->base + CSID_TPG_LFSR_SEED);
+
+- val = input_format->height & 0x1fff << TPG_DT_n_CFG_0_FRAME_HEIGHT;
+- val |= input_format->width & 0x1fff << TPG_DT_n_CFG_0_FRAME_WIDTH;
++ val = (input_format->height & 0x1fff) << TPG_DT_n_CFG_0_FRAME_HEIGHT;
++ val |= (input_format->width & 0x1fff) << TPG_DT_n_CFG_0_FRAME_WIDTH;
+ writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_0(0));
+
+ val = format->data_type << TPG_DT_n_CFG_1_DATA_TYPE;
+ writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_1(0));
+
+- val = tg->mode << TPG_DT_n_CFG_2_PAYLOAD_MODE;
++ val = (tg->mode - 1) << TPG_DT_n_CFG_2_PAYLOAD_MODE;
+ val |= 0xBE << TPG_DT_n_CFG_2_USER_SPECIFIED_PAYLOAD;
+ val |= format->decode_format << TPG_DT_n_CFG_2_ENCODE_FORMAT;
+ writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_2(0));
+@@ -449,6 +446,8 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
+ writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG0);
+
+ val = 1 << CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
++ if (vc > 3)
++ val |= 1 << CSI2_RX_CFG1_VC_MODE;
+ val |= 1 << CSI2_RX_CFG1_MISR_EN;
+ writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1);
+
+diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+index 04baa80494c66..4dba61b8d3f2a 100644
+--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
++++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+@@ -476,7 +476,7 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
+
+ settle_cnt = csiphy_settle_cnt_calc(link_freq, csiphy->timer_clk_rate);
+
+- val = is_gen2 ? BIT(7) : CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
++ val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
+ for (i = 0; i < c->num_data; i++)
+ val |= BIT(c->data[i].pos * 2);
+
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
+index 02494c89da91c..168baaa80d4e6 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
+@@ -7,7 +7,6 @@
+ * Copyright (C) 2020-2021 Linaro Ltd.
+ */
+
+-#include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+@@ -494,35 +493,20 @@ static int vfe_enable_output(struct vfe_line *line)
+ return 0;
+ }
+
+-static int vfe_disable_output(struct vfe_line *line)
++static void vfe_disable_output(struct vfe_line *line)
+ {
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output = &line->output;
+ unsigned long flags;
+ unsigned int i;
+- bool done;
+- int timeout = 0;
+-
+- do {
+- spin_lock_irqsave(&vfe->output_lock, flags);
+- done = !output->gen2.active_num;
+- spin_unlock_irqrestore(&vfe->output_lock, flags);
+- usleep_range(10000, 20000);
+-
+- if (timeout++ == 100) {
+- dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
+- vfe_reset(vfe);
+- output->gen2.active_num = 0;
+- return 0;
+- }
+- } while (!done);
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+ for (i = 0; i < output->wm_num; i++)
+ vfe_wm_stop(vfe, output->wm_idx[i]);
++ output->gen2.active_num = 0;
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+- return 0;
++ vfe_reset(vfe);
+ }
+
+ /*
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+index f70aad2e8c237..8ddb8016434ae 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-480.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+@@ -8,7 +8,6 @@
+ * Copyright (C) 2021 Jonathan Marek
+ */
+
+-#include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+@@ -328,35 +327,20 @@ static int vfe_enable_output(struct vfe_line *line)
+ return 0;
+ }
+
+-static int vfe_disable_output(struct vfe_line *line)
++static void vfe_disable_output(struct vfe_line *line)
+ {
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output = &line->output;
+ unsigned long flags;
+ unsigned int i;
+- bool done;
+- int timeout = 0;
+-
+- do {
+- spin_lock_irqsave(&vfe->output_lock, flags);
+- done = !output->gen2.active_num;
+- spin_unlock_irqrestore(&vfe->output_lock, flags);
+- usleep_range(10000, 20000);
+-
+- if (timeout++ == 100) {
+- dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
+- vfe_reset(vfe);
+- output->gen2.active_num = 0;
+- return 0;
+- }
+- } while (!done);
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+ for (i = 0; i < output->wm_num; i++)
+ vfe_wm_stop(vfe, output->wm_idx[i]);
++ output->gen2.active_num = 0;
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+- return 0;
++ vfe_reset(vfe);
+ }
+
+ /*
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
+index 06c95568e5af4..965500b83d073 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe.c
+@@ -535,7 +535,8 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
+ struct camss_clock *clock = &vfe->clock[i];
+
+ if (!strcmp(clock->name, "vfe0") ||
+- !strcmp(clock->name, "vfe1")) {
++ !strcmp(clock->name, "vfe1") ||
++ !strcmp(clock->name, "vfe_lite")) {
+ u64 min_rate = 0;
+ unsigned long rate;
+
+@@ -611,7 +612,7 @@ int vfe_get(struct vfe_device *vfe)
+ } else {
+ ret = vfe_check_clock_rates(vfe);
+ if (ret < 0)
+- goto error_pm_runtime_get;
++ goto error_pm_domain;
+ }
+ vfe->power_count++;
+
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index f11dc59135a5a..75991d849b571 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1619,6 +1619,12 @@ static int camss_probe(struct platform_device *pdev)
+ if (ret < 0)
+ goto err_cleanup;
+
++ ret = camss_configure_pd(camss);
++ if (ret < 0) {
++ dev_err(dev, "Failed to configure power domains: %d\n", ret);
++ goto err_cleanup;
++ }
++
+ ret = camss_init_subdevices(camss);
+ if (ret < 0)
+ goto err_cleanup;
+@@ -1678,12 +1684,6 @@ static int camss_probe(struct platform_device *pdev)
+ }
+ }
+
+- ret = camss_configure_pd(camss);
+- if (ret < 0) {
+- dev_err(dev, "Failed to configure power domains: %d\n", ret);
+- return ret;
+- }
+-
+ pm_runtime_enable(dev);
+
+ return 0;
+diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
+index 7cab685a2ec80..0a041b4db9efc 100644
+--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
++++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
+@@ -398,7 +398,7 @@ session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
+ memcpy(&bufreq[idx], buf_req, sizeof(*bufreq));
+ idx++;
+
+- if (idx > HFI_BUFFER_TYPE_MAX)
++ if (idx >= HFI_BUFFER_TYPE_MAX)
+ return HFI_ERR_SESSION_INVALID_PARAMETER;
+
+ req_bytes -= sizeof(struct hfi_buffer_requirements);
+diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
+index 6cf74b2bc5ae3..c43839539d4dd 100644
+--- a/drivers/media/platform/qcom/venus/hfi_parser.c
++++ b/drivers/media/platform/qcom/venus/hfi_parser.c
+@@ -19,6 +19,9 @@ static void init_codecs(struct venus_core *core)
+ struct hfi_plat_caps *caps = core->caps, *cap;
+ unsigned long bit;
+
++ if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM)
++ return;
++
+ for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
+ cap = &caps[core->codecs_count++];
+ cap->codec = BIT(bit);
+@@ -86,6 +89,9 @@ static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
+ {
+ const struct hfi_profile_level *pl = data;
+
++ if (cap->num_pl + num >= HFI_MAX_PROFILE_COUNT)
++ return;
++
+ memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
+ cap->num_pl += num;
+ }
+@@ -111,6 +117,9 @@ fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num)
+ {
+ const struct hfi_capability *caps = data;
+
++ if (cap->num_caps + num >= MAX_CAP_ENTRIES)
++ return;
++
+ memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps));
+ cap->num_caps += num;
+ }
+@@ -137,6 +146,9 @@ static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
+ {
+ const struct raw_formats *formats = fmts;
+
++ if (cap->num_fmts + num_fmts >= MAX_FMT_ENTRIES)
++ return;
++
+ memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats));
+ cap->num_fmts += num_fmts;
+ }
+@@ -159,6 +171,9 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ rawfmts[i].buftype = fmt->buffer_type;
+ i++;
+
++ if (i >= MAX_FMT_ENTRIES)
++ return;
++
+ if (pinfo->num_planes > MAX_PLANES)
+ break;
+
+diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
+index 19fc6575a4891..f9437b6412b91 100644
+--- a/drivers/media/platform/qcom/venus/hfi_venus.c
++++ b/drivers/media/platform/qcom/venus/hfi_venus.c
+@@ -205,6 +205,11 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
+
+ new_wr_idx = wr_idx + dwords;
+ wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
++
++ if (wr_ptr < (u32 *)queue->qmem.kva ||
++ wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr)))
++ return -EINVAL;
++
+ if (new_wr_idx < qsize) {
+ memcpy(wr_ptr, packet, dwords << 2);
+ } else {
+@@ -272,6 +277,11 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
+ }
+
+ rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
++
++ if (rd_ptr < (u32 *)queue->qmem.kva ||
++ rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr)))
++ return -EINVAL;
++
+ dwords = *rd_ptr >> 2;
+ if (!dwords)
+ return -EINVAL;
+diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+index 76634d242b103..0f5b3845d7b94 100644
+--- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c
++++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+@@ -1133,12 +1133,12 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
+
+ ret = vb2_queue_init(q);
+ if (ret)
+- goto err_vd_rel;
++ return ret;
+
+ vp->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &vp->pad);
+ if (ret)
+- goto err_vd_rel;
++ return ret;
+
+ video_set_drvdata(vfd, vp);
+
+@@ -1171,8 +1171,6 @@ err_ctrlh_free:
+ v4l2_ctrl_handler_free(&vp->ctrl_handler);
+ err_me_cleanup:
+ media_entity_cleanup(&vfd->entity);
+-err_vd_rel:
+- video_device_release(vfd);
+ return ret;
+ }
+
+diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
+index 423fc85d79ee3..50ec24c753e9e 100644
+--- a/drivers/media/platform/verisilicon/hantro_drv.c
++++ b/drivers/media/platform/verisilicon/hantro_drv.c
+@@ -125,7 +125,8 @@ void hantro_watchdog(struct work_struct *work)
+ ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+ if (ctx) {
+ vpu_err("frame processing timed out!\n");
+- ctx->codec_ops->reset(ctx);
++ if (ctx->codec_ops->reset)
++ ctx->codec_ops->reset(ctx);
+ hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
+ }
+ }
+diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
+index 0224ff68ab3fc..64d6fb852ae9b 100644
+--- a/drivers/media/platform/verisilicon/hantro_postproc.c
++++ b/drivers/media/platform/verisilicon/hantro_postproc.c
+@@ -107,7 +107,7 @@ static void hantro_postproc_g1_enable(struct hantro_ctx *ctx)
+
+ static int down_scale_factor(struct hantro_ctx *ctx)
+ {
+- if (ctx->src_fmt.width == ctx->dst_fmt.width)
++ if (ctx->src_fmt.width <= ctx->dst_fmt.width)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(ctx->src_fmt.width, ctx->dst_fmt.width);
+diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
+index 816ffa905a4bb..f975276707835 100644
+--- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
++++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
+@@ -648,7 +648,7 @@ static const char * const rockchip_vpu_clk_names[] = {
+ };
+
+ static const char * const rk3588_vpu981_vpu_clk_names[] = {
+- "aclk", "hclk", "aclk_vdpu_root", "hclk_vdpu_root"
++ "aclk", "hclk",
+ };
+
+ /* VDPU1/VEPU1 */
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 74546f7e34691..5719dda6e0f0e 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -2427,6 +2427,12 @@ static int imon_probe(struct usb_interface *interface,
+ goto fail;
+ }
+
++ if (first_if->dev.driver != interface->dev.driver) {
++ dev_err(&interface->dev, "inconsistent driver matching\n");
++ ret = -EINVAL;
++ goto fail;
++ }
++
+ if (ifnum == 0) {
+ ictx = imon_init_intf0(interface, id);
+ if (!ictx) {
+diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
+index 3d8488c39c561..3311099cbd573 100644
+--- a/drivers/media/rc/ir-sharp-decoder.c
++++ b/drivers/media/rc/ir-sharp-decoder.c
+@@ -15,7 +15,9 @@
+ #define SHARP_UNIT 40 /* us */
+ #define SHARP_BIT_PULSE (8 * SHARP_UNIT) /* 320us */
+ #define SHARP_BIT_0_PERIOD (25 * SHARP_UNIT) /* 1ms (680us space) */
+-#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680ms space) */
++#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680us space) */
++#define SHARP_BIT_0_SPACE (17 * SHARP_UNIT) /* 680us space */
++#define SHARP_BIT_1_SPACE (42 * SHARP_UNIT) /* 1680us space */
+ #define SHARP_ECHO_SPACE (1000 * SHARP_UNIT) /* 40 ms */
+ #define SHARP_TRAILER_SPACE (125 * SHARP_UNIT) /* 5 ms (even longer) */
+
+@@ -168,8 +170,8 @@ static const struct ir_raw_timings_pd ir_sharp_timings = {
+ .header_pulse = 0,
+ .header_space = 0,
+ .bit_pulse = SHARP_BIT_PULSE,
+- .bit_space[0] = SHARP_BIT_0_PERIOD,
+- .bit_space[1] = SHARP_BIT_1_PERIOD,
++ .bit_space[0] = SHARP_BIT_0_SPACE,
++ .bit_space[1] = SHARP_BIT_1_SPACE,
+ .trailer_pulse = SHARP_BIT_PULSE,
+ .trailer_space = SHARP_ECHO_SPACE,
+ .msb_first = 1,
+diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
+index 043d23aaa3cbc..a537734832c50 100644
+--- a/drivers/media/rc/lirc_dev.c
++++ b/drivers/media/rc/lirc_dev.c
+@@ -276,7 +276,11 @@ static ssize_t lirc_transmit(struct file *file, const char __user *buf,
+ if (ret < 0)
+ goto out_kfree_raw;
+
+- count = ret;
++ /* drop trailing space */
++ if (!(ret % 2))
++ count = ret - 1;
++ else
++ count = ret;
+
+ txbuf = kmalloc_array(count, sizeof(unsigned int), GFP_KERNEL);
+ if (!txbuf) {
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_mux.c b/drivers/media/test-drivers/vidtv/vidtv_mux.c
+index b51e6a3b8cbeb..f99878eff7ace 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_mux.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_mux.c
+@@ -504,13 +504,16 @@ struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
+ m->priv = args->priv;
+ m->network_id = args->network_id;
+ m->network_name = kstrdup(args->network_name, GFP_KERNEL);
++ if (!m->network_name)
++ goto free_mux_buf;
++
+ m->timing.current_jiffies = get_jiffies_64();
+
+ if (args->channels)
+ m->channels = args->channels;
+ else
+ if (vidtv_channels_init(m) < 0)
+- goto free_mux_buf;
++ goto free_mux_network_name;
+
+ /* will alloc data for pmt_sections after initializing pat */
+ if (vidtv_channel_si_init(m) < 0)
+@@ -527,6 +530,8 @@ free_channel_si:
+ vidtv_channel_si_destroy(m);
+ free_channels:
+ vidtv_channels_destroy(m);
++free_mux_network_name:
++ kfree(m->network_name);
+ free_mux_buf:
+ vfree(m->mux_buf);
+ free_mux:
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c
+index ce0b7a6e92dc3..2a51c898c11eb 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_psi.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c
+@@ -301,16 +301,29 @@ struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc
+
+ desc->service_name_len = service_name_len;
+
+- if (service_name && service_name_len)
++ if (service_name && service_name_len) {
+ desc->service_name = kstrdup(service_name, GFP_KERNEL);
++ if (!desc->service_name)
++ goto free_desc;
++ }
+
+ desc->provider_name_len = provider_name_len;
+
+- if (provider_name && provider_name_len)
++ if (provider_name && provider_name_len) {
+ desc->provider_name = kstrdup(provider_name, GFP_KERNEL);
++ if (!desc->provider_name)
++ goto free_desc_service_name;
++ }
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
++
++free_desc_service_name:
++ if (service_name && service_name_len)
++ kfree(desc->service_name);
++free_desc:
++ kfree(desc);
++ return NULL;
+ }
+
+ struct vidtv_psi_desc_registration
+@@ -355,8 +368,13 @@ struct vidtv_psi_desc_network_name
+
+ desc->length = network_name_len;
+
+- if (network_name && network_name_len)
++ if (network_name && network_name_len) {
+ desc->network_name = kstrdup(network_name, GFP_KERNEL);
++ if (!desc->network_name) {
++ kfree(desc);
++ return NULL;
++ }
++ }
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
+@@ -442,15 +460,32 @@ struct vidtv_psi_desc_short_event
+ iso_language_code = "eng";
+
+ desc->iso_language_code = kstrdup(iso_language_code, GFP_KERNEL);
++ if (!desc->iso_language_code)
++ goto free_desc;
+
+- if (event_name && event_name_len)
++ if (event_name && event_name_len) {
+ desc->event_name = kstrdup(event_name, GFP_KERNEL);
++ if (!desc->event_name)
++ goto free_desc_language_code;
++ }
+
+- if (text && text_len)
++ if (text && text_len) {
+ desc->text = kstrdup(text, GFP_KERNEL);
++ if (!desc->text)
++ goto free_desc_event_name;
++ }
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
++
++free_desc_event_name:
++ if (event_name && event_name_len)
++ kfree(desc->event_name);
++free_desc_language_code:
++ kfree(desc->iso_language_code);
++free_desc:
++ kfree(desc);
++ return NULL;
+ }
+
+ struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc)
+diff --git a/drivers/media/test-drivers/vivid/vivid-rds-gen.c b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
+index b5b104ee64c99..c57771119a34b 100644
+--- a/drivers/media/test-drivers/vivid/vivid-rds-gen.c
++++ b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
+@@ -145,7 +145,7 @@ void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq,
+ rds->ta = alt;
+ rds->ms = true;
+ snprintf(rds->psname, sizeof(rds->psname), "%6d.%1d",
+- freq / 16, ((freq & 0xf) * 10) / 16);
++ (freq / 16) % 1000000, (((freq & 0xf) * 10) / 16) % 10);
+ if (alt)
+ strscpy(rds->radiotext,
+ " The Radio Data System can switch between different Radio Texts ",
+diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
+index 33a2aa8907e65..4eb7dd4599b7e 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9035.c
++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
+@@ -322,8 +322,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ ret = -EOPNOTSUPP;
+ } else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ (msg[0].addr == state->af9033_i2c_addr[1])) {
+- if (msg[0].len < 3 || msg[1].len < 1)
+- return -EOPNOTSUPP;
++ if (msg[0].len < 3 || msg[1].len < 1) {
++ ret = -EOPNOTSUPP;
++ goto unlock;
++ }
+ /* demod access via firmware interface */
+ u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ msg[0].buf[2];
+@@ -383,8 +385,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ ret = -EOPNOTSUPP;
+ } else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ (msg[0].addr == state->af9033_i2c_addr[1])) {
+- if (msg[0].len < 3)
+- return -EOPNOTSUPP;
++ if (msg[0].len < 3) {
++ ret = -EOPNOTSUPP;
++ goto unlock;
++ }
+ /* demod access via firmware interface */
+ u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ msg[0].buf[2];
+@@ -459,6 +463,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ ret = -EOPNOTSUPP;
+ }
+
++unlock:
+ mutex_unlock(&d->i2c_mutex);
+
+ if (ret < 0)
+diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
+index 46ed95483e222..5f5fa851ca640 100644
+--- a/drivers/media/usb/gspca/cpia1.c
++++ b/drivers/media/usb/gspca/cpia1.c
+@@ -18,6 +18,7 @@
+
+ #include <linux/input.h>
+ #include <linux/sched/signal.h>
++#include <linux/bitops.h>
+
+ #include "gspca.h"
+
+@@ -1028,6 +1029,8 @@ static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
+ sd->params.exposure.expMode = 2;
+ sd->exposure_status = EXPOSURE_NORMAL;
+ }
++ if (sd->params.exposure.gain >= BITS_PER_TYPE(currentexp))
++ return -EINVAL;
+ currentexp = currentexp << sd->params.exposure.gain;
+ sd->params.exposure.gain = 0;
+ /* round down current exposure to nearest value */
+diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c
+index 9e5b5dbd9c8df..2845041f32d69 100644
+--- a/drivers/memory/tegra/tegra234.c
++++ b/drivers/memory/tegra/tegra234.c
+@@ -986,6 +986,10 @@ static int tegra234_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+ msg.rx.data = &bwmgr_resp;
+ msg.rx.size = sizeof(bwmgr_resp);
+
++ if (pclient->bpmp_id >= TEGRA_ICC_BPMP_CPU_CLUSTER0 &&
++ pclient->bpmp_id <= TEGRA_ICC_BPMP_CPU_CLUSTER2)
++ msg.flags = TEGRA_BPMP_MESSAGE_RESET;
++
+ ret = tegra_bpmp_transfer(mc->bpmp, &msg);
+ if (ret < 0) {
+ dev_err(mc->dev, "BPMP transfer failed: %d\n", ret);
+diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
+index 02cf4f3e91d76..de5d894ac04af 100644
+--- a/drivers/mfd/arizona-spi.c
++++ b/drivers/mfd/arizona-spi.c
+@@ -159,6 +159,9 @@ static int arizona_spi_acpi_probe(struct arizona *arizona)
+ arizona->pdata.micd_ranges = arizona_micd_aosp_ranges;
+ arizona->pdata.num_micd_ranges = ARRAY_SIZE(arizona_micd_aosp_ranges);
+
++ /* Use left headphone speaker for HP vs line-out detection */
++ arizona->pdata.hpdet_channel = ARIZONA_ACCDET_MODE_HPL;
++
+ return 0;
+ }
+
+diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
+index c7510434380a4..fbbe82c6e75b5 100644
+--- a/drivers/mfd/dln2.c
++++ b/drivers/mfd/dln2.c
+@@ -826,7 +826,6 @@ out_stop_rx:
+ dln2_stop_rx_urbs(dln2);
+
+ out_free:
+- usb_put_dev(dln2->usb_dev);
+ dln2_free(dln2);
+
+ return ret;
+diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
+index 699f44ffff0e4..ae5759200622c 100644
+--- a/drivers/mfd/intel-lpss-pci.c
++++ b/drivers/mfd/intel-lpss-pci.c
+@@ -561,6 +561,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0xa3e2), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa3e3), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa3e6), (kernel_ulong_t)&spt_uart_info },
++ /* LNL-M */
++ { PCI_VDEVICE(INTEL, 0xa825), (kernel_ulong_t)&bxt_uart_info },
++ { PCI_VDEVICE(INTEL, 0xa826), (kernel_ulong_t)&bxt_uart_info },
++ { PCI_VDEVICE(INTEL, 0xa827), (kernel_ulong_t)&tgl_info },
++ { PCI_VDEVICE(INTEL, 0xa830), (kernel_ulong_t)&tgl_info },
++ { PCI_VDEVICE(INTEL, 0xa846), (kernel_ulong_t)&tgl_info },
++ { PCI_VDEVICE(INTEL, 0xa850), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa851), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa852), (kernel_ulong_t)&bxt_uart_info },
++ { PCI_VDEVICE(INTEL, 0xa878), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa879), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa87a), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa87b), (kernel_ulong_t)&ehl_i2c_info },
+ { }
+ };
+ MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 0ed7c0d7784e1..2b85509a90fc2 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -146,6 +146,7 @@ static int mfd_add_device(struct device *parent, int id,
+ struct platform_device *pdev;
+ struct device_node *np = NULL;
+ struct mfd_of_node_entry *of_entry, *tmp;
++ bool disabled = false;
+ int ret = -ENOMEM;
+ int platform_id;
+ int r;
+@@ -183,11 +184,10 @@ static int mfd_add_device(struct device *parent, int id,
+ if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) {
+ for_each_child_of_node(parent->of_node, np) {
+ if (of_device_is_compatible(np, cell->of_compatible)) {
+- /* Ignore 'disabled' devices error free */
++ /* Skip 'disabled' devices */
+ if (!of_device_is_available(np)) {
+- of_node_put(np);
+- ret = 0;
+- goto fail_alias;
++ disabled = true;
++ continue;
+ }
+
+ ret = mfd_match_of_node_to_dev(pdev, np, cell);
+@@ -197,10 +197,17 @@ static int mfd_add_device(struct device *parent, int id,
+ if (ret)
+ goto fail_alias;
+
+- break;
++ goto match;
+ }
+ }
+
++ if (disabled) {
++ /* Ignore 'disabled' devices error free */
++ ret = 0;
++ goto fail_alias;
++ }
++
++match:
+ if (!pdev->dev.of_node)
+ pr_warn("%s: Failed to locate of_node [id: %d]\n",
+ cell->name, platform_id);
+diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
+index 7e2cd79d17ebf..8e449cff5cec4 100644
+--- a/drivers/mfd/qcom-spmi-pmic.c
++++ b/drivers/mfd/qcom-spmi-pmic.c
+@@ -30,6 +30,8 @@ struct qcom_spmi_dev {
+ struct qcom_spmi_pmic pmic;
+ };
+
++static DEFINE_MUTEX(pmic_spmi_revid_lock);
++
+ #define N_USIDS(n) ((void *)n)
+
+ static const struct of_device_id pmic_spmi_id_table[] = {
+@@ -76,24 +78,21 @@ static const struct of_device_id pmic_spmi_id_table[] = {
+ *
+ * This only supports PMICs with 1 or 2 USIDs.
+ */
+-static struct spmi_device *qcom_pmic_get_base_usid(struct device *dev)
++static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx)
+ {
+- struct spmi_device *sdev;
+- struct qcom_spmi_dev *ctx;
+ struct device_node *spmi_bus;
+- struct device_node *other_usid = NULL;
++ struct device_node *child;
+ int function_parent_usid, ret;
+ u32 pmic_addr;
+
+- sdev = to_spmi_device(dev);
+- ctx = dev_get_drvdata(&sdev->dev);
+-
+ /*
+ * Quick return if the function device is already in the base
+ * USID. This will always be hit for PMICs with only 1 USID.
+ */
+- if (sdev->usid % ctx->num_usids == 0)
++ if (sdev->usid % ctx->num_usids == 0) {
++ get_device(&sdev->dev);
+ return sdev;
++ }
+
+ function_parent_usid = sdev->usid;
+
+@@ -105,28 +104,61 @@ static struct spmi_device *qcom_pmic_get_base_usid(struct device *dev)
+ * device for USID 2.
+ */
+ spmi_bus = of_get_parent(sdev->dev.of_node);
+- do {
+- other_usid = of_get_next_child(spmi_bus, other_usid);
+-
+- ret = of_property_read_u32_index(other_usid, "reg", 0, &pmic_addr);
+- if (ret)
+- return ERR_PTR(ret);
++ sdev = ERR_PTR(-ENODATA);
++ for_each_child_of_node(spmi_bus, child) {
++ ret = of_property_read_u32_index(child, "reg", 0, &pmic_addr);
++ if (ret) {
++ of_node_put(child);
++ sdev = ERR_PTR(ret);
++ break;
++ }
+
+- sdev = spmi_device_from_of(other_usid);
+ if (pmic_addr == function_parent_usid - (ctx->num_usids - 1)) {
+- if (!sdev)
++ sdev = spmi_device_from_of(child);
++ if (!sdev) {
+ /*
+- * If the base USID for this PMIC hasn't probed yet
+- * but the secondary USID has, then we need to defer
+- * the function driver so that it will attempt to
+- * probe again when the base USID is ready.
++ * If the base USID for this PMIC hasn't been
++ * registered yet then we need to defer.
+ */
+- return ERR_PTR(-EPROBE_DEFER);
+- return sdev;
++ sdev = ERR_PTR(-EPROBE_DEFER);
++ }
++ of_node_put(child);
++ break;
+ }
+- } while (other_usid->sibling);
++ }
+
+- return ERR_PTR(-ENODATA);
++ of_node_put(spmi_bus);
++
++ return sdev;
++}
++
++static int pmic_spmi_get_base_revid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx)
++{
++ struct qcom_spmi_dev *base_ctx;
++ struct spmi_device *base;
++ int ret = 0;
++
++ base = qcom_pmic_get_base_usid(sdev, ctx);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ /*
++ * Copy revid info from base device if it has probed and is still
++ * bound to its driver.
++ */
++ mutex_lock(&pmic_spmi_revid_lock);
++ base_ctx = spmi_device_get_drvdata(base);
++ if (!base_ctx) {
++ ret = -EPROBE_DEFER;
++ goto out_unlock;
++ }
++ memcpy(&ctx->pmic, &base_ctx->pmic, sizeof(ctx->pmic));
++out_unlock:
++ mutex_unlock(&pmic_spmi_revid_lock);
++
++ put_device(&base->dev);
++
++ return ret;
+ }
+
+ static int pmic_spmi_load_revid(struct regmap *map, struct device *dev,
+@@ -204,11 +236,7 @@ const struct qcom_spmi_pmic *qcom_pmic_get(struct device *dev)
+ if (!of_match_device(pmic_spmi_id_table, dev->parent))
+ return ERR_PTR(-EINVAL);
+
+- sdev = qcom_pmic_get_base_usid(dev->parent);
+-
+- if (IS_ERR(sdev))
+- return ERR_CAST(sdev);
+-
++ sdev = to_spmi_device(dev->parent);
+ spmi = dev_get_drvdata(&sdev->dev);
+
+ return &spmi->pmic;
+@@ -243,16 +271,31 @@ static int pmic_spmi_probe(struct spmi_device *sdev)
+ ret = pmic_spmi_load_revid(regmap, &sdev->dev, &ctx->pmic);
+ if (ret < 0)
+ return ret;
++ } else {
++ ret = pmic_spmi_get_base_revid(sdev, ctx);
++ if (ret)
++ return ret;
+ }
++
++ mutex_lock(&pmic_spmi_revid_lock);
+ spmi_device_set_drvdata(sdev, ctx);
++ mutex_unlock(&pmic_spmi_revid_lock);
+
+ return devm_of_platform_populate(&sdev->dev);
+ }
+
++static void pmic_spmi_remove(struct spmi_device *sdev)
++{
++ mutex_lock(&pmic_spmi_revid_lock);
++ spmi_device_set_drvdata(sdev, NULL);
++ mutex_unlock(&pmic_spmi_revid_lock);
++}
++
+ MODULE_DEVICE_TABLE(of, pmic_spmi_id_table);
+
+ static struct spmi_driver pmic_spmi_driver = {
+ .probe = pmic_spmi_probe,
++ .remove = pmic_spmi_remove,
+ .driver = {
+ .name = "pmic-spmi",
+ .of_match_table = pmic_spmi_id_table,
+diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
+index ed4d0ef5e5c31..af519088732d9 100644
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -71,6 +71,7 @@
+ #define PCI_DEVICE_ID_TI_AM654 0xb00c
+ #define PCI_DEVICE_ID_TI_J7200 0xb00f
+ #define PCI_DEVICE_ID_TI_AM64 0xb010
++#define PCI_DEVICE_ID_TI_J721S2 0xb013
+ #define PCI_DEVICE_ID_LS1088A 0x80c0
+ #define PCI_DEVICE_ID_IMX8 0x0808
+
+@@ -81,6 +82,7 @@
+ #define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b
+ #define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
+ #define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
++#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031
+
+ static DEFINE_IDA(pci_endpoint_test_ida);
+
+@@ -990,6 +992,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
++ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
++ .driver_data = (kernel_ulong_t)&default_data,
++ },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
+@@ -999,6 +1004,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
++ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
++ .driver_data = (kernel_ulong_t)&j721e_data,
++ },
+ { }
+ };
+ MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
+diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
+index c1a134bd8ba7b..b878431553abc 100644
+--- a/drivers/misc/ti-st/st_core.c
++++ b/drivers/misc/ti-st/st_core.c
+@@ -15,6 +15,7 @@
+ #include <linux/skbuff.h>
+
+ #include <linux/ti_wilink_st.h>
++#include <linux/netdevice.h>
+
+ /*
+ * function pointer pointing to either,
+@@ -429,7 +430,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
+ case ST_LL_AWAKE_TO_ASLEEP:
+ pr_err("ST LL is illegal state(%ld),"
+ "purging received skb.", st_ll_getstate(st_gdata));
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ break;
+ case ST_LL_ASLEEP:
+ skb_queue_tail(&st_gdata->tx_waitq, skb);
+@@ -438,7 +439,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
+ default:
+ pr_err("ST LL is illegal state(%ld),"
+ "purging received skb.", st_ll_getstate(st_gdata));
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ break;
+ }
+
+@@ -492,7 +493,7 @@ void st_tx_wakeup(struct st_data_s *st_data)
+ spin_unlock_irqrestore(&st_data->lock, flags);
+ break;
+ }
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ spin_unlock_irqrestore(&st_data->lock, flags);
+ }
+ /* if wake-up is set in another context- restart sending */
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 3a8f27c3e310a..f9a5cffa64b1f 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1482,6 +1482,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
++ } else if (mq->in_recovery) {
++ blk_mq_requeue_request(req, true);
+ } else {
+ blk_mq_end_request(req, BLK_STS_OK);
+ }
+@@ -2381,8 +2383,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
+ }
+ ret = mmc_blk_cqe_issue_flush(mq, req);
+ break;
+- case REQ_OP_READ:
+ case REQ_OP_WRITE:
++ card->written_flag = true;
++ fallthrough;
++ case REQ_OP_READ:
+ if (host->cqe_enabled)
+ ret = mmc_blk_cqe_issue_rw_rq(mq, req);
+ else
+diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
+index 4edf9057fa79d..b7754a1b8d978 100644
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -280,4 +280,8 @@ static inline int mmc_card_broken_sd_cache(const struct mmc_card *c)
+ return c->quirks & MMC_QUIRK_BROKEN_SD_CACHE;
+ }
+
++static inline int mmc_card_broken_cache_flush(const struct mmc_card *c)
++{
++ return c->quirks & MMC_QUIRK_BROKEN_CACHE_FLUSH;
++}
+ #endif
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index 3d3e0ca526148..a8c17b4cd7379 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -551,7 +551,9 @@ int mmc_cqe_recovery(struct mmc_host *host)
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+- mmc_wait_for_cmd(host, &cmd, 0);
++ mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
++
++ mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_CMDQ_TASK_MGMT;
+@@ -559,10 +561,13 @@ int mmc_cqe_recovery(struct mmc_host *host)
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+- err = mmc_wait_for_cmd(host, &cmd, 0);
++ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
+ host->cqe_ops->cqe_recovery_finish(host);
+
++ if (err)
++ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
++
+ mmc_retune_release(host);
+
+ return err;
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 4a4bab9aa7263..a46ce0868fe1f 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card)
+ case 3: /* MMC v3.1 - v3.3 */
+ case 4: /* MMC v4 */
+ card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
+- card->cid.oemid = UNSTUFF_BITS(resp, 104, 8);
++ card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
+ card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
+ card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
+ card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
+@@ -2081,13 +2081,17 @@ static int _mmc_flush_cache(struct mmc_host *host)
+ {
+ int err = 0;
+
++ if (mmc_card_broken_cache_flush(host->card) && !host->card->written_flag)
++ return 0;
++
+ if (_mmc_cache_enabled(host)) {
+ err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 1,
+ CACHE_FLUSH_TIMEOUT_MS);
+ if (err)
+- pr_err("%s: cache flush error %d\n",
+- mmc_hostname(host), err);
++ pr_err("%s: cache flush error %d\n", mmc_hostname(host), err);
++ else
++ host->card->written_flag = false;
+ }
+
+ return err;
+diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
+index 32b64b564fb1f..cca71867bc4ad 100644
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -110,11 +110,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+ MMC_QUIRK_TRIM_BROKEN),
+
+ /*
+- * Micron MTFC4GACAJCN-1M advertises TRIM but it does not seems to
+- * support being used to offload WRITE_ZEROES.
++ * Micron MTFC4GACAJCN-1M supports TRIM but does not appear to support
++ * WRITE_ZEROES offloading. It also supports caching, but the cache can
++ * only be flushed after a write has occurred.
+ */
+ MMC_FIXUP("Q2J54A", CID_MANFID_MICRON, 0x014e, add_quirk_mmc,
+- MMC_QUIRK_TRIM_BROKEN),
++ MMC_QUIRK_TRIM_BROKEN | MMC_QUIRK_BROKEN_CACHE_FLUSH),
+
+ /*
+ * Kingston EMMC04G-M627 advertises TRIM but it does not seems to
+diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
+index b3d7d6d8d6548..41e94cd141098 100644
+--- a/drivers/mmc/host/cqhci-core.c
++++ b/drivers/mmc/host/cqhci-core.c
+@@ -942,8 +942,8 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
+ ret = cqhci_tasks_cleared(cq_host);
+
+ if (!ret)
+- pr_debug("%s: cqhci: Failed to clear tasks\n",
+- mmc_hostname(mmc));
++ pr_warn("%s: cqhci: Failed to clear tasks\n",
++ mmc_hostname(mmc));
+
+ return ret;
+ }
+@@ -976,7 +976,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
+ ret = cqhci_halted(cq_host);
+
+ if (!ret)
+- pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
++ pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+
+ return ret;
+ }
+@@ -984,10 +984,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
+ /*
+ * After halting we expect to be able to use the command line. We interpret the
+ * failure to halt to mean the data lines might still be in use (and the upper
+- * layers will need to send a STOP command), so we set the timeout based on a
+- * generous command timeout.
++ * layers will need to send a STOP command); however, failing to halt complicates
++ * the recovery, so set a timeout that would reasonably allow I/O to complete.
+ */
+-#define CQHCI_START_HALT_TIMEOUT 5
++#define CQHCI_START_HALT_TIMEOUT 500
+
+ static void cqhci_recovery_start(struct mmc_host *mmc)
+ {
+@@ -1075,28 +1075,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc)
+
+ ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+
+- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+- ok = false;
+-
+ /*
+ * The specification contradicts itself, by saying that tasks cannot be
+ * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
+ * be disabled/re-enabled, but not to disable before clearing tasks.
+ * Have a go anyway.
+ */
+- if (!ok) {
+- pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
+- cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+- cqcfg &= ~CQHCI_ENABLE;
+- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+- cqcfg |= CQHCI_ENABLE;
+- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+- /* Be sure that there are no tasks */
+- ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+- ok = false;
+- WARN_ON(!ok);
+- }
++ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
++ ok = false;
++
++ /* Disable to make sure tasks really are cleared */
++ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
++ cqcfg &= ~CQHCI_ENABLE;
++ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
++
++ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
++ cqcfg |= CQHCI_ENABLE;
++ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
++
++ cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
++
++ if (!ok)
++ cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
+
+ cqhci_recover_mrqs(cq_host);
+
+diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
+index 9837dab096e64..c7c067b9415a4 100644
+--- a/drivers/mmc/host/meson-gx-mmc.c
++++ b/drivers/mmc/host/meson-gx-mmc.c
+@@ -801,7 +801,6 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
+
+ cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
+ cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
+- cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */
+
+ meson_mmc_set_response_bits(cmd, &cmd_cfg);
+
+diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
+index 109d4b010f978..77911a57b12cf 100644
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -25,6 +25,12 @@
+ #define GLI_9750_WT_EN_ON 0x1
+ #define GLI_9750_WT_EN_OFF 0x0
+
++#define PCI_GLI_9750_PM_CTRL 0xFC
++#define PCI_GLI_9750_PM_STATE GENMASK(1, 0)
++
++#define PCI_GLI_9750_CORRERR_MASK 0x214
++#define PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12)
++
+ #define SDHCI_GLI_9750_CFG2 0x848
+ #define SDHCI_GLI_9750_CFG2_L1DLY GENMASK(28, 24)
+ #define GLI_9750_CFG2_L1DLY_VALUE 0x1F
+@@ -149,6 +155,9 @@
+ #define PCI_GLI_9755_PM_CTRL 0xFC
+ #define PCI_GLI_9755_PM_STATE GENMASK(1, 0)
+
++#define PCI_GLI_9755_CORRERR_MASK 0x214
++#define PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12)
++
+ #define SDHCI_GLI_9767_GM_BURST_SIZE 0x510
+ #define SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET BIT(8)
+
+@@ -536,8 +545,12 @@ static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock)
+
+ static void gl9750_hw_setting(struct sdhci_host *host)
+ {
++ struct sdhci_pci_slot *slot = sdhci_priv(host);
++ struct pci_dev *pdev;
+ u32 value;
+
++ pdev = slot->chip->pdev;
++
+ gl9750_wt_on(host);
+
+ value = sdhci_readl(host, SDHCI_GLI_9750_CFG2);
+@@ -547,6 +560,18 @@ static void gl9750_hw_setting(struct sdhci_host *host)
+ GLI_9750_CFG2_L1DLY_VALUE);
+ sdhci_writel(host, value, SDHCI_GLI_9750_CFG2);
+
++ /* toggle PM state to allow GL9750 to enter ASPM L1.2 */
++ pci_read_config_dword(pdev, PCI_GLI_9750_PM_CTRL, &value);
++ value |= PCI_GLI_9750_PM_STATE;
++ pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
++ value &= ~PCI_GLI_9750_PM_STATE;
++ pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
++
++ /* mask the replay timer timeout of AER */
++ pci_read_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, &value);
++ value |= PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
++ pci_write_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, value);
++
+ gl9750_wt_off(host);
+ }
+
+@@ -756,6 +781,11 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
+ value &= ~PCI_GLI_9755_PM_STATE;
+ pci_write_config_dword(pdev, PCI_GLI_9755_PM_CTRL, value);
+
++ /* mask the replay timer timeout of AER */
++ pci_read_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, &value);
++ value |= PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
++ pci_write_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, value);
++
+ gl9755_wt_off(pdev);
+ }
+
+@@ -1159,6 +1189,32 @@ static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
+ sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG);
+ }
+
++static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot,
++ bool enable)
++{
++ struct pci_dev *pdev = slot->chip->pdev;
++ u32 value;
++
++ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
++ value &= ~GLI_9763E_VHS_REV;
++ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
++ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
++
++ pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
++
++ if (enable)
++ value &= ~GLI_9763E_CFG_LPSN_DIS;
++ else
++ value |= GLI_9763E_CFG_LPSN_DIS;
++
++ pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
++
++ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
++ value &= ~GLI_9763E_VHS_REV;
++ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
++ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
++}
++
+ static void sdhci_set_gl9763e_signaling(struct sdhci_host *host,
+ unsigned int timing)
+ {
+@@ -1267,6 +1323,9 @@ static int gl9763e_add_host(struct sdhci_pci_slot *slot)
+ if (ret)
+ goto cleanup;
+
++ /* Disable LPM negotiation to avoid entering L1 state. */
++ gl9763e_set_low_power_negotiation(slot, false);
++
+ return 0;
+
+ cleanup:
+@@ -1310,31 +1369,6 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
+ }
+
+ #ifdef CONFIG_PM
+-static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable)
+-{
+- struct pci_dev *pdev = slot->chip->pdev;
+- u32 value;
+-
+- pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+- value &= ~GLI_9763E_VHS_REV;
+- value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
+- pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+-
+- pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
+-
+- if (enable)
+- value &= ~GLI_9763E_CFG_LPSN_DIS;
+- else
+- value |= GLI_9763E_CFG_LPSN_DIS;
+-
+- pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
+-
+- pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+- value &= ~GLI_9763E_VHS_REV;
+- value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
+- pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+-}
+-
+ static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
+ {
+ struct sdhci_pci_slot *slot = chip->slots[0];
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index 6b84ba27e6ab0..6b8a57e2d20f0 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -416,12 +416,33 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
+ mmc_request_done(host->mmc, mrq);
+ }
+
++static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode,
++ unsigned short vdd)
++{
++ struct mmc_host *mmc = host->mmc;
++
++ switch (mode) {
++ case MMC_POWER_OFF:
++ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0);
++
++ mmc_regulator_disable_vqmmc(mmc);
++ break;
++ case MMC_POWER_ON:
++ mmc_regulator_enable_vqmmc(mmc);
++ break;
++ case MMC_POWER_UP:
++ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd);
++ break;
++ }
++}
++
+ static struct sdhci_ops sdhci_sprd_ops = {
+ .read_l = sdhci_sprd_readl,
+ .write_l = sdhci_sprd_writel,
+ .write_w = sdhci_sprd_writew,
+ .write_b = sdhci_sprd_writeb,
+ .set_clock = sdhci_sprd_set_clock,
++ .set_power = sdhci_sprd_set_power,
+ .get_max_clock = sdhci_sprd_get_max_clock,
+ .get_min_clock = sdhci_sprd_get_min_clock,
+ .set_bus_width = sdhci_set_bus_width,
+@@ -823,6 +844,10 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
+ SDHCI_SUPPORT_DDR50);
+
++ ret = mmc_regulator_get_supply(host->mmc);
++ if (ret)
++ goto pm_runtime_disable;
++
+ ret = sdhci_setup_host(host);
+ if (ret)
+ goto pm_runtime_disable;
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index c125485ba80e9..967bd2dfcda1b 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -598,7 +598,7 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
+ return 0;
+ }
+
+- for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) {
++ for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) {
+
+ ret = device_property_read_u32(dev, td[i].otap_binding,
+ &sdhci_am654->otap_del_sel[i]);
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
+index 9ec593d52f0fa..cef0e716ad16f 100644
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -2309,6 +2309,7 @@ static int vub300_probe(struct usb_interface *interface,
+ vub300->read_only =
+ (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
+ } else {
++ retval = -EINVAL;
+ goto error5;
+ }
+ usb_set_intfdata(interface, vub300);
+diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
+index 11b06fefaa0e2..c10693ba265ba 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0001.c
++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
+@@ -422,9 +422,25 @@ read_pri_intelext(struct map_info *map, __u16 adr)
+ extra_size = 0;
+
+ /* Protection Register info */
+- if (extp->NumProtectionFields)
++ if (extp->NumProtectionFields) {
++ struct cfi_intelext_otpinfo *otp =
++ (struct cfi_intelext_otpinfo *)&extp->extra[0];
++
+ extra_size += (extp->NumProtectionFields - 1) *
+- sizeof(struct cfi_intelext_otpinfo);
++ sizeof(struct cfi_intelext_otpinfo);
++
++ if (extp_size >= sizeof(*extp) + extra_size) {
++ int i;
++
++ /* Do some byteswapping if necessary */
++ for (i = 0; i < extp->NumProtectionFields - 1; i++) {
++ otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
++ otp->FactGroups = le16_to_cpu(otp->FactGroups);
++ otp->UserGroups = le16_to_cpu(otp->UserGroups);
++ otp++;
++ }
++ }
++ }
+ }
+
+ if (extp->MinorVersion >= '1') {
+diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
+index cb5d88f42297b..f0ad2308f6d50 100644
+--- a/drivers/mtd/nand/raw/intel-nand-controller.c
++++ b/drivers/mtd/nand/raw/intel-nand-controller.c
+@@ -619,6 +619,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
+ ebu_host->cs_num = cs;
+
+ resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
++ if (!resname) {
++ ret = -ENOMEM;
++ goto err_of_node_put;
++ }
++
+ ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
+ resname);
+ if (IS_ERR(ebu_host->cs[cs].chipaddr)) {
+@@ -649,6 +654,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
+ }
+
+ resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
++ if (!resname) {
++ ret = -ENOMEM;
++ goto err_cleanup_dma;
++ }
++
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
+ if (!res) {
+ ret = -EINVAL;
+diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
+index 25e3c1cb605e7..a506e658d4624 100644
+--- a/drivers/mtd/nand/raw/meson_nand.c
++++ b/drivers/mtd/nand/raw/meson_nand.c
+@@ -1134,6 +1134,9 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
+ init.name = devm_kasprintf(nfc->dev,
+ GFP_KERNEL, "%s#div",
+ dev_name(nfc->dev));
++ if (!init.name)
++ return -ENOMEM;
++
+ init.ops = &clk_divider_ops;
+ nfc_divider_parent_data[0].fw_name = "device";
+ init.parent_data = nfc_divider_parent_data;
+diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
+index eb0b9d16e8dae..a553e3ac8ff41 100644
+--- a/drivers/mtd/nand/raw/tegra_nand.c
++++ b/drivers/mtd/nand/raw/tegra_nand.c
+@@ -1197,6 +1197,10 @@ static int tegra_nand_probe(struct platform_device *pdev)
+ init_completion(&ctrl->dma_complete);
+
+ ctrl->irq = platform_get_irq(pdev, 0);
++ if (ctrl->irq < 0) {
++ err = ctrl->irq;
++ goto err_put_pm;
++ }
+ err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
+ dev_name(&pdev->dev), ctrl);
+ if (err) {
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 51d47eda1c873..8e6cc0e133b7f 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1500,6 +1500,10 @@ done:
+ static void bond_setup_by_slave(struct net_device *bond_dev,
+ struct net_device *slave_dev)
+ {
++ bool was_up = !!(bond_dev->flags & IFF_UP);
++
++ dev_close(bond_dev);
++
+ bond_dev->header_ops = slave_dev->header_ops;
+
+ bond_dev->type = slave_dev->type;
+@@ -1514,6 +1518,8 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
+ bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
+ }
++ if (was_up)
++ dev_open(bond_dev, NULL);
+ }
+
+ /* On bonding slaves other than the currently active slave, suppress
+diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
+index 7f9334a8af500..735d5de3caa0e 100644
+--- a/drivers/net/can/dev/dev.c
++++ b/drivers/net/can/dev/dev.c
+@@ -132,7 +132,8 @@ static void can_restart(struct net_device *dev)
+ struct can_frame *cf;
+ int err;
+
+- BUG_ON(netif_carrier_ok(dev));
++ if (netif_carrier_ok(dev))
++ netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
+
+ /* No synchronization needed because the device is bus-off and
+ * no messages can come in or go out.
+@@ -153,11 +154,12 @@ restart:
+ priv->can_stats.restarts++;
+
+ /* Now restart the device */
+- err = priv->do_set_mode(dev, CAN_MODE_START);
+-
+ netif_carrier_on(dev);
+- if (err)
++ err = priv->do_set_mode(dev, CAN_MODE_START);
++ if (err) {
+ netdev_err(dev, "Error %d during restart", err);
++ netif_carrier_off(dev);
++ }
+ }
+
+ static void can_restart_work(struct work_struct *work)
+diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
+index f6d05b3ef59ab..3ebd4f779b9bd 100644
+--- a/drivers/net/can/dev/skb.c
++++ b/drivers/net/can/dev/skb.c
+@@ -49,7 +49,11 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ {
+ struct can_priv *priv = netdev_priv(dev);
+
+- BUG_ON(idx >= priv->echo_skb_max);
++ if (idx >= priv->echo_skb_max) {
++ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
++ __func__, idx, priv->echo_skb_max);
++ return -EINVAL;
++ }
+
+ /* check flag whether this packet has to be looped back */
+ if (!(dev->flags & IFF_ECHO) ||
+diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
+index 0c7f7505632cd..5e3a72b7c4691 100644
+--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
++++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
+@@ -2230,6 +2230,7 @@ static int es58x_probe(struct usb_interface *intf,
+
+ for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) {
+ int ret = es58x_init_netdev(es58x_dev, ch_idx);
++
+ if (ret) {
+ es58x_free_netdevs(es58x_dev);
+ return ret;
+diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
+index c1ba1a4e8857b..2e183bdeedd72 100644
+--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
++++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
+@@ -378,13 +378,13 @@ struct es58x_sw_version {
+
+ /**
+ * struct es58x_hw_revision - Hardware revision number.
+- * @letter: Revision letter.
++ * @letter: Revision letter, an alphanumeric character.
+ * @major: Version major number, represented on three digits.
+ * @minor: Version minor number, represented on three digits.
+ *
+ * The hardware revision uses its own format: "axxx/xxx" where 'a' is
+- * a letter and 'x' a digit. It can be retrieved from the product
+- * information string.
++ * an alphanumeric character and 'x' a digit. It can be retrieved from
++ * the product information string.
+ */
+ struct es58x_hw_revision {
+ char letter;
+diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+index 9fba29e2f57c6..635edeb8f68cd 100644
+--- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c
++++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+@@ -125,14 +125,28 @@ static int es58x_parse_hw_rev(struct es58x_device *es58x_dev,
+ * firmware version, the bootloader version and the hardware
+ * revision.
+ *
+- * If the function fails, simply emit a log message and continue
+- * because product information is not critical for the driver to
+- * operate.
++ * If the function fails, set the version or revision to an invalid
++ * value and emit an informational message. Continue probing because the
++ * product information is not critical for the driver to operate.
+ */
+ void es58x_parse_product_info(struct es58x_device *es58x_dev)
+ {
++ static const struct es58x_sw_version sw_version_not_set = {
++ .major = -1,
++ .minor = -1,
++ .revision = -1,
++ };
++ static const struct es58x_hw_revision hw_revision_not_set = {
++ .letter = '\0',
++ .major = -1,
++ .minor = -1,
++ };
+ char *prod_info;
+
++ es58x_dev->firmware_version = sw_version_not_set;
++ es58x_dev->bootloader_version = sw_version_not_set;
++ es58x_dev->hardware_revision = hw_revision_not_set;
++
+ prod_info = usb_cache_string(es58x_dev->udev, ES58X_PROD_INFO_IDX);
+ if (!prod_info) {
+ dev_warn(es58x_dev->dev,
+@@ -150,29 +164,36 @@ void es58x_parse_product_info(struct es58x_device *es58x_dev)
+ }
+
+ /**
+- * es58x_sw_version_is_set() - Check if the version is a valid number.
++ * es58x_sw_version_is_valid() - Check if the version is a valid number.
+ * @sw_ver: Version number of either the firmware or the bootloader.
+ *
+- * If &es58x_sw_version.major, &es58x_sw_version.minor and
+- * &es58x_sw_version.revision are all zero, the product string could
+- * not be parsed and the version number is invalid.
++ * If any of the software version sub-numbers do not fit in two
++ * digits, the version is invalid, most probably because the product
++ * string could not be parsed.
++ *
++ * Return: @true if the software version is valid, @false otherwise.
+ */
+-static inline bool es58x_sw_version_is_set(struct es58x_sw_version *sw_ver)
++static inline bool es58x_sw_version_is_valid(struct es58x_sw_version *sw_ver)
+ {
+- return sw_ver->major || sw_ver->minor || sw_ver->revision;
++ return sw_ver->major < 100 && sw_ver->minor < 100 &&
++ sw_ver->revision < 100;
+ }
+
+ /**
+- * es58x_hw_revision_is_set() - Check if the revision is a valid number.
++ * es58x_hw_revision_is_valid() - Check if the revision is a valid number.
+ * @hw_rev: Revision number of the hardware.
+ *
+- * If &es58x_hw_revision.letter is the null character, the product
+- * string could not be parsed and the hardware revision number is
+- * invalid.
++ * If &es58x_hw_revision.letter is not an alphanumeric character or if
++ * any of the hardware revision sub-numbers do not fit in three
++ * digits, the revision is invalid, most probably because the product
++ * string could not be parsed.
++ *
++ * Return: @true if the hardware revision is valid, @false otherwise.
+ */
+-static inline bool es58x_hw_revision_is_set(struct es58x_hw_revision *hw_rev)
++static inline bool es58x_hw_revision_is_valid(struct es58x_hw_revision *hw_rev)
+ {
+- return hw_rev->letter != '\0';
++ return isalnum(hw_rev->letter) && hw_rev->major < 1000 &&
++ hw_rev->minor < 1000;
+ }
+
+ /**
+@@ -197,7 +218,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
+ char buf[max(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))];
+ int ret = 0;
+
+- if (es58x_sw_version_is_set(fw_ver)) {
++ if (es58x_sw_version_is_valid(fw_ver)) {
+ snprintf(buf, sizeof(buf), "%02u.%02u.%02u",
+ fw_ver->major, fw_ver->minor, fw_ver->revision);
+ ret = devlink_info_version_running_put(req,
+@@ -207,7 +228,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
+ return ret;
+ }
+
+- if (es58x_sw_version_is_set(bl_ver)) {
++ if (es58x_sw_version_is_valid(bl_ver)) {
+ snprintf(buf, sizeof(buf), "%02u.%02u.%02u",
+ bl_ver->major, bl_ver->minor, bl_ver->revision);
+ ret = devlink_info_version_running_put(req,
+@@ -217,7 +238,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
+ return ret;
+ }
+
+- if (es58x_hw_revision_is_set(hw_rev)) {
++ if (es58x_hw_revision_is_valid(hw_rev)) {
+ snprintf(buf, sizeof(buf), "%c%03u/%03u",
+ hw_rev->letter, hw_rev->major, hw_rev->minor);
+ ret = devlink_info_version_fixed_put(req,
+diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
+index d8ab2b77d201e..167a86f39f277 100644
+--- a/drivers/net/dsa/lan9303_mdio.c
++++ b/drivers/net/dsa/lan9303_mdio.c
+@@ -32,7 +32,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
+ struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+
+ reg <<= 2; /* reg num to offset */
+- mutex_lock(&sw_dev->device->bus->mdio_lock);
++ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
+ lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
+ mutex_unlock(&sw_dev->device->bus->mdio_lock);
+@@ -50,7 +50,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
+ struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+
+ reg <<= 2; /* reg num to offset */
+- mutex_lock(&sw_dev->device->bus->mdio_lock);
++ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ *val = lan9303_mdio_real_read(sw_dev->device, reg);
+ *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
+ mutex_unlock(&sw_dev->device->bus->mdio_lock);
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index ab434a77b059a..dc7f9b99f409f 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -577,6 +577,18 @@ static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
+ }
+
++static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
++ struct phylink_config *config)
++{
++ unsigned long *supported = config->supported_interfaces;
++
++ /* Translate the default cmode */
++ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
++
++ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
++ MAC_1000FD;
++}
++
+ static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
+ {
+ u16 reg, val;
+@@ -3880,7 +3892,8 @@ static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+- if (chip->info->ops->pcs_ops->pcs_init) {
++ if (chip->info->ops->pcs_ops &&
++ chip->info->ops->pcs_ops->pcs_init) {
+ err = chip->info->ops->pcs_ops->pcs_init(chip, port);
+ if (err)
+ return err;
+@@ -3895,7 +3908,8 @@ static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
+
+ mv88e6xxx_teardown_devlink_regions_port(ds, port);
+
+- if (chip->info->ops->pcs_ops->pcs_teardown)
++ if (chip->info->ops->pcs_ops &&
++ chip->info->ops->pcs_ops->pcs_teardown)
+ chip->info->ops->pcs_ops->pcs_teardown(chip, port);
+ }
+
+@@ -4340,7 +4354,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+- .phylink_get_caps = mv88e6185_phylink_get_caps,
++ .phylink_get_caps = mv88e6351_phylink_get_caps,
+ };
+
+ static const struct mv88e6xxx_ops mv88e6172_ops = {
+@@ -4440,7 +4454,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+- .phylink_get_caps = mv88e6185_phylink_get_caps,
++ .phylink_get_caps = mv88e6351_phylink_get_caps,
+ };
+
+ static const struct mv88e6xxx_ops mv88e6176_ops = {
+@@ -5069,7 +5083,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+- .phylink_get_caps = mv88e6185_phylink_get_caps,
++ .phylink_get_caps = mv88e6351_phylink_get_caps,
+ };
+
+ static const struct mv88e6xxx_ops mv88e6351_ops = {
+@@ -5117,7 +5131,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+- .phylink_get_caps = mv88e6185_phylink_get_caps,
++ .phylink_get_caps = mv88e6351_phylink_get_caps,
+ };
+
+ static const struct mv88e6xxx_ops mv88e6352_ops = {
+diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
+index 045fe133f6ee9..5beadabc21361 100644
+--- a/drivers/net/ethernet/amd/pds_core/adminq.c
++++ b/drivers/net/ethernet/amd/pds_core/adminq.c
+@@ -146,7 +146,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
+ }
+
+ queue_work(pdsc->wq, &qcq->work);
+- pds_core_intr_mask(&pdsc->intr_ctrl[irq], PDS_CORE_INTR_MASK_CLEAR);
++ pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
+index e545fafc48196..b1c1f1007b065 100644
+--- a/drivers/net/ethernet/amd/pds_core/core.h
++++ b/drivers/net/ethernet/amd/pds_core/core.h
+@@ -15,7 +15,7 @@
+ #define PDSC_DRV_DESCRIPTION "AMD/Pensando Core Driver"
+
+ #define PDSC_WATCHDOG_SECS 5
+-#define PDSC_QUEUE_NAME_MAX_SZ 32
++#define PDSC_QUEUE_NAME_MAX_SZ 16
+ #define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
+ #define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
+ #define PDSC_TEARDOWN_RECOVERY false
+diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
+index f77cd9f5a2fda..eb178728edba9 100644
+--- a/drivers/net/ethernet/amd/pds_core/dev.c
++++ b/drivers/net/ethernet/amd/pds_core/dev.c
+@@ -254,10 +254,14 @@ static int pdsc_identify(struct pdsc *pdsc)
+ struct pds_core_drv_identity drv = {};
+ size_t sz;
+ int err;
++ int n;
+
+ drv.drv_type = cpu_to_le32(PDS_DRIVER_LINUX);
+- snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
+- "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
++ /* Catching the return quiets a Wformat-truncation complaint */
++ n = snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
++ "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
++ if (n > sizeof(drv.driver_ver_str))
++ dev_dbg(pdsc->dev, "release name truncated, don't care\n");
+
+ /* Next let's get some info about the device
+ * We use the devcmd_lock at this level in order to
+diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
+index d9607033bbf21..d2abf32b93fe3 100644
+--- a/drivers/net/ethernet/amd/pds_core/devlink.c
++++ b/drivers/net/ethernet/amd/pds_core/devlink.c
+@@ -104,7 +104,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct pds_core_fw_list_info fw_list;
+ struct pdsc *pdsc = devlink_priv(dl);
+ union pds_core_dev_comp comp;
+- char buf[16];
++ char buf[32];
+ int listlen;
+ int err;
+ int i;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 614c0278419bc..6b73648b37793 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -682,10 +682,24 @@ static void xgbe_service(struct work_struct *work)
+ static void xgbe_service_timer(struct timer_list *t)
+ {
+ struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
++ struct xgbe_channel *channel;
++ unsigned int i;
+
+ queue_work(pdata->dev_workqueue, &pdata->service_work);
+
+ mod_timer(&pdata->service_timer, jiffies + HZ);
++
++ if (!pdata->tx_usecs)
++ return;
++
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
++ if (!channel->tx_ring || channel->tx_timer_active)
++ break;
++ channel->tx_timer_active = 1;
++ mod_timer(&channel->tx_timer,
++ jiffies + usecs_to_jiffies(pdata->tx_usecs));
++ }
+ }
+
+ static void xgbe_init_timers(struct xgbe_prv_data *pdata)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+index 6e83ff59172a3..32fab5e772462 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+@@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev,
+
+ cmd->base.phy_address = pdata->phy.address;
+
+- cmd->base.autoneg = pdata->phy.autoneg;
+- cmd->base.speed = pdata->phy.speed;
+- cmd->base.duplex = pdata->phy.duplex;
++ if (netif_carrier_ok(netdev)) {
++ cmd->base.speed = pdata->phy.speed;
++ cmd->base.duplex = pdata->phy.duplex;
++ } else {
++ cmd->base.speed = SPEED_UNKNOWN;
++ cmd->base.duplex = DUPLEX_UNKNOWN;
++ }
+
++ cmd->base.autoneg = pdata->phy.autoneg;
+ cmd->base.port = PORT_NONE;
+
+ XGBE_LM_COPY(cmd, supported, lks, supported);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 32d2c6fac6526..4a2dc705b5280 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -1193,7 +1193,19 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+ if (pdata->phy.duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+- xgbe_set_mode(pdata, mode);
++ /* Force the mode change for SFI in Fixed PHY config.
++ * Fixed PHY configs need PLL to be enabled while doing mode set.
++ * When the SFP module isn't connected during boot, the driver assumes
++ * AN is ON and attempts autonegotiation. However, if the connected
++ * SFP comes up in Fixed PHY config, the link will not come up as
++ * PLL isn't enabled while the initial mode set command is issued.
++ * So, force the mode change for SFI in Fixed PHY configuration to
++ * fix link issues.
++ */
++ if (mode == XGBE_MODE_SFI)
++ xgbe_change_mode(pdata, mode);
++ else
++ xgbe_set_mode(pdata, mode);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
+index 43d821fe7a542..63ba64dbb7310 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
+@@ -504,15 +504,12 @@ struct atl1c_rrd_ring {
+ u16 next_to_use;
+ u16 next_to_clean;
+ struct napi_struct napi;
+- struct page *rx_page;
+- unsigned int rx_page_offset;
+ };
+
+ /* board specific private data structure */
+ struct atl1c_adapter {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+- unsigned int rx_frag_size;
+ struct atl1c_hw hw;
+ struct atl1c_hw_stats hw_stats;
+ struct mii_if_info mii; /* MII interface info */
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index 940c5d1ff9cfc..74b78164cf74a 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -483,15 +483,10 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
+ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
+ struct net_device *dev)
+ {
+- unsigned int head_size;
+ int mtu = dev->mtu;
+
+ adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
+ roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
+-
+- head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) +
+- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+- adapter->rx_frag_size = roundup_pow_of_two(head_size);
+ }
+
+ static netdev_features_t atl1c_fix_features(struct net_device *netdev,
+@@ -964,7 +959,6 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
+ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
+ {
+ struct pci_dev *pdev = adapter->pdev;
+- int i;
+
+ dma_free_coherent(&pdev->dev, adapter->ring_header.size,
+ adapter->ring_header.desc, adapter->ring_header.dma);
+@@ -977,12 +971,6 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
+ kfree(adapter->tpd_ring[0].buffer_info);
+ adapter->tpd_ring[0].buffer_info = NULL;
+ }
+- for (i = 0; i < adapter->rx_queue_count; ++i) {
+- if (adapter->rrd_ring[i].rx_page) {
+- put_page(adapter->rrd_ring[i].rx_page);
+- adapter->rrd_ring[i].rx_page = NULL;
+- }
+- }
+ }
+
+ /**
+@@ -1754,48 +1742,11 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
+ skb_checksum_none_assert(skb);
+ }
+
+-static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter,
+- u32 queue, bool napi_mode)
+-{
+- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
+- struct sk_buff *skb;
+- struct page *page;
+-
+- if (adapter->rx_frag_size > PAGE_SIZE) {
+- if (likely(napi_mode))
+- return napi_alloc_skb(&rrd_ring->napi,
+- adapter->rx_buffer_len);
+- else
+- return netdev_alloc_skb_ip_align(adapter->netdev,
+- adapter->rx_buffer_len);
+- }
+-
+- page = rrd_ring->rx_page;
+- if (!page) {
+- page = alloc_page(GFP_ATOMIC);
+- if (unlikely(!page))
+- return NULL;
+- rrd_ring->rx_page = page;
+- rrd_ring->rx_page_offset = 0;
+- }
+-
+- skb = build_skb(page_address(page) + rrd_ring->rx_page_offset,
+- adapter->rx_frag_size);
+- if (likely(skb)) {
+- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+- rrd_ring->rx_page_offset += adapter->rx_frag_size;
+- if (rrd_ring->rx_page_offset >= PAGE_SIZE)
+- rrd_ring->rx_page = NULL;
+- else
+- get_page(page);
+- }
+- return skb;
+-}
+-
+ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
+ bool napi_mode)
+ {
+ struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
++ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
+ struct pci_dev *pdev = adapter->pdev;
+ struct atl1c_buffer *buffer_info, *next_info;
+ struct sk_buff *skb;
+@@ -1814,13 +1765,27 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
+ while (next_info->flags & ATL1C_BUFFER_FREE) {
+ rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
+
+- skb = atl1c_alloc_skb(adapter, queue, napi_mode);
++ /* When the DMA RX address is set to something like
++ * 0x....fc0, it is very likely to cause a DMA
++ * RFD overflow issue.
++ *
++ * To work around it, we allocate the rx skb with 64
++ * bytes of extra space, and offset the address whenever
++ * 0x....fc0 is detected.
++ */
++ if (likely(napi_mode))
++ skb = napi_alloc_skb(&rrd_ring->napi, adapter->rx_buffer_len + 64);
++ else
++ skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len + 64);
+ if (unlikely(!skb)) {
+ if (netif_msg_rx_err(adapter))
+ dev_warn(&pdev->dev, "alloc rx buffer failed\n");
+ break;
+ }
+
++ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
++ skb_reserve(skb, 64);
++
+ /*
+ * Make buffer alignment 2 beyond a 16 byte boundary
+ * this will result in a 16 byte aligned IP header after
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 14b311196b8f8..22b00912f7ac8 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -18078,7 +18078,8 @@ static void tg3_shutdown(struct pci_dev *pdev)
+ if (netif_running(dev))
+ dev_close(dev);
+
+- tg3_power_down(tp);
++ if (system_state == SYSTEM_POWER_OFF)
++ tg3_power_down(tp);
+
+ rtnl_unlock();
+
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+index 7750702900fa6..6f6525983130e 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+@@ -2259,7 +2259,7 @@ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
+
+ if (tp->snd_una != snd_una) {
+ tp->snd_una = snd_una;
+- tp->rcv_tstamp = tcp_time_stamp(tp);
++ tp->rcv_tstamp = tcp_jiffies32;
+ if (tp->snd_una == tp->snd_nxt &&
+ !csk_flag_nochk(csk, CSK_TX_FAILOVER))
+ csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
+diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
+index a8b9d1a3e4d57..636949737d72f 100644
+--- a/drivers/net/ethernet/cortina/gemini.c
++++ b/drivers/net/ethernet/cortina/gemini.c
+@@ -432,8 +432,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = {
+ .val = CONFIG0_MAXLEN_1536,
+ },
+ {
+- .max_l3_len = 1542,
+- .val = CONFIG0_MAXLEN_1542,
++ .max_l3_len = 1548,
++ .val = CONFIG0_MAXLEN_1548,
+ },
+ {
+ .max_l3_len = 9212,
+@@ -1145,6 +1145,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ dma_addr_t mapping;
+ unsigned short mtu;
+ void *buffer;
++ int ret;
+
+ mtu = ETH_HLEN;
+ mtu += netdev->mtu;
+@@ -1159,9 +1160,30 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ word3 |= mtu;
+ }
+
+- if (skb->ip_summed != CHECKSUM_NONE) {
++ if (skb->len >= ETH_FRAME_LEN) {
++ /* Hardware offloaded checksumming isn't working on frames
++ * bigger than 1514 bytes. A hypothesis about this is that the
++ * checksum buffer is only 1518 bytes, so when the frames get
++ * bigger they get truncated, or the last few bytes get
++ * overwritten by the FCS.
++ *
++ * Just use software checksumming and bypass on bigger frames.
++ */
++ if (skb->ip_summed == CHECKSUM_PARTIAL) {
++ ret = skb_checksum_help(skb);
++ if (ret)
++ return ret;
++ }
++ word1 |= TSS_BYPASS_BIT;
++ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int tcp = 0;
+
++ /* We do not switch off the checksumming on non-TCP/UDP
++ * frames: as tests have shown, the checksumming engine
++ * is smart enough to see that a frame is not actually TCP
++ * or UDP and then just pass it through without any changes
++ * to the frame.
++ */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ word1 |= TSS_IP_CHKSUM_BIT;
+ tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
+@@ -1978,15 +2000,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
+ return 0;
+ }
+
+-static netdev_features_t gmac_fix_features(struct net_device *netdev,
+- netdev_features_t features)
+-{
+- if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
+- features &= ~GMAC_OFFLOAD_FEATURES;
+-
+- return features;
+-}
+-
+ static int gmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+ {
+@@ -2212,7 +2225,6 @@ static const struct net_device_ops gmac_351x_ops = {
+ .ndo_set_mac_address = gmac_set_mac_address,
+ .ndo_get_stats64 = gmac_get_stats64,
+ .ndo_change_mtu = gmac_change_mtu,
+- .ndo_fix_features = gmac_fix_features,
+ .ndo_set_features = gmac_set_features,
+ };
+
+@@ -2464,11 +2476,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
+
+ netdev->hw_features = GMAC_OFFLOAD_FEATURES;
+ netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+- /* We can handle jumbo frames up to 10236 bytes so, let's accept
+- * payloads of 10236 bytes minus VLAN and ethernet header
++ /* We can receive jumbo frames up to 10236 bytes but only
++ * transmit 2047 bytes, so let's accept payloads of 2047
++ * bytes minus VLAN and ethernet header
+ */
+ netdev->min_mtu = ETH_MIN_MTU;
+- netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
++ netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN;
+
+ port->freeq_refill = 0;
+ netif_napi_add(netdev, &port->napi, gmac_napi_poll);
+diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
+index 9fdf77d5eb374..24bb989981f23 100644
+--- a/drivers/net/ethernet/cortina/gemini.h
++++ b/drivers/net/ethernet/cortina/gemini.h
+@@ -502,7 +502,7 @@ union gmac_txdesc_3 {
+ #define SOF_BIT 0x80000000
+ #define EOF_BIT 0x40000000
+ #define EOFIE_BIT BIT(29)
+-#define MTU_SIZE_BIT_MASK 0x1fff
++#define MTU_SIZE_BIT_MASK 0x7ff /* Max MTU 2047 bytes */
+
+ /* GMAC Tx Descriptor */
+ struct gmac_txdesc {
+@@ -787,7 +787,7 @@ union gmac_config0 {
+ #define CONFIG0_MAXLEN_1536 0
+ #define CONFIG0_MAXLEN_1518 1
+ #define CONFIG0_MAXLEN_1522 2
+-#define CONFIG0_MAXLEN_1542 3
++#define CONFIG0_MAXLEN_1548 3
+ #define CONFIG0_MAXLEN_9k 4 /* 9212 */
+ #define CONFIG0_MAXLEN_10k 5 /* 10236 */
+ #define CONFIG0_MAXLEN_1518__6 6
+diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
+index 6e14c918e3fb7..f188fba021a62 100644
+--- a/drivers/net/ethernet/engleder/tsnep.h
++++ b/drivers/net/ethernet/engleder/tsnep.h
+@@ -143,7 +143,7 @@ struct tsnep_rx {
+
+ struct tsnep_queue {
+ struct tsnep_adapter *adapter;
+- char name[IFNAMSIZ + 9];
++ char name[IFNAMSIZ + 16];
+
+ struct tsnep_tx *tx;
+ struct tsnep_rx *rx;
+diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
+index 8b992dc9bb52b..38da2d6c250e6 100644
+--- a/drivers/net/ethernet/engleder/tsnep_main.c
++++ b/drivers/net/ethernet/engleder/tsnep_main.c
+@@ -1779,14 +1779,14 @@ static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
+ dev = queue->adapter;
+ } else {
+ if (queue->tx && queue->rx)
+- sprintf(queue->name, "%s-txrx-%d", name,
+- queue->rx->queue_index);
++ snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d",
++ name, queue->rx->queue_index);
+ else if (queue->tx)
+- sprintf(queue->name, "%s-tx-%d", name,
+- queue->tx->queue_index);
++ snprintf(queue->name, sizeof(queue->name), "%s-tx-%d",
++ name, queue->tx->queue_index);
+ else
+- sprintf(queue->name, "%s-rx-%d", name,
+- queue->rx->queue_index);
++ snprintf(queue->name, sizeof(queue->name), "%s-rx-%d",
++ name, queue->rx->queue_index);
+ handler = tsnep_irq_txrx;
+ dev = queue;
+ }
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index 15bab41cee48d..888509cf1f210 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -516,8 +516,6 @@ struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
+
+ memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
+
+- dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
+-
+ return skb;
+ }
+
+@@ -589,6 +587,7 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct device *dev = priv->net_dev->dev.parent;
++ bool recycle_rx_buf = false;
+ void *buf_data;
+ u32 xdp_act;
+
+@@ -618,6 +617,8 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
++ } else {
++ recycle_rx_buf = true;
+ }
+ } else if (fd_format == dpaa2_fd_sg) {
+ WARN_ON(priv->xdp_prog);
+@@ -637,6 +638,9 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ goto err_build_skb;
+
+ dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
++
++ if (recycle_rx_buf)
++ dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
+ return;
+
+ err_build_skb:
+@@ -1073,14 +1077,12 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
+ dma_addr_t addr;
+
+ buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
+-
+- /* If there's enough room to align the FD address, do it.
+- * It will help hardware optimize accesses.
+- */
+ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+ DPAA2_ETH_TX_BUF_ALIGN);
+ if (aligned_start >= skb->head)
+ buffer_start = aligned_start;
++ else
++ return -ENOMEM;
+
+ /* Store a backpointer to the skb at the beginning of the buffer
+ * (in the private data area) such that we can release it
+@@ -4967,6 +4969,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
+ if (err)
+ goto err_dl_port_add;
+
++ net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
++
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() failed\n");
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+index bfb6c96c3b2f0..834cba8c3a416 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+@@ -740,7 +740,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
+
+ static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
+ {
+- unsigned int headroom = DPAA2_ETH_SWA_SIZE;
++ unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
+
+ /* If we don't have an skb (e.g. XDP buffer), we only need space for
+ * the software annotation area
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 35461165de0d2..b92e3aa7cd041 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -2769,7 +2769,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
+ if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
+ priv->num_tx_rings) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+- "Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)",
++ "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)",
+ num_xdp_tx_queues,
+ priv->min_num_stack_tx_queues,
+ priv->num_tx_rings);
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 5704b5f57cd0d..5703240474e5b 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -190,7 +190,7 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
+ rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
+ priv->rx_cfg.num_queues;
+ priv->stats_report_len = struct_size(priv->stats_report, stats,
+- tx_stats_num + rx_stats_num);
++ size_add(tx_stats_num, rx_stats_num));
+ priv->stats_report =
+ dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
+ &priv->stats_report_bus, GFP_KERNEL);
+@@ -254,10 +254,13 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
+ if (block->tx) {
+ if (block->tx->q_num < priv->tx_cfg.num_queues)
+ reschedule |= gve_tx_poll(block, budget);
+- else
++ else if (budget)
+ reschedule |= gve_xdp_poll(block, budget);
+ }
+
++ if (!budget)
++ return 0;
++
+ if (block->rx) {
+ work_done = gve_rx_poll(block, budget);
+ reschedule |= work_done == budget;
+@@ -298,6 +301,9 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
+ if (block->tx)
+ reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+
++ if (!budget)
++ return 0;
++
+ if (block->rx) {
+ work_done = gve_rx_poll_dqo(block, budget);
+ reschedule |= work_done == budget;
+diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
+index e84a066aa1a40..73655347902d2 100644
+--- a/drivers/net/ethernet/google/gve/gve_rx.c
++++ b/drivers/net/ethernet/google/gve/gve_rx.c
+@@ -1007,10 +1007,6 @@ int gve_rx_poll(struct gve_notify_block *block, int budget)
+
+ feat = block->napi.dev->features;
+
+- /* If budget is 0, do all the work */
+- if (budget == 0)
+- budget = INT_MAX;
+-
+ if (budget > 0)
+ work_done = gve_clean_rx_done(rx, budget, feat);
+
+diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
+index 6957a865cff37..9f6ffc4a54f0b 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx.c
++++ b/drivers/net/ethernet/google/gve/gve_tx.c
+@@ -925,10 +925,6 @@ bool gve_xdp_poll(struct gve_notify_block *block, int budget)
+ bool repoll;
+ u32 to_do;
+
+- /* If budget is 0, do all the work */
+- if (budget == 0)
+- budget = INT_MAX;
+-
+ /* Find out how much work there is to be done */
+ nic_done = gve_tx_load_event_counter(priv, tx);
+ to_do = min_t(u32, (nic_done - tx->done), budget);
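The gve hunks above stop rewriting a zero budget to INT_MAX inside the Rx/XDP pollers and instead return early from the NAPI handler after Tx-only work; netpoll invokes NAPI handlers with budget 0 and expects no Rx processing. A rough sketch of that contract (the types and helpers are stand-ins, not gve's):

#include <stdio.h>

static void clean_tx(void)
{
	printf("tx completions cleaned\n");
}

static int clean_rx(int budget)
{
	printf("rx: processing up to %d packets\n", budget);
	return budget;	/* pretend the whole budget was used */
}

static int poll(int budget)
{
	clean_tx();	/* Tx completion work is always allowed */
	if (!budget)	/* netpoll case: no Rx work, report none done */
		return 0;

	return clean_rx(budget);
}

int main(void)
{
	poll(0);	/* netpoll-style call */
	poll(64);	/* normal NAPI call */
	return 0;
}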
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index b8508533878be..4f385a18d288e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -500,11 +500,14 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
+ }
+
+ sprintf(result[j++], "%d", i);
+- sprintf(result[j++], "%s", dim_state_str[dim->state]);
++ sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
++ dim_state_str[dim->state] : "unknown");
+ sprintf(result[j++], "%u", dim->profile_ix);
+- sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]);
++ sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
++ dim_cqe_mode_str[dim->mode] : "unknown");
+ sprintf(result[j++], "%s",
+- dim_tune_stat_str[dim->tune_state]);
++ dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
++ dim_tune_stat_str[dim->tune_state] : "unknown");
+ sprintf(result[j++], "%u", dim->steps_left);
+ sprintf(result[j++], "%u", dim->steps_right);
+ sprintf(result[j++], "%u", dim->tired);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index cf50368441b78..677cfaa5fe08c 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -5140,7 +5140,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hnae3_handle *h = priv->ae_handle;
+- u8 mac_addr_temp[ETH_ALEN];
++ u8 mac_addr_temp[ETH_ALEN] = {0};
+ int ret = 0;
+
+ if (h->ae_algo->ops->get_mac_addr)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index c42574e297476..a61d9fd732b96 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -61,6 +61,7 @@ static void hclge_sync_fd_table(struct hclge_dev *hdev);
+ static void hclge_update_fec_stats(struct hclge_dev *hdev);
+ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ int wait_cnt);
++static int hclge_update_port_info(struct hclge_dev *hdev);
+
+ static struct hnae3_ae_algo ae_algo;
+
+@@ -3043,6 +3044,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
+
+ if (state != hdev->hw.mac.link) {
+ hdev->hw.mac.link = state;
++ if (state == HCLGE_LINK_STATUS_UP)
++ hclge_update_port_info(hdev);
++
+ client->ops->link_status_change(handle, state);
+ hclge_config_mac_tnl_int(hdev, state);
+ if (rclient && rclient->ops->link_status_change)
+@@ -10026,8 +10030,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+
+- mutex_lock(&hdev->vport_lock);
+-
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->vlan_id == vlan_id) {
+ if (is_write_tbl && vlan->hd_tbl_status)
+@@ -10042,8 +10044,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ break;
+ }
+ }
+-
+- mutex_unlock(&hdev->vport_lock);
+ }
+
+ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
+@@ -10452,11 +10452,16 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ * handle mailbox. Just record the vlan id, and remove it after
+ * reset finished.
+ */
++ mutex_lock(&hdev->vport_lock);
+ if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
++ mutex_unlock(&hdev->vport_lock);
+ return -EBUSY;
++ } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
++ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ }
++ mutex_unlock(&hdev->vport_lock);
+
+ /* when port base vlan enabled, we use port base vlan as the vlan
+ * filter entry. In this case, we don't update vlan filter table
+@@ -10471,17 +10476,22 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ }
+
+ if (!ret) {
+- if (!is_kill)
++ if (!is_kill) {
+ hclge_add_vport_vlan_table(vport, vlan_id,
+ writen_to_tbl);
+- else if (is_kill && vlan_id != 0)
++ } else if (is_kill && vlan_id != 0) {
++ mutex_lock(&hdev->vport_lock);
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
++ mutex_unlock(&hdev->vport_lock);
++ }
+ } else if (is_kill) {
+ /* when remove hw vlan filter failed, record the vlan id,
+ * and try to remove it from hw later, to stay consistent
+ * with the stack
+ */
++ mutex_lock(&hdev->vport_lock);
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
++ mutex_unlock(&hdev->vport_lock);
+ }
+
+ hclge_set_vport_vlan_fltr_change(vport);
+@@ -10521,6 +10531,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+ int i, ret, sync_cnt = 0;
+ u16 vlan_id;
+
++ mutex_lock(&hdev->vport_lock);
+ /* start from vport 1 for PF is always alive */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+@@ -10531,21 +10542,26 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id, vlan_id,
+ true);
+- if (ret && ret != -EINVAL)
++ if (ret && ret != -EINVAL) {
++ mutex_unlock(&hdev->vport_lock);
+ return;
++ }
+
+ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ hclge_set_vport_vlan_fltr_change(vport);
+
+ sync_cnt++;
+- if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
++ if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
++ mutex_unlock(&hdev->vport_lock);
+ return;
++ }
+
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ VLAN_N_VID);
+ }
+ }
++ mutex_unlock(&hdev->vport_lock);
+
+ hclge_sync_vlan_fltr_state(hdev);
+ }
+@@ -11652,6 +11668,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ goto err_msi_irq_uninit;
+
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
++ clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+ if (hnae3_dev_phy_imp_supported(hdev))
+ ret = hclge_update_tp_port_info(hdev);
+ else
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index a4d68fb216fb9..0aa9beefd1c7e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1206,6 +1206,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ return -EBUSY;
++ } else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
++ clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ }
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+@@ -1233,20 +1235,25 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
+ int ret, sync_cnt = 0;
+ u16 vlan_id;
+
++ if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
++ return;
++
++ rtnl_lock();
+ vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ while (vlan_id != VLAN_N_VID) {
+ ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
+ vlan_id, true);
+ if (ret)
+- return;
++ break;
+
+ clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ sync_cnt++;
+ if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
+- return;
++ break;
+
+ vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ }
++ rtnl_unlock();
+ }
+
+ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+@@ -1974,8 +1981,18 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+ return HCLGEVF_VECTOR0_EVENT_OTHER;
+ }
+
++static void hclgevf_reset_timer(struct timer_list *t)
++{
++ struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
++
++ hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
++ hclgevf_reset_task_schedule(hdev);
++}
++
+ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+ {
++#define HCLGEVF_RESET_DELAY 5
++
+ enum hclgevf_evt_cause event_cause;
+ struct hclgevf_dev *hdev = data;
+ u32 clearval;
+@@ -1987,7 +2004,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+
+ switch (event_cause) {
+ case HCLGEVF_VECTOR0_EVENT_RST:
+- hclgevf_reset_task_schedule(hdev);
++ mod_timer(&hdev->reset_timer,
++ jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
+ break;
+ case HCLGEVF_VECTOR0_EVENT_MBX:
+ hclgevf_mbx_handler(hdev);
+@@ -2930,6 +2948,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ HCLGEVF_DRIVER_NAME);
+
+ hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
++ timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
+
+ return 0;
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+index 81c16b8c8da29..a73f2bf3a56a6 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -219,6 +219,7 @@ struct hclgevf_dev {
+ enum hnae3_reset_type reset_level;
+ unsigned long reset_pending;
+ enum hnae3_reset_type reset_type;
++ struct timer_list reset_timer;
+
+ #define HCLGEVF_RESET_REQUESTED 0
+ #define HCLGEVF_RESET_PENDING 1
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+index bbf7b14079de3..85c2a634c8f96 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+@@ -63,6 +63,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
+ i++;
+ }
+
++ /* ensure additional_info will be seen after received_resp */
++ smp_rmb();
++
+ if (i >= HCLGEVF_MAX_TRY_TIMES) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
+@@ -178,6 +181,10 @@ static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
+ resp->resp_status = hclgevf_resp_to_errno(resp_status);
+ memcpy(resp->additional_info, req->msg.resp_data,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
++
++ /* ensure additional_info will be seen before setting received_resp */
++ smp_wmb();
++
+ if (match_id) {
+ /* If match_id is not zero, it means PF support match_id.
+ * if the match_id is right, VF get the right response, or
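The mailbox hunks above pair an smp_wmb() after the response payload is copied with an smp_rmb() after received_resp is observed, publishing the payload before the flag and reading it only afterwards. A userspace analogue built on C11 release/acquire semantics, an approximation rather than the kernel primitives (build with cc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int additional_info;		/* payload */
static atomic_int received_resp;	/* flag */

static void *producer(void *arg)
{
	(void)arg;
	additional_info = 42;		/* write the payload... */
	atomic_store_explicit(&received_resp, 1,
			      memory_order_release);	/* ...then publish (smp_wmb analogue) */
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&received_resp,
				     memory_order_acquire))	/* smp_rmb analogue */
		;
	printf("info = %d\n", additional_info);	/* guaranteed to see 42 */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}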
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index de7fd43dc11c8..00ca2b88165cb 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -16320,11 +16320,15 @@ static void i40e_remove(struct pci_dev *pdev)
+ i40e_switch_branch_release(pf->veb[i]);
+ }
+
+- /* Now we can shutdown the PF's VSI, just before we kill
++ /* Now we can shutdown the PF's VSIs, just before we kill
+ * adminq and hmc.
+ */
+- if (pf->vsi[pf->lan_vsi])
+- i40e_vsi_release(pf->vsi[pf->lan_vsi]);
++ for (i = pf->num_alloc_vsi; i--;)
++ if (pf->vsi[i]) {
++ i40e_vsi_close(pf->vsi[i]);
++ i40e_vsi_release(pf->vsi[i]);
++ pf->vsi[i] = NULL;
++ }
+
+ i40e_cloud_filter_exit(pf);
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index d3d6415553ed6..4441b00297f47 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -3842,7 +3842,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ int aq_ret = 0;
+- int i, ret;
++ int i;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = -EINVAL;
+@@ -3866,8 +3866,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ }
+
+ cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
+- if (!cfilter)
+- return -ENOMEM;
++ if (!cfilter) {
++ aq_ret = -ENOMEM;
++ goto err_out;
++ }
+
+ /* parse destination mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+@@ -3915,13 +3917,13 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+
+ /* Adding cloud filter programmed as TC filter */
+ if (tcf.dst_port)
+- ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
++ aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
+ else
+- ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+- if (ret) {
++ aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
++ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
+- vf->vf_id, ERR_PTR(ret),
++ vf->vf_id, ERR_PTR(aq_ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto err_free;
+ }
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index e110ba3461857..d8d7b62ceb24e 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -298,8 +298,6 @@ struct iavf_adapter {
+ #define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10)
+ #define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11)
+ #define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12)
+-#define IAVF_FLAG_PROMISC_ON BIT(13)
+-#define IAVF_FLAG_ALLMULTI_ON BIT(14)
+ #define IAVF_FLAG_LEGACY_RX BIT(15)
+ #define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
+ #define IAVF_FLAG_QUEUES_DISABLED BIT(17)
+@@ -325,10 +323,7 @@ struct iavf_adapter {
+ #define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
+ #define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
+ #define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
+-#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15)
+-#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16)
+-#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17)
+-#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18)
++#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15)
+ #define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19)
+ #define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20)
+ #define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21)
+@@ -365,6 +360,12 @@ struct iavf_adapter {
+ (IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
+ IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+
++ /* Lock to prevent possible clobbering of
++ * current_netdev_promisc_flags
++ */
++ spinlock_t current_netdev_promisc_flags_lock;
++ netdev_features_t current_netdev_promisc_flags;
++
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+@@ -551,7 +552,8 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter);
+ void iavf_del_ether_addrs(struct iavf_adapter *adapter);
+ void iavf_add_vlans(struct iavf_adapter *adapter);
+ void iavf_del_vlans(struct iavf_adapter *adapter);
+-void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
++void iavf_set_promiscuous(struct iavf_adapter *adapter);
++bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter);
+ void iavf_request_stats(struct iavf_adapter *adapter);
+ int iavf_request_reset(struct iavf_adapter *adapter);
+ void iavf_get_hena(struct iavf_adapter *adapter);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index b3434dbc90d6f..68783a7b70962 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -1186,6 +1186,16 @@ static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
+ return 0;
+ }
+
++/**
++ * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
++ * @adapter: device specific adapter
++ */
++bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
++{
++ return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
++ (IFF_PROMISC | IFF_ALLMULTI);
++}
++
+ /**
+ * iavf_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+@@ -1199,19 +1209,10 @@ static void iavf_set_rx_mode(struct net_device *netdev)
+ __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+- if (netdev->flags & IFF_PROMISC &&
+- !(adapter->flags & IAVF_FLAG_PROMISC_ON))
+- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
+- else if (!(netdev->flags & IFF_PROMISC) &&
+- adapter->flags & IAVF_FLAG_PROMISC_ON)
+- adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
+-
+- if (netdev->flags & IFF_ALLMULTI &&
+- !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
+- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
+- else if (!(netdev->flags & IFF_ALLMULTI) &&
+- adapter->flags & IAVF_FLAG_ALLMULTI_ON)
+- adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
++ spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
++ if (iavf_promiscuous_mode_changed(adapter))
++ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
+ }
+
+ /**
+@@ -2162,19 +2163,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
+ return 0;
+ }
+
+- if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
+- iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
+- FLAG_VF_MULTICAST_PROMISC);
+- return 0;
+- }
+-
+- if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
+- iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
+- return 0;
+- }
+- if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
+- (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
+- iavf_set_promiscuous(adapter, 0);
++ if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
++ iavf_set_promiscuous(adapter);
+ return 0;
+ }
+
+@@ -4970,6 +4960,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ spin_lock_init(&adapter->cloud_filter_list_lock);
+ spin_lock_init(&adapter->fdir_fltr_lock);
+ spin_lock_init(&adapter->adv_rss_lock);
++ spin_lock_init(&adapter->current_netdev_promisc_flags_lock);
+
+ INIT_LIST_HEAD(&adapter->mac_filter_list);
+ INIT_LIST_HEAD(&adapter->vlan_filter_list);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index f9727e9c3d630..0b97b424e487a 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -936,14 +936,14 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
+ /**
+ * iavf_set_promiscuous
+ * @adapter: adapter structure
+- * @flags: bitmask to control unicast/multicast promiscuous.
+ *
+ * Request that the PF enable promiscuous mode for our VSI.
+ **/
+-void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
++void iavf_set_promiscuous(struct iavf_adapter *adapter)
+ {
++ struct net_device *netdev = adapter->netdev;
+ struct virtchnl_promisc_info vpi;
+- int promisc_all;
++ unsigned int flags;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+@@ -952,36 +952,57 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
+ return;
+ }
+
+- promisc_all = FLAG_VF_UNICAST_PROMISC |
+- FLAG_VF_MULTICAST_PROMISC;
+- if ((flags & promisc_all) == promisc_all) {
+- adapter->flags |= IAVF_FLAG_PROMISC_ON;
+- adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
+- dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+- }
++ /* prevent changes to promiscuous flags */
++ spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
+
+- if (flags & FLAG_VF_MULTICAST_PROMISC) {
+- adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
+- adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
+- dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
+- adapter->netdev->name);
++ /* sanity check to prevent duplicate AQ calls */
++ if (!iavf_promiscuous_mode_changed(adapter)) {
++ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++ dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
++ /* allow changes to promiscuous flags */
++ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
++ return;
+ }
+
+- if (!flags) {
+- if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
+- adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
+- adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
+- dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+- }
++ /* the 2 flag bits yield 4 combinations but only 3 distinct states */
++ if (!(netdev->flags & IFF_PROMISC) &&
++ netdev->flags & IFF_ALLMULTI) {
++ /* State 1 - only multicast promiscuous mode enabled
++ * - !IFF_PROMISC && IFF_ALLMULTI
++ */
++ flags = FLAG_VF_MULTICAST_PROMISC;
++ adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
++ adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
++ dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
++ } else if (!(netdev->flags & IFF_PROMISC) &&
++ !(netdev->flags & IFF_ALLMULTI)) {
++ /* State 2 - unicast/multicast promiscuous mode disabled
++ * - !IFF_PROMISC && !IFF_ALLMULTI
++ */
++ flags = 0;
++ adapter->current_netdev_promisc_flags &=
++ ~(IFF_PROMISC | IFF_ALLMULTI);
++ dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
++ } else {
++ /* State 3 - unicast/multicast promiscuous mode enabled
++ * - IFF_PROMISC && IFF_ALLMULTI
++ * - IFF_PROMISC && !IFF_ALLMULTI
++ */
++ flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
++ adapter->current_netdev_promisc_flags |= IFF_PROMISC;
++ if (netdev->flags & IFF_ALLMULTI)
++ adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
++ else
++ adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;
+
+- if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
+- adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
+- adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
+- dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
+- adapter->netdev->name);
+- }
++ dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+ }
+
++ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++
++ /* allow changes to promiscuous flags */
++ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
++
+ adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ vpi.vsi_id = adapter->vsi_res->vsi_id;
+ vpi.flags = flags;
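The iavf rework above derives the virtchnl promiscuous flags directly from netdev->flags under a dedicated spinlock; the two flag bits give four combinations but only three distinct states, since IFF_PROMISC implies multicast promiscuity as well. A compact sketch of the mapping (the FLAG_* values are invented stand-ins):

#include <stdio.h>

#define IFF_PROMISC	0x100
#define IFF_ALLMULTI	0x200

#define FLAG_VF_UNICAST_PROMISC		0x1
#define FLAG_VF_MULTICAST_PROMISC	0x2

static unsigned int promisc_flags(unsigned int netdev_flags)
{
	if (netdev_flags & IFF_PROMISC)	/* state 3: promisc, allmulti irrelevant */
		return FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
	if (netdev_flags & IFF_ALLMULTI)	/* state 1: multicast promisc only */
		return FLAG_VF_MULTICAST_PROMISC;
	return 0;	/* state 2: both disabled */
}

int main(void)
{
	printf("%#x %#x %#x\n", promisc_flags(IFF_PROMISC),
	       promisc_flags(IFF_ALLMULTI), promisc_flags(0));
	return 0;
}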
+diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
+index 7b1256992dcf6..d86e2460b5a4d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lag.c
++++ b/drivers/net/ethernet/intel/ice/ice_lag.c
+@@ -536,6 +536,50 @@ resume_traffic:
+ dev_dbg(dev, "Problem restarting traffic for LAG node move\n");
+ }
+
++/**
++ * ice_lag_build_netdev_list - populate the lag struct's netdev list
++ * @lag: local lag struct
++ * @ndlist: pointer to netdev list to populate
++ */
++static void ice_lag_build_netdev_list(struct ice_lag *lag,
++ struct ice_lag_netdev_list *ndlist)
++{
++ struct ice_lag_netdev_list *nl;
++ struct net_device *tmp_nd;
++
++ INIT_LIST_HEAD(&ndlist->node);
++ rcu_read_lock();
++ for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
++ nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
++ if (!nl)
++ break;
++
++ nl->netdev = tmp_nd;
++ list_add(&nl->node, &ndlist->node);
++ }
++ rcu_read_unlock();
++ lag->netdev_head = &ndlist->node;
++}
++
++/**
++ * ice_lag_destroy_netdev_list - free lag struct's netdev list
++ * @lag: pointer to local lag struct
++ * @ndlist: pointer to lag struct netdev list
++ */
++static void ice_lag_destroy_netdev_list(struct ice_lag *lag,
++ struct ice_lag_netdev_list *ndlist)
++{
++ struct ice_lag_netdev_list *entry, *n;
++
++ rcu_read_lock();
++ list_for_each_entry_safe(entry, n, &ndlist->node, node) {
++ list_del(&entry->node);
++ kfree(entry);
++ }
++ rcu_read_unlock();
++ lag->netdev_head = NULL;
++}
++
+ /**
+ * ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF
+ * @lag: primary interface LAG struct
+@@ -564,7 +608,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
+ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
+ {
+ struct ice_lag_netdev_list ndlist;
+- struct list_head *tmp, *n;
+ u8 pri_port, act_port;
+ struct ice_lag *lag;
+ struct ice_vsi *vsi;
+@@ -588,38 +631,15 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
+ pri_port = pf->hw.port_info->lport;
+ act_port = lag->active_port;
+
+- if (lag->upper_netdev) {
+- struct ice_lag_netdev_list *nl;
+- struct net_device *tmp_nd;
+-
+- INIT_LIST_HEAD(&ndlist.node);
+- rcu_read_lock();
+- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+- nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+- if (!nl)
+- break;
+-
+- nl->netdev = tmp_nd;
+- list_add(&nl->node, &ndlist.node);
+- }
+- rcu_read_unlock();
+- }
+-
+- lag->netdev_head = &ndlist.node;
++ if (lag->upper_netdev)
++ ice_lag_build_netdev_list(lag, &ndlist);
+
+ if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
+ lag->bonded && lag->primary && pri_port != act_port &&
+ !list_empty(lag->netdev_head))
+ ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
+
+- list_for_each_safe(tmp, n, &ndlist.node) {
+- struct ice_lag_netdev_list *entry;
+-
+- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
+- list_del(&entry->node);
+- kfree(entry);
+- }
+- lag->netdev_head = NULL;
++ ice_lag_destroy_netdev_list(lag, &ndlist);
+
+ new_vf_unlock:
+ mutex_unlock(&pf->lag_mutex);
+@@ -646,6 +666,29 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
+ ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
+ }
+
++/**
++ * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context
++ * @lag: local lag struct
++ * @src_prt: lport value for source port
++ * @dst_prt: lport value for destination port
++ *
++ * This function is used to move nodes during an out-of-netdev-event situation,
++ * primarily when the driver needs to reconfigure or recreate resources.
++ *
++ * Must be called while holding the lag_mutex to prevent lag events from
++ * being processed while out-of-sync moves are happening. Also, paired
++ * moves, such as those used in a reset flow, should both be made under
++ * the same mutex lock to avoid changes between the start and end of reset.
++ */
++void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
++{
++ struct ice_lag_netdev_list ndlist;
++
++ ice_lag_build_netdev_list(lag, &ndlist);
++ ice_lag_move_vf_nodes(lag, src_prt, dst_prt);
++ ice_lag_destroy_netdev_list(lag, &ndlist);
++}
++
+ #define ICE_LAG_SRIOV_CP_RECIPE 10
+ #define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
+
+@@ -1529,18 +1572,12 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
+ */
+ static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
+ {
+- struct ice_lag_netdev_list *entry;
+ struct ice_netdev_priv *np;
+- struct net_device *netdev;
+ struct ice_pf *pf;
+
+- list_for_each_entry(entry, lag->netdev_head, node) {
+- netdev = entry->netdev;
+- np = netdev_priv(netdev);
+- pf = np->vsi->back;
+-
+- ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+- }
++ np = netdev_priv(lag->netdev);
++ pf = np->vsi->back;
++ ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+ }
+
+ /**
+@@ -1672,7 +1709,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) {
+- nd_list = kzalloc(sizeof(*nd_list), GFP_KERNEL);
++ nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC);
+ if (!nd_list)
+ break;
+
+@@ -2028,7 +2065,6 @@ void ice_lag_rebuild(struct ice_pf *pf)
+ {
+ struct ice_lag_netdev_list ndlist;
+ struct ice_lag *lag, *prim_lag;
+- struct list_head *tmp, *n;
+ u8 act_port, loc_port;
+
+ if (!pf->lag || !pf->lag->bonded)
+@@ -2040,21 +2076,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
+ if (lag->primary) {
+ prim_lag = lag;
+ } else {
+- struct ice_lag_netdev_list *nl;
+- struct net_device *tmp_nd;
+-
+- INIT_LIST_HEAD(&ndlist.node);
+- rcu_read_lock();
+- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+- nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+- if (!nl)
+- break;
+-
+- nl->netdev = tmp_nd;
+- list_add(&nl->node, &ndlist.node);
+- }
+- rcu_read_unlock();
+- lag->netdev_head = &ndlist.node;
++ ice_lag_build_netdev_list(lag, &ndlist);
+ prim_lag = ice_lag_find_primary(lag);
+ }
+
+@@ -2084,13 +2106,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
+
+ ice_clear_rdma_cap(pf);
+ lag_rebuild_out:
+- list_for_each_safe(tmp, n, &ndlist.node) {
+- struct ice_lag_netdev_list *entry;
+-
+- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
+- list_del(&entry->node);
+- kfree(entry);
+- }
++ ice_lag_destroy_netdev_list(lag, &ndlist);
+ mutex_unlock(&pf->lag_mutex);
+ }
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
+index facb6c894b6dd..7f22987675012 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lag.h
++++ b/drivers/net/ethernet/intel/ice/ice_lag.h
+@@ -63,4 +63,5 @@ int ice_init_lag(struct ice_pf *pf);
+ void ice_deinit_lag(struct ice_pf *pf);
+ void ice_lag_rebuild(struct ice_pf *pf);
+ bool ice_lag_is_switchdev_running(struct ice_pf *pf);
++void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
+ #endif /* _ICE_LAG_H_ */
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index 81d96a40d5a74..c4270708a7694 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -2246,18 +2246,20 @@ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+ static void
+ ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
+ {
+- info->n_per_out = N_PER_OUT_E810;
+-
+- if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
+- info->n_ext_ts = N_EXT_TS_E810;
+-
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ info->n_ext_ts = N_EXT_TS_E810;
++ info->n_per_out = N_PER_OUT_E810T;
+ info->n_pins = NUM_PTP_PINS_E810T;
+ info->verify = ice_verify_pin_e810t;
+
+ /* Complete setup of the SMA pins */
+ ice_ptp_setup_sma_pins_e810t(pf, info);
++ } else if (ice_is_e810t(&pf->hw)) {
++ info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
++ info->n_per_out = N_PER_OUT_NO_SMA_E810T;
++ } else {
++ info->n_per_out = N_PER_OUT_E810;
++ info->n_ext_ts = N_EXT_TS_E810;
+ }
+ }
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+index 37b54db91df27..dd03cb69ad26b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+@@ -630,32 +630,83 @@ bool ice_is_tunnel_supported(struct net_device *dev)
+ return ice_tc_tun_get_type(dev) != TNL_LAST;
+ }
+
+-static int
+-ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
+- struct flow_action_entry *act)
++static bool ice_tc_is_dev_uplink(struct net_device *dev)
++{
++ return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
++}
++
++static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
++ struct ice_tc_flower_fltr *fltr,
++ struct net_device *target_dev)
+ {
+ struct ice_repr *repr;
+
++ fltr->action.fltr_act = ICE_FWD_TO_VSI;
++
++ if (ice_is_port_repr_netdev(filter_dev) &&
++ ice_is_port_repr_netdev(target_dev)) {
++ repr = ice_netdev_to_repr(target_dev);
++
++ fltr->dest_vsi = repr->src_vsi;
++ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
++ } else if (ice_is_port_repr_netdev(filter_dev) &&
++ ice_tc_is_dev_uplink(target_dev)) {
++ repr = ice_netdev_to_repr(filter_dev);
++
++ fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi;
++ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
++ } else if (ice_tc_is_dev_uplink(filter_dev) &&
++ ice_is_port_repr_netdev(target_dev)) {
++ repr = ice_netdev_to_repr(target_dev);
++
++ fltr->dest_vsi = repr->src_vsi;
++ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
++ } else {
++ NL_SET_ERR_MSG_MOD(fltr->extack,
++ "Unsupported netdevice in switchdev mode");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int
++ice_tc_setup_drop_action(struct net_device *filter_dev,
++ struct ice_tc_flower_fltr *fltr)
++{
++ fltr->action.fltr_act = ICE_DROP_PACKET;
++
++ if (ice_is_port_repr_netdev(filter_dev)) {
++ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
++ } else if (ice_tc_is_dev_uplink(filter_dev)) {
++ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
++ } else {
++ NL_SET_ERR_MSG_MOD(fltr->extack,
++ "Unsupported netdevice in switchdev mode");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
++ struct ice_tc_flower_fltr *fltr,
++ struct flow_action_entry *act)
++{
++ int err;
++
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+- fltr->action.fltr_act = ICE_DROP_PACKET;
++ err = ice_tc_setup_drop_action(filter_dev, fltr);
++ if (err)
++ return err;
++
+ break;
+
+ case FLOW_ACTION_REDIRECT:
+- fltr->action.fltr_act = ICE_FWD_TO_VSI;
+-
+- if (ice_is_port_repr_netdev(act->dev)) {
+- repr = ice_netdev_to_repr(act->dev);
+-
+- fltr->dest_vsi = repr->src_vsi;
+- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+- } else if (netif_is_ice(act->dev) ||
+- ice_is_tunnel_supported(act->dev)) {
+- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+- } else {
+- NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
+- return -EINVAL;
+- }
++ err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
++ if (err)
++ return err;
+
+ break;
+
+@@ -696,10 +747,6 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+ goto exit;
+ }
+
+- /* egress traffic is always redirect to uplink */
+- if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+- fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
+-
+ rule_info.sw_act.fltr_act = fltr->action.fltr_act;
+ if (fltr->action.fltr_act != ICE_DROP_PACKET)
+ rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
+@@ -713,13 +760,21 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+ rule_info.flags_info.act_valid = true;
+
+ if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
++ /* Uplink to VF */
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
+- } else {
++ } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
++ fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) {
++ /* VF to Uplink */
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
++ } else {
++ /* VF to VF */
++ rule_info.sw_act.flag |= ICE_FLTR_TX;
++ rule_info.sw_act.src = vsi->idx;
++ rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
+ }
+
+ /* specify the cookie as filter_rule_id */
+@@ -1745,16 +1800,17 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
+
+ /**
+ * ice_parse_tc_flower_actions - Parse the actions for a TC filter
++ * @filter_dev: Pointer to device on which filter is being added
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to TC flower offload structure
+ * @fltr: Pointer to TC flower filter structure
+ *
+ * Parse the actions for a TC filter
+ */
+-static int
+-ice_parse_tc_flower_actions(struct ice_vsi *vsi,
+- struct flow_cls_offload *cls_flower,
+- struct ice_tc_flower_fltr *fltr)
++static int ice_parse_tc_flower_actions(struct net_device *filter_dev,
++ struct ice_vsi *vsi,
++ struct flow_cls_offload *cls_flower,
++ struct ice_tc_flower_fltr *fltr)
+ {
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
+ struct flow_action *flow_action = &rule->action;
+@@ -1769,7 +1825,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
+
+ flow_action_for_each(i, act, flow_action) {
+ if (ice_is_eswitch_mode_switchdev(vsi->back))
+- err = ice_eswitch_tc_parse_action(fltr, act);
++ err = ice_eswitch_tc_parse_action(filter_dev, fltr, act);
+ else
+ err = ice_tc_parse_action(vsi, fltr, act);
+ if (err)
+@@ -1856,7 +1912,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
+ if (err < 0)
+ goto err;
+
+- err = ice_parse_tc_flower_actions(vsi, f, fltr);
++ err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr);
+ if (err < 0)
+ goto err;
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+index 24e4f4d897b66..d488c7156d093 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+@@ -827,12 +827,16 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
+ int ice_reset_vf(struct ice_vf *vf, u32 flags)
+ {
+ struct ice_pf *pf = vf->pf;
++ struct ice_lag *lag;
+ struct ice_vsi *vsi;
++ u8 act_prt, pri_prt;
+ struct device *dev;
+ int err = 0;
+ bool rsd;
+
+ dev = ice_pf_to_dev(pf);
++ act_prt = ICE_LAG_INVALID_PORT;
++ pri_prt = pf->hw.port_info->lport;
+
+ if (flags & ICE_VF_RESET_NOTIFY)
+ ice_notify_vf_reset(vf);
+@@ -843,6 +847,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
+ return 0;
+ }
+
++ lag = pf->lag;
++ mutex_lock(&pf->lag_mutex);
++ if (lag && lag->bonded && lag->primary) {
++ act_prt = lag->active_port;
++ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
++ lag->upper_netdev)
++ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
++ else
++ act_prt = ICE_LAG_INVALID_PORT;
++ }
++
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_lock(&vf->cfg_lock);
+ else
+@@ -935,6 +950,11 @@ out_unlock:
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_unlock(&vf->cfg_lock);
+
++ if (lag && lag->bonded && lag->primary &&
++ act_prt != ICE_LAG_INVALID_PORT)
++ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
++ mutex_unlock(&pf->lag_mutex);
++
+ return err;
+ }
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index db97353efd067..62337e6569b23 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -1600,9 +1600,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
+ struct ice_pf *pf = vf->pf;
++ struct ice_lag *lag;
+ struct ice_vsi *vsi;
++ u8 act_prt, pri_prt;
+ int i = -1, q_idx;
+
++ lag = pf->lag;
++ mutex_lock(&pf->lag_mutex);
++ act_prt = ICE_LAG_INVALID_PORT;
++ pri_prt = pf->hw.port_info->lport;
++ if (lag && lag->bonded && lag->primary) {
++ act_prt = lag->active_port;
++ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
++ lag->upper_netdev)
++ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
++ else
++ act_prt = ICE_LAG_INVALID_PORT;
++ }
++
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto error_param;
+
+@@ -1710,6 +1725,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ }
+ }
+
++ if (lag && lag->bonded && lag->primary &&
++ act_prt != ICE_LAG_INVALID_PORT)
++ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
++ mutex_unlock(&pf->lag_mutex);
++
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+@@ -1724,6 +1744,11 @@ error_param:
+ vf->vf_id, i);
+ }
+
++ if (lag && lag->bonded && lag->primary &&
++ act_prt != ICE_LAG_INVALID_PORT)
++ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
++ mutex_unlock(&pf->lag_mutex);
++
+ ice_lag_move_new_vf_nodes(vf);
+
+ /* send the response to the VF */
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index d483b8c00ec0e..165f76d1231c1 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -4790,14 +4790,17 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+ {
+ if (sset == ETH_SS_STATS) {
++ struct mvneta_port *pp = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ mvneta_statistics[i].name, ETH_GSTRING_LEN);
+
+- data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
+- page_pool_ethtool_stats_get_strings(data);
++ if (!pp->bm_priv) {
++ data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
++ page_pool_ethtool_stats_get_strings(data);
++ }
+ }
+ }
+
+@@ -4915,8 +4918,10 @@ static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
+ struct page_pool_stats stats = {};
+ int i;
+
+- for (i = 0; i < rxq_number; i++)
+- page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
++ for (i = 0; i < rxq_number; i++) {
++ if (pp->rxqs[i].page_pool)
++ page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
++ }
+
+ page_pool_ethtool_stats_get(data, &stats);
+ }
+@@ -4932,14 +4937,21 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ *data++ = pp->ethtool_stats[i];
+
+- mvneta_ethtool_pp_stats(pp, data);
++ if (!pp->bm_priv)
++ mvneta_ethtool_pp_stats(pp, data);
+ }
+
+ static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
+ {
+- if (sset == ETH_SS_STATS)
+- return ARRAY_SIZE(mvneta_statistics) +
+- page_pool_ethtool_stats_get_count();
++ if (sset == ETH_SS_STATS) {
++ int count = ARRAY_SIZE(mvneta_statistics);
++ struct mvneta_port *pp = netdev_priv(dev);
++
++ if (!pp->bm_priv)
++ count += page_pool_ethtool_stats_get_count();
++
++ return count;
++ }
+
+ return -EOPNOTSUPP;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 23c2f2ed2fb83..c112c71ff576f 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -5505,6 +5505,8 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
++ if (idx == MAX_BANDPROF_PER_PFFUNC)
++ break;
+ prof_idx = req->prof_idx[layer][idx];
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+@@ -5518,8 +5520,6 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+- if (idx == MAX_BANDPROF_PER_PFFUNC)
+- break;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+index a4a258da8dd59..c1c99d7054f87 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+@@ -450,6 +450,9 @@ int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ aq->prof.pebs_mantissa = 0;
+ aq->prof_mask.pebs_mantissa = 0xFF;
+
++ aq->prof.hl_en = 0;
++ aq->prof_mask.hl_en = 1;
++
+ /* Fill AQ info */
+ aq->qidx = profile;
+ aq->ctype = NIX_AQ_CTYPE_BANDPROF;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index 818ce76185b2f..629cf1659e5f9 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -818,7 +818,6 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
+ int qidx, sqe_tail, sqe_head;
+ struct otx2_snd_queue *sq;
+ u64 incr, *ptr, val;
+- int timeout = 1000;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
+@@ -827,15 +826,11 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
+ continue;
+
+ incr = (u64)qidx << 32;
+- while (timeout) {
+- val = otx2_atomic64_add(incr, ptr);
+- sqe_head = (val >> 20) & 0x3F;
+- sqe_tail = (val >> 28) & 0x3F;
+- if (sqe_head == sqe_tail)
+- break;
+- usleep_range(1, 3);
+- timeout--;
+- }
++ val = otx2_atomic64_add(incr, ptr);
++ sqe_head = (val >> 20) & 0x3F;
++ sqe_tail = (val >> 28) & 0x3F;
++ if (sqe_head != sqe_tail)
++ usleep_range(50, 60);
+ }
+ }
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+index c04a8ee53a82f..06910307085ef 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+@@ -977,6 +977,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
+ int otx2_txsch_alloc(struct otx2_nic *pfvf);
+ void otx2_txschq_stop(struct otx2_nic *pfvf);
+ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
++void otx2_free_pending_sqe(struct otx2_nic *pfvf);
+ void otx2_sqb_flush(struct otx2_nic *pfvf);
+ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma);
+@@ -1069,6 +1070,8 @@ int otx2_init_tc(struct otx2_nic *nic);
+ void otx2_shutdown_tc(struct otx2_nic *nic);
+ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
++void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);
++
+ /* CGX/RPM DMAC filters support */
+ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
+ int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+index 4762dbea64a12..97a71e9b85637 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+@@ -1088,6 +1088,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+ struct ethhdr *eth_hdr;
+ bool new = false;
+ int err = 0;
++ u64 vf_num;
+ u32 ring;
+
+ if (!flow_cfg->max_flows) {
+@@ -1100,7 +1101,21 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+ if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
+ return -ENOMEM;
+
+- if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
++ /* Number of queues on a VF can be greater or less than
++ * the PF's queue count, so there is no need to validate the
++ * queue index when the PF is installing a rule on behalf of
++ * one of its VFs. Below are the expected vf_num values
++ * based on the ethtool command used:
++ *
++ * e.g.
++ * 1. ethtool -U <netdev> ... action -1 ==> vf_num:255
++ * 2. ethtool -U <netdev> ... action <queue_num> ==> vf_num:0
++ * 3. ethtool -U <netdev> ... vf <vf_idx> queue <queue_num> ==>
++ * vf_num:vf_idx+1
++ */
++ vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
++ if (!is_otx2_vf(pfvf->pcifunc) && !vf_num &&
++ ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ if (fsp->location >= otx2_get_maxflows(flow_cfg))
+@@ -1182,6 +1197,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+ flow_cfg->nr_flows++;
+ }
+
++ if (flow->is_vf)
++ netdev_info(pfvf->netdev,
++ "Make sure that VF's queue number is within its queue limit\n");
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 6daf4d58c25d6..532e324bdcc8e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -566,7 +566,9 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
+ otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
+ TYPE_PFVF);
+- vfs -= 64;
++ if (intr)
++ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
++ vfs = 64;
+ }
+
+ intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
+@@ -574,7 +576,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+
+ otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+
+- trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
++ if (intr)
++ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+
+ return IRQ_HANDLED;
+ }
+@@ -1193,31 +1196,32 @@ static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
+ };
+
+ static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = {
+- "NIX_SND_STATUS_GOOD",
+- "NIX_SND_STATUS_SQ_CTX_FAULT",
+- "NIX_SND_STATUS_SQ_CTX_POISON",
+- "NIX_SND_STATUS_SQB_FAULT",
+- "NIX_SND_STATUS_SQB_POISON",
+- "NIX_SND_STATUS_HDR_ERR",
+- "NIX_SND_STATUS_EXT_ERR",
+- "NIX_SND_STATUS_JUMP_FAULT",
+- "NIX_SND_STATUS_JUMP_POISON",
+- "NIX_SND_STATUS_CRC_ERR",
+- "NIX_SND_STATUS_IMM_ERR",
+- "NIX_SND_STATUS_SG_ERR",
+- "NIX_SND_STATUS_MEM_ERR",
+- "NIX_SND_STATUS_INVALID_SUBDC",
+- "NIX_SND_STATUS_SUBDC_ORDER_ERR",
+- "NIX_SND_STATUS_DATA_FAULT",
+- "NIX_SND_STATUS_DATA_POISON",
+- "NIX_SND_STATUS_NPC_DROP_ACTION",
+- "NIX_SND_STATUS_LOCK_VIOL",
+- "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR",
+- "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR",
+- "NIX_SND_STATUS_NPC_MCAST_ABORT",
+- "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
+- "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
+- "NIX_SND_STATUS_SEND_STATS_ERR",
++ [NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD",
++ [NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT",
++ [NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON",
++ [NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT",
++ [NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON",
++ [NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR",
++ [NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR",
++ [NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT",
++ [NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON",
++ [NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR",
++ [NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR",
++ [NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR",
++ [NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR",
++ [NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC",
++ [NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR",
++ [NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT",
++ [NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON",
++ [NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION",
++ [NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL",
++ [NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR",
++ [NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR",
++ [NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT",
++ [NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
++ [NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
++ [NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT",
++ [NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR",
+ };
+
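The table rewrite above (and the matching otx2_struct.h renumbering further down) switches to designated initializers, so each string stays attached to its enum value even when the enum is sparse or gets renumbered. A sketch over an invented sparse enum; unassigned slots are NULL, so lookups guard for that too:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum snd_status {
	SND_GOOD	= 0x0,
	SND_HDR_ERR	= 0x5,
	SND_CRC_ERR	= 0x10,	/* sparse: values 0x6..0xf unused */
};

static const char * const snd_status_str[] = {
	[SND_GOOD]	= "GOOD",
	[SND_HDR_ERR]	= "HDR_ERR",
	[SND_CRC_ERR]	= "CRC_ERR",	/* lands in slot 0x10 regardless of order */
};

static const char *snd_status_name(unsigned int s)
{
	if (s >= ARRAY_SIZE(snd_status_str) || !snd_status_str[s])
		return "unknown";
	return snd_status_str[s];
}

int main(void)
{
	printf("%s %s %s\n", snd_status_name(0x10),
	       snd_status_name(0x9), snd_status_name(0x40));
	return 0;
}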
+ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+@@ -1238,14 +1242,16 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ continue;
+
+ if (val & BIT_ULL(42)) {
+- netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
++ netdev_err(pf->netdev,
++ "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ } else {
+ if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
+ netdev_err(pf->netdev, "CQ%lld: Doorbell error",
+ qidx);
+ if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+- netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
++ netdev_err(pf->netdev,
++ "CQ%lld: Memory fault on CQE write to LLC/DRAM",
+ qidx);
+ }
+
+@@ -1272,7 +1278,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ (val & NIX_SQINT_BITS));
+
+ if (val & BIT_ULL(42)) {
+- netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
++ netdev_err(pf->netdev,
++ "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ goto done;
+ }
+@@ -1282,8 +1289,11 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ goto chk_mnq_err_dbg;
+
+ sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
+- netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n",
+- qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]);
++ netdev_err(pf->netdev,
++ "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx) err=%s(%#x)\n",
++ qidx, sq_op_err_dbg,
++ nix_sqoperr_e_str[sq_op_err_code],
++ sq_op_err_code);
+
+ otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
+
+@@ -1300,16 +1310,21 @@ chk_mnq_err_dbg:
+ goto chk_snd_err_dbg;
+
+ mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
+- netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n",
+- qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]);
++ netdev_err(pf->netdev,
++ "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx) err=%s(%#x)\n",
++ qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code],
++ mnq_err_code);
+ otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
+
+ chk_snd_err_dbg:
+ snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
+ if (snd_err_dbg & BIT(44)) {
+ snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
+- netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n",
+- qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]);
++ netdev_err(pf->netdev,
++ "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n",
++ qidx, snd_err_dbg,
++ nix_snd_status_e_str[snd_err_code],
++ snd_err_code);
+ otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
+ }
+
+@@ -1589,6 +1604,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
+ else
+ otx2_cleanup_tx_cqes(pf, cq);
+ }
++ otx2_free_pending_sqe(pf);
+
+ otx2_free_sq_res(pf);
+
+@@ -1857,6 +1873,8 @@ int otx2_open(struct net_device *netdev)
+ if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_reinstall_flows(pf);
+
++ otx2_tc_apply_ingress_police_rules(pf);
++
+ err = otx2_rxtx_enable(pf, true);
+ /* If a mbox communication error happens at this point then interface
+ * will end up in a state such that it is in down state but hardware
+@@ -1921,6 +1939,8 @@ int otx2_stop(struct net_device *netdev)
+ /* Clear RSS enable flag */
+ rss = &pf->hw.rss_info;
+ rss->enable = false;
++ if (!netif_is_rxfh_configured(netdev))
++ kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
+
+ /* Cleanup Queue IRQ */
+ vec = pci_irq_vector(pf->pdev,
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+index fa37b9f312cae..4e5899d8fa2e6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+@@ -318,23 +318,23 @@ enum nix_snd_status_e {
+ NIX_SND_STATUS_EXT_ERR = 0x6,
+ NIX_SND_STATUS_JUMP_FAULT = 0x7,
+ NIX_SND_STATUS_JUMP_POISON = 0x8,
+- NIX_SND_STATUS_CRC_ERR = 0x9,
+- NIX_SND_STATUS_IMM_ERR = 0x10,
+- NIX_SND_STATUS_SG_ERR = 0x11,
+- NIX_SND_STATUS_MEM_ERR = 0x12,
+- NIX_SND_STATUS_INVALID_SUBDC = 0x13,
+- NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14,
+- NIX_SND_STATUS_DATA_FAULT = 0x15,
+- NIX_SND_STATUS_DATA_POISON = 0x16,
+- NIX_SND_STATUS_NPC_DROP_ACTION = 0x17,
+- NIX_SND_STATUS_LOCK_VIOL = 0x18,
+- NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19,
+- NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20,
+- NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21,
+- NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22,
+- NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23,
+- NIX_SND_STATUS_SEND_MEM_FAULT = 0x24,
+- NIX_SND_STATUS_SEND_STATS_ERR = 0x25,
++ NIX_SND_STATUS_CRC_ERR = 0x10,
++ NIX_SND_STATUS_IMM_ERR = 0x11,
++ NIX_SND_STATUS_SG_ERR = 0x12,
++ NIX_SND_STATUS_MEM_ERR = 0x13,
++ NIX_SND_STATUS_INVALID_SUBDC = 0x14,
++ NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x15,
++ NIX_SND_STATUS_DATA_FAULT = 0x16,
++ NIX_SND_STATUS_DATA_POISON = 0x17,
++ NIX_SND_STATUS_NPC_DROP_ACTION = 0x20,
++ NIX_SND_STATUS_LOCK_VIOL = 0x21,
++ NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x22,
++ NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x23,
++ NIX_SND_STATUS_NPC_MCAST_ABORT = 0x24,
++ NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x25,
++ NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x26,
++ NIX_SND_STATUS_SEND_MEM_FAULT = 0x27,
++ NIX_SND_STATUS_SEND_STATS_ERR = 0x28,
+ NIX_SND_STATUS_MAX,
+ };
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+index fab9d85bfb371..423ce54eaea69 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+@@ -45,6 +45,9 @@ struct otx2_tc_flow {
+ bool is_act_police;
+ u32 prio;
+ struct npc_install_flow_req req;
++ u64 rate;
++ u32 burst;
++ bool is_pps;
+ };
+
+ static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
+@@ -282,21 +285,10 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
+ return err;
+ }
+
+-static int otx2_tc_act_set_police(struct otx2_nic *nic,
+- struct otx2_tc_flow *node,
+- struct flow_cls_offload *f,
+- u64 rate, u32 burst, u32 mark,
+- struct npc_install_flow_req *req, bool pps)
++static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
++ struct otx2_tc_flow *node)
+ {
+- struct netlink_ext_ack *extack = f->common.extack;
+- struct otx2_hw *hw = &nic->hw;
+- int rq_idx, rc;
+-
+- rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
+- if (rq_idx >= hw->rx_queues) {
+- NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
+- return -EINVAL;
+- }
++ int rc;
+
+ mutex_lock(&nic->mbox.lock);
+
+@@ -306,23 +298,17 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
+ return rc;
+ }
+
+- rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
++ rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
++ node->burst, node->rate, node->is_pps);
+ if (rc)
+ goto free_leaf;
+
+- rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
++ rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
+ if (rc)
+ goto free_leaf;
+
+ mutex_unlock(&nic->mbox.lock);
+
+- req->match_id = mark & 0xFFFFULL;
+- req->index = rq_idx;
+- req->op = NIX_RX_ACTIONOP_UCAST;
+- set_bit(rq_idx, &nic->rq_bmap);
+- node->is_act_police = true;
+- node->rq = rq_idx;
+-
+ return 0;
+
+ free_leaf:
+@@ -334,6 +320,39 @@ free_leaf:
+ return rc;
+ }
+
++static int otx2_tc_act_set_police(struct otx2_nic *nic,
++ struct otx2_tc_flow *node,
++ struct flow_cls_offload *f,
++ u64 rate, u32 burst, u32 mark,
++ struct npc_install_flow_req *req, bool pps)
++{
++ struct netlink_ext_ack *extack = f->common.extack;
++ struct otx2_hw *hw = &nic->hw;
++ int rq_idx, rc;
++
++ rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
++ if (rq_idx >= hw->rx_queues) {
++ NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
++ return -EINVAL;
++ }
++
++ req->match_id = mark & 0xFFFFULL;
++ req->index = rq_idx;
++ req->op = NIX_RX_ACTIONOP_UCAST;
++
++ node->is_act_police = true;
++ node->rq = rq_idx;
++ node->burst = burst;
++ node->rate = rate;
++ node->is_pps = pps;
++
++ rc = otx2_tc_act_set_hw_police(nic, node);
++ if (!rc)
++ set_bit(rq_idx, &nic->rq_bmap);
++
++ return rc;
++}
++
+ static int otx2_tc_parse_actions(struct otx2_nic *nic,
+ struct flow_action *flow_action,
+ struct npc_install_flow_req *req,
+@@ -986,6 +1005,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
+ }
+
+ if (flow_node->is_act_police) {
++ __clear_bit(flow_node->rq, &nic->rq_bmap);
++
++ if (nic->flags & OTX2_FLAG_INTF_DOWN)
++ goto free_mcam_flow;
++
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
+@@ -1001,11 +1025,10 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ flow_node->leaf_profile);
+
+- __clear_bit(flow_node->rq, &nic->rq_bmap);
+-
+ mutex_unlock(&nic->mbox.lock);
+ }
+
++free_mcam_flow:
+ otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
+ otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
+ kfree_rcu(flow_node, rcu);
+@@ -1025,6 +1048,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
+ if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
+ return -ENOMEM;
+
++ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
++ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
++ return -EINVAL;
++ }
++
+ if (flow_cfg->nr_flows == flow_cfg->max_flows) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Free MCAM entry not available to add the flow");
+@@ -1384,3 +1412,45 @@ void otx2_shutdown_tc(struct otx2_nic *nic)
+ otx2_destroy_tc_flow_list(nic);
+ }
+ EXPORT_SYMBOL(otx2_shutdown_tc);
++
++static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
++ struct otx2_tc_flow *node)
++{
++ struct npc_install_flow_req *req;
++
++ if (otx2_tc_act_set_hw_police(nic, node))
++ return;
++
++ mutex_lock(&nic->mbox.lock);
++
++ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
++ if (!req)
++ goto err;
++
++ memcpy(req, &node->req, sizeof(struct npc_install_flow_req));
++
++ if (otx2_sync_mbox_msg(&nic->mbox))
++ netdev_err(nic->netdev,
++ "Failed to install MCAM flow entry for ingress rule");
++err:
++ mutex_unlock(&nic->mbox.lock);
++}
++
++void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
++{
++ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
++ struct otx2_tc_flow *node;
++
++	/* If any ingress policer rules exist for the interface, then
++	 * apply those rules. Ingress policer rules depend on bandwidth
++	 * profiles linked to the receive queues. Since no receive queues
++	 * exist when the interface is down, ingress policer rules are stored
++ * and configured in hardware after all receive queues are allocated
++ * in otx2_open.
++ */
++ list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
++ if (node->is_act_police)
++ otx2_tc_config_ingress_rule(nic, node);
++ }
++}
++EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index 53b2a4ef52985..6ee15f3c25ede 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -1247,9 +1247,11 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q
+
+ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+ {
++ int tx_pkts = 0, tx_bytes = 0;
+ struct sk_buff *skb = NULL;
+ struct otx2_snd_queue *sq;
+ struct nix_cqe_tx_s *cqe;
++ struct netdev_queue *txq;
+ int processed_cqe = 0;
+ struct sg_list *sg;
+ int qidx;
+@@ -1270,12 +1272,20 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+ sg = &sq->sg[cqe->comp.sqe_id];
+ skb = (struct sk_buff *)sg->skb;
+ if (skb) {
++ tx_bytes += skb->len;
++ tx_pkts++;
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ dev_kfree_skb_any(skb);
+ sg->skb = (u64)NULL;
+ }
+ }
+
++ if (likely(tx_pkts)) {
++ if (qidx >= pfvf->hw.tx_queues)
++ qidx -= pfvf->hw.xdp_queues;
++ txq = netdev_get_tx_queue(pfvf->netdev, qidx);
++ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
++ }
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+@@ -1302,6 +1312,38 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
+ return err;
+ }
+
++void otx2_free_pending_sqe(struct otx2_nic *pfvf)
++{
++ int tx_pkts = 0, tx_bytes = 0;
++ struct sk_buff *skb = NULL;
++ struct otx2_snd_queue *sq;
++ struct netdev_queue *txq;
++ struct sg_list *sg;
++ int sq_idx, sqe;
++
++ for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) {
++ sq = &pfvf->qset.sq[sq_idx];
++ for (sqe = 0; sqe < sq->sqe_cnt; sqe++) {
++ sg = &sq->sg[sqe];
++ skb = (struct sk_buff *)sg->skb;
++ if (skb) {
++ tx_bytes += skb->len;
++ tx_pkts++;
++ otx2_dma_unmap_skb_frags(pfvf, sg);
++ dev_kfree_skb_any(skb);
++ sg->skb = (u64)NULL;
++ }
++ }
++
++ if (!tx_pkts)
++ continue;
++ txq = netdev_get_tx_queue(pfvf->netdev, sq_idx);
++ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
++ tx_pkts = 0;
++ tx_bytes = 0;
++ }
++}
++
+ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
+ int len, int *offset)
+ {
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+index 47ea69feb3b24..f87ab9b8a5901 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -64,8 +64,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(10) /* wed v2 */
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(11) /* wed v2 */
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+index bb11e644d24f7..af3928eddafd1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+@@ -177,6 +177,8 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
+
+ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
+ struct mlx5_cqe64 *cqe,
++ u8 *md_buff,
++ u8 *md_buff_sz,
+ int budget)
+ {
+ struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
+@@ -211,19 +213,24 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
+ mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
+ out:
+ napi_consume_skb(skb, budget);
+- mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, metadata_id);
++	md_buff[(*md_buff_sz)++] = metadata_id;
+ if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
+ !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
+ }
+
+-static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
++static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget)
+ {
+ struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
+- struct mlx5_cqwq *cqwq = &cq->wq;
++ int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET);
++ u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET];
++ u8 metadata_buff_sz = 0;
++ struct mlx5_cqwq *cqwq;
+ struct mlx5_cqe64 *cqe;
+ int work_done = 0;
+
++ cqwq = &cq->wq;
++
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
+ return false;
+
+@@ -234,7 +241,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
+ do {
+ mlx5_cqwq_pop(cqwq);
+
+- mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
++ mlx5e_ptp_handle_ts_cqe(ptpsq, cqe,
++ metadata_buff, &metadata_buff_sz, napi_budget);
+ } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
+
+ mlx5_cqwq_update_db_record(cqwq);
+@@ -242,6 +250,10 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
++ while (metadata_buff_sz > 0)
++ mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist,
++ metadata_buff[--metadata_buff_sz]);
++
+ mlx5e_txqsq_wake(&ptpsq->txqsq);
+
+ return work_done == budget;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+index e8eea9ffd5eb6..03b119a434bc9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+@@ -702,11 +702,11 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
+
+ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+ {
+- char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {};
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_icosq *icosq = rq->icosq;
+ struct mlx5e_priv *priv = rq->priv;
+ struct mlx5e_err_ctx err_ctx = {};
++ char icosq_str[32] = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
+@@ -715,7 +715,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+ if (icosq)
+ snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);
+ snprintf(err_str, sizeof(err_str),
+- "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x",
++ "RX timeout on channel: %d, %s RQ: 0x%x, CQ: 0x%x",
+ rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+index 00a04fdd756f5..668da5c70e63d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+@@ -300,9 +300,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ if (err)
+ goto destroy_neigh_entry;
+
+- e->encap_size = ipv4_encap_size;
+- e->encap_header = encap_header;
+-
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+@@ -322,6 +319,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ goto destroy_neigh_entry;
+ }
+
++ e->encap_size = ipv4_encap_size;
++ e->encap_header = encap_header;
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv4_put(&attr);
+@@ -404,16 +403,12 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ if (err)
+ goto free_encap;
+
+- e->encap_size = ipv4_encap_size;
+- kfree(e->encap_header);
+- e->encap_header = encap_header;
+-
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+ * and not used before that.
+ */
+- goto release_neigh;
++ goto free_encap;
+ }
+
+ memset(&reformat_params, 0, sizeof(reformat_params));
+@@ -427,6 +422,10 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ goto free_encap;
+ }
+
++ e->encap_size = ipv4_encap_size;
++ kfree(e->encap_header);
++ e->encap_header = encap_header;
++
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv4_put(&attr);
+@@ -568,9 +567,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ if (err)
+ goto destroy_neigh_entry;
+
+- e->encap_size = ipv6_encap_size;
+- e->encap_header = encap_header;
+-
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+@@ -590,6 +586,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ goto destroy_neigh_entry;
+ }
+
++ e->encap_size = ipv6_encap_size;
++ e->encap_header = encap_header;
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv6_put(&attr);
+@@ -671,16 +669,12 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ if (err)
+ goto free_encap;
+
+- e->encap_size = ipv6_encap_size;
+- kfree(e->encap_header);
+- e->encap_header = encap_header;
+-
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+ * and not used before that.
+ */
+- goto release_neigh;
++ goto free_encap;
+ }
+
+ memset(&reformat_params, 0, sizeof(reformat_params));
+@@ -694,6 +688,10 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ goto free_encap;
+ }
+
++ e->encap_size = ipv6_encap_size;
++ kfree(e->encap_header);
++ e->encap_header = encap_header;
++
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv6_put(&attr);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index dff02434ff458..7c66bd73ddfa2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -43,12 +43,17 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
+ struct ethtool_drvinfo *drvinfo)
+ {
+ struct mlx5_core_dev *mdev = priv->mdev;
++ int count;
+
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+- "%d.%d.%04d (%.16s)",
+- fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
+- mdev->board_id);
++ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
++ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
++ if (count == sizeof(drvinfo->fw_version))
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%d.%d.%04d", fw_rev_maj(mdev),
++ fw_rev_min(mdev), fw_rev_sub(mdev));
++
+ strscpy(drvinfo->bus_info, dev_name(mdev->device),
+ sizeof(drvinfo->bus_info));
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index fd1cce542b680..825f9c687633f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -71,13 +71,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
+ {
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
++ int count;
+
+ strscpy(drvinfo->driver, mlx5e_rep_driver_name,
+ sizeof(drvinfo->driver));
+- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+- "%d.%d.%04d (%.16s)",
+- fw_rev_maj(mdev), fw_rev_min(mdev),
+- fw_rev_sub(mdev), mdev->board_id);
++ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
++ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
++ if (count == sizeof(drvinfo->fw_version))
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%d.%d.%04d", fw_rev_maj(mdev),
++ fw_rev_min(mdev), fw_rev_sub(mdev));
+ }
+
+ static const struct counter_desc sw_rep_stats_desc[] = {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index c8590483ddc64..b62fd37493410 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -3145,7 +3145,7 @@ static struct mlx5_fields fields[] = {
+ OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
+ OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
+- OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
++ OFFLOAD(IP_DSCP, 16, 0x0fc0, ip6, 0, ip_dscp),
+
+ OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
+ OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
+@@ -3156,21 +3156,31 @@ static struct mlx5_fields fields[] = {
+ OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
+ };
+
+-static unsigned long mask_to_le(unsigned long mask, int size)
++static u32 mask_field_get(void *mask, struct mlx5_fields *f)
+ {
+- __be32 mask_be32;
+- __be16 mask_be16;
+-
+- if (size == 32) {
+- mask_be32 = (__force __be32)(mask);
+- mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
+- } else if (size == 16) {
+- mask_be32 = (__force __be32)(mask);
+- mask_be16 = *(__be16 *)&mask_be32;
+- mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
++ switch (f->field_bsize) {
++ case 32:
++ return be32_to_cpu(*(__be32 *)mask) & f->field_mask;
++ case 16:
++ return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask;
++ default:
++ return *(u8 *)mask & (u8)f->field_mask;
+ }
++}
+
+- return mask;
++static void mask_field_clear(void *mask, struct mlx5_fields *f)
++{
++ switch (f->field_bsize) {
++ case 32:
++ *(__be32 *)mask &= ~cpu_to_be32(f->field_mask);
++ break;
++ case 16:
++ *(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask);
++ break;
++ default:
++ *(u8 *)mask &= ~(u8)f->field_mask;
++ break;
++ }
+ }
+
+ static int offload_pedit_fields(struct mlx5e_priv *priv,
+@@ -3182,11 +3192,12 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+ struct pedit_headers_action *hdrs = parse_attr->hdrs;
+ void *headers_c, *headers_v, *action, *vals_p;
+- u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
+ struct mlx5e_tc_mod_hdr_acts *mod_acts;
+- unsigned long mask, field_mask;
++ void *s_masks_p, *a_masks_p;
+ int i, first, last, next_z;
+ struct mlx5_fields *f;
++ unsigned long mask;
++ u32 s_mask, a_mask;
+ u8 cmd;
+
+ mod_acts = &parse_attr->mod_hdr_acts;
+@@ -3202,15 +3213,11 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ bool skip;
+
+ f = &fields[i];
+- /* avoid seeing bits set from previous iterations */
+- s_mask = 0;
+- a_mask = 0;
+-
+ s_masks_p = (void *)set_masks + f->offset;
+ a_masks_p = (void *)add_masks + f->offset;
+
+- s_mask = *s_masks_p & f->field_mask;
+- a_mask = *a_masks_p & f->field_mask;
++ s_mask = mask_field_get(s_masks_p, f);
++ a_mask = mask_field_get(a_masks_p, f);
+
+ if (!s_mask && !a_mask) /* nothing to offload here */
+ continue;
+@@ -3237,22 +3244,20 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ match_mask, f->field_bsize))
+ skip = true;
+ /* clear to denote we consumed this field */
+- *s_masks_p &= ~f->field_mask;
++ mask_field_clear(s_masks_p, f);
+ } else {
+ cmd = MLX5_ACTION_TYPE_ADD;
+ mask = a_mask;
+ vals_p = (void *)add_vals + f->offset;
+ /* add 0 is no change */
+- if ((*(u32 *)vals_p & f->field_mask) == 0)
++ if (!mask_field_get(vals_p, f))
+ skip = true;
+ /* clear to denote we consumed this field */
+- *a_masks_p &= ~f->field_mask;
++ mask_field_clear(a_masks_p, f);
+ }
+ if (skip)
+ continue;
+
+- mask = mask_to_le(mask, f->field_bsize);
+-
+ first = find_first_bit(&mask, f->field_bsize);
+ next_z = find_next_zero_bit(&mask, f->field_bsize, first);
+ last = find_last_bit(&mask, f->field_bsize);
+@@ -3279,10 +3284,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ MLX5_SET(set_action_in, action, field, f->field);
+
+ if (cmd == MLX5_ACTION_TYPE_SET) {
++ unsigned long field_mask = f->field_mask;
+ int start;
+
+- field_mask = mask_to_le(f->field_mask, f->field_bsize);
+-
+ /* if field is bit sized it can start not from first bit */
+ start = find_first_bit(&field_mask, f->field_bsize);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index d41435c22ce56..f0b506e562df3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -399,9 +399,9 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
+
+ mlx5e_skb_cb_hwtstamp_init(skb);
+- mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
+ mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
+ metadata_index);
++ mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
+ if (!netif_tx_queue_stopped(sq->txq) &&
+ mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
+ netif_tx_stop_queue(sq->txq);
+@@ -494,10 +494,10 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+
+ err_drop:
+ stats->dropped++;
+- dev_kfree_skb_any(skb);
+ if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
+ be32_to_cpu(eseg->flow_table_metadata));
++ dev_kfree_skb_any(skb);
+ mlx5e_tx_flush(sq);
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+index ea0405e0a43fa..40a6cb052a2da 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+@@ -885,11 +885,14 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
+ {
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_irq *irq;
++ int cpu;
+
+ irq = xa_load(&table->comp_irqs, vecidx);
+ if (!irq)
+ return;
+
++ cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
++ cpumask_clear_cpu(cpu, &table->used_cpus);
+ xa_erase(&table->comp_irqs, vecidx);
+ mlx5_irq_affinity_irq_release(dev, irq);
+ }
+@@ -897,16 +900,26 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
+ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
+ {
+ struct mlx5_eq_table *table = dev->priv.eq_table;
++ struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
++ struct irq_affinity_desc af_desc = {};
+ struct mlx5_irq *irq;
+
+- irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
+- if (IS_ERR(irq)) {
+- /* In case SF irq pool does not exist, fallback to the PF irqs*/
+- if (PTR_ERR(irq) == -ENOENT)
+- return comp_irq_request_pci(dev, vecidx);
++	/* In case the SF irq pool does not exist, fall back to the PF irqs */
++ if (!mlx5_irq_pool_is_sf_pool(pool))
++ return comp_irq_request_pci(dev, vecidx);
+
++ af_desc.is_managed = 1;
++ cpumask_copy(&af_desc.mask, cpu_online_mask);
++ cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
++ irq = mlx5_irq_affinity_request(pool, &af_desc);
++ if (IS_ERR(irq))
+ return PTR_ERR(irq);
+- }
++
++ cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
++ mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
++ pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
++ cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
++ mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
+
+ return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index b296ac52a4397..88236e75fd901 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -984,7 +984,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+- if (rep->vport == MLX5_VPORT_UPLINK && on_esw->offloads.ft_ipsec_tx_pol) {
++ if (rep->vport == MLX5_VPORT_UPLINK &&
++ on_esw == from_esw && on_esw->offloads.ft_ipsec_tx_pol) {
+ dest.ft = on_esw->offloads.ft_ipsec_tx_pol;
+ flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+index 047d5fed5f89e..612e666ec2635 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+@@ -168,45 +168,3 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *i
+ if (pool->irqs_per_cpu)
+ cpu_put(pool, cpu);
+ }
+-
+-/**
+- * mlx5_irq_affinity_irq_request_auto - request one IRQ for mlx5 device.
+- * @dev: mlx5 device that is requesting the IRQ.
+- * @used_cpus: cpumask of bounded cpus by the device
+- * @vecidx: vector index to request an IRQ for.
+- *
+- * Each IRQ is bounded to at most 1 CPU.
+- * This function is requesting an IRQ according to the default assignment.
+- * The default assignment policy is:
+- * - request the least loaded IRQ which is not bound to any
+- * CPU of the previous IRQs requested.
+- *
+- * On success, this function updates used_cpus mask and returns an irq pointer.
+- * In case of an error, an appropriate error pointer is returned.
+- */
+-struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
+- struct cpumask *used_cpus, u16 vecidx)
+-{
+- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+- struct irq_affinity_desc af_desc = {};
+- struct mlx5_irq *irq;
+-
+- if (!mlx5_irq_pool_is_sf_pool(pool))
+- return ERR_PTR(-ENOENT);
+-
+- af_desc.is_managed = 1;
+- cpumask_copy(&af_desc.mask, cpu_online_mask);
+- cpumask_andnot(&af_desc.mask, &af_desc.mask, used_cpus);
+- irq = mlx5_irq_affinity_request(pool, &af_desc);
+-
+- if (IS_ERR(irq))
+- return irq;
+-
+- cpumask_or(used_cpus, used_cpus, mlx5_irq_get_affinity_mask(irq));
+- mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
+- pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
+- cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
+- mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
+-
+- return irq;
+-}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index aa29f09e83564..0c83ef174275a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -384,7 +384,12 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+
+ static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
+ {
+- return mlx5_ptp_adjtime(ptp, delta);
++ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
++ struct mlx5_core_dev *mdev;
++
++ mdev = container_of(clock, struct mlx5_core_dev, clock);
++
++ return mlx5_ptp_adjtime_real_time(mdev, delta);
+ }
+
+ static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+index 653648216730a..4dcf995cb1a20 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+@@ -28,7 +28,7 @@
+ struct mlx5_irq {
+ struct atomic_notifier_head nh;
+ cpumask_var_t mask;
+- char name[MLX5_MAX_IRQ_NAME];
++ char name[MLX5_MAX_IRQ_FORMATTED_NAME];
+ struct mlx5_irq_pool *pool;
+ int refcount;
+ struct msi_map map;
+@@ -292,8 +292,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
+ else
+ irq_sf_set_name(pool, name, i);
+ ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
+- snprintf(irq->name, MLX5_MAX_IRQ_NAME,
+- "%s@pci:%s", name, pci_name(dev->pdev));
++ snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME,
++ MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev));
+ err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
+ &irq->nh);
+ if (err) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+index d3a77a0ab8488..c4d377f8df308 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+@@ -7,6 +7,9 @@
+ #include <linux/mlx5/driver.h>
+
+ #define MLX5_MAX_IRQ_NAME (32)
++#define MLX5_IRQ_NAME_FORMAT_STR ("%s@pci:%s")
++#define MLX5_MAX_IRQ_FORMATTED_NAME \
++ (MLX5_MAX_IRQ_NAME + sizeof(MLX5_IRQ_NAME_FORMAT_STR))
+ /* max irq_index is 2047, so four chars */
+ #define MLX5_MAX_IRQ_IDX_CHARS (4)
+ #define MLX5_EQ_REFS_PER_IRQ (2)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+index 4e8527a724f50..6fa06ba2d3465 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+@@ -52,7 +52,6 @@ struct dr_qp_init_attr {
+ u32 cqn;
+ u32 pdn;
+ u32 max_send_wr;
+- u32 max_send_sge;
+ struct mlx5_uars_page *uar;
+ u8 isolate_vl_tc:1;
+ };
+@@ -247,37 +246,6 @@ static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
+ return err == CQ_POLL_ERR ? err : npolled;
+ }
+
+-static int dr_qp_get_args_update_send_wqe_size(struct dr_qp_init_attr *attr)
+-{
+- return roundup_pow_of_two(sizeof(struct mlx5_wqe_ctrl_seg) +
+- sizeof(struct mlx5_wqe_flow_update_ctrl_seg) +
+- sizeof(struct mlx5_wqe_header_modify_argument_update_seg));
+-}
+-
+-/* We calculate for specific RC QP with the required functionality */
+-static int dr_qp_calc_rc_send_wqe(struct dr_qp_init_attr *attr)
+-{
+- int update_arg_size;
+- int inl_size = 0;
+- int tot_size;
+- int size;
+-
+- update_arg_size = dr_qp_get_args_update_send_wqe_size(attr);
+-
+- size = sizeof(struct mlx5_wqe_ctrl_seg) +
+- sizeof(struct mlx5_wqe_raddr_seg);
+- inl_size = size + ALIGN(sizeof(struct mlx5_wqe_inline_seg) +
+- DR_STE_SIZE, 16);
+-
+- size += attr->max_send_sge * sizeof(struct mlx5_wqe_data_seg);
+-
+- size = max(size, update_arg_size);
+-
+- tot_size = max(size, inl_size);
+-
+- return ALIGN(tot_size, MLX5_SEND_WQE_BB);
+-}
+-
+ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
+ struct dr_qp_init_attr *attr)
+ {
+@@ -285,7 +253,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
+ u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
+ struct mlx5_wq_param wqp;
+ struct mlx5dr_qp *dr_qp;
+- int wqe_size;
+ int inlen;
+ void *qpc;
+ void *in;
+@@ -365,15 +332,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
+ if (err)
+ goto err_in;
+ dr_qp->uar = attr->uar;
+- wqe_size = dr_qp_calc_rc_send_wqe(attr);
+- dr_qp->max_inline_data = min(wqe_size -
+- (sizeof(struct mlx5_wqe_ctrl_seg) +
+- sizeof(struct mlx5_wqe_raddr_seg) +
+- sizeof(struct mlx5_wqe_inline_seg)),
+- (2 * MLX5_SEND_WQE_BB -
+- (sizeof(struct mlx5_wqe_ctrl_seg) +
+- sizeof(struct mlx5_wqe_raddr_seg) +
+- sizeof(struct mlx5_wqe_inline_seg))));
+
+ return dr_qp;
+
+@@ -437,48 +395,8 @@ dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+ MLX5_SEND_WQE_DS;
+ }
+
+-static int dr_set_data_inl_seg(struct mlx5dr_qp *dr_qp,
+- struct dr_data_seg *data_seg, void *wqe)
+-{
+- int inline_header_size = sizeof(struct mlx5_wqe_ctrl_seg) +
+- sizeof(struct mlx5_wqe_raddr_seg) +
+- sizeof(struct mlx5_wqe_inline_seg);
+- struct mlx5_wqe_inline_seg *seg;
+- int left_space;
+- int inl = 0;
+- void *addr;
+- int len;
+- int idx;
+-
+- seg = wqe;
+- wqe += sizeof(*seg);
+- addr = (void *)(unsigned long)(data_seg->addr);
+- len = data_seg->length;
+- inl += len;
+- left_space = MLX5_SEND_WQE_BB - inline_header_size;
+-
+- if (likely(len > left_space)) {
+- memcpy(wqe, addr, left_space);
+- len -= left_space;
+- addr += left_space;
+- idx = (dr_qp->sq.pc + 1) & (dr_qp->sq.wqe_cnt - 1);
+- wqe = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
+- }
+-
+- memcpy(wqe, addr, len);
+-
+- if (likely(inl)) {
+- seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
+- return DIV_ROUND_UP(inl + sizeof(seg->byte_count),
+- MLX5_SEND_WQE_DS);
+- } else {
+- return 0;
+- }
+-}
+-
+ static void
+-dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
+- struct mlx5_wqe_ctrl_seg *wq_ctrl,
++dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+ u64 remote_addr,
+ u32 rkey,
+ struct dr_data_seg *data_seg,
+@@ -494,17 +412,15 @@ dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
+ wq_raddr->reserved = 0;
+
+ wq_dseg = (void *)(wq_raddr + 1);
+- /* WQE ctrl segment + WQE remote addr segment */
+- *size = (sizeof(*wq_ctrl) + sizeof(*wq_raddr)) / MLX5_SEND_WQE_DS;
+
+- if (data_seg->send_flags & IB_SEND_INLINE) {
+- *size += dr_set_data_inl_seg(dr_qp, data_seg, wq_dseg);
+- } else {
+- wq_dseg->byte_count = cpu_to_be32(data_seg->length);
+- wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
+- wq_dseg->addr = cpu_to_be64(data_seg->addr);
+- *size += sizeof(*wq_dseg) / MLX5_SEND_WQE_DS; /* WQE data segment */
+- }
++ wq_dseg->byte_count = cpu_to_be32(data_seg->length);
++ wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
++ wq_dseg->addr = cpu_to_be64(data_seg->addr);
++
++ *size = (sizeof(*wq_ctrl) + /* WQE ctrl segment */
++ sizeof(*wq_dseg) + /* WQE data segment */
++ sizeof(*wq_raddr)) / /* WQE remote addr segment */
++ MLX5_SEND_WQE_DS;
+ }
+
+ static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+@@ -535,7 +451,7 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
+ switch (opcode) {
+ case MLX5_OPCODE_RDMA_READ:
+ case MLX5_OPCODE_RDMA_WRITE:
+- dr_rdma_handle_icm_write_segments(dr_qp, wq_ctrl, remote_addr,
++ dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr,
+ rkey, data_seg, &size);
+ break;
+ case MLX5_OPCODE_FLOW_TBL_ACCESS:
+@@ -656,7 +572,7 @@ static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring,
+ if (send_ring->pending_wqe % send_ring->signal_th == 0)
+ send_info->write.send_flags |= IB_SEND_SIGNALED;
+ else
+- send_info->write.send_flags &= ~IB_SEND_SIGNALED;
++ send_info->write.send_flags = 0;
+ }
+
+ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
+@@ -680,13 +596,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
+ }
+
+ send_ring->pending_wqe++;
+- if (!send_info->write.lkey)
+- send_info->write.send_flags |= IB_SEND_INLINE;
+
+ if (send_ring->pending_wqe % send_ring->signal_th == 0)
+ send_info->write.send_flags |= IB_SEND_SIGNALED;
+- else
+- send_info->write.send_flags &= ~IB_SEND_SIGNALED;
+
+ send_ring->pending_wqe++;
+ send_info->read.length = send_info->write.length;
+@@ -696,9 +608,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
+ send_info->read.lkey = send_ring->sync_mr->mkey;
+
+ if (send_ring->pending_wqe % send_ring->signal_th == 0)
+- send_info->read.send_flags |= IB_SEND_SIGNALED;
++ send_info->read.send_flags = IB_SEND_SIGNALED;
+ else
+- send_info->read.send_flags &= ~IB_SEND_SIGNALED;
++ send_info->read.send_flags = 0;
+ }
+
+ static void dr_fill_data_segs(struct mlx5dr_domain *dmn,
+@@ -1345,7 +1257,6 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
+ dmn->send_ring->cq->qp = dmn->send_ring->qp;
+
+ dmn->info.max_send_wr = QUEUE_SIZE;
+- init_attr.max_send_sge = 1;
+ dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
+ DR_STE_SIZE);
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+index e2aced7ab4547..95f63fcf4ba1f 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+@@ -496,7 +496,7 @@ mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks)
+ * is 2^ACL_MAX_BF_LOG
+ */
+ bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG);
+- bf = kzalloc(struct_size(bf, refcnt, bf_bank_size * num_erp_banks),
++ bf = kzalloc(struct_size(bf, refcnt, size_mul(bf_bank_size, num_erp_banks)),
+ GFP_KERNEL);
+ if (!bf)
+ return ERR_PTR(-ENOMEM);
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 361b90007148b..62cabeeb842a1 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -579,6 +579,7 @@ struct rtl8169_tc_offsets {
+ enum rtl_flag {
+ RTL_FLAG_TASK_ENABLED = 0,
+ RTL_FLAG_TASK_RESET_PENDING,
++ RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
+ RTL_FLAG_TASK_TX_TIMEOUT,
+ RTL_FLAG_MAX
+ };
+@@ -624,6 +625,7 @@ struct rtl8169_private {
+
+ unsigned supports_gmii:1;
+ unsigned aspm_manageable:1;
++ unsigned dash_enabled:1;
+ dma_addr_t counters_phys_addr;
+ struct rtl8169_counters *counters;
+ struct rtl8169_tc_offsets tc_offset;
+@@ -1253,14 +1255,26 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp)
+ return r8168ep_ocp_read(tp, 0x128) & BIT(0);
+ }
+
+-static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
++static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
++{
++ switch (tp->dash_type) {
++ case RTL_DASH_DP:
++ return r8168dp_check_dash(tp);
++ case RTL_DASH_EP:
++ return r8168ep_check_dash(tp);
++ default:
++ return false;
++ }
++}
++
++static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
+ {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+- return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
++ return RTL_DASH_DP;
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+- return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
++ return RTL_DASH_EP;
+ default:
+ return RTL_DASH_NONE;
+ }
+@@ -1453,7 +1467,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+
+ device_set_wakeup_enable(tp_to_dev(tp), wolopts);
+
+- if (tp->dash_type == RTL_DASH_NONE) {
++ if (!tp->dash_enabled) {
+ rtl_set_d3_pll_down(tp, !wolopts);
+ tp->dev->wol_enabled = wolopts ? 1 : 0;
+ }
+@@ -2512,7 +2526,7 @@ static void rtl_wol_enable_rx(struct rtl8169_private *tp)
+
+ static void rtl_prepare_power_down(struct rtl8169_private *tp)
+ {
+- if (tp->dash_type != RTL_DASH_NONE)
++ if (tp->dash_enabled)
+ return;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
+@@ -2582,6 +2596,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
+
+ if (dev->flags & IFF_PROMISC) {
+ rx_mode |= AcceptAllPhys;
++ } else if (!(dev->flags & IFF_MULTICAST)) {
++ rx_mode &= ~AcceptMulticast;
+ } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
+ dev->flags & IFF_ALLMULTI ||
+ tp->mac_version == RTL_GIGA_MAC_VER_35) {
+@@ -4567,6 +4583,8 @@ static void rtl_task(struct work_struct *work)
+ reset:
+ rtl_reset_work(tp);
+ netif_wake_queue(tp->dev);
++ } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
++ rtl_reset_work(tp);
+ }
+ out_unlock:
+ rtnl_unlock();
+@@ -4596,7 +4614,11 @@ static void r8169_phylink_handler(struct net_device *ndev)
+ if (netif_carrier_ok(ndev)) {
+ rtl_link_chg_patch(tp);
+ pm_request_resume(d);
++ netif_wake_queue(tp->dev);
+ } else {
++		/* In a few cases rx is otherwise broken after link-down */
++ if (rtl_is_8125(tp))
++ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);
+ pm_runtime_idle(d);
+ }
+
+@@ -4640,10 +4662,16 @@ static void rtl8169_down(struct rtl8169_private *tp)
+ rtl8169_cleanup(tp);
+ rtl_disable_exit_l1(tp);
+ rtl_prepare_power_down(tp);
++
++ if (tp->dash_type != RTL_DASH_NONE)
++ rtl8168_driver_stop(tp);
+ }
+
+ static void rtl8169_up(struct rtl8169_private *tp)
+ {
++ if (tp->dash_type != RTL_DASH_NONE)
++ rtl8168_driver_start(tp);
++
+ pci_set_master(tp->pci_dev);
+ phy_init_hw(tp->phydev);
+ phy_resume(tp->phydev);
+@@ -4666,7 +4694,7 @@ static int rtl8169_close(struct net_device *dev)
+ rtl8169_down(tp);
+ rtl8169_rx_clear(tp);
+
+- cancel_work_sync(&tp->wk.work);
++ cancel_work(&tp->wk.work);
+
+ free_irq(tp->irq, tp);
+
+@@ -4861,7 +4889,7 @@ static int rtl8169_runtime_idle(struct device *device)
+ {
+ struct rtl8169_private *tp = dev_get_drvdata(device);
+
+- if (tp->dash_type != RTL_DASH_NONE)
++ if (tp->dash_enabled)
+ return -EBUSY;
+
+ if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
+@@ -4887,8 +4915,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
+ /* Restore original MAC address */
+ rtl_rar_set(tp, tp->dev->perm_addr);
+
+- if (system_state == SYSTEM_POWER_OFF &&
+- tp->dash_type == RTL_DASH_NONE) {
++ if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
+ pci_wake_from_d3(pdev, tp->saved_wolopts);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+@@ -4901,6 +4928,8 @@ static void rtl_remove_one(struct pci_dev *pdev)
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
+
++ cancel_work_sync(&tp->wk.work);
++
+ unregister_netdev(tp->dev);
+
+ if (tp->dash_type != RTL_DASH_NONE)
+@@ -5246,7 +5275,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ tp->aspm_manageable = !rc;
+
+- tp->dash_type = rtl_check_dash(tp);
++ tp->dash_type = rtl_get_dash_type(tp);
++ tp->dash_enabled = rtl_dash_is_enabled(tp);
+
+ tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
+
+@@ -5317,7 +5347,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* configure chip for default features */
+ rtl8169_set_features(dev, dev->features);
+
+- if (tp->dash_type == RTL_DASH_NONE) {
++ if (!tp->dash_enabled) {
+ rtl_set_d3_pll_down(tp, true);
+ } else {
+ rtl_set_d3_pll_down(tp, false);
+@@ -5357,7 +5387,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ "ok" : "ko");
+
+ if (tp->dash_type != RTL_DASH_NONE) {
+- netdev_info(dev, "DASH enabled\n");
++ netdev_info(dev, "DASH %s\n",
++ tp->dash_enabled ? "enabled" : "disabled");
+ rtl8168_driver_start(tp);
+ }
+
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 0ef0b88b71459..bb56cf4090423 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -515,6 +515,15 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
+ {
+ struct ravb_private *priv = netdev_priv(ndev);
+
++ if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
++ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
++ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
++ } else {
++ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
++ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
++ CXR31_SEL_LINK0);
++ }
++
+ /* Receive frame limit set register */
+ ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
+
+@@ -537,14 +546,6 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
+
+ /* E-MAC interrupt enable register */
+ ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
+-
+- if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
+- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
+- ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
+- } else {
+- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
+- CXR31_SEL_LINK0);
+- }
+ }
+
+ static void ravb_emac_init_rcar(struct net_device *ndev)
+@@ -1811,19 +1812,20 @@ static int ravb_open(struct net_device *ndev)
+ if (info->gptp)
+ ravb_ptp_init(ndev, priv->pdev);
+
+- netif_tx_start_all_queues(ndev);
+-
+ /* PHY control start */
+ error = ravb_phy_start(ndev);
+ if (error)
+ goto out_ptp_stop;
+
++ netif_tx_start_all_queues(ndev);
++
+ return 0;
+
+ out_ptp_stop:
+ /* Stop PTP Clock driver */
+ if (info->gptp)
+ ravb_ptp_stop(ndev);
++ ravb_stop_dma(ndev);
+ out_free_irq_mgmta:
+ if (!info->multi_irqs)
+ goto out_free_irq;
+@@ -1874,6 +1876,12 @@ static void ravb_tx_timeout_work(struct work_struct *work)
+ struct net_device *ndev = priv->ndev;
+ int error;
+
++ if (!rtnl_trylock()) {
++ usleep_range(1000, 2000);
++ schedule_work(&priv->work);
++ return;
++ }
++
+ netif_tx_stop_all_queues(ndev);
+
+ /* Stop PTP Clock driver */
+@@ -1907,7 +1915,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
+ */
+ netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
+ __func__, error);
+- return;
++ goto out_unlock;
+ }
+ ravb_emac_init(ndev);
+
+@@ -1917,6 +1925,9 @@ out:
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_tx_start_all_queues(ndev);
++
++out_unlock:
++ rtnl_unlock();
+ }
+
+ /* Packet transmit function for Ethernet AVB */
+@@ -2645,9 +2656,14 @@ static int ravb_probe(struct platform_device *pdev)
+ ndev->features = info->net_features;
+ ndev->hw_features = info->net_hw_features;
+
+- reset_control_deassert(rstc);
++ error = reset_control_deassert(rstc);
++ if (error)
++ goto out_free_netdev;
++
+ pm_runtime_enable(&pdev->dev);
+- pm_runtime_get_sync(&pdev->dev);
++ error = pm_runtime_resume_and_get(&pdev->dev);
++ if (error < 0)
++ goto out_rpm_disable;
+
+ if (info->multi_irqs) {
+ if (info->err_mgmt_irqs)
+@@ -2872,11 +2888,12 @@ out_disable_gptp_clk:
+ out_disable_refclk:
+ clk_disable_unprepare(priv->refclk);
+ out_release:
+- free_netdev(ndev);
+-
+ pm_runtime_put(&pdev->dev);
++out_rpm_disable:
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(rstc);
++out_free_netdev:
++ free_netdev(ndev);
+ return error;
+ }
+
+@@ -2886,22 +2903,26 @@ static int ravb_remove(struct platform_device *pdev)
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+
+- /* Stop PTP Clock driver */
+- if (info->ccc_gac)
+- ravb_ptp_stop(ndev);
+-
+- clk_disable_unprepare(priv->gptp_clk);
+- clk_disable_unprepare(priv->refclk);
+-
+- /* Set reset mode */
+- ravb_write(ndev, CCC_OPC_RESET, CCC);
+ unregister_netdev(ndev);
+ if (info->nc_queues)
+ netif_napi_del(&priv->napi[RAVB_NC]);
+ netif_napi_del(&priv->napi[RAVB_BE]);
++
+ ravb_mdio_release(priv);
++
++ /* Stop PTP Clock driver */
++ if (info->ccc_gac)
++ ravb_ptp_stop(ndev);
++
+ dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+ priv->desc_bat_dma);
++
++ /* Set reset mode */
++ ravb_write(ndev, CCC_OPC_RESET, CCC);
++
++ clk_disable_unprepare(priv->gptp_clk);
++ clk_disable_unprepare(priv->refclk);
++
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(priv->rstc);
+diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
+index 0fc0b6bea7530..ae9d8722b76f7 100644
+--- a/drivers/net/ethernet/renesas/rswitch.c
++++ b/drivers/net/ethernet/renesas/rswitch.c
+@@ -1501,8 +1501,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
+ {
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ struct rswitch_gwca_queue *gq = rdev->tx_queue;
++ netdev_tx_t ret = NETDEV_TX_OK;
+ struct rswitch_ext_desc *desc;
+- int ret = NETDEV_TX_OK;
+ dma_addr_t dma_addr;
+
+ if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
+@@ -1514,10 +1514,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
+ return ret;
+
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
+- if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
+- dev_kfree_skb_any(skb);
+- return ret;
+- }
++ if (dma_mapping_error(ndev->dev.parent, dma_addr))
++ goto err_kfree;
+
+ gq->skbs[gq->cur] = skb;
+ desc = &gq->tx_ring[gq->cur];
+@@ -1530,10 +1528,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
+ struct rswitch_gwca_ts_info *ts_info;
+
+ ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
+- if (!ts_info) {
+- dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
+- return -ENOMEM;
+- }
++ if (!ts_info)
++ goto err_unmap;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ rdev->ts_tag++;
+@@ -1555,6 +1551,14 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
+ gq->cur = rswitch_next_queue_index(gq, true, 1);
+ rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
+
++ return ret;
++
++err_unmap:
++ dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
++
++err_kfree:
++ dev_kfree_skb_any(skb);
++
+ return ret;
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+index 7a8f47e7b728b..a4e8b498dea96 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+@@ -259,7 +259,7 @@
+ ((val) << XGMAC_PPS_MINIDX(x))
+ #define XGMAC_PPSCMD_START 0x2
+ #define XGMAC_PPSCMD_STOP 0x5
+-#define XGMAC_PPSEN0 BIT(4)
++#define XGMAC_PPSENx(x) BIT(4 + (x) * 8)
+ #define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10)
+ #define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10)
+ #define XGMAC_TRGTBUSY0 BIT(31)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index f352be269deb5..453e88b75be08 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -1178,7 +1178,19 @@ static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
+
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
+ val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
+- val |= XGMAC_PPSEN0;
++
++ /* XGMAC Core has 4 PPS outputs at most.
++ *
++	 * Prior to XGMAC Core 3.20, Fixed mode or Flexible mode is selectable
++	 * for PPS0 only, via PPSEN0. PPS{1,2,3} are in Flexible mode by
++	 * default and cannot be switched to Fixed mode, since PPSEN{1,2,3}
++	 * are read-only and reserved to 0.
++	 * But always setting PPSEN{1,2,3} does not make things worse ;-)
++	 *
++	 * From XGMAC Core 3.20 onward, PPSEN{0,1,2,3} are writable and must
++	 * be set, or the PPS outputs stay in Fixed PPS mode by default.
++ */
++ val |= XGMAC_PPSENx(index);
+
+ writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+index ea4910ae0921a..6a7c1d325c464 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+@@ -177,8 +177,10 @@
+ #define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
+ #define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
+
++#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204
+ #define MMC_XGMAC_TX_FPE_FRAG 0x208
+ #define MMC_XGMAC_TX_HOLD_REQ 0x20c
++#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224
+ #define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
+ #define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
+ #define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
+@@ -352,6 +354,8 @@ static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
+ {
+ writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
+ writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
++ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK);
++ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 5801f4d50f951..1fa4da96c8f50 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -5267,6 +5267,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+
+ dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
+ buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
++ limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
+
+ if (netif_msg_rx_status(priv)) {
+ void *rx_head;
+@@ -5302,10 +5303,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+ len = 0;
+ }
+
++read_again:
+ if (count >= limit)
+ break;
+
+-read_again:
+ buf1_len = 0;
+ buf2_len = 0;
+ entry = next_entry;
+diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
+index 4cf2a52e43783..3025e9c189702 100644
+--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
++++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
+@@ -177,7 +177,7 @@ static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ writel(upper_32_bits(ns), iep->base +
+ iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
+- writel(upper_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
++ writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
+ }
+
+ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index 4914d0ef58e9b..c09ecb3da7723 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -2050,7 +2050,7 @@ static int prueth_probe(struct platform_device *pdev)
+ &prueth->shram);
+ if (ret) {
+ dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
+- pruss_put(prueth->pruss);
++ goto put_pruss;
+ }
+
+ prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
+@@ -2092,10 +2092,7 @@ static int prueth_probe(struct platform_device *pdev)
+ prueth->iep1 = icss_iep_get_idx(np, 1);
+ if (IS_ERR(prueth->iep1)) {
+ ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
+- icss_iep_put(prueth->iep0);
+- prueth->iep0 = NULL;
+- prueth->iep1 = NULL;
+- goto free_pool;
++ goto put_iep0;
+ }
+
+ if (prueth->pdata.quirk_10m_link_issue) {
+@@ -2185,6 +2182,12 @@ netdev_exit:
+ exit_iep:
+ if (prueth->pdata.quirk_10m_link_issue)
+ icss_iep_exit_fw(prueth->iep1);
++ icss_iep_put(prueth->iep1);
++
++put_iep0:
++ icss_iep_put(prueth->iep0);
++ prueth->iep0 = NULL;
++ prueth->iep1 = NULL;
+
+ free_pool:
+ gen_pool_free(prueth->sram_pool,
+@@ -2192,6 +2195,8 @@ free_pool:
+
+ put_mem:
+ pruss_release_mem_region(prueth->pruss, &prueth->shram);
++
++put_pruss:
+ pruss_put(prueth->pruss);
+
+ put_cores:
+diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
+index 50d7eacfec582..87e67121477cb 100644
+--- a/drivers/net/ethernet/toshiba/spider_net.c
++++ b/drivers/net/ethernet/toshiba/spider_net.c
+@@ -2332,7 +2332,7 @@ spider_net_alloc_card(void)
+ struct spider_net_card *card;
+
+ netdev = alloc_etherdev(struct_size(card, darray,
+- tx_descriptors + rx_descriptors));
++ size_add(tx_descriptors, rx_descriptors)));
+ if (!netdev)
+ return NULL;
+
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+index 85dc16faca544..52130df26aee5 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+@@ -1677,10 +1677,12 @@ int wx_sw_init(struct wx *wx)
+ wx->subsystem_device_id = pdev->subsystem_device;
+ } else {
+ err = wx_flash_read_dword(wx, 0xfffdc, &ssid);
+- if (!err)
+- wx->subsystem_device_id = swab16((u16)ssid);
++ if (err < 0) {
++ wx_err(wx, "read of internal subsystem device id failed\n");
++ return err;
++ }
+
+- return err;
++ wx->subsystem_device_id = swab16((u16)ssid);
+ }
+
+ wx->mac_table = kcalloc(wx->mac.num_rar_entries,
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+index e04d4a5eed7ba..21505920136c6 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+@@ -1965,11 +1965,11 @@ void wx_reset_interrupt_capability(struct wx *wx)
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ return;
+
+- pci_free_irq_vectors(wx->pdev);
+ if (pdev->msix_enabled) {
+ kfree(wx->msix_entries);
+ wx->msix_entries = NULL;
+ }
++ pci_free_irq_vectors(wx->pdev);
+ }
+ EXPORT_SYMBOL(wx_reset_interrupt_capability);
+
+diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+index 2b431db6085a6..a4d63d2f3c5bb 100644
+--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
++++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+@@ -121,10 +121,8 @@ static int ngbe_sw_init(struct wx *wx)
+
+ /* PCI config space info */
+ err = wx_sw_init(wx);
+- if (err < 0) {
+- wx_err(wx, "read of internal subsystem device id failed\n");
++ if (err < 0)
+ return err;
+- }
+
+ /* mac type, phy type , oem type */
+ ngbe_init_type_code(wx);
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+index 5c3aed516ac20..d60c26ba0ba4c 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+@@ -362,10 +362,8 @@ static int txgbe_sw_init(struct wx *wx)
+
+ /* PCI config space info */
+ err = wx_sw_init(wx);
+- if (err < 0) {
+- wx_err(wx, "read of internal subsystem device id failed\n");
++ if (err < 0)
+ return err;
+- }
+
+ txgbe_init_type_code(wx);
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index b7ec4dafae90c..3297aff969c80 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -822,7 +822,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
+ /* Tx Full Checksum Offload Enabled */
+ cur_p->app0 |= 2;
+- } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
++ } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
+ csum_start_off = skb_transport_offset(skb);
+ csum_index_off = csum_start_off + skb->csum_offset;
+ /* Tx Partial Checksum Offload Enabled */
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index b22596b18ee8c..b1919278e931f 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -630,7 +630,7 @@ static void __gtp_encap_destroy(struct sock *sk)
+ gtp->sk0 = NULL;
+ else
+ gtp->sk1u = NULL;
+- udp_sk(sk)->encap_type = 0;
++ WRITE_ONCE(udp_sk(sk)->encap_type, 0);
+ rcu_assign_sk_user_data(sk, NULL);
+ release_sock(sk);
+ sock_put(sk);
+@@ -682,7 +682,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
+
+ netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
+
+- switch (udp_sk(sk)->encap_type) {
++ switch (READ_ONCE(udp_sk(sk)->encap_type)) {
+ case UDP_ENCAP_GTP0:
+ netdev_dbg(gtp->dev, "received GTP0 packet\n");
+ ret = gtp0_udp_encap_recv(gtp, skb);
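The gtp change above pairs WRITE_ONCE() on teardown with READ_ONCE() in the receive path, because encap_type is read without holding the socket lock. A minimal sketch of the pattern, using a hypothetical struct example instead of the real udp_sock:

#include <linux/compiler.h>

struct example {
	int encap_type;
};

/* Writer side: publish the new value without tearing */
static void example_set_encap(struct example *e, int type)
{
	WRITE_ONCE(e->encap_type, type);
}

/* Lockless reader: a plain load could be torn or cached by the
 * compiler; READ_ONCE() forbids that and documents the data race
 * as intentional.
 */
static int example_get_encap(const struct example *e)
{
	return READ_ONCE(e->encap_type);
}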
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 3ba3c8fb28a5d..706ea5263e879 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -2206,9 +2206,6 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
+ goto upper_link_failed;
+ }
+
+- /* set slave flag before open to prevent IPv6 addrconf */
+- vf_netdev->flags |= IFF_SLAVE;
+-
+ schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
+
+ call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
+@@ -2315,16 +2312,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
+
+ }
+
+- /* Fallback path to check synthetic vf with
+- * help of mac addr
++ /* Fallback path to check the synthetic vf with the help of the mac addr.
++ * Because this function can be called before vf_netdev is
++ * initialized (NETDEV_POST_INIT), when its perm_addr has not been copied
++ * from dev_addr, also try to match its dev_addr.
++ * Note: On Hyper-V and Azure, it's not possible to set a MAC address
++ * on a VF that matches the MAC of an unrelated NETVSC device.
+ */
+ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+- if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
+- netdev_notice(vf_netdev,
+- "falling back to mac addr based matching\n");
++ if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) ||
++ ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr))
+ return ndev;
+- }
+ }
+
+ netdev_notice(vf_netdev,
+@@ -2332,6 +2331,19 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
+ return NULL;
+ }
+
++static int netvsc_prepare_bonding(struct net_device *vf_netdev)
++{
++ struct net_device *ndev;
++
++ ndev = get_netvsc_byslot(vf_netdev);
++ if (!ndev)
++ return NOTIFY_DONE;
++
++ /* set slave flag before open to prevent IPv6 addrconf */
++ vf_netdev->flags |= IFF_SLAVE;
++ return NOTIFY_DONE;
++}
++
+ static int netvsc_register_vf(struct net_device *vf_netdev)
+ {
+ struct net_device_context *net_device_ctx;
+@@ -2531,15 +2543,6 @@ static int netvsc_probe(struct hv_device *dev,
+ goto devinfo_failed;
+ }
+
+- nvdev = rndis_filter_device_add(dev, device_info);
+- if (IS_ERR(nvdev)) {
+- ret = PTR_ERR(nvdev);
+- netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
+- goto rndis_failed;
+- }
+-
+- eth_hw_addr_set(net, device_info->mac_adr);
+-
+ /* We must get rtnl lock before scheduling nvdev->subchan_work,
+ * otherwise netvsc_subchan_work() can get rtnl lock first and wait
+ * all subchannels to show up, but that may not happen because
+@@ -2547,9 +2550,23 @@ static int netvsc_probe(struct hv_device *dev,
+ * -> ... -> device_add() -> ... -> __device_attach() can't get
+ * the device lock, so all the subchannels can't be processed --
+ * finally netvsc_subchan_work() hangs forever.
++ *
++ * The rtnl lock also needs to be held before rndis_filter_device_add()
++ * which advertises nvsp_2_vsc_capability / sriov bit, and triggers
++ * VF NIC offering and registering. If the VF NIC finishes register_netdev()
++ * earlier, it may cause a name-based config failure.
+ */
+ rtnl_lock();
+
++ nvdev = rndis_filter_device_add(dev, device_info);
++ if (IS_ERR(nvdev)) {
++ ret = PTR_ERR(nvdev);
++ netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
++ goto rndis_failed;
++ }
++
++ eth_hw_addr_set(net, device_info->mac_adr);
++
+ if (nvdev->num_chn > 1)
+ schedule_work(&nvdev->subchan_work);
+
+@@ -2586,9 +2603,9 @@ static int netvsc_probe(struct hv_device *dev,
+ return 0;
+
+ register_failed:
+- rtnl_unlock();
+ rndis_filter_device_remove(dev, nvdev);
+ rndis_failed:
++ rtnl_unlock();
+ netvsc_devinfo_put(device_info);
+ devinfo_failed:
+ free_percpu(net_device_ctx->vf_stats);
+@@ -2753,6 +2770,8 @@ static int netvsc_netdev_event(struct notifier_block *this,
+ return NOTIFY_DONE;
+
+ switch (event) {
++ case NETDEV_POST_INIT:
++ return netvsc_prepare_bonding(event_dev);
+ case NETDEV_REGISTER:
+ return netvsc_register_vf(event_dev);
+ case NETDEV_UNREGISTER:
+@@ -2788,12 +2807,17 @@ static int __init netvsc_drv_init(void)
+ }
+ netvsc_ring_bytes = ring_size * PAGE_SIZE;
+
++ register_netdevice_notifier(&netvsc_netdev_notifier);
++
+ ret = vmbus_driver_register(&netvsc_drv);
+ if (ret)
+- return ret;
++ goto err_vmbus_reg;
+
+- register_netdevice_notifier(&netvsc_netdev_notifier);
+ return 0;
++
++err_vmbus_reg:
++ unregister_netdevice_notifier(&netvsc_netdev_notifier);
++ return ret;
+ }
+
+ MODULE_LICENSE("GPL");
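The reordered netvsc_drv_init() above registers the netdevice notifier before the vmbus driver, so the notifier already sees NETDEV_POST_INIT events for VF NICs, and it unwinds in reverse order on failure. A sketch of that ordering rule, with my_notifier and my_bus_register() as hypothetical stand-ins:

#include <linux/init.h>
#include <linux/netdevice.h>

static struct notifier_block my_notifier;	/* .notifier_call assumed set */
static int my_bus_register(void);		/* hypothetical bus registration */

static int __init my_init(void)
{
	int ret;

	register_netdevice_notifier(&my_notifier);	/* step 1 */

	ret = my_bus_register();			/* step 2 */
	if (ret)
		goto err_bus_reg;

	return 0;

err_bus_reg:
	/* tear down in reverse order of setup */
	unregister_netdevice_notifier(&my_notifier);
	return ret;
}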
+diff --git a/drivers/net/ipa/reg/gsi_reg-v5.0.c b/drivers/net/ipa/reg/gsi_reg-v5.0.c
+index d7b81a36d673b..145eb0bd096d6 100644
+--- a/drivers/net/ipa/reg/gsi_reg-v5.0.c
++++ b/drivers/net/ipa/reg/gsi_reg-v5.0.c
+@@ -78,7 +78,7 @@ REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x0001c000 + 0x12000 * GSI_EE_AP, 0x80);
+
+ static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+- [R_LENGTH] = GENMASK(19, 0),
++ [R_LENGTH] = GENMASK(23, 0),
+ };
+
+ REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index c0c49f1813673..2d5b021b4ea60 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -411,7 +411,7 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
+ return addr;
+ }
+
+-static int ipvlan_process_v4_outbound(struct sk_buff *skb)
++static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ {
+ const struct iphdr *ip4h = ip_hdr(skb);
+ struct net_device *dev = skb->dev;
+@@ -441,25 +441,23 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
+
+ err = ip_local_out(net, skb->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ else
+ ret = NET_XMIT_SUCCESS;
+ goto out;
+ err:
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ out:
+ return ret;
+ }
+
+ #if IS_ENABLED(CONFIG_IPV6)
+-static int ipvlan_process_v6_outbound(struct sk_buff *skb)
++
++static noinline_for_stack int
++ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
+ {
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+- struct net_device *dev = skb->dev;
+- struct net *net = dev_net(dev);
+- struct dst_entry *dst;
+- int err, ret = NET_XMIT_DROP;
+ struct flowi6 fl6 = {
+ .flowi6_oif = dev->ifindex,
+ .daddr = ip6h->daddr,
+@@ -469,27 +467,38 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+ .flowi6_mark = skb->mark,
+ .flowi6_proto = ip6h->nexthdr,
+ };
++ struct dst_entry *dst;
++ int err;
+
+- dst = ip6_route_output(net, NULL, &fl6);
+- if (dst->error) {
+- ret = dst->error;
++ dst = ip6_route_output(dev_net(dev), NULL, &fl6);
++ err = dst->error;
++ if (err) {
+ dst_release(dst);
+- goto err;
++ return err;
+ }
+ skb_dst_set(skb, dst);
++ return 0;
++}
++
++static int ipvlan_process_v6_outbound(struct sk_buff *skb)
++{
++ struct net_device *dev = skb->dev;
++ int err, ret = NET_XMIT_DROP;
++
++ err = ipvlan_route_v6_outbound(dev, skb);
++ if (unlikely(err)) {
++ DEV_STATS_INC(dev, tx_errors);
++ kfree_skb(skb);
++ return err;
++ }
+
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
+- err = ip6_local_out(net, skb->sk, skb);
++ err = ip6_local_out(dev_net(dev), skb->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ else
+ ret = NET_XMIT_SUCCESS;
+- goto out;
+-err:
+- dev->stats.tx_errors++;
+- kfree_skb(skb);
+-out:
+ return ret;
+ }
+ #else
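The tx_errors updates above move from `dev->stats.tx_errors++`, which can race between CPUs, to DEV_STATS_INC(), which uses atomic_long operations on the net_device core stats; the ipvlan_main.c hunk below then reads the counter back with DEV_STATS_READ(). A minimal sketch of the pair, with example_drop() as a made-up caller:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_drop(struct net_device *dev, struct sk_buff *skb)
{
	DEV_STATS_INC(dev, tx_errors);	/* atomic, safe for concurrent updaters */
	kfree_skb(skb);
}

static u64 example_tx_errors(struct net_device *dev)
{
	return DEV_STATS_READ(dev, tx_errors);
}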
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index 1b55928e89b8a..57c79f5f29916 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -324,6 +324,7 @@ static void ipvlan_get_stats64(struct net_device *dev,
+ s->rx_dropped = rx_errs;
+ s->tx_dropped = tx_drps;
+ }
++ s->tx_errors = DEV_STATS_READ(dev, tx_errors);
+ }
+
+ static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index c5cd4551c67ca..9663050a852d8 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3657,9 +3657,9 @@ static void macsec_get_stats64(struct net_device *dev,
+
+ dev_fetch_sw_netstats(s, dev->tstats);
+
+- s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped);
+- s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped);
+- s->rx_errors = atomic_long_read(&dev->stats.__rx_errors);
++ s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
++ s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
++ s->rx_errors = DEV_STATS_READ(dev, rx_errors);
+ }
+
+ static int macsec_get_iflink(const struct net_device *dev)
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 02bd201bc7e58..c8da94af4161a 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -780,7 +780,7 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+- if (change & IFF_PROMISC)
++ if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC)
+ dev_set_promiscuity(lowerdev,
+ dev->flags & IFF_PROMISC ? 1 : -1);
+
+diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
+index f60eb97e3a627..608953d4f98da 100644
+--- a/drivers/net/netdevsim/bpf.c
++++ b/drivers/net/netdevsim/bpf.c
+@@ -93,7 +93,7 @@ static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
+ {
+ struct nsim_bpf_bound_prog *state;
+
+- if (!prog || !prog->aux->offload)
++ if (!prog || !bpf_prog_is_offloaded(prog->aux))
+ return;
+
+ state = prog->aux->offload->dev_priv;
+@@ -311,7 +311,7 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
+ if (!bpf->prog)
+ return 0;
+
+- if (!bpf->prog->aux->offload) {
++ if (!bpf_prog_is_offloaded(bpf->prog->aux)) {
+ NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
+ return -EINVAL;
+ }
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 0d7354955d626..b5f012619e42d 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1631,6 +1631,7 @@ struct phylink *phylink_create(struct phylink_config *config,
+ pl->config = config;
+ if (config->type == PHYLINK_NETDEV) {
+ pl->netdev = to_net_dev(config->dev);
++ netif_carrier_off(pl->netdev);
+ } else if (config->type == PHYLINK_DEV) {
+ pl->dev = config->dev;
+ } else {
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 4ecfac2278651..3679a43f4eb02 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -452,6 +452,11 @@ static const struct sfp_quirk sfp_quirks[] = {
+ // Rollball protocol to talk to the PHY.
+ SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
+
++ // Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but reports 1.2GBd
++ // NRZ in its EEPROM
++ SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex,
++ sfp_fixup_ignore_tx_fault),
++
+ SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
+
+ // HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
+@@ -463,6 +468,9 @@ static const struct sfp_quirk sfp_quirks[] = {
+ SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
+ sfp_fixup_ignore_tx_fault),
+
++ // FS 2.5G Base-T
++ SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
++
+ // Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report
+ // 2500MBd NRZ in their EEPROM
+ SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
+diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
+index ebcdffdf4f0e0..52d05ce4a2819 100644
+--- a/drivers/net/ppp/ppp_synctty.c
++++ b/drivers/net/ppp/ppp_synctty.c
+@@ -453,6 +453,10 @@ ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
+ case PPPIOCSMRU:
+ if (get_user(val, (int __user *) argp))
+ break;
++ if (val > U16_MAX) {
++ err = -EINVAL;
++ break;
++ }
+ if (val < PPP_MRU)
+ val = PPP_MRU;
+ ap->mru = val;
+@@ -687,7 +691,7 @@ ppp_sync_input(struct syncppp *ap, const u8 *buf, const u8 *flags, int count)
+
+ /* strip address/control field if present */
+ p = skb->data;
+- if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
++ if (skb->len >= 2 && p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
+ /* chop off address/control */
+ if (skb->len < 3)
+ goto err;
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index aff39bf3161de..4ea0e155bb0d5 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -1583,11 +1583,11 @@ static int ax88179_reset(struct usbnet *dev)
+
+ *tmp16 = AX_PHYPWR_RSTCTL_IPRL;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
+- msleep(200);
++ msleep(500);
+
+ *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
+- msleep(100);
++ msleep(200);
+
+ /* Ethernet PHY Auto Detach*/
+ ax88179_auto_detach(dev);
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index afb20c0ed688d..be18d72cefcce 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -2543,7 +2543,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
+ }
+ }
+
+- if (list_empty(&tp->rx_done))
++ if (list_empty(&tp->rx_done) || work_done >= budget)
+ goto out1;
+
+ clear_bit(RX_EPROTO, &tp->flags);
+@@ -2559,6 +2559,15 @@ static int rx_bottom(struct r8152 *tp, int budget)
+ struct urb *urb;
+ u8 *rx_data;
+
++ /* A USB bulk transfer may contain many packets, so the
++ * total number of packets may be more than the budget. Deal
++ * with all packets in the current bulk transfer, and stop
++ * handling the next bulk transfer until the next schedule,
++ * if the budget is exhausted.
++ */
++ if (work_done >= budget)
++ break;
++
+ list_del_init(cursor);
+
+ agg = list_entry(cursor, struct rx_agg, list);
+@@ -2578,9 +2587,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
+ unsigned int pkt_len, rx_frag_head_sz;
+ struct sk_buff *skb;
+
+- /* limit the skb numbers for rx_queue */
+- if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000))
+- break;
++ WARN_ON_ONCE(skb_queue_len(&tp->rx_queue) >= 1000);
+
+ pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
+ if (pkt_len < ETH_ZLEN)
+@@ -2658,9 +2665,10 @@ submit:
+ }
+ }
+
++ /* Splice the remaining list back to rx_done for the next schedule */
+ if (!list_empty(&rx_queue)) {
+ spin_lock_irqsave(&tp->rx_lock, flags);
+- list_splice_tail(&rx_queue, &tp->rx_done);
++ list_splice(&rx_queue, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ }
+
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index 0deefd1573cf2..0f798bcbe25cd 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -236,8 +236,8 @@ static void veth_get_ethtool_stats(struct net_device *dev,
+ data[tx_idx + j] += *(u64 *)(base + offset);
+ }
+ } while (u64_stats_fetch_retry(&rq_stats->syncp, start));
+- pp_idx = tx_idx + VETH_TQ_STATS_LEN;
+ }
++ pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN;
+
+ page_pool_stats:
+ veth_get_page_pool_stats(dev, &data[pp_idx]);
+@@ -373,7 +373,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+ skb_tx_timestamp(skb);
+ if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
+ if (!use_napi)
+- dev_lstats_add(dev, length);
++ dev_sw_netstats_tx_add(dev, 1, length);
+ else
+ __veth_xdp_flush(rq);
+ } else {
+@@ -387,14 +387,6 @@ drop:
+ return ret;
+ }
+
+-static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
+-{
+- struct veth_priv *priv = netdev_priv(dev);
+-
+- dev_lstats_read(dev, packets, bytes);
+- return atomic64_read(&priv->dropped);
+-}
+-
+ static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
+ {
+ struct veth_priv *priv = netdev_priv(dev);
+@@ -432,24 +424,24 @@ static void veth_get_stats64(struct net_device *dev,
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer;
+ struct veth_stats rx;
+- u64 packets, bytes;
+
+- tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
+- tot->tx_bytes = bytes;
+- tot->tx_packets = packets;
++ tot->tx_dropped = atomic64_read(&priv->dropped);
++ dev_fetch_sw_netstats(tot, dev->tstats);
+
+ veth_stats_rx(&rx, dev);
+ tot->tx_dropped += rx.xdp_tx_err;
+ tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
+- tot->rx_bytes = rx.xdp_bytes;
+- tot->rx_packets = rx.xdp_packets;
++ tot->rx_bytes += rx.xdp_bytes;
++ tot->rx_packets += rx.xdp_packets;
+
+ rcu_read_lock();
+ peer = rcu_dereference(priv->peer);
+ if (peer) {
+- veth_stats_tx(peer, &packets, &bytes);
+- tot->rx_bytes += bytes;
+- tot->rx_packets += packets;
++ struct rtnl_link_stats64 tot_peer = {};
++
++ dev_fetch_sw_netstats(&tot_peer, peer->tstats);
++ tot->rx_bytes += tot_peer.tx_bytes;
++ tot->rx_packets += tot_peer.tx_packets;
+
+ veth_stats_rx(&rx, peer);
+ tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
+@@ -1499,25 +1491,12 @@ static void veth_free_queues(struct net_device *dev)
+
+ static int veth_dev_init(struct net_device *dev)
+ {
+- int err;
+-
+- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
+- if (!dev->lstats)
+- return -ENOMEM;
+-
+- err = veth_alloc_queues(dev);
+- if (err) {
+- free_percpu(dev->lstats);
+- return err;
+- }
+-
+- return 0;
++ return veth_alloc_queues(dev);
+ }
+
+ static void veth_dev_free(struct net_device *dev)
+ {
+ veth_free_queues(dev);
+- free_percpu(dev->lstats);
+ }
+
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+@@ -1789,6 +1768,7 @@ static void veth_setup(struct net_device *dev)
+ NETIF_F_HW_VLAN_STAG_RX);
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = veth_dev_free;
++ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->max_mtu = ETH_MAX_MTU;
+
+ dev->hw_features = VETH_FEATURES;
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index d67f742fbd4c5..0c0be6b872c6a 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -81,24 +81,24 @@ struct virtnet_stat_desc {
+
+ struct virtnet_sq_stats {
+ struct u64_stats_sync syncp;
+- u64 packets;
+- u64 bytes;
+- u64 xdp_tx;
+- u64 xdp_tx_drops;
+- u64 kicks;
+- u64 tx_timeouts;
++ u64_stats_t packets;
++ u64_stats_t bytes;
++ u64_stats_t xdp_tx;
++ u64_stats_t xdp_tx_drops;
++ u64_stats_t kicks;
++ u64_stats_t tx_timeouts;
+ };
+
+ struct virtnet_rq_stats {
+ struct u64_stats_sync syncp;
+- u64 packets;
+- u64 bytes;
+- u64 drops;
+- u64 xdp_packets;
+- u64 xdp_tx;
+- u64 xdp_redirects;
+- u64 xdp_drops;
+- u64 kicks;
++ u64_stats_t packets;
++ u64_stats_t bytes;
++ u64_stats_t drops;
++ u64_stats_t xdp_packets;
++ u64_stats_t xdp_tx;
++ u64_stats_t xdp_redirects;
++ u64_stats_t xdp_drops;
++ u64_stats_t kicks;
+ };
+
+ #define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
+@@ -775,8 +775,8 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+ return;
+
+ u64_stats_update_begin(&sq->stats.syncp);
+- sq->stats.bytes += bytes;
+- sq->stats.packets += packets;
++ u64_stats_add(&sq->stats.bytes, bytes);
++ u64_stats_add(&sq->stats.packets, packets);
+ u64_stats_update_end(&sq->stats.syncp);
+ }
+
+@@ -975,11 +975,11 @@ static int virtnet_xdp_xmit(struct net_device *dev,
+ }
+ out:
+ u64_stats_update_begin(&sq->stats.syncp);
+- sq->stats.bytes += bytes;
+- sq->stats.packets += packets;
+- sq->stats.xdp_tx += n;
+- sq->stats.xdp_tx_drops += n - nxmit;
+- sq->stats.kicks += kicks;
++ u64_stats_add(&sq->stats.bytes, bytes);
++ u64_stats_add(&sq->stats.packets, packets);
++ u64_stats_add(&sq->stats.xdp_tx, n);
++ u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
++ u64_stats_add(&sq->stats.kicks, kicks);
+ u64_stats_update_end(&sq->stats.syncp);
+
+ virtnet_xdp_put_sq(vi, sq);
+@@ -1011,14 +1011,14 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+ u32 act;
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+- stats->xdp_packets++;
++ u64_stats_inc(&stats->xdp_packets);
+
+ switch (act) {
+ case XDP_PASS:
+ return act;
+
+ case XDP_TX:
+- stats->xdp_tx++;
++ u64_stats_inc(&stats->xdp_tx);
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ netdev_dbg(dev, "convert buff to frame failed for xdp\n");
+@@ -1036,7 +1036,7 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+ return act;
+
+ case XDP_REDIRECT:
+- stats->xdp_redirects++;
++ u64_stats_inc(&stats->xdp_redirects);
+ err = xdp_do_redirect(dev, xdp, xdp_prog);
+ if (err)
+ return XDP_DROP;
+@@ -1232,9 +1232,9 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
+ return skb;
+
+ err_xdp:
+- stats->xdp_drops++;
++ u64_stats_inc(&stats->xdp_drops);
+ err:
+- stats->drops++;
++ u64_stats_inc(&stats->drops);
+ put_page(page);
+ xdp_xmit:
+ return NULL;
+@@ -1253,7 +1253,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
+ struct sk_buff *skb;
+
+ len -= vi->hdr_len;
+- stats->bytes += len;
++ u64_stats_add(&stats->bytes, len);
+
+ if (unlikely(len > GOOD_PACKET_LEN)) {
+ pr_debug("%s: rx error: len %u exceeds max size %d\n",
+@@ -1282,7 +1282,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
+ return skb;
+
+ err:
+- stats->drops++;
++ u64_stats_inc(&stats->drops);
+ put_page(page);
+ return NULL;
+ }
+@@ -1298,14 +1298,14 @@ static struct sk_buff *receive_big(struct net_device *dev,
+ struct sk_buff *skb =
+ page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
+
+- stats->bytes += len - vi->hdr_len;
++ u64_stats_add(&stats->bytes, len - vi->hdr_len);
+ if (unlikely(!skb))
+ goto err;
+
+ return skb;
+
+ err:
+- stats->drops++;
++ u64_stats_inc(&stats->drops);
+ give_pages(rq, page);
+ return NULL;
+ }
+@@ -1326,7 +1326,7 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
+ dev->stats.rx_length_errors++;
+ break;
+ }
+- stats->bytes += len;
++ u64_stats_add(&stats->bytes, len);
+ page = virt_to_head_page(buf);
+ put_page(page);
+ }
+@@ -1436,7 +1436,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
+ goto err;
+ }
+
+- stats->bytes += len;
++ u64_stats_add(&stats->bytes, len);
+ page = virt_to_head_page(buf);
+ offset = buf - page_address(page);
+
+@@ -1600,8 +1600,8 @@ err_xdp:
+ put_page(page);
+ mergeable_buf_free(rq, num_buf, dev, stats);
+
+- stats->xdp_drops++;
+- stats->drops++;
++ u64_stats_inc(&stats->xdp_drops);
++ u64_stats_inc(&stats->drops);
+ return NULL;
+ }
+
+@@ -1625,7 +1625,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
+
+ head_skb = NULL;
+- stats->bytes += len - vi->hdr_len;
++ u64_stats_add(&stats->bytes, len - vi->hdr_len);
+
+ if (unlikely(len > truesize - room)) {
+ pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+@@ -1666,7 +1666,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ goto err_buf;
+ }
+
+- stats->bytes += len;
++ u64_stats_add(&stats->bytes, len);
+ page = virt_to_head_page(buf);
+
+ truesize = mergeable_ctx_to_truesize(ctx);
+@@ -1718,7 +1718,7 @@ err_skb:
+ mergeable_buf_free(rq, num_buf, dev, stats);
+
+ err_buf:
+- stats->drops++;
++ u64_stats_inc(&stats->drops);
+ dev_kfree_skb(head_skb);
+ return NULL;
+ }
+@@ -1985,7 +1985,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
+ unsigned long flags;
+
+ flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
+- rq->stats.kicks++;
++ u64_stats_inc(&rq->stats.kicks);
+ u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
+ }
+
+@@ -2065,22 +2065,23 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ struct virtnet_rq_stats stats = {};
+ unsigned int len;
++ int packets = 0;
+ void *buf;
+ int i;
+
+ if (!vi->big_packets || vi->mergeable_rx_bufs) {
+ void *ctx;
+
+- while (stats.packets < budget &&
++ while (packets < budget &&
+ (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
+ receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
+- stats.packets++;
++ packets++;
+ }
+ } else {
+- while (stats.packets < budget &&
++ while (packets < budget &&
+ (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
+ receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
+- stats.packets++;
++ packets++;
+ }
+ }
+
+@@ -2093,17 +2094,19 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
+ }
+ }
+
++ u64_stats_set(&stats.packets, packets);
+ u64_stats_update_begin(&rq->stats.syncp);
+ for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
+ size_t offset = virtnet_rq_stats_desc[i].offset;
+- u64 *item;
++ u64_stats_t *item, *src;
+
+- item = (u64 *)((u8 *)&rq->stats + offset);
+- *item += *(u64 *)((u8 *)&stats + offset);
++ item = (u64_stats_t *)((u8 *)&rq->stats + offset);
++ src = (u64_stats_t *)((u8 *)&stats + offset);
++ u64_stats_add(item, u64_stats_read(src));
+ }
+ u64_stats_update_end(&rq->stats.syncp);
+
+- return stats.packets;
++ return packets;
+ }
+
+ static void virtnet_poll_cleantx(struct receive_queue *rq)
+@@ -2158,7 +2161,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
+ sq = virtnet_xdp_get_sq(vi);
+ if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+- sq->stats.kicks++;
++ u64_stats_inc(&sq->stats.kicks);
+ u64_stats_update_end(&sq->stats.syncp);
+ }
+ virtnet_xdp_put_sq(vi, sq);
+@@ -2370,7 +2373,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (kick || netif_xmit_stopped(txq)) {
+ if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+- sq->stats.kicks++;
++ u64_stats_inc(&sq->stats.kicks);
+ u64_stats_update_end(&sq->stats.syncp);
+ }
+ }
+@@ -2553,16 +2556,16 @@ static void virtnet_stats(struct net_device *dev,
+
+ do {
+ start = u64_stats_fetch_begin(&sq->stats.syncp);
+- tpackets = sq->stats.packets;
+- tbytes = sq->stats.bytes;
+- terrors = sq->stats.tx_timeouts;
++ tpackets = u64_stats_read(&sq->stats.packets);
++ tbytes = u64_stats_read(&sq->stats.bytes);
++ terrors = u64_stats_read(&sq->stats.tx_timeouts);
+ } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin(&rq->stats.syncp);
+- rpackets = rq->stats.packets;
+- rbytes = rq->stats.bytes;
+- rdrops = rq->stats.drops;
++ rpackets = u64_stats_read(&rq->stats.packets);
++ rbytes = u64_stats_read(&rq->stats.bytes);
++ rdrops = u64_stats_read(&rq->stats.drops);
+ } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
+
+ tot->rx_packets += rpackets;
+@@ -2855,6 +2858,9 @@ static void virtnet_get_ringparam(struct net_device *dev,
+ ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+ }
+
++static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
++ u16 vqn, u32 max_usecs, u32 max_packets);
++
+ static int virtnet_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+@@ -2890,12 +2896,36 @@ static int virtnet_set_ringparam(struct net_device *dev,
+ err = virtnet_tx_resize(vi, sq, ring->tx_pending);
+ if (err)
+ return err;
++
++ /* Upon disabling and re-enabling a transmit virtqueue, the device must
++ * set the coalescing parameters of the virtqueue to those configured
++ * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
++ * did not set any TX coalescing parameters, to 0.
++ */
++ err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i),
++ vi->intr_coal_tx.max_usecs,
++ vi->intr_coal_tx.max_packets);
++ if (err)
++ return err;
++
++ vi->sq[i].intr_coal.max_usecs = vi->intr_coal_tx.max_usecs;
++ vi->sq[i].intr_coal.max_packets = vi->intr_coal_tx.max_packets;
+ }
+
+ if (ring->rx_pending != rx_pending) {
+ err = virtnet_rx_resize(vi, rq, ring->rx_pending);
+ if (err)
+ return err;
++
++ /* The reason is the same as for the transmit virtqueue reset */
++ err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(i),
++ vi->intr_coal_rx.max_usecs,
++ vi->intr_coal_rx.max_packets);
++ if (err)
++ return err;
++
++ vi->rq[i].intr_coal.max_usecs = vi->intr_coal_rx.max_usecs;
++ vi->rq[i].intr_coal.max_packets = vi->intr_coal_rx.max_packets;
+ }
+ }
+
+@@ -3164,17 +3194,19 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
+ struct virtnet_info *vi = netdev_priv(dev);
+ unsigned int idx = 0, start, i, j;
+ const u8 *stats_base;
++ const u64_stats_t *p;
+ size_t offset;
+
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ struct receive_queue *rq = &vi->rq[i];
+
+- stats_base = (u8 *)&rq->stats;
++ stats_base = (const u8 *)&rq->stats;
+ do {
+ start = u64_stats_fetch_begin(&rq->stats.syncp);
+ for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
+ offset = virtnet_rq_stats_desc[j].offset;
+- data[idx + j] = *(u64 *)(stats_base + offset);
++ p = (const u64_stats_t *)(stats_base + offset);
++ data[idx + j] = u64_stats_read(p);
+ }
+ } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
+ idx += VIRTNET_RQ_STATS_LEN;
+@@ -3183,12 +3215,13 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ struct send_queue *sq = &vi->sq[i];
+
+- stats_base = (u8 *)&sq->stats;
++ stats_base = (const u8 *)&sq->stats;
+ do {
+ start = u64_stats_fetch_begin(&sq->stats.syncp);
+ for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
+ offset = virtnet_sq_stats_desc[j].offset;
+- data[idx + j] = *(u64 *)(stats_base + offset);
++ p = (const u64_stats_t *)(stats_base + offset);
++ data[idx + j] = u64_stats_read(p);
+ }
+ } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
+ idx += VIRTNET_SQ_STATS_LEN;
+@@ -3233,6 +3266,7 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ struct ethtool_coalesce *ec)
+ {
+ struct scatterlist sgs_tx, sgs_rx;
++ int i;
+
+ vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+@@ -3246,6 +3280,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ /* Save parameters */
+ vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
+ vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
++ for (i = 0; i < vi->max_queue_pairs; i++) {
++ vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
++ vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
++ }
+
+ vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+@@ -3259,6 +3297,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ /* Save parameters */
+ vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
+ vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
++ for (i = 0; i < vi->max_queue_pairs; i++) {
++ vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
++ vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
++ }
+
+ return 0;
+ }
+@@ -3287,27 +3329,23 @@ static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
+ {
+ int err;
+
+- if (ec->rx_coalesce_usecs || ec->rx_max_coalesced_frames) {
+- err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
+- ec->rx_coalesce_usecs,
+- ec->rx_max_coalesced_frames);
+- if (err)
+- return err;
+- /* Save parameters */
+- vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
+- vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+- }
++ err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
++ ec->rx_coalesce_usecs,
++ ec->rx_max_coalesced_frames);
++ if (err)
++ return err;
+
+- if (ec->tx_coalesce_usecs || ec->tx_max_coalesced_frames) {
+- err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
+- ec->tx_coalesce_usecs,
+- ec->tx_max_coalesced_frames);
+- if (err)
+- return err;
+- /* Save parameters */
+- vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
+- vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
+- }
++ vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
++ vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
++
++ err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
++ ec->tx_coalesce_usecs,
++ ec->tx_max_coalesced_frames);
++ if (err)
++ return err;
++
++ vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
++ vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
+
+ return 0;
+ }
+@@ -3453,7 +3491,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
+ } else {
+ ec->rx_max_coalesced_frames = 1;
+
+- if (vi->sq[0].napi.weight)
++ if (vi->sq[queue].napi.weight)
+ ec->tx_max_coalesced_frames = 1;
+ }
+
+@@ -3866,7 +3904,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
+
+ u64_stats_update_begin(&sq->stats.syncp);
+- sq->stats.tx_timeouts++;
++ u64_stats_inc(&sq->stats.tx_timeouts);
+ u64_stats_update_end(&sq->stats.syncp);
+
+ netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
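The virtio_net conversion above switches raw u64 counters to u64_stats_t guarded by the existing u64_stats_sync, so 32-bit readers cannot observe torn values. A condensed sketch of the writer/reader pattern, using a made-up example_stats struct:

#include <linux/u64_stats_sync.h>

struct example_stats {
	struct u64_stats_sync syncp;	/* init with u64_stats_init() */
	u64_stats_t packets;
};

static void example_count(struct example_stats *s)
{
	u64_stats_update_begin(&s->syncp);	/* writer side */
	u64_stats_inc(&s->packets);
	u64_stats_update_end(&s->syncp);
}

static u64 example_read(struct example_stats *s)
{
	unsigned int start;
	u64 packets;

	do {					/* reader retry loop */
		start = u64_stats_fetch_begin(&s->syncp);
		packets = u64_stats_read(&s->packets);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return packets;
}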
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index a3408e4e1491b..b90dccdc2d33c 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -121,22 +121,12 @@ struct net_vrf {
+ int ifindex;
+ };
+
+-struct pcpu_dstats {
+- u64 tx_pkts;
+- u64 tx_bytes;
+- u64 tx_drps;
+- u64 rx_pkts;
+- u64 rx_bytes;
+- u64 rx_drps;
+- struct u64_stats_sync syncp;
+-};
+-
+ static void vrf_rx_stats(struct net_device *dev, int len)
+ {
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+- dstats->rx_pkts++;
++ dstats->rx_packets++;
+ dstats->rx_bytes += len;
+ u64_stats_update_end(&dstats->syncp);
+ }
+@@ -161,10 +151,10 @@ static void vrf_get_stats64(struct net_device *dev,
+ do {
+ start = u64_stats_fetch_begin(&dstats->syncp);
+ tbytes = dstats->tx_bytes;
+- tpkts = dstats->tx_pkts;
+- tdrops = dstats->tx_drps;
++ tpkts = dstats->tx_packets;
++ tdrops = dstats->tx_drops;
+ rbytes = dstats->rx_bytes;
+- rpkts = dstats->rx_pkts;
++ rpkts = dstats->rx_packets;
+ } while (u64_stats_fetch_retry(&dstats->syncp, start));
+ stats->tx_bytes += tbytes;
+ stats->tx_packets += tpkts;
+@@ -421,7 +411,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
+ vrf_rx_stats(dev, len);
+ else
+- this_cpu_inc(dev->dstats->rx_drps);
++ this_cpu_inc(dev->dstats->rx_drops);
+
+ return NETDEV_TX_OK;
+ }
+@@ -616,11 +606,11 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+- dstats->tx_pkts++;
++ dstats->tx_packets++;
+ dstats->tx_bytes += len;
+ u64_stats_update_end(&dstats->syncp);
+ } else {
+- this_cpu_inc(dev->dstats->tx_drps);
++ this_cpu_inc(dev->dstats->tx_drops);
+ }
+
+ return ret;
+@@ -1174,22 +1164,15 @@ static void vrf_dev_uninit(struct net_device *dev)
+
+ vrf_rtable_release(dev, vrf);
+ vrf_rt6_release(dev, vrf);
+-
+- free_percpu(dev->dstats);
+- dev->dstats = NULL;
+ }
+
+ static int vrf_dev_init(struct net_device *dev)
+ {
+ struct net_vrf *vrf = netdev_priv(dev);
+
+- dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
+- if (!dev->dstats)
+- goto out_nomem;
+-
+ /* create the default dst which points back to us */
+ if (vrf_rtable_create(dev) != 0)
+- goto out_stats;
++ goto out_nomem;
+
+ if (vrf_rt6_create(dev) != 0)
+ goto out_rth;
+@@ -1203,9 +1186,6 @@ static int vrf_dev_init(struct net_device *dev)
+
+ out_rth:
+ vrf_rtable_release(dev, vrf);
+-out_stats:
+- free_percpu(dev->dstats);
+- dev->dstats = NULL;
+ out_nomem:
+ return -ENOMEM;
+ }
+@@ -1704,6 +1684,8 @@ static void vrf_setup(struct net_device *dev)
+ dev->min_mtu = IPV6_MIN_MTU;
+ dev->max_mtu = IP6_MAX_MTU;
+ dev->mtu = dev->max_mtu;
++
++ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
+ }
+
+ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
+diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
+index 258dcc1039216..deb9636b0ecf8 100644
+--- a/drivers/net/wireguard/device.c
++++ b/drivers/net/wireguard/device.c
+@@ -210,7 +210,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
+ */
+ while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
+ dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
+- ++dev->stats.tx_dropped;
++ DEV_STATS_INC(dev, tx_dropped);
+ }
+ skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+@@ -228,7 +228,7 @@ err_icmp:
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+ err:
+- ++dev->stats.tx_errors;
++ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ return ret;
+ }
+diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
+index 0b3f0c8435509..a176653c88616 100644
+--- a/drivers/net/wireguard/receive.c
++++ b/drivers/net/wireguard/receive.c
+@@ -416,20 +416,20 @@ dishonest_packet_peer:
+ net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
+ dev->name, skb, peer->internal_id,
+ &peer->endpoint.addr);
+- ++dev->stats.rx_errors;
+- ++dev->stats.rx_frame_errors;
++ DEV_STATS_INC(dev, rx_errors);
++ DEV_STATS_INC(dev, rx_frame_errors);
+ goto packet_processed;
+ dishonest_packet_type:
+ net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id, &peer->endpoint.addr);
+- ++dev->stats.rx_errors;
+- ++dev->stats.rx_frame_errors;
++ DEV_STATS_INC(dev, rx_errors);
++ DEV_STATS_INC(dev, rx_frame_errors);
+ goto packet_processed;
+ dishonest_packet_size:
+ net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id, &peer->endpoint.addr);
+- ++dev->stats.rx_errors;
+- ++dev->stats.rx_length_errors;
++ DEV_STATS_INC(dev, rx_errors);
++ DEV_STATS_INC(dev, rx_length_errors);
+ goto packet_processed;
+ packet_processed:
+ dev_kfree_skb(skb);
+diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
+index 95c853b59e1da..0d48e0f4a1ba3 100644
+--- a/drivers/net/wireguard/send.c
++++ b/drivers/net/wireguard/send.c
+@@ -333,7 +333,8 @@ err:
+ void wg_packet_purge_staged_packets(struct wg_peer *peer)
+ {
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+- peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
++ DEV_STATS_ADD(peer->device->dev, tx_dropped,
++ peer->staged_packet_queue.qlen);
+ __skb_queue_purge(&peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+ }
+diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
+index f9518e1c99039..fe89bc61e5317 100644
+--- a/drivers/net/wireless/ath/ath10k/debug.c
++++ b/drivers/net/wireless/ath/ath10k/debug.c
+@@ -1140,7 +1140,7 @@ void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *ath10k_gstrings_stats,
++ memcpy(data, ath10k_gstrings_stats,
+ sizeof(ath10k_gstrings_stats));
+ }
+
+diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
+index 26214c00cd0d7..2c39bad7ebfb9 100644
+--- a/drivers/net/wireless/ath/ath10k/snoc.c
++++ b/drivers/net/wireless/ath/ath10k/snoc.c
+@@ -828,12 +828,20 @@ static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
+
+ static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
+ {
+- ath10k_ce_disable_interrupts(ar);
++ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
++ int id;
++
++ for (id = 0; id < CE_COUNT_MAX; id++)
++ disable_irq(ar_snoc->ce_irqs[id].irq_line);
+ }
+
+ static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
+ {
+- ath10k_ce_enable_interrupts(ar);
++ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
++ int id;
++
++ for (id = 0; id < CE_COUNT_MAX; id++)
++ enable_irq(ar_snoc->ce_irqs[id].irq_line);
+ }
+
+ static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+@@ -1090,6 +1098,8 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar,
+ goto err_free_rri;
+ }
+
++ ath10k_ce_enable_interrupts(ar);
++
+ return 0;
+
+ err_free_rri:
+@@ -1253,8 +1263,8 @@ static int ath10k_snoc_request_irq(struct ath10k *ar)
+
+ for (id = 0; id < CE_COUNT_MAX; id++) {
+ ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
+- ath10k_snoc_per_engine_handler, 0,
+- ce_name[id], ar);
++ ath10k_snoc_per_engine_handler,
++ IRQF_NO_AUTOEN, ce_name[id], ar);
+ if (ret) {
+ ath10k_err(ar,
+ "failed to register IRQ handler for CE %d: %d\n",
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index 62bc98852f0f7..a993e74bbae83 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -1621,14 +1621,20 @@ static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
+ u8 pdev_id;
+
+ pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
++
++ rcu_read_lock();
++
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
+- return;
++ goto out;
+ }
+
+ trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
+ ar->ab->pktlog_defs_checksum);
++
++out:
++ rcu_read_unlock();
+ }
+
+ static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index c071bf5841af6..b328a0599818b 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -9042,6 +9042,14 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ if (ar->state != ATH11K_STATE_ON)
+ goto err_fallback;
+
++ /* Firmware doesn't provide Tx power during CAC, hence there is no
++ * need to fetch the stats.
++ */
++ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
++ mutex_unlock(&ar->conf_mutex);
++ return -EAGAIN;
++ }
++
+ req_param.pdev_id = ar->pdev->pdev_id;
+ req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
+index a5aa1857ec14b..09e65c5e55c4a 100644
+--- a/drivers/net/wireless/ath/ath11k/pci.c
++++ b/drivers/net/wireless/ath/ath11k/pci.c
+@@ -854,10 +854,16 @@ unsupported_wcn6855_soc:
+ if (ret)
+ goto err_pci_disable_msi;
+
++ ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
++ if (ret) {
++ ath11k_err(ab, "failed to set irq affinity %d\n", ret);
++ goto err_pci_disable_msi;
++ }
++
+ ret = ath11k_mhi_register(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to register mhi: %d\n", ret);
+- goto err_pci_disable_msi;
++ goto err_irq_affinity_cleanup;
+ }
+
+ ret = ath11k_hal_srng_init(ab);
+@@ -878,12 +884,6 @@ unsupported_wcn6855_soc:
+ goto err_ce_free;
+ }
+
+- ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
+- if (ret) {
+- ath11k_err(ab, "failed to set irq affinity %d\n", ret);
+- goto err_free_irq;
+- }
+-
+ /* kernel may allocate a dummy vector before request_irq and
+ * then allocate a real vector when request_irq is called.
+ * So get msi_data here again to avoid spurious interrupt
+@@ -892,20 +892,17 @@ unsupported_wcn6855_soc:
+ ret = ath11k_pci_config_msi_data(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to config msi_data: %d\n", ret);
+- goto err_irq_affinity_cleanup;
++ goto err_free_irq;
+ }
+
+ ret = ath11k_core_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init core: %d\n", ret);
+- goto err_irq_affinity_cleanup;
++ goto err_free_irq;
+ }
+ ath11k_qmi_fwreset_from_cold_boot(ab);
+ return 0;
+
+-err_irq_affinity_cleanup:
+- ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+-
+ err_free_irq:
+ ath11k_pcic_free_irq(ab);
+
+@@ -918,6 +915,9 @@ err_hal_srng_deinit:
+ err_mhi_unregister:
+ ath11k_mhi_unregister(ab_pci);
+
++err_irq_affinity_cleanup:
++ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
++
+ err_pci_disable_msi:
+ ath11k_pci_free_msi(ab_pci);
+
+diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
+index 23ad6825e5be5..1c07f55c25e67 100644
+--- a/drivers/net/wireless/ath/ath11k/wmi.c
++++ b/drivers/net/wireless/ath/ath11k/wmi.c
+@@ -8337,6 +8337,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
+ ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ ev->freq_offset, ev->sidx);
+
++ rcu_read_lock();
++
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+
+ if (!ar) {
+@@ -8354,6 +8356,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
+ ieee80211_radar_detected(ar->hw);
+
+ exit:
++ rcu_read_unlock();
++
+ kfree(tb);
+ }
+
+@@ -8383,15 +8387,19 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev temperature ev temp %d pdev_id %d\n",
+ ev->temp, ev->pdev_id);
+
++ rcu_read_lock();
++
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
+- kfree(tb);
+- return;
++ goto exit;
+ }
+
+ ath11k_thermal_event_temperature(ar, ev->temp);
+
++exit:
++ rcu_read_unlock();
++
+ kfree(tb);
+ }
+
+@@ -8611,12 +8619,13 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+ return;
+ }
+
++ rcu_read_lock();
++
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ ev->vdev_id);
+- kfree(tb);
+- return;
++ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event gtk offload refresh_cnt %d\n",
+@@ -8633,6 +8642,8 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+
+ ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
+ (void *)&replay_ctr_be, GFP_ATOMIC);
++exit:
++ rcu_read_unlock();
+
+ kfree(tb);
+ }
+diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
+index f933896f2a68d..6893466f61f04 100644
+--- a/drivers/net/wireless/ath/ath12k/dp.c
++++ b/drivers/net/wireless/ath/ath12k/dp.c
+@@ -38,6 +38,7 @@ void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
+
+ ath12k_dp_rx_peer_tid_cleanup(ar, peer);
+ crypto_free_shash(peer->tfm_mmic);
++ peer->dp_setup_done = false;
+ spin_unlock_bh(&ab->base_lock);
+ }
+
+diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
+index e6e64d437c47a..dbcbe7e0cd2a7 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
+@@ -1555,6 +1555,13 @@ static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
+
+ msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
+ len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
++ if (len > (skb->len - struct_size(msg, data, 0))) {
++ ath12k_warn(ab,
++ "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
++ len, skb->len);
++ return -EINVAL;
++ }
++
+ pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
+ ppdu_id = le32_to_cpu(msg->ppdu_id);
+
+@@ -1583,6 +1590,16 @@ static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
+ goto exit;
+ }
+
++ if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
++ spin_unlock_bh(&ar->data_lock);
++ ath12k_warn(ab,
++ "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
++ ppdu_info->ppdu_stats.common.num_users,
++ HTT_PPDU_STATS_MAX_USERS);
++ ret = -EINVAL;
++ goto exit;
++ }
++
+ /* back up data rate tlv for all peers */
+ if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
+ (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
+@@ -1641,11 +1658,12 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
+ msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
+ pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
+ HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
+- ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+
++ rcu_read_lock();
++ ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
+- return;
++ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+@@ -1661,6 +1679,8 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
+ pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
+
+ spin_unlock_bh(&ar->data_lock);
++exit:
++ rcu_read_unlock();
+ }
+
+ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
+@@ -2748,6 +2768,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
+ }
+
+ peer->tfm_mmic = tfm;
++ peer->dp_setup_done = true;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+@@ -3214,6 +3235,14 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
+ ret = -ENOENT;
+ goto out_unlock;
+ }
++
++ if (!peer->dp_setup_done) {
++ ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
++ peer->addr, peer_id);
++ ret = -ENOENT;
++ goto out_unlock;
++ }
++
+ rx_tid = &peer->rx_tid[tid];
+
+ if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
+@@ -3229,7 +3258,7 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
+ goto out_unlock;
+ }
+
+- if (frag_no > __fls(rx_tid->rx_frag_bitmap))
++ if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap)))
+ __skb_queue_tail(&rx_tid->rx_frags, msdu);
+ else
+ ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
+diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
+index 8874c815d7faf..16d889fc20433 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
+@@ -330,8 +330,11 @@ tcl_ring_sel:
+
+ fail_unmap_dma:
+ dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
+- dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+- sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
++
++ if (skb_cb->paddr_ext_desc)
++ dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
++ sizeof(struct hal_tx_msdu_ext_desc),
++ DMA_TO_DEVICE);
+
+ fail_remove_tx_buf:
+ ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
+diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
+index 42f1140baa4fe..f83d3e09ae366 100644
+--- a/drivers/net/wireless/ath/ath12k/mhi.c
++++ b/drivers/net/wireless/ath/ath12k/mhi.c
+@@ -370,8 +370,7 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
+ ret = ath12k_mhi_get_msi(ab_pci);
+ if (ret) {
+ ath12k_err(ab, "failed to get msi for mhi\n");
+- mhi_free_controller(mhi_ctrl);
+- return ret;
++ goto free_controller;
+ }
+
+ mhi_ctrl->iova_start = 0;
+@@ -388,11 +387,15 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
+ ret = mhi_register_controller(mhi_ctrl, ab->hw_params->mhi_config);
+ if (ret) {
+ ath12k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
+- mhi_free_controller(mhi_ctrl);
+- return ret;
++ goto free_controller;
+ }
+
+ return 0;
++
++free_controller:
++ mhi_free_controller(mhi_ctrl);
++ ab_pci->mhi_ctrl = NULL;
++ return ret;
+ }
+
+ void ath12k_mhi_unregister(struct ath12k_pci *ab_pci)
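[editor's sketch] Beyond de-duplicating the two error paths, the new free_controller label clears ab_pci->mhi_ctrl so a later unregister cannot free the controller a second time. The same unwind pattern in plain C, with hypothetical step names:

    #include <stdlib.h>

    struct ctrl { int cfg; };
    struct pci_dev { struct ctrl *ctrl; };

    static int get_msi(struct ctrl *c) { c->cfg = 1; return 0; }       /* stub */
    static int bus_register(struct ctrl *c) { return c->cfg ? 0 : -1; } /* stub */

    static int register_ctrl(struct pci_dev *d)
    {
            int ret;

            d->ctrl = calloc(1, sizeof(*d->ctrl));
            if (!d->ctrl)
                    return -1;

            ret = get_msi(d->ctrl);
            if (ret)
                    goto free_controller;

            ret = bus_register(d->ctrl);
            if (ret)
                    goto free_controller;

            return 0;

    free_controller:
            free(d->ctrl);
            d->ctrl = NULL;   /* a stale pointer would otherwise be freed again later */
            return ret;
    }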
+diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
+index b296dc0e2f671..c6edb24cbedd8 100644
+--- a/drivers/net/wireless/ath/ath12k/peer.h
++++ b/drivers/net/wireless/ath/ath12k/peer.h
+@@ -44,6 +44,9 @@ struct ath12k_peer {
+ struct ppdu_user_delayba ppdu_stats_delayba;
+ bool delayba_flag;
+ bool is_authorized;
++
++ /* protected by ab->data_lock */
++ bool dp_setup_done;
+ };
+
+ void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
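[editor's sketch] Together with the dp_rx.c hunks above, this flag closes a window where fragmented frames could reach the reassembly path before the peer's Michael-MIC transform was allocated. Minimal model of the handshake, with a pthread mutex standing in for ab->base_lock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct peer {
            pthread_mutex_t lock;   /* stands in for ab->base_lock */
            void *tfm_mmic;         /* allocated during datapath setup */
            bool dp_setup_done;     /* published under the same lock */
    };

    static void peer_frag_setup(struct peer *p, void *tfm)
    {
            pthread_mutex_lock(&p->lock);
            p->tfm_mmic = tfm;
            p->dp_setup_done = true;  /* set only once everything it guards exists */
            pthread_mutex_unlock(&p->lock);
    }

    static int peer_rx_frag(struct peer *p)
    {
            int ret = 0;

            pthread_mutex_lock(&p->lock);
            if (!p->dp_setup_done)    /* early traffic: drop instead of */
                    ret = -1;         /* dereferencing a NULL tfm_mmic  */
            pthread_mutex_unlock(&p->lock);
            return ret;
    }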
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index ef0f3cf35cfd1..d217b70a7a8fb 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -3876,6 +3876,12 @@ static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
+ ath12k_warn(soc, "failed to extract reg cap %d\n", i);
+ return ret;
+ }
++
++ if (reg_cap.phy_id >= MAX_RADIOS) {
++ ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
++ return -EINVAL;
++ }
++
+ soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
+ }
+ return 0;
+@@ -6476,6 +6482,8 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
+ ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ ev->freq_offset, ev->sidx);
+
++ rcu_read_lock();
++
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
+
+ if (!ar) {
+@@ -6493,6 +6501,8 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
+ ieee80211_radar_detected(ar->hw);
+
+ exit:
++ rcu_read_unlock();
++
+ kfree(tb);
+ }
+
+@@ -6511,11 +6521,16 @@ ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
+
++ rcu_read_lock();
++
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
+- return;
++ goto exit;
+ }
++
++exit:
++ rcu_read_unlock();
+ }
+
+ static void ath12k_fils_discovery_event(struct ath12k_base *ab,
+diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
+index 9bc57c5a89bfe..a0376a6787b8d 100644
+--- a/drivers/net/wireless/ath/ath9k/debug.c
++++ b/drivers/net/wireless/ath/ath9k/debug.c
+@@ -1293,7 +1293,7 @@ void ath9k_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *ath9k_gstrings_stats,
++ memcpy(data, ath9k_gstrings_stats,
+ sizeof(ath9k_gstrings_stats));
+ }
+
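[editor's sketch] ath9k_gstrings_stats and *ath9k_gstrings_stats evaluate to the same address, but not to the same object in the compiler's eyes: the dereferenced form is a single char[ETH_GSTRING_LEN] row, so FORTIFY_SOURCE treats a copy of the whole table as reading past it. The identical htc_drv_debug.c fix follows below. A self-contained demonstration:

    #include <stdio.h>
    #include <string.h>

    #define ETH_GSTRING_LEN 32

    static const char stats[][ETH_GSTRING_LEN] = {
            "tx_pkts_nic",
            "rx_pkts_nic",
    };

    int main(void)
    {
            char out[sizeof(stats)];

            /* stats and *stats are the same address, but *stats has type
             * char[32]: the compiler credits it with 32 bytes, so copying
             * sizeof(stats) (64 bytes) from it looks out of bounds.
             * Passing the array keeps object size and copy length in
             * agreement. */
            memcpy(out, stats, sizeof(stats));
            printf("%s %s\n", out, out + ETH_GSTRING_LEN);
            return 0;
    }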
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+index c549ff3abcdc4..278ddc713fdc2 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+@@ -423,7 +423,7 @@ void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *ath9k_htc_gstrings_stats,
++ memcpy(data, ath9k_htc_gstrings_stats,
+ sizeof(ath9k_htc_gstrings_stats));
+ }
+
+diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
+index 27f4d74a41c80..2788a1b06c17c 100644
+--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
++++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
+@@ -206,7 +206,7 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
+
+ INIT_LIST_HEAD(&cd->head);
+ cd->freq = freq;
+- cd->detectors = kmalloc_array(dpd->num_radar_types,
++ cd->detectors = kcalloc(dpd->num_radar_types,
+ sizeof(*cd->detectors), GFP_ATOMIC);
+ if (cd->detectors == NULL)
+ goto fail;
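[editor's sketch] Switching to kcalloc() matters because the failure path tears down the detector array slot by slot; with kmalloc_array() any never-filled slot holds garbage and freeing it is undefined behaviour. Userspace analogue:

    #include <stdlib.h>

    struct detector { int id; };

    static struct detector **alloc_detectors(size_t n)
    {
            /* calloc (kcalloc in the kernel) returns zeroed memory, so a
             * partially-populated array can be torn down safely. */
            return calloc(n, sizeof(struct detector *));
    }

    static void free_detectors(struct detector **d, size_t n)
    {
            size_t i;

            if (!d)
                    return;
            for (i = 0; i < n; i++)
                    free(d[i]);     /* free(NULL) is a no-op for empty slots */
            free(d);
    }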
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+index b9893b22e41da..42e765fe3cfe1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+@@ -134,12 +134,10 @@ static const struct iwl_base_params iwl_bz_base_params = {
+ .ht_params = &iwl_gl_a_ht_params
+
+ /*
+- * If the device doesn't support HE, no need to have that many buffers.
+- * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an
++ * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
+ * A-MPDU, with additional overhead to account for processing time.
+ */
+-#define IWL_NUM_RBDS_NON_HE 512
+-#define IWL_NUM_RBDS_BZ_HE 4096
++#define IWL_NUM_RBDS_BZ_EHT (512 * 16)
+
+ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_BZ,
+@@ -160,16 +158,16 @@ const struct iwl_cfg iwl_cfg_bz = {
+ .fw_name_mac = "bz",
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+- .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
+- .num_rbds = IWL_NUM_RBDS_BZ_HE,
++ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
++ .num_rbds = IWL_NUM_RBDS_BZ_EHT,
+ };
+
+ const struct iwl_cfg iwl_cfg_gl = {
+ .fw_name_mac = "gl",
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+- .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
+- .num_rbds = IWL_NUM_RBDS_BZ_HE,
++ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
++ .num_rbds = IWL_NUM_RBDS_BZ_EHT,
+ };
+
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+index ad283fd22e2a2..604e9cef6baac 100644
+--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+@@ -127,12 +127,10 @@ static const struct iwl_base_params iwl_sc_base_params = {
+ .ht_params = &iwl_22000_ht_params
+
+ /*
+- * If the device doesn't support HE, no need to have that many buffers.
+- * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an
++ * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
+ * A-MPDU, with additional overhead to account for processing time.
+ */
+-#define IWL_NUM_RBDS_NON_HE 512
+-#define IWL_NUM_RBDS_SC_HE 4096
++#define IWL_NUM_RBDS_SC_EHT (512 * 16)
+
+ const struct iwl_cfg_trans_params iwl_sc_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_SC,
+@@ -153,8 +151,8 @@ const struct iwl_cfg iwl_cfg_sc = {
+ .fw_name_mac = "sc",
+ .uhb_supported = true,
+ IWL_DEVICE_SC,
+- .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
+- .num_rbds = IWL_NUM_RBDS_SC_HE,
++ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
++ .num_rbds = IWL_NUM_RBDS_SC_EHT,
+ };
+
+ MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+index 60a7b61d59aa3..ca1daec641c4f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+@@ -3,6 +3,7 @@
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2019 Intel Corporation
++ * Copyright (C) 2023 Intel Corporation
+ *****************************************************************************/
+
+ #include <linux/kernel.h>
+@@ -1169,7 +1170,7 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
+ iwlagn_check_ratid_empty(priv, sta_id, tid);
+ }
+
+- iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
++ iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs, false);
+
+ freed = 0;
+
+@@ -1315,7 +1316,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
+- &reclaimed_skbs);
++ &reclaimed_skbs, false);
+
+ IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
+ "sta_id = %d\n",
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+index ba538d70985f4..39bee9c00e071 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+@@ -13,6 +13,7 @@
+ #define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
+ #define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0)
+ #define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16)
++#define IWL_FW_INI_PRESET_DISABLE 0xff
+
+ /**
+ * struct iwl_fw_ini_hcmd
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+index 241a9e3f2a1a7..f45f645ca6485 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+@@ -86,10 +86,7 @@ enum iwl_nvm_type {
+ #define IWL_DEFAULT_MAX_TX_POWER 22
+ #define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
+ NETIF_F_TSO | NETIF_F_TSO6)
+-#define IWL_TX_CSUM_NETIF_FLAGS_BZ (NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6)
+-#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | \
+- IWL_TX_CSUM_NETIF_FLAGS_BZ | \
+- NETIF_F_RXCSUM)
++#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM)
+
+ /* Antenna presence definitions */
+ #define ANT_NONE 0x0
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+index 128059ca77e60..06fb7d6653905 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+ /*
+- * Copyright (C) 2018-2022 Intel Corporation
++ * Copyright (C) 2018-2023 Intel Corporation
+ */
+ #ifndef __iwl_dbg_tlv_h__
+ #define __iwl_dbg_tlv_h__
+@@ -10,7 +10,8 @@
+ #include <fw/file.h>
+ #include <fw/api/dbg-tlv.h>
+
+-#define IWL_DBG_TLV_MAX_PRESET 15
++#define IWL_DBG_TLV_MAX_PRESET 15
++#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
+
+ /**
+ * struct iwl_dbg_tlv_node - debug TLV node
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+index 3d87d26845e74..fb5e254757e71 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -1795,6 +1795,22 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
+ #endif
+
+ drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans);
++ if (iwlwifi_mod_params.enable_ini != ENABLE_INI) {
++ /* We have a non-default value in the module parameter,
++ * take its value
++ */
++ drv->trans->dbg.domains_bitmap &= 0xffff;
++ if (iwlwifi_mod_params.enable_ini != IWL_FW_INI_PRESET_DISABLE) {
++ if (iwlwifi_mod_params.enable_ini > ENABLE_INI) {
++ IWL_ERR(trans,
++ "invalid enable_ini module parameter value: max = %d, using 0 instead\n",
++ ENABLE_INI);
++ iwlwifi_mod_params.enable_ini = 0;
++ }
++ drv->trans->dbg.domains_bitmap =
++ BIT(IWL_FW_DBG_DOMAIN_POS + iwlwifi_mod_params.enable_ini);
++ }
++ }
+
+ ret = iwl_request_firmware(drv, true);
+ if (ret) {
+@@ -1843,8 +1859,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
+ kfree(drv);
+ }
+
+-#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
+-
+ /* shared module parameters */
+ struct iwl_mod_params iwlwifi_mod_params = {
+ .fw_restart = true,
+@@ -1964,38 +1978,7 @@ module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
+ MODULE_PARM_DESC(uapsd_disable,
+ "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
+
+-static int enable_ini_set(const char *arg, const struct kernel_param *kp)
+-{
+- int ret = 0;
+- bool res;
+- __u32 new_enable_ini;
+-
+- /* in case the argument type is a number */
+- ret = kstrtou32(arg, 0, &new_enable_ini);
+- if (!ret) {
+- if (new_enable_ini > ENABLE_INI) {
+- pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini);
+- return -EINVAL;
+- }
+- goto out;
+- }
+-
+- /* in case the argument type is boolean */
+- ret = kstrtobool(arg, &res);
+- if (ret)
+- return ret;
+- new_enable_ini = (res ? ENABLE_INI : 0);
+-
+-out:
+- iwlwifi_mod_params.enable_ini = new_enable_ini;
+- return 0;
+-}
+-
+-static const struct kernel_param_ops enable_ini_ops = {
+- .set = enable_ini_set
+-};
+-
+-module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644);
++module_param_named(enable_ini, iwlwifi_mod_params.enable_ini, uint, 0444);
+ MODULE_PARM_DESC(enable_ini,
+ "0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined,"
+ "Debug INI TLV FW debug infrastructure (default: 16)");
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+index 6dd381ff0f9e7..2a63968b0e55b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+@@ -348,8 +348,8 @@
+ #define RFIC_REG_RD 0xAD0470
+ #define WFPM_CTRL_REG 0xA03030
+ #define WFPM_OTP_CFG1_ADDR 0x00a03098
+-#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4)
+-#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5)
++#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(5)
++#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(4)
+ #define WFPM_OTP_BZ_BNJ_JACKET_BIT 5
+ #define WFPM_OTP_BZ_BNJ_CDB_BIT 4
+ #define WFPM_OTP_CFG1_IS_JACKET(_val) (((_val) & 0x00000020) >> WFPM_OTP_BZ_BNJ_JACKET_BIT)
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+index 3b6b0e03037f1..168eda2132fb8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+@@ -56,6 +56,10 @@
+ * 6) Eventually, the free function will be called.
+ */
+
++/* default preset 0 (start from bit 16) */
++#define IWL_FW_DBG_DOMAIN_POS 16
++#define IWL_FW_DBG_DOMAIN BIT(IWL_FW_DBG_DOMAIN_POS)
++
+ #define IWL_TRANS_FW_DBG_DOMAIN(trans) IWL_FW_INI_DOMAIN_ALWAYS_ON
+
+ #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
+@@ -584,7 +588,7 @@ struct iwl_trans_ops {
+ int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int queue);
+ void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
+- struct sk_buff_head *skbs);
++ struct sk_buff_head *skbs, bool is_flush);
+
+ void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
+
+@@ -1269,14 +1273,15 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ }
+
+ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
+- int ssn, struct sk_buff_head *skbs)
++ int ssn, struct sk_buff_head *skbs,
++ bool is_flush)
+ {
+ if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+ return;
+ }
+
+- trans->ops->reclaim(trans, queue, ssn, skbs);
++ trans->ops->reclaim(trans, queue, ssn, skbs, is_flush);
+ }
+
+ static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
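[editor's sketch] Adding a flag to an ops callback means touching three places, the function-pointer type, the inline wrapper, and every implementation and caller, which is what the reclaim hunks across this patch do. The shape in miniature:

    #include <stdbool.h>
    #include <stdio.h>

    struct queue;

    struct trans_ops {
            void (*reclaim)(struct queue *q, int ssn, bool is_flush);
    };

    static void reclaim_impl(struct queue *q, int ssn, bool is_flush)
    {
            (void)q;
            printf("reclaim up to ssn %d%s\n", ssn, is_flush ? " (flush)" : "");
    }

    static const struct trans_ops ops = { .reclaim = reclaim_impl };

    /* Wrapper mirrors iwl_trans_reclaim(): it just forwards the flag. */
    static inline void trans_reclaim(struct queue *q, int ssn, bool is_flush)
    {
            ops.reclaim(q, ssn, is_flush);
    }

    int main(void)
    {
            trans_reclaim(NULL, 42, true);
            return 0;
    }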
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index f6488b4bbe68b..be2602d8c5bfa 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -2012,6 +2012,16 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
+ if (IS_ERR(key_config))
+ return false;
+ ieee80211_set_key_rx_seq(key_config, 0, &seq);
++
++ if (key_config->keyidx == 4 || key_config->keyidx == 5) {
++ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
++ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
++ struct iwl_mvm_vif_link_info *mvm_link =
++ mvmvif->link[link_id];
++
++ mvm_link->igtk = key_config;
++ }
++
+ return true;
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+index b49781d1a07a7..10b9219b3bfd3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2022 Intel Corporation
++ * Copyright (C) 2018-2023 Intel Corporation
+ */
+ #include <net/cfg80211.h>
+ #include <linux/etherdevice.h>
+@@ -302,7 +302,12 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_pasn_sta *sta)
+ {
+ list_del(&sta->list);
+- iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id);
++
++ if (iwl_mvm_has_mld_api(mvm->fw))
++ iwl_mvm_mld_rm_sta_id(mvm, sta->int_sta.sta_id);
++ else
++ iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id);
++
+ iwl_mvm_dealloc_int_sta(mvm, &sta->int_sta);
+ kfree(sta);
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+index ace82e2c5bd91..4ab55a1fcbf04 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+@@ -53,7 +53,6 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ unsigned int link_id = link_conf->link_id;
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+ struct iwl_link_config_cmd cmd = {};
+- struct iwl_mvm_phy_ctxt *phyctxt;
+
+ if (WARN_ON_ONCE(!link_info))
+ return -EINVAL;
+@@ -61,7 +60,7 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) {
+ link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm,
+ mvmvif);
+- if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)
++ if (link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))
+ return -EINVAL;
+
+ rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id],
+@@ -77,12 +76,8 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ cmd.link_id = cpu_to_le32(link_info->fw_link_id);
+ cmd.mac_id = cpu_to_le32(mvmvif->id);
+ cmd.spec_link_id = link_conf->link_id;
+- /* P2P-Device already has a valid PHY context during add */
+- phyctxt = link_info->phy_ctxt;
+- if (phyctxt)
+- cmd.phy_id = cpu_to_le32(phyctxt->id);
+- else
+- cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
++ WARN_ON_ONCE(link_info->phy_ctxt);
++ cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
+
+ memcpy(cmd.local_link_addr, link_conf->addr, ETH_ALEN);
+
+@@ -194,11 +189,14 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ flags_mask |= LINK_FLG_MU_EDCA_CW;
+ }
+
+- if (link_conf->eht_puncturing && !iwlwifi_mod_params.disable_11be)
+- cmd.puncture_mask = cpu_to_le16(link_conf->eht_puncturing);
+- else
+- /* This flag can be set only if the MAC has eht support */
+- changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
++ if (changes & LINK_CONTEXT_MODIFY_EHT_PARAMS) {
++ if (iwlwifi_mod_params.disable_11be ||
++ !link_conf->eht_support)
++ changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
++ else
++ cmd.puncture_mask =
++ cpu_to_le16(link_conf->eht_puncturing);
++ }
+
+ cmd.bss_color = link_conf->he_bss_color.color;
+
+@@ -245,7 +243,7 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ int ret;
+
+ if (WARN_ON(!link_info ||
+- link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID))
++ link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ return -EINVAL;
+
+ RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+index 7369a45f7f2bd..9c97691e60384 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+@@ -286,6 +286,10 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ INIT_LIST_HEAD(&mvmvif->time_event_data.list);
+ mvmvif->time_event_data.id = TE_MAX;
+
++ mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA;
++ mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA;
++ mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA;
++
+ /* No need to allocate data queues to P2P Device MAC and NAN.*/
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ return 0;
+@@ -300,10 +304,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+ }
+
+- mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA;
+- mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA;
+- mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA;
+-
+ for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
+ mvmvif->deflink.smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 5918c1f2b10c3..a25ea638229b0 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -1589,32 +1589,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+ }
+
+- /*
+- * P2P_DEVICE interface does not have a channel context assigned to it,
+- * so a dedicated PHY context is allocated to it and the corresponding
+- * MAC context is bound to it at this stage.
+- */
+- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+-
+- mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+- if (!mvmvif->deflink.phy_ctxt) {
+- ret = -ENOSPC;
+- goto out_free_bf;
+- }
+-
+- iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
+- ret = iwl_mvm_binding_add_vif(mvm, vif);
+- if (ret)
+- goto out_unref_phy;
+-
+- ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
+- if (ret)
+- goto out_unbind;
+-
+- /* Save a pointer to p2p device vif, so it can later be used to
+- * update the p2p device MAC when a GO is started/stopped */
++ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ mvm->p2p_device_vif = vif;
+- }
+
+ iwl_mvm_tcm_add_vif(mvm, vif);
+ INIT_DELAYED_WORK(&mvmvif->csa_work,
+@@ -1643,11 +1619,6 @@ out:
+
+ goto out_unlock;
+
+- out_unbind:
+- iwl_mvm_binding_remove_vif(mvm, vif);
+- out_unref_phy:
+- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+- out_free_bf:
+ if (mvm->bf_allowed_vif == mvmvif) {
+ mvm->bf_allowed_vif = NULL;
+ vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+@@ -1744,12 +1715,17 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+ if (iwl_mvm_mac_remove_interface_common(hw, vif))
+ goto out;
+
++ /* Before the interface removal, mac80211 would cancel the ROC, and the
++ * ROC worker would be scheduled if needed. The worker would be flushed
++ * in iwl_mvm_prepare_mac_removal() and thus at this point there is no
++ * binding etc., so nothing needs to be done here.
++ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
++ if (mvmvif->deflink.phy_ctxt) {
++ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
++ mvmvif->deflink.phy_ctxt = NULL;
++ }
+ mvm->p2p_device_vif = NULL;
+- iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
+- iwl_mvm_binding_remove_vif(mvm, vif);
+- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+- mvmvif->deflink.phy_ctxt = NULL;
+ }
+
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+@@ -3791,6 +3767,12 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
+
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
+
++ /* MFP is set by default before the station is authorized.
++ * Clear it here in case it's not used.
++ */
++ if (!sta->mfp)
++ return callbacks->update_sta(mvm, vif, sta);
++
+ return 0;
+ }
+
+@@ -4531,30 +4513,20 @@ static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id)
+ return ret;
+ }
+
+-static int iwl_mvm_roc_switch_binding(struct iwl_mvm *mvm,
+- struct ieee80211_vif *vif,
+- struct iwl_mvm_phy_ctxt *new_phy_ctxt)
++static int iwl_mvm_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ {
+- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+- int ret = 0;
++ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+- /* Unbind the P2P_DEVICE from the current PHY context,
+- * and if the PHY context is not used remove it.
+- */
+- ret = iwl_mvm_binding_remove_vif(mvm, vif);
+- if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
++ ret = iwl_mvm_binding_add_vif(mvm, vif);
++ if (WARN(ret, "Failed binding P2P_DEVICE\n"))
+ return ret;
+
+- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+-
+- /* Bind the P2P_DEVICE to the current PHY Context */
+- mvmvif->deflink.phy_ctxt = new_phy_ctxt;
+-
+- ret = iwl_mvm_binding_add_vif(mvm, vif);
+- WARN(ret, "Failed binding P2P_DEVICE\n");
+- return ret;
++ /* The station and queue allocation must be done only after the binding
++ * is done, as otherwise the FW might incorrectly configure its state.
++ */
++ return iwl_mvm_add_p2p_bcast_sta(mvm, vif);
+ }
+
+ static int iwl_mvm_roc(struct ieee80211_hw *hw,
+@@ -4565,7 +4537,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
+ {
+ static const struct iwl_mvm_roc_ops ops = {
+ .add_aux_sta_for_hs20 = iwl_mvm_add_aux_sta_for_hs20,
+- .switch_phy_ctxt = iwl_mvm_roc_switch_binding,
++ .link = iwl_mvm_roc_link,
+ };
+
+ return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops);
+@@ -4581,7 +4553,6 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct cfg80211_chan_def chandef;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+- bool band_change_removal;
+ int ret, i;
+ u32 lmac_id;
+
+@@ -4610,82 +4581,61 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ /* handle below */
+ break;
+ default:
+- IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
++ IWL_ERR(mvm, "ROC: Invalid vif type=%u\n", vif->type);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
++ /* Try using a PHY context that is already in use */
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ phy_ctxt = &mvm->phy_ctxts[i];
+- if (phy_ctxt->ref == 0 || mvmvif->deflink.phy_ctxt == phy_ctxt)
++ if (!phy_ctxt->ref || mvmvif->deflink.phy_ctxt == phy_ctxt)
+ continue;
+
+- if (phy_ctxt->ref && channel == phy_ctxt->channel) {
+- ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt);
+- if (ret)
+- goto out_unlock;
++ if (channel == phy_ctxt->channel) {
++ if (mvmvif->deflink.phy_ctxt)
++ iwl_mvm_phy_ctxt_unref(mvm,
++ mvmvif->deflink.phy_ctxt);
+
++ mvmvif->deflink.phy_ctxt = phy_ctxt;
+ iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
+- goto schedule_time_event;
++ goto link_and_start_p2p_roc;
+ }
+ }
+
+- /* Need to update the PHY context only if the ROC channel changed */
+- if (channel == mvmvif->deflink.phy_ctxt->channel)
+- goto schedule_time_event;
+-
+- cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+-
+- /*
+- * Check if the remain-on-channel is on a different band and that
+- * requires context removal, see iwl_mvm_phy_ctxt_changed(). If
+- * so, we'll need to release and then re-configure here, since we
+- * must not remove a PHY context that's part of a binding.
++ /* If the currently used PHY context is configured with a matching
++ * channel, use it
+ */
+- band_change_removal =
+- fw_has_capa(&mvm->fw->ucode_capa,
+- IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+- mvmvif->deflink.phy_ctxt->channel->band != chandef.chan->band;
+-
+- if (mvmvif->deflink.phy_ctxt->ref == 1 && !band_change_removal) {
+- /*
+- * Change the PHY context configuration as it is currently
+- * referenced only by the P2P Device MAC (and we can modify it)
+- */
+- ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->deflink.phy_ctxt,
+- &chandef, 1, 1);
+- if (ret)
+- goto out_unlock;
++ if (mvmvif->deflink.phy_ctxt) {
++ if (channel == mvmvif->deflink.phy_ctxt->channel)
++ goto link_and_start_p2p_roc;
+ } else {
+- /*
+- * The PHY context is shared with other MACs (or we're trying to
+- * switch bands), so remove the P2P Device from the binding,
+- * allocate an new PHY context and create a new binding.
+- */
+ phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+ if (!phy_ctxt) {
+ ret = -ENOSPC;
+ goto out_unlock;
+ }
+
+- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
+- 1, 1);
+- if (ret) {
+- IWL_ERR(mvm, "Failed to change PHY context\n");
+- goto out_unlock;
+- }
++ mvmvif->deflink.phy_ctxt = phy_ctxt;
++ iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
++ }
+
+- ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt);
+- if (ret)
+- goto out_unlock;
++ /* Configure the PHY context */
++ cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+
+- iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
++ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
++ 1, 1);
++ if (ret) {
++ IWL_ERR(mvm, "Failed to change PHY context\n");
++ goto out_unlock;
+ }
+
+-schedule_time_event:
+- /* Schedule the time events */
+- ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
++link_and_start_p2p_roc:
++ ret = ops->link(mvm, vif);
++ if (ret)
++ goto out_unlock;
+
++ ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
+ IWL_DEBUG_MAC80211(mvm, "leave\n");
+@@ -5629,7 +5579,8 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ }
+
+ if (drop) {
+- if (iwl_mvm_flush_sta(mvm, mvmsta, false))
++ if (iwl_mvm_flush_sta(mvm, mvmsta->deflink.sta_id,
++ mvmsta->tfd_queue_msk))
+ IWL_ERR(mvm, "flush request fail\n");
+ } else {
+ if (iwl_mvm_has_new_tx_api(mvm))
+@@ -5651,22 +5602,21 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+ {
++ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+- int i;
++ struct iwl_mvm_link_sta *mvm_link_sta;
++ struct ieee80211_link_sta *link_sta;
++ int link_id;
+
+ mutex_lock(&mvm->mutex);
+- for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+- struct iwl_mvm_sta *mvmsta;
+- struct ieee80211_sta *tmp;
+-
+- tmp = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+- lockdep_is_held(&mvm->mutex));
+- if (tmp != sta)
++ for_each_sta_active_link(vif, sta, link_sta, link_id) {
++ mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_id],
++ lockdep_is_held(&mvm->mutex));
++ if (!mvm_link_sta)
+ continue;
+
+- mvmsta = iwl_mvm_sta_from_mac80211(sta);
+-
+- if (iwl_mvm_flush_sta(mvm, mvmsta, false))
++ if (iwl_mvm_flush_sta(mvm, mvm_link_sta->sta_id,
++ mvmsta->tfd_queue_msk))
+ IWL_ERR(mvm, "flush request fail\n");
+ }
+ mutex_unlock(&mvm->mutex);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
+index 2c9f2f71b083a..ea3e9e9c6e26c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
+@@ -24,10 +24,15 @@ static u32 iwl_mvm_get_sec_sta_mask(struct iwl_mvm *mvm,
+ return 0;
+ }
+
+- /* AP group keys are per link and should be on the mcast STA */
++ /* AP group keys are per link and should be on the mcast/bcast STA */
+ if (vif->type == NL80211_IFTYPE_AP &&
+- !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
++ !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
++ /* IGTK/BIGTK to bcast STA */
++ if (keyconf->keyidx >= 4)
++ return BIT(link_info->bcast_sta.sta_id);
++ /* GTK for data to mcast STA */
+ return BIT(link_info->mcast_sta.sta_id);
++ }
+
+ /* for client mode use the AP STA also for group keys */
+ if (!sta && vif->type == NL80211_IFTYPE_STATION)
+@@ -91,7 +96,12 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
+ if (!sta && vif->type == NL80211_IFTYPE_STATION)
+ sta = mvmvif->ap_sta;
+
+- if (!IS_ERR_OR_NULL(sta) && sta->mfp)
++ /* Set the MFP flag also for an AP interface where the key is an IGTK
++ * key as in such a case the station would always be NULL
++ */
++ if ((!IS_ERR_OR_NULL(sta) && sta->mfp) ||
++ (vif->type == NL80211_IFTYPE_AP &&
++ (keyconf->keyidx == 4 || keyconf->keyidx == 5)))
+ flags |= IWL_SEC_KEY_FLAG_MFP;
+
+ return flags;
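[editor's sketch] Key indices 4 and 5 are the IGTK slots in 802.11, and on an AP interface the IGTK is installed with no station attached, so sta->mfp can never be consulted; the hunk infers MFP from the index instead. Reduced to a predicate:

    #include <stdbool.h>

    /* Assumed 802.11 convention: keyidx 0-3 pairwise/GTK, 4-5 IGTK. */
    static bool key_needs_mfp(bool is_ap, bool sta_mfp, int keyidx)
    {
            if (sta_mfp)
                    return true;
            /* AP-side IGTK: no station to ask, decide by key index */
            return is_ap && (keyidx == 4 || keyidx == 5);
    }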
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+index b719843e94576..2ddb6f763a0b3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+@@ -56,43 +56,15 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+ }
+
+- /*
+- * P2P_DEVICE interface does not have a channel context assigned to it,
+- * so a dedicated PHY context is allocated to it and the corresponding
+- * MAC context is bound to it at this stage.
+- */
+- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+- mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+- if (!mvmvif->deflink.phy_ctxt) {
+- ret = -ENOSPC;
+- goto out_free_bf;
+- }
+-
+- iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
+- ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
+- if (ret)
+- goto out_unref_phy;
+-
+- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+- LINK_CONTEXT_MODIFY_ACTIVE |
+- LINK_CONTEXT_MODIFY_RATES_INFO,
+- true);
+- if (ret)
+- goto out_remove_link;
+-
+- ret = iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf);
+- if (ret)
+- goto out_remove_link;
++ ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
++ if (ret)
++ goto out_free_bf;
+
+- /* Save a pointer to p2p device vif, so it can later be used to
+- * update the p2p device MAC when a GO is started/stopped
+- */
++ /* Save a pointer to p2p device vif, so it can later be used to
++ * update the p2p device MAC when a GO is started/stopped
++ */
++ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ mvm->p2p_device_vif = vif;
+- } else {
+- ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
+- if (ret)
+- goto out_free_bf;
+- }
+
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+@@ -119,10 +91,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+
+ goto out_unlock;
+
+- out_remove_link:
+- iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
+- out_unref_phy:
+- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+ out_free_bf:
+ if (mvm->bf_allowed_vif == mvmvif) {
+ mvm->bf_allowed_vif = NULL;
+@@ -130,7 +98,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+ }
+ out_remove_mac:
+- mvmvif->deflink.phy_ctxt = NULL;
+ mvmvif->link[0] = NULL;
+ iwl_mvm_mld_mac_ctxt_remove(mvm, vif);
+ out_unlock:
+@@ -185,14 +152,18 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
+
+ iwl_mvm_power_update_mac(mvm);
+
++ /* Before the interface removal, mac80211 would cancel the ROC, and the
++ * ROC worker would be scheduled if needed. The worker would be flushed
++ * in iwl_mvm_prepare_mac_removal() and thus at this point the link is
++ * not active. So we only need to remove the link.
++ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
++ if (mvmvif->deflink.phy_ctxt) {
++ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
++ mvmvif->deflink.phy_ctxt = NULL;
++ }
+ mvm->p2p_device_vif = NULL;
+-
+- /* P2P device uses only one link */
+- iwl_mvm_mld_rm_bcast_sta(mvm, vif, &vif->bss_conf);
+- iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
+- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+- mvmvif->deflink.phy_ctxt = NULL;
++ iwl_mvm_remove_link(mvm, vif, &vif->bss_conf);
+ } else {
+ iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
+ }
+@@ -653,7 +624,7 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
+ }
+
+ /* Update EHT Puncturing info */
+- if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc && has_eht)
++ if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc)
+ link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
+
+ if (link_changes) {
+@@ -968,36 +939,29 @@ iwl_mvm_mld_mac_conf_tx(struct ieee80211_hw *hw,
+ return 0;
+ }
+
+-static int iwl_mvm_link_switch_phy_ctx(struct iwl_mvm *mvm,
+- struct ieee80211_vif *vif,
+- struct iwl_mvm_phy_ctxt *new_phy_ctxt)
++static int iwl_mvm_mld_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ {
+- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+- int ret = 0;
++ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+- /* Inorder to change the phy_ctx of a link, the link needs to be
+- * inactive. Therefore, first deactivate the link, then change its
+- * phy_ctx, and then activate it again.
+- */
+- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+- LINK_CONTEXT_MODIFY_ACTIVE, false);
+- if (WARN(ret, "Failed to deactivate link\n"))
++ /* The PHY context ID might have changed so need to set it */
++ ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false);
++ if (WARN(ret, "Failed to set PHY context ID\n"))
+ return ret;
+
+- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+-
+- mvmvif->deflink.phy_ctxt = new_phy_ctxt;
++ ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
++ LINK_CONTEXT_MODIFY_ACTIVE |
++ LINK_CONTEXT_MODIFY_RATES_INFO,
++ true);
+
+- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false);
+- if (WARN(ret, "Failed to deactivate link\n"))
++ if (WARN(ret, "Failed linking P2P_DEVICE\n"))
+ return ret;
+
+- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+- LINK_CONTEXT_MODIFY_ACTIVE, true);
+- WARN(ret, "Failed binding P2P_DEVICE\n");
+- return ret;
++ /* The station and queue allocation must be done only after the linking
++ * is done, as otherwise the FW might incorrectly configure its state.
++ */
++ return iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf);
+ }
+
+ static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+@@ -1006,7 +970,7 @@ static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ {
+ static const struct iwl_mvm_roc_ops ops = {
+ .add_aux_sta_for_hs20 = iwl_mvm_mld_add_aux_sta,
+- .switch_phy_ctxt = iwl_mvm_link_switch_phy_ctx,
++ .link = iwl_mvm_mld_roc_link,
+ };
+
+ return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops);
+@@ -1089,9 +1053,6 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
+ }
+ }
+
+- if (err)
+- goto out_err;
+-
+ err = 0;
+ if (new_links == 0) {
+ mvmvif->link[0] = &mvmvif->deflink;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+index 524852cf5cd2d..1ccbe8c1eeb42 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+@@ -347,7 +347,7 @@ static int iwl_mvm_mld_rm_int_sta(struct iwl_mvm *mvm,
+ return -EINVAL;
+
+ if (flush)
+- iwl_mvm_flush_sta(mvm, int_sta, true);
++ iwl_mvm_flush_sta(mvm, int_sta->sta_id, int_sta->tfd_queue_msk);
+
+ iwl_mvm_mld_disable_txq(mvm, BIT(int_sta->sta_id), queuptr, tid);
+
+@@ -705,8 +705,10 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+
+- if (WARN_ON(!link_conf || !mvm_link_sta))
++ if (WARN_ON(!link_conf || !mvm_link_sta)) {
++ ret = -EINVAL;
+ goto err;
++ }
+
+ ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf,
+ mvm_link_sta);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index b18c91c5dd5d1..218f3bc31104b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -1658,7 +1658,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
+ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
+ #endif
+ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk);
+-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal);
++int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask);
+ int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids);
+
+ /* Utils to extract sta related data */
+@@ -1942,13 +1942,12 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm,
+ *
+ * @add_aux_sta_for_hs20: pointer to the function that adds an aux sta
+ * for Hot Spot 2.0
+- * @switch_phy_ctxt: pointer to the function that switches a vif from one
+- * phy_ctx to another
++ * @link: For a P2P Device interface, pointer to a function that links the
++ * MAC/Link to the PHY context
+ */
+ struct iwl_mvm_roc_ops {
+ int (*add_aux_sta_for_hs20)(struct iwl_mvm *mvm, u32 lmac_id);
+- int (*switch_phy_ctxt)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+- struct iwl_mvm_phy_ctxt *new_phy_ctxt);
++ int (*link)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+ };
+
+ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index 3b9a343d4f672..2c231f4623893 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -2059,7 +2059,8 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ *status = IWL_MVM_QUEUE_FREE;
+ }
+
+- if (vif->type == NL80211_IFTYPE_STATION) {
++ if (vif->type == NL80211_IFTYPE_STATION &&
++ mvm_link->ap_sta_id == sta_id) {
+ /* if associated - we can't remove the AP STA now */
+ if (vif->cfg.assoc)
+ return true;
+@@ -2097,7 +2098,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+ return ret;
+
+ /* flush its queues here since we are freeing mvm_sta */
+- ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
++ ret = iwl_mvm_flush_sta(mvm, mvm_sta->deflink.sta_id,
++ mvm_sta->tfd_queue_msk);
+ if (ret)
+ return ret;
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+@@ -2408,7 +2410,8 @@ void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
+
+ lockdep_assert_held(&mvm->mutex);
+
+- iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, true);
++ iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
++ mvmvif->deflink.bcast_sta.tfd_queue_msk);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+@@ -2664,7 +2667,8 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+
+ lockdep_assert_held(&mvm->mutex);
+
+- iwl_mvm_flush_sta(mvm, &mvmvif->deflink.mcast_sta, true);
++ iwl_mvm_flush_sta(mvm, mvmvif->deflink.mcast_sta.sta_id,
++ mvmvif->deflink.mcast_sta.tfd_queue_msk);
+
+ iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id,
+ &mvmvif->deflink.cab_queue, 0);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+index 5f0e7144a951c..158266719ffd7 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+@@ -78,9 +78,29 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
+ */
+
+ if (!WARN_ON(!mvm->p2p_device_vif)) {
+- mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
+- iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta,
+- true);
++ struct ieee80211_vif *vif = mvm->p2p_device_vif;
++
++ mvmvif = iwl_mvm_vif_from_mac80211(vif);
++ iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
++ mvmvif->deflink.bcast_sta.tfd_queue_msk);
++
++ if (mvm->mld_api_is_used) {
++ iwl_mvm_mld_rm_bcast_sta(mvm, vif,
++ &vif->bss_conf);
++
++ iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
++ LINK_CONTEXT_MODIFY_ACTIVE,
++ false);
++ } else {
++ iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
++ iwl_mvm_binding_remove_vif(mvm, vif);
++ }
++
++ /* Do not remove the PHY context as removing and adding
++ * a PHY context has timing overheads. Leaving it
++ * configured in FW would be useful in case the next ROC
++ * is on the same channel.
++ */
+ }
+ }
+
+@@ -93,7 +113,8 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
+ */
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
+ /* do the same in case of hot spot 2.0 */
+- iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);
++ iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
++ mvm->aux_sta.tfd_queue_msk);
+
+ if (mvm->mld_api_is_used) {
+ iwl_mvm_mld_rm_aux_sta(mvm);
+@@ -880,8 +901,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
+ /* End TE, notify mac80211 */
+ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
+- ieee80211_remain_on_channel_expired(mvm->hw);
+ iwl_mvm_p2p_roc_finished(mvm);
++ ieee80211_remain_on_channel_expired(mvm->hw);
+ } else if (le32_to_cpu(notif->start)) {
+ if (WARN_ON(mvmvif->time_event_data.id !=
+ le32_to_cpu(notif->conf_id)))
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 898dca3936435..177a4628a913e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -536,16 +536,20 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+ flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
+
+ /*
+- * For data packets rate info comes from the fw. Only
+- * set rate/antenna during connection establishment or in case
+- * no station is given.
++ * For data and mgmt packets rate info comes from the fw. Only
++ * set rate/antenna for injected frames with fixed rate, or
++ * when no sta is given.
+ */
+- if (!sta || !ieee80211_is_data(hdr->frame_control) ||
+- mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
++ if (unlikely(!sta ||
++ info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
+ flags |= IWL_TX_FLAGS_CMD_RATE;
+ rate_n_flags =
+ iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
+ hdr->frame_control);
++ } else if (!ieee80211_is_data(hdr->frame_control) ||
++ mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
++ /* These are important frames */
++ flags |= IWL_TX_FLAGS_HIGH_PRI;
+ }
+
+ if (mvm->trans->trans_cfg->device_family >=
+@@ -1599,7 +1603,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+ seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
+
+ /* we can free until ssn % q.n_bd not inclusive */
+- iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
++ iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+@@ -1951,7 +1955,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway).
+ */
+- iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
++ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush);
+
+ skb_queue_walk(&reclaimed_skbs, skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+@@ -2293,24 +2297,10 @@ free_rsp:
+ return ret;
+ }
+
+-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
++int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask)
+ {
+- u32 sta_id, tfd_queue_msk;
+-
+- if (internal) {
+- struct iwl_mvm_int_sta *int_sta = sta;
+-
+- sta_id = int_sta->sta_id;
+- tfd_queue_msk = int_sta->tfd_queue_msk;
+- } else {
+- struct iwl_mvm_sta *mvm_sta = sta;
+-
+- sta_id = mvm_sta->deflink.sta_id;
+- tfd_queue_msk = mvm_sta->tfd_queue_msk;
+- }
+-
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_flush_sta_tids(mvm, sta_id, 0xffff);
+
+- return iwl_mvm_flush_tx_path(mvm, tfd_queue_msk);
++ return iwl_mvm_flush_tx_path(mvm, tfd_queue_mask);
+ }
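[editor's sketch] The old signature took a void pointer plus a bool tag and re-derived the two values it needed; the new one has callers pass sta_id and the queue mask directly, which is also what lets iwl_mvm_mac_flush_sta() above iterate per-link station IDs. The shape of the refactor:

    #include <stdint.h>

    struct int_sta  { uint32_t sta_id; uint32_t tfd_queue_msk; };
    struct full_sta { struct { uint32_t sta_id; } deflink;
                      uint32_t tfd_queue_msk; };

    /* The callee no longer guesses which struct hides behind a void *. */
    static int flush_sta(uint32_t sta_id, uint32_t tfd_queue_mask)
    {
            (void)sta_id;
            (void)tfd_queue_mask;
            return 0;
    }

    static int flush_internal(const struct int_sta *s)
    {
            return flush_sta(s->sta_id, s->tfd_queue_msk);
    }

    static int flush_full(const struct full_sta *s)
    {
            return flush_sta(s->deflink.sta_id, s->tfd_queue_msk);
    }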
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+index fa46dad5fd680..2ecf6db95fb31 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+@@ -161,6 +161,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
+ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ IWL_DEBUG_INFO(trans,
+ "DEVICE_ENABLED bit was set and is now cleared\n");
++ iwl_pcie_synchronize_irqs(trans);
+ iwl_pcie_rx_napi_sync(trans);
+ iwl_txq_gen2_tx_free(trans);
+ iwl_pcie_rx_stop(trans);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 198933f853c55..583d1011963ec 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -1263,6 +1263,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ IWL_DEBUG_INFO(trans,
+ "DEVICE_ENABLED bit was set and is now cleared\n");
++ iwl_pcie_synchronize_irqs(trans);
+ iwl_pcie_rx_napi_sync(trans);
+ iwl_pcie_tx_stop(trans);
+ iwl_pcie_rx_stop(trans);
+diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+index 340240b8954f6..ca74b1b63cac1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+@@ -1575,7 +1575,7 @@ void iwl_txq_progress(struct iwl_txq *txq)
+
+ /* Frees buffers until index _not_ inclusive */
+ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+- struct sk_buff_head *skbs)
++ struct sk_buff_head *skbs, bool is_flush)
+ {
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ int tfd_num, read_ptr, last_to_free;
+@@ -1650,9 +1650,11 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ if (iwl_txq_space(trans, txq) > txq->low_mark &&
+ test_bit(txq_id, trans->txqs.queue_stopped)) {
+ struct sk_buff_head overflow_skbs;
++ struct sk_buff *skb;
+
+ __skb_queue_head_init(&overflow_skbs);
+- skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
++ skb_queue_splice_init(&txq->overflow_q,
++ is_flush ? skbs : &overflow_skbs);
+
+ /*
+ * We are going to transmit from the overflow queue.
+@@ -1672,8 +1674,7 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ */
+ spin_unlock_bh(&txq->lock);
+
+- while (!skb_queue_empty(&overflow_skbs)) {
+- struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
++ while ((skb = __skb_dequeue(&overflow_skbs))) {
+ struct iwl_device_tx_cmd *dev_cmd_ptr;
+
+ dev_cmd_ptr = *(void **)((u8 *)skb->cb +
+diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+index b7d3808588bfb..4c09bc1930fa1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+@@ -179,7 +179,7 @@ void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs);
+ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+- struct sk_buff_head *skbs);
++ struct sk_buff_head *skbs, bool is_flush);
+ void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
+ void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
+ bool freeze);
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index dc8f4e157eb29..6ca7b494c2c26 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -330,9 +330,6 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ if (e->txwi == DMA_DUMMY_DATA)
+ e->txwi = NULL;
+
+- if (e->skb == DMA_DUMMY_DATA)
+- e->skb = NULL;
+-
+ *prev_e = *e;
+ memset(e, 0, sizeof(*e));
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
+index d158320bc15db..dbab400969202 100644
+--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
++++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
+@@ -1697,11 +1697,16 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
+ }
+ EXPORT_SYMBOL_GPL(mt76_init_queue);
+
+-u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
++u16 mt76_calculate_default_rate(struct mt76_phy *phy,
++ struct ieee80211_vif *vif, int rateidx)
+ {
++ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
++ struct cfg80211_chan_def *chandef = mvif->ctx ?
++ &mvif->ctx->def :
++ &phy->chandef;
+ int offset = 0;
+
+- if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
++ if (chandef->chan->band != NL80211_BAND_2GHZ)
+ offset = 4;
+
+ /* pick the lowest rate for hidden nodes */
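[editor's sketch] The helper now honours a per-vif channel context when one is assigned (multi-channel operation) and falls back to the phy-wide chandef otherwise; the selected band then decides whether the rate index skips the CCK entries. Condensed, with illustrative names:

    #define BAND_2GHZ 0

    struct chandef { int band; };

    static int default_rate_offset(const struct chandef *vif_ctx,
                                   const struct chandef *phy_def)
    {
            /* per-vif context overrides the phy default when present */
            const struct chandef *c = vif_ctx ? vif_ctx : phy_def;

            /* non-2.4 GHz bands skip the four CCK entries that exist
             * only on 2.4 GHz, hence the offset of 4 */
            return c->band != BAND_2GHZ ? 4 : 0;
    }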
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index e8757865a3d06..dae5410d67e83 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -709,6 +709,7 @@ struct mt76_vif {
+ u8 basic_rates_idx;
+ u8 mcast_rates_idx;
+ u8 beacon_rates_idx;
++ struct ieee80211_chanctx_conf *ctx;
+ };
+
+ struct mt76_phy {
+@@ -1100,7 +1101,8 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
+ struct mt76_queue *
+ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
+ int ring_base, u32 flags);
+-u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
++u16 mt76_calculate_default_rate(struct mt76_phy *phy,
++ struct ieee80211_vif *vif, int rateidx);
+ static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
+ int n_desc, int ring_base, u32 flags)
+ {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+index 888678732f290..c223f7c19e6da 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+@@ -9,6 +9,23 @@ struct beacon_bc_data {
+ int count[MT7603_MAX_INTERFACES];
+ };
+
++static void
++mt7603_mac_stuck_beacon_recovery(struct mt7603_dev *dev)
++{
++ if (dev->beacon_check % 5 != 4)
++ return;
++
++ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
++ mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
++ mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
++ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
++
++ mt76_set(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS);
++ mt76_set(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE);
++ mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE);
++ mt76_clear(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS);
++}
++
+ static void
+ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ {
+@@ -16,6 +33,8 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct sk_buff *skb = NULL;
++ u32 om_idx = mvif->idx;
++ u32 val;
+
+ if (!(mdev->beacon_mask & BIT(mvif->idx)))
+ return;
+@@ -24,20 +43,33 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ if (!skb)
+ return;
+
+- mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON],
+- MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
++ if (om_idx)
++ om_idx |= 0x10;
++ val = MT_DMA_FQCR0_BUSY | MT_DMA_FQCR0_MODE |
++ FIELD_PREP(MT_DMA_FQCR0_TARGET_BSS, om_idx) |
++ FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
++ FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8);
+
+ spin_lock_bh(&dev->ps_lock);
+- mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
+- FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
+- FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
+- dev->mphy.q_tx[MT_TXQ_CAB]->hw_idx) |
+- FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
+- FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
+
+- if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000))
++ mt76_wr(dev, MT_DMA_FQCR0, val |
++ FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BCN));
++ if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) {
+ dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
++ goto out;
++ }
++
++ mt76_wr(dev, MT_DMA_FQCR0, val |
++ FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BMC));
++ if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) {
++ dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
++ goto out;
++ }
+
++ mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON],
++ MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
++
++out:
+ spin_unlock_bh(&dev->ps_lock);
+ }
+
+@@ -81,6 +113,18 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ data.dev = dev;
+ __skb_queue_head_init(&data.q);
+
++ /* Flush all previous CAB queue packets and beacons */
++ mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
++
++ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
++ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
++
++ if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > 0)
++ dev->beacon_check++;
++ else
++ dev->beacon_check = 0;
++ mt7603_mac_stuck_beacon_recovery(dev);
++
+ q = dev->mphy.q_tx[MT_TXQ_BEACON];
+ spin_lock(&q->lock);
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+@@ -89,14 +133,9 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ mt76_queue_kick(dev, q);
+ spin_unlock(&q->lock);
+
+- /* Flush all previous CAB queue packets */
+- mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
+-
+- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
+-
+ mt76_csa_check(mdev);
+ if (mdev->csa_complete)
+- goto out;
++ return;
+
+ q = dev->mphy.q_tx[MT_TXQ_CAB];
+ do {
+@@ -108,7 +147,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ skb_queue_len(&data.q) < 8);
+
+ if (skb_queue_empty(&data.q))
+- goto out;
++ return;
+
+ for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+ if (!data.tail[i])
+@@ -136,11 +175,6 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ MT_WF_ARB_CAB_START_BSSn(0) |
+ (MT_WF_ARB_CAB_START_BSS0n(1) *
+ ((1 << (MT7603_MAX_INTERFACES - 1)) - 1)));
+-
+-out:
+- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
+- if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > hweight8(mdev->beacon_mask))
+- dev->beacon_check++;
+ }
+
+ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
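The hunks above add a staged stuck-beacon recovery to mt7603: `beacon_check` is bumped on every pre-TBTT pass that still finds beacons queued, cleared once the queue drains, and the register-level recovery in `mt7603_mac_stuck_beacon_recovery()` only fires on every fifth consecutive stuck pass, with the full watchdog reset reserved for `MT7603_WATCHDOG_TIMEOUT`. Below is a rough standalone sketch of that escalation pattern; all names are invented for illustration, not the driver's.

```c
#include <stdbool.h>
#include <stdio.h>

#define RECOVERY_PERIOD   5   /* light recovery every Nth stuck tick */
#define GIVE_UP_THRESHOLD 10  /* stand-in for the watchdog timeout */

static unsigned int stuck_count;

static void light_recovery(void) { puts("kick DMA / reset scheduler"); }
static void full_reset(void)     { puts("escalate to full watchdog reset"); }

static void on_tick(bool made_progress)
{
	if (made_progress) {
		stuck_count = 0;  /* any progress rearms the counter */
		return;
	}

	if (++stuck_count >= GIVE_UP_THRESHOLD) {
		full_reset();
		stuck_count = 0;
		return;
	}

	/* Only try the cheap recovery on every Nth consecutive stuck
	 * tick, giving the hardware a few beacon intervals to drain. */
	if (stuck_count % RECOVERY_PERIOD == RECOVERY_PERIOD - 1)
		light_recovery();
}

int main(void)
{
	for (int i = 0; i < 12; i++)
		on_tick(false);
	return 0;
}
```

The same rearm-on-progress idea shows up in the next file, where the IRQ handler zeroes `rx_pse_check` whenever an RX-done interrupt proves the PSE is still moving data.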
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+index 60a996b63c0c0..915b8349146af 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+@@ -42,11 +42,13 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
+ }
+
+ if (intr & MT_INT_RX_DONE(0)) {
++ dev->rx_pse_check = 0;
+ mt7603_irq_disable(dev, MT_INT_RX_DONE(0));
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE(1)) {
++ dev->rx_pse_check = 0;
+ mt7603_irq_disable(dev, MT_INT_RX_DONE(1));
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+index 99ae080502d80..cf21d06257e53 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+@@ -1441,15 +1441,6 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
+
+ mt7603_beacon_set_timer(dev, -1, 0);
+
+- if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
+- dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
+- dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
+- dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
+- mt7603_pse_reset(dev);
+-
+- if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
+- goto skip_dma_reset;
+-
+ mt7603_mac_stop(dev);
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+@@ -1459,28 +1450,32 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
+
+ mt7603_irq_disable(dev, mask);
+
+- mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
+-
+ mt7603_pse_client_reset(dev);
+
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
+ for (i = 0; i < __MT_TXQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+
++ mt7603_dma_sched_reset(dev);
++
++ mt76_tx_status_check(&dev->mt76, true);
++
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ mt76_queue_rx_reset(dev, i);
+ }
+
+- mt76_tx_status_check(&dev->mt76, true);
++ if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
++ dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY)
++ mt7603_pse_reset(dev);
+
+- mt7603_dma_sched_reset(dev);
++ if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
++ mt7603_mac_dma_start(dev);
+
+- mt7603_mac_dma_start(dev);
++ mt7603_irq_enable(dev, mask);
+
+- mt7603_irq_enable(dev, mask);
++ clear_bit(MT76_RESET, &dev->mphy.state);
++ }
+
+-skip_dma_reset:
+- clear_bit(MT76_RESET, &dev->mphy.state);
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+@@ -1570,20 +1565,29 @@ static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
+ {
+ u32 addr, val;
+
+- if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
+- return true;
+-
+ if (mt7603_rx_fifo_busy(dev))
+- return false;
++ goto out;
+
+ addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
+ mt76_wr(dev, addr, 3);
+ val = mt76_rr(dev, addr) >> 16;
+
+- if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
+- return true;
++ if (!(val & BIT(0)))
++ return false;
+
+- return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
++ if (is_mt7628(dev))
++ val &= 0xa000;
++ else
++ val &= 0x8000;
++ if (!val)
++ return false;
++
++out:
++ if (mt76_rr(dev, MT_INT_SOURCE_CSR) &
++ (MT_INT_RX_DONE(0) | MT_INT_RX_DONE(1)))
++ return false;
++
++ return true;
+ }
+
+ static bool
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+index a39c9a0fcb1cb..524bceb8e9581 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+@@ -469,6 +469,11 @@ enum {
+ #define MT_WF_SEC_BASE 0x21a00
+ #define MT_WF_SEC(ofs) (MT_WF_SEC_BASE + (ofs))
+
++#define MT_WF_CFG_OFF_BASE 0x21e00
++#define MT_WF_CFG_OFF(ofs) (MT_WF_CFG_OFF_BASE + (ofs))
++#define MT_WF_CFG_OFF_WOCCR MT_WF_CFG_OFF(0x004)
++#define MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS BIT(4)
++
+ #define MT_SEC_SCR MT_WF_SEC(0x004)
+ #define MT_SEC_SCR_MASK_ORDER GENMASK(1, 0)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+index 8d745c9730c72..955974a82180f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+@@ -2147,7 +2147,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
+ };
+
+ if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+- dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
++ phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+index 0019890fdb784..fbb1181c58ff3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+@@ -106,7 +106,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ else
+ mt76_connac_write_hw_txp(mdev, tx_info, txp, id);
+
+- tx_info->skb = DMA_DUMMY_DATA;
++ tx_info->skb = NULL;
+
+ return 0;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+index 68ca0844cbbfa..87bfa441a9374 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+@@ -257,6 +257,8 @@ enum tx_mgnt_type {
+ #define MT_TXD7_UDP_TCP_SUM BIT(15)
+ #define MT_TXD7_TX_TIME GENMASK(9, 0)
+
++#define MT_TXD9_WLAN_IDX GENMASK(23, 8)
++
+ #define MT_TX_RATE_STBC BIT(14)
+ #define MT_TX_RATE_NSS GENMASK(13, 10)
+ #define MT_TX_RATE_MODE GENMASK(9, 6)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index ee5177fd6ddea..87479c6c2b505 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -151,23 +151,6 @@ void mt76_connac_tx_complete_skb(struct mt76_dev *mdev,
+ return;
+ }
+
+- /* error path */
+- if (e->skb == DMA_DUMMY_DATA) {
+- struct mt76_connac_txp_common *txp;
+- struct mt76_txwi_cache *t;
+- u16 token;
+-
+- txp = mt76_connac_txwi_to_txp(mdev, e->txwi);
+- if (is_mt76_fw_txp(mdev))
+- token = le16_to_cpu(txp->fw.token);
+- else
+- token = le16_to_cpu(txp->hw.msdu_id[0]) &
+- ~MT_MSDU_ID_VALID;
+-
+- t = mt76_token_put(mdev, token);
+- e->skb = t ? t->skb : NULL;
+- }
+-
+ if (e->skb)
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+ }
+@@ -310,7 +293,10 @@ u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
+ struct ieee80211_vif *vif,
+ bool beacon, bool mcast)
+ {
+- u8 nss = 0, mode = 0, band = mphy->chandef.chan->band;
++ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
++ struct cfg80211_chan_def *chandef = mvif->ctx ?
++ &mvif->ctx->def : &mphy->chandef;
++ u8 nss = 0, mode = 0, band = chandef->chan->band;
+ int rateidx = 0, mcast_rate;
+
+ if (!vif)
+@@ -343,7 +329,7 @@ u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+ legacy:
+- rateidx = mt76_calculate_default_rate(mphy, rateidx);
++ rateidx = mt76_calculate_default_rate(mphy, vif, rateidx);
+ mode = rateidx >> 8;
+ rateidx &= GENMASK(7, 0);
+ out:
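The `DMA_DUMMY_DATA` removal above, together with the matching `tx_info->skb = NULL` changes in the per-chip `tx_prepare_skb` hooks, relies on the skb being recoverable from the driver's token table at free time rather than smuggling a sentinel pointer through the DMA layer. A minimal userspace sketch of that token-table pattern follows; the names are hypothetical, while the real drivers pair `mt76_token_consume()` with the `mt76_token_put()` seen in the removed hunk.

```c
#include <stdio.h>

#define MAX_TOKENS 8

struct fake_skb { int len; };

static struct fake_skb *token_tbl[MAX_TOKENS];

/* prepare path: park the skb under a small integer id */
static int token_consume(struct fake_skb *skb)
{
	for (int id = 0; id < MAX_TOKENS; id++) {
		if (!token_tbl[id]) {
			token_tbl[id] = skb;
			return id;
		}
	}
	return -1;  /* table full: caller must back off */
}

/* completion path: the token, not a sentinel pointer, names the skb */
static struct fake_skb *token_put(int id)
{
	struct fake_skb *skb = token_tbl[id];

	token_tbl[id] = NULL;
	return skb;
}

int main(void)
{
	struct fake_skb pkt = { .len = 1500 };
	int id = token_consume(&pkt);   /* tx_info->skb = NULL after this */
	struct fake_skb *done = token_put(id);

	printf("token %d completed skb of %d bytes\n", id, done->len);
	return 0;
}
```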
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index 0f0a519f956f8..8274a57e1f0fb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -829,7 +829,9 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ u8 rcpi, u8 sta_state)
+ {
+- struct cfg80211_chan_def *chandef = &mphy->chandef;
++ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
++ struct cfg80211_chan_def *chandef = mvif->ctx ?
++ &mvif->ctx->def : &mphy->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct mt76_dev *dev = mphy->dev;
+ struct sta_rec_ra_info *ra_info;
+@@ -1369,7 +1371,10 @@ EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_ext);
+ const struct ieee80211_sta_he_cap *
+ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
+ {
+- enum nl80211_band band = phy->chandef.chan->band;
++ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
++ struct cfg80211_chan_def *chandef = mvif->ctx ?
++ &mvif->ctx->def : &phy->chandef;
++ enum nl80211_band band = chandef->chan->band;
+ struct ieee80211_supported_band *sband;
+
+ sband = phy->hw->wiphy->bands[band];
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index b8b0c0fda7522..2222fb9aa103e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -809,7 +809,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
+ else
+ txp->rept_wds_wcid = cpu_to_le16(0x3ff);
+- tx_info->skb = DMA_DUMMY_DATA;
++ tx_info->skb = NULL;
+
+ /* pass partial skb header to fw */
+ tx_info->buf[1].len = MT_CT_PARSE_LEN;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index 8ebbf186fab23..d85105a43d704 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -646,11 +646,13 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
+ mt7915_update_bss_color(hw, vif, &info->he_bss_color);
+
+ if (changed & (BSS_CHANGED_BEACON |
+- BSS_CHANGED_BEACON_ENABLED |
+- BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
+- BSS_CHANGED_FILS_DISCOVERY))
++ BSS_CHANGED_BEACON_ENABLED))
+ mt7915_mcu_add_beacon(hw, vif, info->enable_beacon, changed);
+
++ if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
++ BSS_CHANGED_FILS_DISCOVERY))
++ mt7915_mcu_add_inband_discov(dev, vif, changed);
++
+ if (set_bss_info == 0)
+ mt7915_mcu_add_bss_info(phy, vif, false);
+ if (set_sta == 0)
+@@ -1386,7 +1388,7 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
+ if (sset != ETH_SS_STATS)
+ return;
+
+- memcpy(data, *mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
++ memcpy(data, mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
+ data += sizeof(mt7915_gstrings_stats);
+ page_pool_ethtool_stats_get_strings(data);
+ }
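The one-character `memcpy` change above recurs in several drivers later in this patch (mt792x, mt7996, plfxlc, mac80211_hwsim). `mt7915_gstrings_stats` is a two-dimensional `char [][ETH_GSTRING_LEN]` table; `*array` and `array` decay to the same address, but `*array` has the type of a single row, so a fortified memcpy believes the source object is only `ETH_GSTRING_LEN` bytes while `sizeof(array)` still names the whole table. A standalone demonstration of the type-level difference:

```c
#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32

static const char stats_strings[][GSTRING_LEN] = {
	"tx_packets", "rx_packets", "tx_errors",
};

int main(void)
{
	char out[sizeof(stats_strings)];

	/* same address, very different object sizes at the type level */
	printf("row=%zu bytes, table=%zu bytes\n",
	       sizeof(*stats_strings), sizeof(stats_strings));

	memcpy(out, stats_strings, sizeof(stats_strings));  /* fixed form */
	printf("first string: %s\n", out);
	return 0;
}
```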
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index 50ae7bf3af91c..5d8e985cd7d45 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -1015,13 +1015,13 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool bfee)
+ {
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+- int tx_ant = hweight8(phy->mt76->chainmask) - 1;
++ int sts = hweight16(phy->mt76->chainmask);
+
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return false;
+
+- if (!bfee && tx_ant < 2)
++ if (!bfee && sts < 2)
+ return false;
+
+ if (sta->deflink.he_cap.has_he) {
+@@ -1882,10 +1882,9 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+ }
+
+-static void
+-mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+- struct sk_buff *rskb, struct bss_info_bcn *bcn,
+- u32 changed)
++int
++mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
++ u32 changed)
+ {
+ #define OFFLOAD_TX_MODE_SU BIT(0)
+ #define OFFLOAD_TX_MODE_MU BIT(1)
+@@ -1895,14 +1894,27 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
+ struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
++ struct bss_info_bcn *bcn;
+ struct bss_info_inband_discovery *discov;
+ struct ieee80211_tx_info *info;
+- struct sk_buff *skb = NULL;
+- struct tlv *tlv;
++ struct sk_buff *rskb, *skb = NULL;
++ struct tlv *tlv, *sub_tlv;
+ bool ext_phy = phy != &dev->phy;
+ u8 *buf, interval;
+ int len;
+
++ if (vif->bss_conf.nontransmitted)
++ return 0;
++
++ rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL,
++ MT7915_MAX_BSS_OFFLOAD_SIZE);
++ if (IS_ERR(rskb))
++ return PTR_ERR(rskb);
++
++ tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
++ bcn = (struct bss_info_bcn *)tlv;
++ bcn->enable = true;
++
+ if (changed & BSS_CHANGED_FILS_DISCOVERY &&
+ vif->bss_conf.fils_discovery.max_interval) {
+ interval = vif->bss_conf.fils_discovery.max_interval;
+@@ -1913,27 +1925,29 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
+ skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
+ }
+
+- if (!skb)
+- return;
++ if (!skb) {
++ dev_kfree_skb(rskb);
++ return -EINVAL;
++ }
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->band = band;
+-
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy);
+
+ len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
+ len = (len & 0x3) ? ((len | 0x3) + 1) : len;
+
+- if (len > (MT7915_MAX_BSS_OFFLOAD_SIZE - rskb->len)) {
++ if (skb->len > MT7915_MAX_BEACON_SIZE) {
+ dev_err(dev->mt76.dev, "inband discovery size limit exceed\n");
++ dev_kfree_skb(rskb);
+ dev_kfree_skb(skb);
+- return;
++ return -EINVAL;
+ }
+
+- tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV,
+- len, &bcn->sub_ntlv, &bcn->len);
+- discov = (struct bss_info_inband_discovery *)tlv;
++ sub_tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV,
++ len, &bcn->sub_ntlv, &bcn->len);
++ discov = (struct bss_info_inband_discovery *)sub_tlv;
+ discov->tx_mode = OFFLOAD_TX_MODE_SU;
+ /* 0: UNSOL PROBE RESP, 1: FILS DISCOV */
+ discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY);
+@@ -1941,13 +1955,16 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
+ discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ discov->enable = true;
+
+- buf = (u8 *)tlv + sizeof(*discov);
++ buf = (u8 *)sub_tlv + sizeof(*discov);
+
+ mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL,
+ 0, changed);
+ memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+
+ dev_kfree_skb(skb);
++
++ return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
++ MCU_EXT_CMD(BSS_INFO_UPDATE), true);
+ }
+
+ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+@@ -1980,11 +1997,14 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ goto out;
+
+ skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+- if (!skb)
++ if (!skb) {
++ dev_kfree_skb(rskb);
+ return -EINVAL;
++ }
+
+- if (skb->len > MT7915_MAX_BEACON_SIZE - MT_TXD_SIZE) {
++ if (skb->len > MT7915_MAX_BEACON_SIZE) {
+ dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
++ dev_kfree_skb(rskb);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -1997,11 +2017,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
+ dev_kfree_skb(skb);
+
+- if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP ||
+- changed & BSS_CHANGED_FILS_DISCOVERY)
+- mt7915_mcu_beacon_inband_discov(dev, vif, rskb,
+- bcn, changed);
+-
+ out:
+ return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
+ MCU_EXT_CMD(BSS_INFO_UPDATE), true);
+@@ -2725,10 +2740,10 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
+ if (mt76_connac_spe_idx(phy->mt76->antenna_mask))
+ req.tx_path_num = fls(phy->mt76->antenna_mask);
+
+- if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+- dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
++ if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
++ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
++ phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
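A recurring theme in the mcu.c hunks above: once both the request skb (`rskb`) and the template skb exist, every early-error exit has to free both, which the old code missed on the `!skb` and size-check paths. Here is a compact userspace sketch of the two-resource unwind these functions converge on; it is illustrative only, and in the driver the success path hands the request to the MCU instead of freeing it.

```c
#include <stdlib.h>

struct buf { void *data; };

static struct buf *buf_alloc(size_t n)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	b->data = malloc(n);
	if (!b->data) {
		free(b);
		return NULL;
	}
	return b;
}

static void buf_free(struct buf *b)
{
	if (b) {
		free(b->data);
		free(b);
	}
}

static int build_request(size_t template_len, size_t limit)
{
	struct buf *req, *tmpl;
	int ret = 0;

	req = buf_alloc(1024);
	if (!req)
		return -1;

	tmpl = buf_alloc(template_len);
	if (!tmpl) {
		ret = -1;
		goto free_req;  /* no template: the request must not leak */
	}

	if (template_len > limit)
		ret = -1;       /* size check failed: free both below */

	buf_free(tmpl);
free_req:
	buf_free(req);
	return ret;
}

int main(void)
{
	return build_request(64, 128) ? 1 : 0;
}
```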
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+index b9ea297f382c3..1592b5d6751a0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+@@ -495,10 +495,14 @@ enum {
+ SER_RECOVER
+ };
+
+-#define MT7915_MAX_BEACON_SIZE 512
+-#define MT7915_MAX_INBAND_FRAME_SIZE 256
+-#define MT7915_MAX_BSS_OFFLOAD_SIZE (MT7915_MAX_BEACON_SIZE + \
+- MT7915_MAX_INBAND_FRAME_SIZE + \
++#define MT7915_MAX_BEACON_SIZE 1308
++#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \
++ sizeof(struct bss_info_bcn) + \
++ sizeof(struct bss_info_bcn_cntdwn) + \
++ sizeof(struct bss_info_bcn_mbss) + \
++ MT_TXD_SIZE + \
++ sizeof(struct bss_info_bcn_cont))
++#define MT7915_MAX_BSS_OFFLOAD_SIZE (MT7915_MAX_BEACON_SIZE + \
+ MT7915_BEACON_UPDATE_SIZE)
+
+ #define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
+@@ -511,12 +515,6 @@ enum {
+ sizeof(struct bss_info_bmc_rate) +\
+ sizeof(struct bss_info_ext_bss))
+
+-#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \
+- sizeof(struct bss_info_bcn_cntdwn) + \
+- sizeof(struct bss_info_bcn_mbss) + \
+- sizeof(struct bss_info_bcn_cont) + \
+- sizeof(struct bss_info_inband_discovery))
+-
+ static inline s8
+ mt7915_get_power_bound(struct mt7915_phy *phy, s8 txpower)
+ {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+index 0456e56f63480..21984e9723709 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+@@ -447,6 +447,8 @@ int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
+ bool add);
+ int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct cfg80211_he_bss_color *he_bss_color);
++int mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
++ u32 changed);
+ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int enable, u32 changed);
+ int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 0844d28b3223d..d8851cb5f400b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -756,7 +756,7 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
+- true, mvif->ctx);
++ true, mvif->mt76.ctx);
+
+ ewma_avg_signal_init(&msta->avg_ack_signal);
+
+@@ -791,7 +791,7 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ if (!sta->tdls)
+ mt76_connac_mcu_uni_add_bss(&dev->mphy, vif,
+ &mvif->sta.wcid, false,
+- mvif->ctx);
++ mvif->mt76.ctx);
+ }
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+@@ -1208,7 +1208,7 @@ mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ mt792x_mutex_acquire(dev);
+
+ err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
+- true, mvif->ctx);
++ true, mvif->mt76.ctx);
+ if (err)
+ goto out;
+
+@@ -1240,7 +1240,7 @@ mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ goto out;
+
+ mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false,
+- mvif->ctx);
++ mvif->mt76.ctx);
+
+ out:
+ mt792x_mutex_release(dev);
+@@ -1265,7 +1265,7 @@ static void mt7921_ctx_iter(void *priv, u8 *mac,
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct ieee80211_chanctx_conf *ctx = priv;
+
+- if (ctx != mvif->ctx)
++ if (ctx != mvif->mt76.ctx)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+@@ -1298,7 +1298,7 @@ static void mt7921_mgd_prepare_tx(struct ieee80211_hw *hw,
+ jiffies_to_msecs(HZ);
+
+ mt792x_mutex_acquire(dev);
+- mt7921_set_roc(mvif->phy, mvif, mvif->ctx->def.chan, duration,
++ mt7921_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration,
+ MT7921_ROC_REQ_JOIN);
+ mt792x_mutex_release(dev);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+index 3dda84a937175..f04e7095e1810 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+@@ -17,6 +17,8 @@ static const struct pci_device_id mt7921_pci_device_table[] = {
+ .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922),
+ .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
++ { PCI_DEVICE(PCI_VENDOR_ID_ITTIM, 0x7922),
++ .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608),
+ .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+index e7a995e7e70a3..c866144ff0613 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+@@ -48,7 +48,7 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ memset(txp, 0, sizeof(struct mt76_connac_hw_txp));
+ mt76_connac_write_hw_txp(mdev, tx_info, txp, id);
+
+- tx_info->skb = DMA_DUMMY_DATA;
++ tx_info->skb = NULL;
+
+ return 0;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
+index 5d5ab8630041b..6c347495e1185 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
++++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
+@@ -91,7 +91,6 @@ struct mt792x_vif {
+ struct ewma_rssi rssi;
+
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+- struct ieee80211_chanctx_conf *ctx;
+ };
+
+ struct mt792x_phy {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+index 46be7f996c7e1..f111c47fdca56 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
++++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+@@ -243,7 +243,7 @@ int mt792x_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+- mvif->ctx = ctx;
++ mvif->mt76.ctx = ctx;
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+@@ -259,7 +259,7 @@ void mt792x_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+- mvif->ctx = NULL;
++ mvif->mt76.ctx = NULL;
+ mutex_unlock(&dev->mt76.mutex);
+ }
+ EXPORT_SYMBOL_GPL(mt792x_unassign_vif_chanctx);
+@@ -358,7 +358,7 @@ void mt792x_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ if (sset != ETH_SS_STATS)
+ return;
+
+- memcpy(data, *mt792x_gstrings_stats, sizeof(mt792x_gstrings_stats));
++ memcpy(data, mt792x_gstrings_stats, sizeof(mt792x_gstrings_stats));
+
+ data += sizeof(mt792x_gstrings_stats);
+ page_pool_ethtool_stats_get_strings(data);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+index 26e03b28935f2..66d8cc0eeabee 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+@@ -733,16 +733,17 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
+
++ val = max_t(u8, sts - 1, 3);
+ eht_cap_elem->phy_cap_info[0] |=
+- u8_encode_bits(u8_get_bits(sts - 1, BIT(0)),
++ u8_encode_bits(u8_get_bits(val, BIT(0)),
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[1] =
+- u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)),
++ u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)),
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
+- u8_encode_bits(sts - 1,
++ u8_encode_bits(val,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) |
+- u8_encode_bits(sts - 1,
++ u8_encode_bits(val,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] =
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+index ac8759febe485..c43839a205088 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+@@ -433,7 +433,9 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
+ case IEEE80211_STA_RX_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
++ /* rxv reports bw 320-1 and 320-2 separately */
+ case IEEE80211_STA_RX_BW_320:
++ case IEEE80211_STA_RX_BW_320 + 1:
+ status->bw = RATE_INFO_BW_320;
+ break;
+ default:
+@@ -991,11 +993,9 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ }
+
+ txp->fw.token = cpu_to_le16(id);
+- if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
+- txp->fw.rept_wds_wcid = cpu_to_le16(wcid->idx);
+- else
+- txp->fw.rept_wds_wcid = cpu_to_le16(0xfff);
+- tx_info->skb = DMA_DUMMY_DATA;
++ txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
++
++ tx_info->skb = NULL;
+
+ /* pass partial skb header to fw */
+ tx_info->buf[1].len = MT_CT_PARSE_LEN;
+@@ -1051,7 +1051,7 @@ mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
+ if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7996_tx_check_aggr(sta, txwi);
+ } else {
+- wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
++ wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
+ }
+
+ __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
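`MT_TXD9_WLAN_IDX` above is defined as `GENMASK(23, 8)` and read back with `le32_get_bits()`, the kernel's declarative bitfield idiom: the mask defines the field once and the accessors derive the shift from the mask's lowest set bit. A self-contained reimplementation of the idea follows; the kernel versions additionally byte-swap `__le32` descriptor words before extracting.

```c
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))
#define FIELD_GET(m, v)  (((v) & (m)) >> __builtin_ctz(m))

#define TXD9_WLAN_IDX GENMASK(23, 8)  /* bits 23:8 of descriptor word 9 */

int main(void)
{
	uint32_t txd9 = FIELD_PREP(TXD9_WLAN_IDX, 0x123);

	printf("txd9=0x%08x wlan_idx=0x%x\n",
	       txd9, FIELD_GET(TXD9_WLAN_IDX, txd9));
	return 0;
}
```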
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+index c3a479dc3f533..620880e560e00 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+@@ -190,7 +190,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
+ mvif->mt76.omac_idx = idx;
+ mvif->phy = phy;
+ mvif->mt76.band_idx = band_idx;
+- mvif->mt76.wmm_idx = band_idx;
++ mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
+
+ ret = mt7996_mcu_add_dev_info(phy, vif, true);
+ if (ret)
+@@ -414,10 +414,16 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const struct ieee80211_tx_queue_params *params)
+ {
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
++ const u8 mq_to_aci[] = {
++ [IEEE80211_AC_VO] = 3,
++ [IEEE80211_AC_VI] = 2,
++ [IEEE80211_AC_BE] = 0,
++ [IEEE80211_AC_BK] = 1,
++ };
+
++ /* firmware uses access class index */
++ mvif->queue_params[mq_to_aci[queue]] = *params;
+ /* no need to update right away, we'll get BSS_CHANGED_QOS */
+- queue = mt76_connac_lmac_mapping(queue);
+- mvif->queue_params[queue] = *params;
+
+ return 0;
+ }
+@@ -618,8 +624,8 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
+ mt7996_mcu_add_beacon(hw, vif, info->enable_beacon);
+ }
+
+- if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP ||
+- changed & BSS_CHANGED_FILS_DISCOVERY)
++ if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
++ BSS_CHANGED_FILS_DISCOVERY))
+ mt7996_mcu_beacon_inband_discov(dev, vif, changed);
+
+ if (changed & BSS_CHANGED_MU_GROUPS)
+@@ -1192,7 +1198,7 @@ void mt7996_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *mt7996_gstrings_stats,
++ memcpy(data, mt7996_gstrings_stats,
+ sizeof(mt7996_gstrings_stats));
+ }
+
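The `mq_to_aci[]` table in the `conf_tx` hunk above exists because mac80211 numbers its queues VO=0, VI=1, BE=2, BK=3, while the mt7996 firmware indexes EDCA parameter sets by 802.11 Access Category Index, where BE=0, BK=1, VI=2, VO=3. Remapping once at the API boundary keeps the rest of the driver in a single numbering. Standalone illustration:

```c
#include <stdio.h>

enum mac80211_ac { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };

static const unsigned char mq_to_aci[NUM_ACS] = {
	[AC_VO] = 3,
	[AC_VI] = 2,
	[AC_BE] = 0,
	[AC_BK] = 1,
};

int main(void)
{
	static const char *name[NUM_ACS] = { "VO", "VI", "BE", "BK" };

	for (int q = 0; q < NUM_ACS; q++)
		printf("mac80211 queue %d (%s) -> ACI %u\n",
		       q, name[q], mq_to_aci[q]);
	return 0;
}
```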
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+index 4a30db49ef33f..7575d3506ea4e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+@@ -2016,7 +2016,7 @@ mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ bcn->bcc_ie_pos = cpu_to_le16(offset - 3);
+ }
+
+- buf = (u8 *)bcn + sizeof(*bcn) - MAX_BEACON_SIZE;
++ buf = (u8 *)bcn + sizeof(*bcn);
+ mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL, 0, 0,
+ BSS_CHANGED_BEACON);
+
+@@ -2034,26 +2034,22 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
+ struct sk_buff *skb, *rskb;
+ struct tlv *tlv;
+ struct bss_bcn_content_tlv *bcn;
++ int len;
+
+ rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+- MT7996_BEACON_UPDATE_SIZE);
++ MT7996_MAX_BSS_OFFLOAD_SIZE);
+ if (IS_ERR(rskb))
+ return PTR_ERR(rskb);
+
+- tlv = mt7996_mcu_add_uni_tlv(rskb,
+- UNI_BSS_INFO_BCN_CONTENT, sizeof(*bcn));
+- bcn = (struct bss_bcn_content_tlv *)tlv;
+- bcn->enable = en;
+-
+- if (!en)
+- goto out;
+-
+ skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+- if (!skb)
++ if (!skb) {
++ dev_kfree_skb(rskb);
+ return -EINVAL;
++ }
+
+- if (skb->len > MAX_BEACON_SIZE - MT_TXD_SIZE) {
++ if (skb->len > MT7996_MAX_BEACON_SIZE) {
+ dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
++ dev_kfree_skb(rskb);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -2061,11 +2057,18 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
+
++ len = sizeof(*bcn) + MT_TXD_SIZE + skb->len;
++ tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len);
++ bcn = (struct bss_bcn_content_tlv *)tlv;
++ bcn->enable = en;
++ if (!en)
++ goto out;
++
+ mt7996_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
+ /* TODO: subtag - 11v MBSSID */
+ mt7996_mcu_beacon_cntdwn(vif, rskb, skb, &offs);
+- dev_kfree_skb(skb);
+ out:
++ dev_kfree_skb(skb);
+ return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
+ MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
+ }
+@@ -2086,9 +2089,13 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ struct sk_buff *rskb, *skb = NULL;
+ struct tlv *tlv;
+ u8 *buf, interval;
++ int len;
++
++ if (vif->bss_conf.nontransmitted)
++ return 0;
+
+ rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+- MT7996_INBAND_FRAME_SIZE);
++ MT7996_MAX_BSS_OFFLOAD_SIZE);
+ if (IS_ERR(rskb))
+ return PTR_ERR(rskb);
+
+@@ -2102,11 +2109,14 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
+ }
+
+- if (!skb)
++ if (!skb) {
++ dev_kfree_skb(rskb);
+ return -EINVAL;
++ }
+
+- if (skb->len > MAX_INBAND_FRAME_SIZE - MT_TXD_SIZE) {
++ if (skb->len > MT7996_MAX_BEACON_SIZE) {
+ dev_err(dev->mt76.dev, "inband discovery size limit exceed\n");
++ dev_kfree_skb(rskb);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -2116,7 +2126,9 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ info->band = band;
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
+
+- tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, sizeof(*discov));
++ len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
++
++ tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, len);
+
+ discov = (struct bss_inband_discovery_tlv *)tlv;
+ discov->tx_mode = OFFLOAD_TX_MODE_SU;
+@@ -2127,7 +2139,7 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ discov->enable = true;
+ discov->wcid = cpu_to_le16(MT7996_WTBL_RESERVED);
+
+- buf = (u8 *)tlv + sizeof(*discov) - MAX_INBAND_FRAME_SIZE;
++ buf = (u8 *)tlv + sizeof(*discov);
+
+ mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL, 0, 0, changed);
+
+@@ -2679,7 +2691,7 @@ int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif)
+
+ e = (struct edca *)tlv;
+ e->set = WMM_PARAM_SET;
+- e->queue = ac + mvif->mt76.wmm_idx * MT7996_MAX_WMM_SETS;
++ e->queue = ac;
+ e->aifs = q->aifs;
+ e->txop = cpu_to_le16(q->txop);
+
+@@ -2960,10 +2972,10 @@ int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag)
+ .channel_band = ch_band[chandef->chan->band],
+ };
+
+- if (tag == UNI_CHANNEL_RX_PATH ||
+- dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
++ if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
++ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
++ phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
+@@ -3307,8 +3319,8 @@ int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action)
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, action, sizeof(*req_mod_en));
+ req_mod_en = (struct bf_mod_en_ctrl *)tlv;
+- req_mod_en->bf_num = 2;
+- req_mod_en->bf_bitmap = GENMASK(0, 0);
++ req_mod_en->bf_num = 3;
++ req_mod_en->bf_bitmap = GENMASK(2, 0);
+ break;
+ }
+ default:
+@@ -3548,7 +3560,9 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
+ int cmd)
+ {
+ struct {
+- u8 _rsv[4];
++ /* fixed field */
++ u8 bss;
++ u8 _rsv[3];
+
+ __le16 tag;
+ __le16 len;
+@@ -3566,7 +3580,7 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
+ u8 exponent;
+ u8 is_ap;
+ u8 agrt_params;
+- u8 __rsv2[135];
++ u8 __rsv2[23];
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_CMD_TWT_ARGT_UPDATE),
+ .len = cpu_to_le16(sizeof(req) - 4),
+@@ -3576,6 +3590,7 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
+ .flowid = flow->id,
+ .peer_id = cpu_to_le16(flow->wcid),
+ .duration = flow->duration,
++ .bss = mvif->mt76.idx,
+ .bss_idx = mvif->mt76.idx,
+ .start_tsf = cpu_to_le64(flow->tsf),
+ .mantissa = flow->mantissa,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+index 078f828586212..e4b31228ba0d2 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+@@ -270,8 +270,6 @@ struct bss_inband_discovery_tlv {
+ u8 enable;
+ __le16 wcid;
+ __le16 prob_rsp_len;
+-#define MAX_INBAND_FRAME_SIZE 512
+- u8 pkt[MAX_INBAND_FRAME_SIZE];
+ } __packed;
+
+ struct bss_bcn_content_tlv {
+@@ -283,8 +281,6 @@ struct bss_bcn_content_tlv {
+ u8 enable;
+ u8 type;
+ __le16 pkt_len;
+-#define MAX_BEACON_SIZE 512
+- u8 pkt[MAX_BEACON_SIZE];
+ } __packed;
+
+ struct bss_bcn_cntdwn_tlv {
+@@ -591,13 +587,14 @@ enum {
+ sizeof(struct sta_rec_hdr_trans) + \
+ sizeof(struct tlv))
+
++#define MT7996_MAX_BEACON_SIZE 1342
+ #define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \
+ sizeof(struct bss_bcn_content_tlv) + \
++ MT_TXD_SIZE + \
+ sizeof(struct bss_bcn_cntdwn_tlv) + \
+ sizeof(struct bss_bcn_mbss_tlv))
+-
+-#define MT7996_INBAND_FRAME_SIZE (sizeof(struct bss_req_hdr) + \
+- sizeof(struct bss_inband_discovery_tlv))
++#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \
++ MT7996_BEACON_UPDATE_SIZE)
+
+ enum {
+ UNI_BAND_CONFIG_RADIO_ENABLE,
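The mcu.h hunk above removes the fixed `pkt[512]` tails from the beacon and inband-discovery TLVs; together with the mcu.c changes, each TLV is now sized per message as header + TXD + template length, so a large template neither overflows nor pads out a fixed buffer. A minimal sketch of building such a variable-length TLV into a message buffer follows; the tag value is hypothetical, and the 4-byte rounding mirrors what the driver does to its lengths.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {
	uint16_t tag;
	uint16_t len;       /* header + payload, before padding */
	uint8_t payload[];  /* flexible array replaces pkt[512] */
};

static size_t tlv_append(uint8_t *buf, size_t off, uint16_t tag,
			 const void *data, size_t n)
{
	struct tlv *t = (struct tlv *)(buf + off);
	size_t total = sizeof(*t) + n;

	t->tag = tag;
	t->len = (uint16_t)total;
	memcpy(t->payload, data, n);
	return off + ((total + 3) & ~(size_t)3);  /* keep 4-byte alignment */
}

int main(void)
{
	uint8_t msg[256];
	const char beacon[] = "beacon template bytes";

	size_t off = tlv_append(msg, 0, /* hypothetical tag */ 0x11,
				beacon, sizeof(beacon));
	printf("message length: %zu\n", off);
	return 0;
}
```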
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
+index 58bbf50081e47..9eb115c79c90a 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
++++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
+@@ -1492,7 +1492,7 @@ int wilc_wlan_init(struct net_device *dev)
+ }
+
+ if (!wilc->vmm_table)
+- wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
++ wilc->vmm_table = kcalloc(WILC_VMM_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+
+ if (!wilc->vmm_table) {
+ ret = -ENOBUFS;
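The wilc1000 fix above is a units bug: `WILC_VMM_TBL_SIZE` counts `u32` entries, but `kzalloc(WILC_VMM_TBL_SIZE, ...)` allocated that many bytes, undersizing the table fourfold. `kcalloc()` makes the element count and element size explicit, and also checks the multiplication for overflow. The same shape in userspace:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define VMM_TBL_ENTRIES 64  /* stand-in for WILC_VMM_TBL_SIZE */

int main(void)
{
	/* buggy shape: 64 bytes, i.e. room for only 16 u32 slots */
	uint32_t *bad = malloc(VMM_TBL_ENTRIES);

	/* fixed shape: 64 zeroed u32 slots, 256 bytes */
	uint32_t *good = calloc(VMM_TBL_ENTRIES, sizeof(*good));

	printf("bad=%d bytes, good=%zu bytes\n",
	       VMM_TBL_ENTRIES, VMM_TBL_ENTRIES * sizeof(*good));
	free(bad);
	free(good);
	return 0;
}
```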
+diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
+index 94ee831b5de35..506d2f31efb5a 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
++++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
+@@ -666,7 +666,7 @@ static void plfxlc_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *et_strings, sizeof(et_strings));
++ memcpy(data, et_strings, sizeof(et_strings));
+ }
+
+ static void plfxlc_get_et_stats(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+index 6f61d6a106272..5a34894a533be 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+@@ -799,7 +799,7 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ }
+
+ if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ bt_change_edca = true;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+index 0b6a15c2e5ccd..d92aad60edfe9 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+@@ -640,7 +640,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ }
+
+ if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ bt_change_edca = true;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+index 8ada31380efa4..0ff8e355c23a4 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+@@ -466,7 +466,7 @@ static void rtl8723e_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ }
+
+ if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ bt_change_edca = true;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
+index f8ba133baff06..35bc37a3c469d 100644
+--- a/drivers/net/wireless/realtek/rtw88/debug.c
++++ b/drivers/net/wireless/realtek/rtw88/debug.c
+@@ -1233,9 +1233,9 @@ static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = {
+ #define rtw_debugfs_add_core(name, mode, fopname, parent) \
+ do { \
+ rtw_debug_priv_ ##name.rtwdev = rtwdev; \
+- if (!debugfs_create_file(#name, mode, \
++ if (IS_ERR(debugfs_create_file(#name, mode, \
+ parent, &rtw_debug_priv_ ##name,\
+- &file_ops_ ##fopname)) \
++ &file_ops_ ##fopname))) \
+ pr_debug("Unable to initialize debugfs:%s\n", \
+ #name); \
+ } while (0)
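The rtw88 debugfs fix above works because `debugfs_create_file()` never returns NULL on failure; it returns an `ERR_PTR()`-encoded errno, so the old `if (!ptr)` branch was dead code. A sketch of the ERR_PTR convention with local stand-ins for the kernel macros:

```c
#include <stdio.h>

#define MAX_ERRNO    4095
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *fake_create_file(int fail)
{
	static int file;

	return fail ? ERR_PTR(-12) /* -ENOMEM */ : (void *)&file;
}

int main(void)
{
	void *f = fake_create_file(1);

	if (IS_ERR(f))  /* correct: error pointers live in the top 4095 */
		printf("create failed: %ld\n", PTR_ERR(f));
	if (!f)         /* old check: can never fire */
		printf("never reached\n");
	return 0;
}
```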
+diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
+index d879d7e3dc81f..e6ab1ac6d7093 100644
+--- a/drivers/net/wireless/realtek/rtw88/usb.c
++++ b/drivers/net/wireless/realtek/rtw88/usb.c
+@@ -611,8 +611,7 @@ static void rtw_usb_cancel_rx_bufs(struct rtw_usb *rtwusb)
+
+ for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
+ rxcb = &rtwusb->rx_cb[i];
+- if (rxcb->rx_urb)
+- usb_kill_urb(rxcb->rx_urb);
++ usb_kill_urb(rxcb->rx_urb);
+ }
+ }
+
+@@ -623,10 +622,8 @@ static void rtw_usb_free_rx_bufs(struct rtw_usb *rtwusb)
+
+ for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
+ rxcb = &rtwusb->rx_cb[i];
+- if (rxcb->rx_urb) {
+- usb_kill_urb(rxcb->rx_urb);
+- usb_free_urb(rxcb->rx_urb);
+- }
++ usb_kill_urb(rxcb->rx_urb);
++ usb_free_urb(rxcb->rx_urb);
+ }
+ }
+
+diff --git a/drivers/net/wireless/silabs/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c
+index 6a5e52a96d183..caa22226b01bc 100644
+--- a/drivers/net/wireless/silabs/wfx/data_tx.c
++++ b/drivers/net/wireless/silabs/wfx/data_tx.c
+@@ -226,53 +226,40 @@ static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+
+ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
+ {
+- int i;
+- bool finished;
++ bool has_rate0 = false;
++ int i, j;
+
+- /* Firmware is not able to mix rates with different flags */
+- for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+- if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+- rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
+- if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI))
++ for (i = 1, j = 1; j < IEEE80211_TX_MAX_RATES; j++) {
++ if (rates[j].idx == -1)
++ break;
++		/* The device uses the rates in descending order, whatever
++		 * minstrel requests. We have to trade off here: most important
++		 * is to respect the primary rate requested by minstrel, so we
++		 * drop the entries whose rate is higher than the previous one.
++		 */
++ if (rates[j].idx >= rates[i - 1].idx) {
++ rates[i - 1].count += rates[j].count;
++ rates[i - 1].count = min_t(u16, 15, rates[i - 1].count);
++ } else {
++ memcpy(rates + i, rates + j, sizeof(rates[i]));
++ if (rates[i].idx == 0)
++ has_rate0 = true;
++			/* The device applies Short GI only on the first rate */
+ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+- if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS))
+- rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
+- }
+-
+- /* Sort rates and remove duplicates */
+- do {
+- finished = true;
+- for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
+- if (rates[i + 1].idx == rates[i].idx &&
+- rates[i].idx != -1) {
+- rates[i].count += rates[i + 1].count;
+- if (rates[i].count > 15)
+- rates[i].count = 15;
+- rates[i + 1].idx = -1;
+- rates[i + 1].count = 0;
+-
+- finished = false;
+- }
+- if (rates[i + 1].idx > rates[i].idx) {
+- swap(rates[i + 1], rates[i]);
+- finished = false;
+- }
++ i++;
+ }
+- } while (!finished);
++ }
+ /* Ensure that MCS0 or 1Mbps is present at the end of the retry list */
+- for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+- if (rates[i].idx == 0)
+- break;
+- if (rates[i].idx == -1) {
+- rates[i].idx = 0;
+- rates[i].count = 8; /* == hw->max_rate_tries */
+- rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS;
+- break;
+- }
++ if (!has_rate0 && i < IEEE80211_TX_MAX_RATES) {
++ rates[i].idx = 0;
++ rates[i].count = 8; /* == hw->max_rate_tries */
++ rates[i].flags = rates[0].flags & IEEE80211_TX_RC_MCS;
++ i++;
++ }
++ for (; i < IEEE80211_TX_MAX_RATES; i++) {
++ memset(rates + i, 0, sizeof(rates[i]));
++ rates[i].idx = -1;
+ }
+- /* All retries use long GI */
+- for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+- rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+ }
+
+ static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info)
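The rewrite above replaces the bubble-sort-style rate fixup with a single compacting pass: entries that are not strictly below their predecessor get merged into it (count capped at 15), a rate-0 fallback is appended if none survived, and the tail is padded with `idx = -1`. A standalone sketch of the compaction, with the flag handling omitted for brevity:

```c
#include <stdio.h>

#define MAX_RATES 4

struct tx_rate { signed char idx; unsigned char count; };

static void fixup_rates(struct tx_rate *r)
{
	int i = 1, has_rate0 = (r[0].idx == 0);

	for (int j = 1; j < MAX_RATES && r[j].idx != -1; j++) {
		if (r[j].idx >= r[i - 1].idx) {
			/* not strictly descending: fold into predecessor */
			unsigned int c = r[i - 1].count + r[j].count;

			r[i - 1].count = c > 15 ? 15 : c;
		} else {
			r[i] = r[j];
			if (r[i].idx == 0)
				has_rate0 = 1;
			i++;
		}
	}
	if (!has_rate0 && i < MAX_RATES) {
		r[i].idx = 0;   /* ensure a lowest-rate fallback */
		r[i].count = 8;
		i++;
	}
	for (; i < MAX_RATES; i++) {
		r[i].idx = -1;  /* pad the unused tail */
		r[i].count = 0;
	}
}

int main(void)
{
	struct tx_rate r[MAX_RATES] = { {7, 3}, {7, 2}, {5, 2}, {-1, 0} };

	fixup_rates(r);
	for (int i = 0; i < MAX_RATES; i++)
		printf("rate[%d]: idx=%d count=%d\n", i, r[i].idx, r[i].count);
	return 0;
}
```

With the sample input, the two idx=7 entries merge into one with count 5, idx=5 is kept, and a rate-0 fallback with count 8 is appended.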
+diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
+index 1f524030b186e..f5a0880da3fcc 100644
+--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
++++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
+@@ -3170,7 +3170,7 @@ static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *mac80211_hwsim_gstrings_stats,
++ memcpy(data, mac80211_hwsim_gstrings_stats,
+ sizeof(mac80211_hwsim_gstrings_stats));
+ }
+
+diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
+index 1b9f5b8a6167e..d3fca0ab62900 100644
+--- a/drivers/nvdimm/of_pmem.c
++++ b/drivers/nvdimm/of_pmem.c
+@@ -30,7 +30,13 @@ static int of_pmem_region_probe(struct platform_device *pdev)
+ if (!priv)
+ return -ENOMEM;
+
+- priv->bus_desc.provider_name = kstrdup(pdev->name, GFP_KERNEL);
++ priv->bus_desc.provider_name = devm_kstrdup(&pdev->dev, pdev->name,
++ GFP_KERNEL);
++ if (!priv->bus_desc.provider_name) {
++ kfree(priv);
++ return -ENOMEM;
++ }
++
+ priv->bus_desc.module = THIS_MODULE;
+ priv->bus_desc.of_node = np;
+
+diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
+index 0a81f87f6f6c0..e2f1fb99707fc 100644
+--- a/drivers/nvdimm/region_devs.c
++++ b/drivers/nvdimm/region_devs.c
+@@ -939,7 +939,8 @@ unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
+ {
+ unsigned int cpu, lane;
+
+- cpu = get_cpu();
++ migrate_disable();
++ cpu = smp_processor_id();
+ if (nd_region->num_lanes < nr_cpu_ids) {
+ struct nd_percpu_lane *ndl_lock, *ndl_count;
+
+@@ -958,16 +959,15 @@ EXPORT_SYMBOL(nd_region_acquire_lane);
+ void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
+ {
+ if (nd_region->num_lanes < nr_cpu_ids) {
+- unsigned int cpu = get_cpu();
++ unsigned int cpu = smp_processor_id();
+ struct nd_percpu_lane *ndl_lock, *ndl_count;
+
+ ndl_count = per_cpu_ptr(nd_region->lane, cpu);
+ ndl_lock = per_cpu_ptr(nd_region->lane, lane);
+ if (--ndl_count->count == 0)
+ spin_unlock(&ndl_lock->lock);
+- put_cpu();
+ }
+- put_cpu();
++ migrate_enable();
+ }
+ EXPORT_SYMBOL(nd_region_release_lane);
+
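The nvdimm change above swaps `get_cpu()`/`put_cpu()` (which disable preemption) for `migrate_disable()` plus `smp_processor_id()`: the task stays pinned to its CPU, so the per-CPU lane choice remains stable, but preemption stays enabled, which matters on PREEMPT_RT where the lane spinlock may sleep. A sketch of the acquire/release shape with stubbed-out primitives; the stubs only mark where the kernel calls go and are not real implementations.

```c
#include <stdio.h>

/* Stubs standing in for kernel primitives, for illustration only. */
static void migrate_disable(void)  { /* pin task to current CPU */ }
static void migrate_enable(void)   { /* allow migration again   */ }
static int  smp_processor_id(void) { return 0; }
static void spin_lock(int lane)    { printf("lock lane %d\n", lane); }
static void spin_unlock(int lane)  { printf("unlock lane %d\n", lane); }

#define NUM_LANES 2  /* fewer lanes than CPUs forces sharing */

static unsigned int acquire_lane(void)
{
	unsigned int lane;

	migrate_disable();              /* was: cpu = get_cpu() */
	lane = smp_processor_id() % NUM_LANES;
	spin_lock(lane);
	return lane;
}

static void release_lane(unsigned int lane)
{
	spin_unlock(lane);
	migrate_enable();               /* was: put_cpu() */
}

int main(void)
{
	unsigned int lane = acquire_lane();

	release_lane(lane);
	return 0;
}
```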
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 21783aa2ee8e1..c09048984a277 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2026,6 +2026,13 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
+ if (ret)
+ return ret;
+
++ if (id->ncap == 0) {
++ /* namespace not allocated or attached */
++ info->is_removed = true;
++ ret = -ENODEV;
++ goto error;
++ }
++
+ blk_mq_freeze_queue(ns->disk->queue);
+ lbaf = nvme_lbaf_index(id->flbas);
+ ns->lba_shift = id->lbaf[lbaf].ds;
+@@ -2083,6 +2090,8 @@ out:
+ set_bit(NVME_NS_READY, &ns->flags);
+ ret = 0;
+ }
++
++error:
+ kfree(id);
+ return ret;
+ }
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index 8175d49f29090..92ba315cfe19e 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -645,8 +645,10 @@ static const match_table_t opt_tokens = {
+ { NVMF_OPT_TOS, "tos=%d" },
+ { NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
+ { NVMF_OPT_DISCOVERY, "discovery" },
++#ifdef CONFIG_NVME_HOST_AUTH
+ { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
+ { NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
++#endif
+ { NVMF_OPT_ERR, NULL }
+ };
+
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index 747c879e8982b..529b9954d2b8c 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -510,10 +510,13 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+
+ req->bio = pdu->bio;
+- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
++ if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
+ pdu->nvme_status = -EINTR;
+- else
++ } else {
+ pdu->nvme_status = nvme_req(req)->status;
++ if (!pdu->nvme_status)
++ pdu->nvme_status = blk_status_to_errno(err);
++ }
+ pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
+
+ /*
+diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
+index 43b5bd8bb6a52..d8da840a1c0ed 100644
+--- a/drivers/nvme/target/fabrics-cmd.c
++++ b/drivers/nvme/target/fabrics-cmd.c
+@@ -244,6 +244,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
+ goto out;
+ }
+
++ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
++ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
+ le32_to_cpu(c->kato), &ctrl);
+ if (status)
+@@ -313,6 +315,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
+ goto out;
+ }
+
++ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
++ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
+ le16_to_cpu(d->cntlid), req);
+ if (!ctrl) {
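Both connect handlers above now force a NUL into the last byte of the NQN fields before using them: the payload arrives off the wire as fixed-width byte arrays with no guaranteed terminator, so any later `strlen()`/`strcmp()` could have run past the buffer. Minimal demonstration of the defensive termination:

```c
#include <stdio.h>
#include <string.h>

#define NQN_FIELD_LEN 16  /* stand-in for the real NVMF_NQN_FIELD_LEN */

struct connect_data {
	char subsysnqn[NQN_FIELD_LEN];
	char hostnqn[NQN_FIELD_LEN];
};

int main(void)
{
	struct connect_data d;

	/* hostile input: fields completely filled, no terminator */
	memset(d.subsysnqn, 'A', sizeof(d.subsysnqn));
	memset(d.hostnqn, 'B', sizeof(d.hostnqn));

	d.subsysnqn[NQN_FIELD_LEN - 1] = '\0';
	d.hostnqn[NQN_FIELD_LEN - 1] = '\0';

	/* string functions are now bounded inside the buffers */
	printf("subsys len=%zu, host len=%zu\n",
	       strlen(d.subsysnqn), strlen(d.hostnqn));
	return 0;
}
```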
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index e692809ff8227..3219c51777507 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -100,6 +100,32 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
+ return IORESOURCE_MEM;
+ }
+
++static u64 of_bus_default_flags_map(__be32 *addr, const __be32 *range, int na,
++ int ns, int pna)
++{
++ u64 cp, s, da;
++
++ /* Check that flags match */
++ if (*addr != *range)
++ return OF_BAD_ADDR;
++
++ /* Read address values, skipping high cell */
++ cp = of_read_number(range + 1, na - 1);
++ s = of_read_number(range + na + pna, ns);
++ da = of_read_number(addr + 1, na - 1);
++
++ pr_debug("default flags map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);
++
++ if (da < cp || da >= (cp + s))
++ return OF_BAD_ADDR;
++ return da - cp;
++}
++
++static int of_bus_default_flags_translate(__be32 *addr, u64 offset, int na)
++{
++ /* Keep "flags" part (high cell) in translated address */
++ return of_bus_default_translate(addr + 1, offset, na - 1);
++}
+
+ #ifdef CONFIG_PCI
+ static unsigned int of_bus_pci_get_flags(const __be32 *addr)
+@@ -374,8 +400,8 @@ static struct of_bus of_busses[] = {
+ .addresses = "reg",
+ .match = of_bus_default_flags_match,
+ .count_cells = of_bus_default_count_cells,
+- .map = of_bus_default_map,
+- .translate = of_bus_default_translate,
++ .map = of_bus_default_flags_map,
++ .translate = of_bus_default_flags_translate,
+ .has_flags = true,
+ .get_flags = of_bus_default_flags_get_flags,
+ },
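The new `of_bus_default_flags_map()` above treats the first cell of a three-cell address as a flags word that must match the range exactly, then does the usual containment-and-offset arithmetic on the remaining cells; the translate hook preserves that flags cell in the output. A plain-integer sketch of the map step (the real code reads big-endian cells with `of_read_number()`):

```c
#include <stdint.h>
#include <stdio.h>

#define BAD_ADDR UINT64_MAX

struct range {
	uint32_t flags;
	uint64_t child_base;   /* cp */
	uint64_t parent_base;
	uint64_t size;         /* s  */
};

static uint64_t flags_map(uint32_t flags, uint64_t da, const struct range *r)
{
	if (flags != r->flags)  /* the flags cell must match exactly */
		return BAD_ADDR;
	if (da < r->child_base || da >= r->child_base + r->size)
		return BAD_ADDR;
	return da - r->child_base;  /* offset into the range */
}

int main(void)
{
	struct range r = { .flags = 0x42, .child_base = 0x1000,
			   .parent_base = 0x80001000, .size = 0x1000 };
	uint64_t off = flags_map(0x42, 0x1800, &r);

	if (off != BAD_ADDR)
		printf("translated: 0x%llx\n",
		       (unsigned long long)(r.parent_base + off));
	return 0;
}
```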
+diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
+index 6f5e5f0230d39..332bcc0053a5e 100644
+--- a/drivers/parisc/power.c
++++ b/drivers/parisc/power.c
+@@ -197,6 +197,14 @@ static struct notifier_block parisc_panic_block = {
+ .priority = INT_MAX,
+ };
+
++/* qemu soft power-off function */
++static int qemu_power_off(struct sys_off_data *data)
++{
++ /* this turns the system off via SeaBIOS */
++ gsc_writel(0, (unsigned long) data->cb_data);
++ pdc_soft_power_button(1);
++ return NOTIFY_DONE;
++}
+
+ static int __init power_init(void)
+ {
+@@ -226,7 +234,13 @@ static int __init power_init(void)
+ soft_power_reg);
+ }
+
+- power_task = kthread_run(kpowerswd, (void*)soft_power_reg, KTHREAD_NAME);
++ power_task = NULL;
++ if (running_on_qemu && soft_power_reg)
++ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_DEFAULT,
++ qemu_power_off, (void *)soft_power_reg);
++ else
++ power_task = kthread_run(kpowerswd, (void*)soft_power_reg,
++ KTHREAD_NAME);
+ if (IS_ERR(power_task)) {
+ printk(KERN_ERR DRIVER_NAME ": thread creation failed. Driver not loaded.\n");
+ pdc_soft_power_button(0);
+diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
+index 6319082301d68..c6bede3469320 100644
+--- a/drivers/pci/controller/dwc/pci-exynos.c
++++ b/drivers/pci/controller/dwc/pci-exynos.c
+@@ -375,7 +375,7 @@ fail_probe:
+ return ret;
+ }
+
+-static int __exit exynos_pcie_remove(struct platform_device *pdev)
++static int exynos_pcie_remove(struct platform_device *pdev)
+ {
+ struct exynos_pcie *ep = platform_get_drvdata(pdev);
+
+@@ -431,7 +431,7 @@ static const struct of_device_id exynos_pcie_of_match[] = {
+
+ static struct platform_driver exynos_pcie_driver = {
+ .probe = exynos_pcie_probe,
+- .remove = __exit_p(exynos_pcie_remove),
++ .remove = exynos_pcie_remove,
+ .driver = {
+ .name = "exynos-pcie",
+ .of_match_table = exynos_pcie_of_match,
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index 49aea6ce3e878..0def919f89faf 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -1100,7 +1100,7 @@ static const struct of_device_id ks_pcie_of_match[] = {
+ { },
+ };
+
+-static int __init ks_pcie_probe(struct platform_device *pdev)
++static int ks_pcie_probe(struct platform_device *pdev)
+ {
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
+@@ -1302,7 +1302,7 @@ err_link:
+ return ret;
+ }
+
+-static int __exit ks_pcie_remove(struct platform_device *pdev)
++static int ks_pcie_remove(struct platform_device *pdev)
+ {
+ struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+ struct device_link **link = ks_pcie->link;
+@@ -1318,9 +1318,9 @@ static int __exit ks_pcie_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-static struct platform_driver ks_pcie_driver __refdata = {
++static struct platform_driver ks_pcie_driver = {
+ .probe = ks_pcie_probe,
+- .remove = __exit_p(ks_pcie_remove),
++ .remove = ks_pcie_remove,
+ .driver = {
+ .name = "keystone-pcie",
+ .of_match_table = ks_pcie_of_match,
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index 1c1c7348972b0..2b60d20dfdf59 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -732,6 +732,53 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+
+ }
+
++static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
++{
++ u32 lnkcap, lwsc, plc;
++ u8 cap;
++
++ if (!num_lanes)
++ return;
++
++ /* Set the number of lanes */
++ plc = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
++ plc &= ~PORT_LINK_FAST_LINK_MODE;
++ plc &= ~PORT_LINK_MODE_MASK;
++
++ /* Set link width speed control register */
++ lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
++ lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
++ switch (num_lanes) {
++ case 1:
++ plc |= PORT_LINK_MODE_1_LANES;
++ lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
++ break;
++ case 2:
++ plc |= PORT_LINK_MODE_2_LANES;
++ lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES;
++ break;
++ case 4:
++ plc |= PORT_LINK_MODE_4_LANES;
++ lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES;
++ break;
++ case 8:
++ plc |= PORT_LINK_MODE_8_LANES;
++ lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES;
++ break;
++ default:
++ dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
++ return;
++ }
++ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, plc);
++ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, lwsc);
++
++ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
++ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
++ lnkcap &= ~PCI_EXP_LNKCAP_MLW;
++ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, num_lanes);
++ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
++}
++
+ void dw_pcie_iatu_detect(struct dw_pcie *pci)
+ {
+ int max_region, ob, ib;
+@@ -1013,49 +1060,5 @@ void dw_pcie_setup(struct dw_pcie *pci)
+ val |= PORT_LINK_DLL_LINK_EN;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+- if (!pci->num_lanes) {
+- dev_dbg(pci->dev, "Using h/w default number of lanes\n");
+- return;
+- }
+-
+- /* Set the number of lanes */
+- val &= ~PORT_LINK_FAST_LINK_MODE;
+- val &= ~PORT_LINK_MODE_MASK;
+- switch (pci->num_lanes) {
+- case 1:
+- val |= PORT_LINK_MODE_1_LANES;
+- break;
+- case 2:
+- val |= PORT_LINK_MODE_2_LANES;
+- break;
+- case 4:
+- val |= PORT_LINK_MODE_4_LANES;
+- break;
+- case 8:
+- val |= PORT_LINK_MODE_8_LANES;
+- break;
+- default:
+- dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
+- return;
+- }
+- dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+-
+- /* Set link width speed control register */
+- val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+- val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+- switch (pci->num_lanes) {
+- case 1:
+- val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+- break;
+- case 2:
+- val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+- break;
+- case 4:
+- val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+- break;
+- case 8:
+- val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
+- break;
+- }
+- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
++ dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
+ }
+diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
+index d93bc29069502..2ee146767971c 100644
+--- a/drivers/pci/controller/dwc/pcie-kirin.c
++++ b/drivers/pci/controller/dwc/pcie-kirin.c
+@@ -741,7 +741,7 @@ err:
+ return ret;
+ }
+
+-static int __exit kirin_pcie_remove(struct platform_device *pdev)
++static int kirin_pcie_remove(struct platform_device *pdev)
+ {
+ struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);
+
+@@ -818,7 +818,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
+
+ static struct platform_driver kirin_pcie_driver = {
+ .probe = kirin_pcie_probe,
+- .remove = __exit_p(kirin_pcie_remove),
++ .remove = kirin_pcie_remove,
+ .driver = {
+ .name = "kirin-pcie",
+ .of_match_table = kirin_pcie_match,
+diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+index 8bd8107690a6c..9b62ee6992f0e 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
++++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+@@ -123,6 +123,7 @@
+
+ /* ELBI registers */
+ #define ELBI_SYS_STTS 0x08
++#define ELBI_CS2_ENABLE 0xa4
+
+ /* DBI registers */
+ #define DBI_CON_STATUS 0x44
+@@ -263,6 +264,21 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
+ disable_irq(pcie_ep->perst_irq);
+ }
+
++static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
++ u32 reg, size_t size, u32 val)
++{
++ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
++ int ret;
++
++ writel(1, pcie_ep->elbi + ELBI_CS2_ENABLE);
++
++ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
++ if (ret)
++ dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
++
++ writel(0, pcie_ep->elbi + ELBI_CS2_ENABLE);
++}
++
+ static void qcom_pcie_ep_icc_update(struct qcom_pcie_ep *pcie_ep)
+ {
+ struct dw_pcie *pci = &pcie_ep->pci;
+@@ -519,6 +535,7 @@ static const struct dw_pcie_ops pci_ops = {
+ .link_up = qcom_pcie_dw_link_up,
+ .start_link = qcom_pcie_dw_start_link,
+ .stop_link = qcom_pcie_dw_stop_link,
++ .write_dbi2 = qcom_pcie_dw_write_dbi2,
+ };
+
+ static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 4bba31502ce1d..248cd9347e8fd 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -9,6 +9,7 @@
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/debugfs.h>
+ #include <linux/delay.h>
+@@ -346,8 +347,7 @@ static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
+ */
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+ if (val & PCI_EXP_LNKSTA_LBMS) {
+- current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
+- PCI_EXP_LNKSTA_NLW_SHIFT;
++ current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+ if (pcie->init_link_width > current_link_width) {
+ dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+@@ -760,8 +760,7 @@ static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
+
+ val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+- pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
+- PCI_EXP_LNKSTA_NLW_SHIFT;
++ pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
+
+ val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL);
+@@ -920,7 +919,7 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
+ /* Configure Max lane width from DT */
+ val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_MLW;
+- val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
++ val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, pcie->num_lanes);
+ dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
+
+ /* Clear Slot Clock Configuration bit if SRNS configuration */
+diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
+index 60810a1fbfb75..29fe09c99e7d9 100644
+--- a/drivers/pci/controller/pci-mvebu.c
++++ b/drivers/pci/controller/pci-mvebu.c
+@@ -264,7 +264,7 @@ static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
+ */
+ lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
+ lnkcap &= ~PCI_EXP_LNKCAP_MLW;
+- lnkcap |= (port->is_x4 ? 4 : 1) << 4;
++ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, port->is_x4 ? 4 : 1);
+ mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
+
+ /* Disable Root Bridge I/O space, memory space and bus mastering. */
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index ad56df98b8e63..1c1c1aa940a51 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -525,8 +525,7 @@ static void vmd_domain_reset(struct vmd_dev *vmd)
+ base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
+ PCI_DEVFN(dev, 0), 0);
+
+- hdr_type = readb(base + PCI_HEADER_TYPE) &
+- PCI_HEADER_TYPE_MASK;
++ hdr_type = readb(base + PCI_HEADER_TYPE);
+
+ functions = (hdr_type & 0x80) ? 8 : 1;
+ for (fn = 0; fn < functions; fn++) {
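
The vmd change stops pre-masking the header type: PCI_HEADER_TYPE_MASK is 0x7f, so the old code cleared the multi-function bit (bit 7) immediately before testing it, and functions could never evaluate to 8. The test in isolation:

    #include <linux/pci.h>  /* PCI_HEADER_TYPE_MASK == 0x7f */

    /* Bit 7 of the Header Type register flags a multi-function device. */
    static unsigned int function_count(u8 hdr_type)
    {
            /* Masking with PCI_HEADER_TYPE_MASK first would clear exactly
             * the bit tested here, forcing the result to 1. */
            return (hdr_type & 0x80) ? 8 : 1;
    }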
+diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
+index 5a4a8b0be6262..a7d3a92391a41 100644
+--- a/drivers/pci/endpoint/pci-epc-core.c
++++ b/drivers/pci/endpoint/pci-epc-core.c
+@@ -869,7 +869,6 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+
+ put_dev:
+ put_device(&epc->dev);
+- kfree(epc);
+
+ err_ret:
+ return ERR_PTR(ret);
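
The pci-epc-core hunk removes a double free: when put_device() drops the last reference, the epc device's release callback already frees the object, so the kfree() that followed freed it a second time. The general ownership rule, sketched with hypothetical types:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct foo {
            struct device dev;
    };

    /* The device core calls this when the last reference is dropped. */
    static void foo_release(struct device *dev)
    {
            kfree(container_of(dev, struct foo, dev));
    }

    static void foo_error_path(struct foo *foo)
    {
            /* Assumes foo->dev.release == foo_release was set before
             * device_initialize(). */
            put_device(&foo->dev);  /* last ref: foo_release() frees foo */
            /* A kfree(foo) here would be a double free. */
    }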
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index a05350a4e49cb..05b7357bd2586 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -911,7 +911,7 @@ pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
+ {
+ int acpi_state, d_max;
+
+- if (pdev->no_d3cold)
++ if (pdev->no_d3cold || !pdev->d3cold_allowed)
+ d_max = ACPI_STATE_D3_HOT;
+ else
+ d_max = ACPI_STATE_D3_COLD;
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index d9eede2dbc0e1..3317b93547167 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -12,7 +12,7 @@
+ * Modeled after usb's driverfs.c
+ */
+
+-
++#include <linux/bitfield.h>
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/pci.h>
+@@ -230,8 +230,7 @@ static ssize_t current_link_width_show(struct device *dev,
+ if (err)
+ return -EINVAL;
+
+- return sysfs_emit(buf, "%u\n",
+- (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
++ return sysfs_emit(buf, "%u\n", FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat));
+ }
+ static DEVICE_ATTR_RO(current_link_width);
+
+@@ -530,10 +529,7 @@ static ssize_t d3cold_allowed_store(struct device *dev,
+ return -EINVAL;
+
+ pdev->d3cold_allowed = !!val;
+- if (pdev->d3cold_allowed)
+- pci_d3cold_enable(pdev);
+- else
+- pci_d3cold_disable(pdev);
++ pci_bridge_d3_update(pdev);
+
+ pm_runtime_resume(dev);
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 59c01d68c6d5e..a607f277ccf10 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -732,15 +732,18 @@ u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
+ {
+ u16 vsec = 0;
+ u32 header;
++ int ret;
+
+ if (vendor != dev->vendor)
+ return 0;
+
+ while ((vsec = pci_find_next_ext_capability(dev, vsec,
+ PCI_EXT_CAP_ID_VNDR))) {
+- if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
+- &header) == PCIBIOS_SUCCESSFUL &&
+- PCI_VNDR_HEADER_ID(header) == cap)
++ ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
++ if (ret != PCIBIOS_SUCCESSFUL)
++ continue;
++
++ if (PCI_VNDR_HEADER_ID(header) == cap)
+ return vsec;
+ }
+
+@@ -3752,14 +3755,14 @@ u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
+ return 0;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
+- cap &= PCI_REBAR_CAP_SIZES;
++ cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
+
+ /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
+ if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
+- bar == 0 && cap == 0x7000)
+- cap = 0x3f000;
++ bar == 0 && cap == 0x700)
++ return 0x3f00;
+
+- return cap >> 4;
++ return cap;
+ }
+ EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
+
+@@ -6257,8 +6260,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
+ pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+
+ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+- next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+- PCI_EXP_LNKSTA_NLW_SHIFT;
++ next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
+
+ next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
+
+@@ -6330,7 +6332,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
+
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+ if (lnkcap)
+- return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
++ return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
+
+ return PCIE_LNK_WIDTH_UNKNOWN;
+ }
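
In pci_rebar_get_possible_sizes(), FIELD_GET() now performs the ">> 4" that used to be explicit at the return, so the Sapphire RX 5600 XT quirk constants are pre-shifted by one nibble (0x7000 becomes 0x700, and the 0x3f000 override becomes 0x3f00). A small sketch showing the two forms agree:

    #include <linux/bitfield.h>
    #include <linux/pci.h>  /* PCI_REBAR_CAP_SIZES == 0xfffffff0, bits 31:4 */

    static u32 rebar_sizes_demo(u32 cap)
    {
            u32 old_style = (cap & PCI_REBAR_CAP_SIZES) >> 4;
            u32 new_style = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);

            WARN_ON(old_style != new_style);        /* always identical */
            return new_style;       /* bit n set => 2^(n + 20) bytes */
    }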
+diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
+index 9c8fd69ae5ad8..40d84cb0c601e 100644
+--- a/drivers/pci/pcie/aer.c
++++ b/drivers/pci/pcie/aer.c
+@@ -29,6 +29,7 @@
+ #include <linux/kfifo.h>
+ #include <linux/slab.h>
+ #include <acpi/apei.h>
++#include <acpi/ghes.h>
+ #include <ras/ras_event.h>
+
+ #include "../pci.h"
+@@ -997,6 +998,15 @@ static void aer_recover_work_func(struct work_struct *work)
+ continue;
+ }
+ cper_print_aer(pdev, entry.severity, entry.regs);
++ /*
++ * The aer_capability_regs pointed to by entry.regs was allocated from
++ * the ghes_estatus_pool so that it is not overwritten while multiple
++ * sections of the error status are processed. Free it here once the
++ * data has been consumed.
++ */
++ ghes_estatus_pool_region_free((unsigned long)entry.regs,
++ sizeof(struct aer_capability_regs));
++
+ if (entry.severity == AER_NONFATAL)
+ pcie_do_recovery(pdev, pci_channel_io_normal,
+ aer_root_reset);
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 1bf6300592644..fc18e42f0a6ed 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -1059,7 +1059,8 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+ if (state & PCIE_LINK_STATE_L0S)
+ link->aspm_disable |= ASPM_STATE_L0S;
+ if (state & PCIE_LINK_STATE_L1)
+- link->aspm_disable |= ASPM_STATE_L1;
++ /* L1 PM substates require L1 */
++ link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
+ if (state & PCIE_LINK_STATE_L1_1)
+ link->aspm_disable |= ASPM_STATE_L1_1;
+ if (state & PCIE_LINK_STATE_L1_2)
+@@ -1247,6 +1248,8 @@ static ssize_t aspm_attr_store_common(struct device *dev,
+ link->aspm_disable &= ~ASPM_STATE_L1;
+ } else {
+ link->aspm_disable |= state;
++ if (state & ASPM_STATE_L1)
++ link->aspm_disable |= ASPM_STATE_L1SS;
+ }
+
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 795534589b985..43159965e09e9 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1652,15 +1652,15 @@ static void pci_set_removable(struct pci_dev *dev)
+ static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
+ {
+ #ifdef CONFIG_PCI_QUIRKS
+- int pos;
++ int pos, ret;
+ u32 header, tmp;
+
+ pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
+
+ for (pos = PCI_CFG_SPACE_SIZE;
+ pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
+- if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
+- || header != tmp)
++ ret = pci_read_config_dword(dev, pos, &tmp);
++ if ((ret != PCIBIOS_SUCCESSFUL) || (header != tmp))
+ return false;
+ }
+
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index eeec1d6f90238..ae95d09507722 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -690,7 +690,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
+ /*
+ * In the AMD NL platform, this device ([1022:7912]) has a class code of
+ * PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
+- * claim it.
++ * claim it. The same applies to the VanGogh platform device ([1022:163a]).
+ *
+ * But the dwc3 driver is a more specific driver for this device, and we'd
+ * prefer to use it instead of xhci. To prevent xhci from claiming the
+@@ -698,7 +698,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
+ * defines as "USB device (not host controller)". The dwc3 driver can then
+ * claim it based on its Vendor and Device ID.
+ */
+-static void quirk_amd_nl_class(struct pci_dev *pdev)
++static void quirk_amd_dwc_class(struct pci_dev *pdev)
+ {
+ u32 class = pdev->class;
+
+@@ -708,7 +708,9 @@ static void quirk_amd_nl_class(struct pci_dev *pdev)
+ class, pdev->class);
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
+- quirk_amd_nl_class);
++ quirk_amd_dwc_class);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VANGOGH_USB,
++ quirk_amd_dwc_class);
+
+ /*
+ * Synopsys USB 3.x host HAPS platform has a class code of
+@@ -5383,7 +5385,7 @@ int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
+ */
+ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
+ {
+- int pos, i = 0;
++ int pos, i = 0, ret;
+ u8 next_cap;
+ u16 reg16, *cap;
+ struct pci_cap_saved_state *state;
+@@ -5429,8 +5431,8 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
+ pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+
+ pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
+- if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
+- PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
++ ret = pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status);
++ if ((ret != PCIBIOS_SUCCESSFUL) || (PCI_POSSIBLE_ERROR(status)))
+ pdev->cfg_size = PCI_CFG_SPACE_SIZE;
+
+ if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
+@@ -5507,6 +5509,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
+
+ #ifdef CONFIG_PCI_ATS
++static void quirk_no_ats(struct pci_dev *pdev)
++{
++ pci_info(pdev, "disabling ATS\n");
++ pdev->ats_cap = 0;
++}
++
+ /*
+ * Some devices require additional driver setup to enable ATS. Don't use
+ * ATS for those devices as ATS will be enabled before the driver has had a
+@@ -5520,14 +5528,10 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
+ (pdev->subsystem_device == 0xce19 ||
+ pdev->subsystem_device == 0xcc10 ||
+ pdev->subsystem_device == 0xcc08))
+- goto no_ats;
+- else
+- return;
++ quirk_no_ats(pdev);
++ } else {
++ quirk_no_ats(pdev);
+ }
+-
+-no_ats:
+- pci_info(pdev, "disabling ATS\n");
+- pdev->ats_cap = 0;
+ }
+
+ /* AMD Stoney platform GPU */
+@@ -5550,6 +5554,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats);
+ /* AMD Raven platform iGPU */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
++
++/*
++ * Intel IPU E2000 revisions before C0 implement incorrect endianness
++ * in ATS Invalidate Request message body. Disable ATS for those devices.
++ */
++static void quirk_intel_e2000_no_ats(struct pci_dev *pdev)
++{
++ if (pdev->revision < 0x20)
++ quirk_no_ats(pdev);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1451, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1452, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1453, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1454, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1455, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1457, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1459, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145a, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145c, quirk_intel_e2000_no_ats);
+ #endif /* CONFIG_PCI_ATS */
+
+ /* Freescale PCIe doesn't support MSI in RC mode */
+@@ -6188,3 +6211,15 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5020, of_pci_make_dev_node);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5021, of_pci_make_dev_node);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REDHAT, 0x0005, of_pci_make_dev_node);
++
++/*
++ * Devices known to require a longer delay before first config space access
++ * after reset recovery or resume from D3cold:
++ *
++ * VideoPropulsion (aka Genroco) Torrent QN16e MPEG QAM Modulator
++ */
++static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev)
++{
++ pdev->d3cold_delay = 1000;
++}
++DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec);
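
The quirks added above all use the stock fixup machinery: a small function keyed on vendor/device ID, registered for a pass (HEADER, EARLY, FINAL, ...) with a DECLARE_PCI_FIXUP_* macro. A minimal sketch with hypothetical IDs and values:

    #include <linux/pci.h>

    /* Hypothetical 1234:5678 device needing a longer resume delay. */
    static void example_fixup_delay(struct pci_dev *pdev)
    {
            if (pdev->revision < 0x10)      /* optionally gate on stepping */
                    pdev->d3cold_delay = 500;       /* milliseconds */
    }
    DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, example_fixup_delay);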
+diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
+index 5658745c398f5..b33be1e63c98f 100644
+--- a/drivers/pcmcia/cs.c
++++ b/drivers/pcmcia/cs.c
+@@ -605,6 +605,7 @@ static int pccardd(void *__skt)
+ dev_warn(&skt->dev, "PCMCIA: unable to register socket\n");
+ skt->thread = NULL;
+ complete(&skt->thread_done);
++ put_device(&skt->dev);
+ return 0;
+ }
+ ret = pccard_sysfs_add_socket(&skt->dev);
+diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
+index d500e5dbbc3f5..b4b8363d1de21 100644
+--- a/drivers/pcmcia/ds.c
++++ b/drivers/pcmcia/ds.c
+@@ -513,9 +513,6 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
+ /* by default don't allow DMA */
+ p_dev->dma_mask = 0;
+ p_dev->dev.dma_mask = &p_dev->dma_mask;
+- dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
+- if (!dev_name(&p_dev->dev))
+- goto err_free;
+ p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev));
+ if (!p_dev->devname)
+ goto err_free;
+@@ -573,8 +570,15 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
+
+ pcmcia_device_query(p_dev);
+
+- if (device_register(&p_dev->dev))
+- goto err_unreg;
++ dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
++ if (device_register(&p_dev->dev)) {
++ mutex_lock(&s->ops_mutex);
++ list_del(&p_dev->socket_device_list);
++ s->device_count--;
++ mutex_unlock(&s->ops_mutex);
++ put_device(&p_dev->dev);
++ return NULL;
++ }
+
+ return p_dev;
+
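
The ds.c rework follows the documented device_register() contract: once registration has been attempted, error paths must unwind their own bookkeeping and then put_device() the reference, never kfree(), because the device core now owns the name and other resources. Sketch with hypothetical names:

    #include <linux/device.h>

    struct example_dev {
            struct device dev;      /* release() frees the container */
    };

    static struct example_dev *example_add(struct example_dev *p,
                                           int sock, int no)
    {
            dev_set_name(&p->dev, "%d.%d", sock, no);
            if (device_register(&p->dev)) {
                    /* unwind caller-side state (lists, counters), then: */
                    put_device(&p->dev);    /* not kfree(p) */
                    return NULL;
            }
            return p;
    }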
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index 6b50bc5519846..caae2d3e9d3ea 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -112,7 +112,9 @@
+
+ #define CMN_DTM_PMEVCNTSR 0x240
+
+-#define CMN_DTM_UNIT_INFO 0x0910
++#define CMN650_DTM_UNIT_INFO 0x0910
++#define CMN_DTM_UNIT_INFO 0x0960
++#define CMN_DTM_UNIT_INFO_DTC_DOMAIN GENMASK_ULL(1, 0)
+
+ #define CMN_DTM_NUM_COUNTERS 4
+ /* Want more local counters? Why not replicate the whole DTM! Ugh... */
+@@ -2117,6 +2119,16 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
+ return 0;
+ }
+
++static unsigned int arm_cmn_dtc_domain(struct arm_cmn *cmn, void __iomem *xp_region)
++{
++ int offset = CMN_DTM_UNIT_INFO;
++
++ if (cmn->part == PART_CMN650 || cmn->part == PART_CI700)
++ offset = CMN650_DTM_UNIT_INFO;
++
++ return FIELD_GET(CMN_DTM_UNIT_INFO_DTC_DOMAIN, readl_relaxed(xp_region + offset));
++}
++
+ static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node)
+ {
+ int level;
+@@ -2248,7 +2260,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ if (cmn->part == PART_CMN600)
+ xp->dtc = 0xf;
+ else
+- xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO);
++ xp->dtc = 1 << arm_cmn_dtc_domain(cmn, xp_region);
+
+ xp->dtm = dtm - cmn->dtms;
+ arm_cmn_init_dtm(dtm++, xp, 0);
+diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
+index e2b7827c45635..9363c31f31b89 100644
+--- a/drivers/perf/arm_cspmu/arm_cspmu.c
++++ b/drivers/perf/arm_cspmu/arm_cspmu.c
+@@ -635,6 +635,9 @@ static int arm_cspmu_event_init(struct perf_event *event)
+
+ cspmu = to_arm_cspmu(event->pmu);
+
++ if (event->attr.type != event->pmu->type)
++ return -ENOENT;
++
+ /*
+ * Following other "uncore" PMUs, we do not support sampling mode or
+ * attach to a task (per-process mode).
+diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
+index 8fcaa26f0f8a6..d681638ec6b82 100644
+--- a/drivers/perf/arm_pmuv3.c
++++ b/drivers/perf/arm_pmuv3.c
+@@ -428,12 +428,12 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
+ #define ARMV8_IDX_TO_COUNTER(x) \
+ (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
+
+-static inline u32 armv8pmu_pmcr_read(void)
++static inline u64 armv8pmu_pmcr_read(void)
+ {
+ return read_pmcr();
+ }
+
+-static inline void armv8pmu_pmcr_write(u32 val)
++static inline void armv8pmu_pmcr_write(u64 val)
+ {
+ val &= ARMV8_PMU_PMCR_MASK;
+ isb();
+@@ -957,7 +957,7 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
+ static void armv8pmu_reset(void *info)
+ {
+ struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
+- u32 pmcr;
++ u64 pmcr;
+
+ /* The counter and interrupt enable registers are unknown at reset. */
+ armv8pmu_disable_counter(U32_MAX);
+diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+index 5a00adb2de8c9..051efffc44c82 100644
+--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
++++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+@@ -353,6 +353,10 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event)
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
++ /* Check the type first; if it does not match, this event is not ours */
++ if (event->attr.type != event->pmu->type)
++ return -ENOENT;
++
+ event->cpu = pcie_pmu->on_cpu;
+
+ if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event)))
+@@ -360,9 +364,6 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event)
+ else
+ hwc->event_base = HISI_PCIE_CNT;
+
+- if (event->attr.type != event->pmu->type)
+- return -ENOENT;
+-
+ /* Sampling is not supported. */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
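
This fix and the arm_cspmu one above enforce the same rule: perf core offers each new event to every registered PMU, so event_init() must reject foreign types with -ENOENT before touching the event (here, event->cpu was being written first). A minimal sketch of the expected ordering, with hypothetical names:

    #include <linux/perf_event.h>

    struct example_pmu {
            struct pmu pmu;
            int on_cpu;
    };

    static int example_pmu_event_init(struct perf_event *event)
    {
            struct example_pmu *epmu =
                    container_of(event->pmu, struct example_pmu, pmu);

            /* Not ours: return -ENOENT with the event unmodified so the
             * core can try the next PMU. */
            if (event->attr.type != event->pmu->type)
                    return -ENOENT;

            event->cpu = epmu->on_cpu;      /* only now safe to mutate */
            return 0;
    }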
+diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+index d941e746b4248..797cf201996a9 100644
+--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
++++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+@@ -505,8 +505,8 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
+ ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
+- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+- &pa_pmu->node);
++ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
++ &pa_pmu->node);
+ return ret;
+ }
+
+diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+index 6fe534a665eda..e706ca5676764 100644
+--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
++++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+@@ -450,8 +450,8 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
+ ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret);
+- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+- &sllc_pmu->node);
++ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
++ &sllc_pmu->node);
+ return ret;
+ }
+
+diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
+index e0457d84af6b3..16869bf5bf4cc 100644
+--- a/drivers/perf/hisilicon/hns3_pmu.c
++++ b/drivers/perf/hisilicon/hns3_pmu.c
+@@ -1556,8 +1556,8 @@ static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
+ ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
+ if (ret) {
+ pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret);
+- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+- &hns3_pmu->node);
++ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
++ &hns3_pmu->node);
+ }
+
+ return ret;
+@@ -1568,8 +1568,8 @@ static void hns3_pmu_uninit_pmu(struct pci_dev *pdev)
+ struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev);
+
+ perf_pmu_unregister(&hns3_pmu->pmu);
+- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+- &hns3_pmu->node);
++ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
++ &hns3_pmu->node);
+ }
+
+ static int hns3_pmu_init_dev(struct pci_dev *pdev)
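
The cpuhp changes in the three HiSilicon drivers above all switch unwind paths to the _nocalls variant: plain cpuhp_state_remove_instance() runs the state's teardown callback on every online CPU, which is wrong when the PMU never registered or has already been unregistered. A sketch of the pairing, with the registration step passed in as a hypothetical callback:

    #include <linux/cpuhotplug.h>

    /* Assumes "state" was set up earlier with cpuhp_setup_state_multi(). */
    static int example_init(enum cpuhp_state state, struct hlist_node *node,
                            int (*register_fn)(void))
    {
            int ret;

            ret = cpuhp_state_add_instance(state, node);  /* runs startup cb */
            if (ret)
                    return ret;

            ret = register_fn();
            if (ret)
                    /* Never went live: unhook without invoking the
                     * teardown callback on every online CPU. */
                    cpuhp_state_remove_instance_nocalls(state, node);

            return ret;
    }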
+diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
+index 96c7f670c8f0d..cd8a2b9efd787 100644
+--- a/drivers/perf/riscv_pmu_sbi.c
++++ b/drivers/perf/riscv_pmu_sbi.c
+@@ -543,8 +543,7 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
+
+ if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
+ (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
+- on_each_cpu_mask(mm_cpumask(event->owner->mm),
+- pmu_sbi_set_scounteren, (void *)event, 1);
++ pmu_sbi_set_scounteren((void *)event);
+ }
+
+ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
+@@ -554,8 +553,7 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
+
+ if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
+ (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
+- on_each_cpu_mask(mm_cpumask(event->owner->mm),
+- pmu_sbi_reset_scounteren, (void *)event, 1);
++ pmu_sbi_reset_scounteren((void *)event);
+
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
+ if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
+@@ -689,6 +687,11 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
+
+ /* Firmware counter don't support overflow yet */
+ fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
++ if (fidx == RISCV_MAX_COUNTERS) {
++ csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
++ return IRQ_NONE;
++ }
++
+ event = cpu_hw_evt->events[fidx];
+ if (!event) {
+ csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
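
The overflow-handler fix checks the find_first_bit() result: on an empty bitmap it returns the bitmap size, and using that as an index into events[] would read past the array. The guard in isolation (MAX_CTRS stands in for RISCV_MAX_COUNTERS):

    #include <linux/bitmap.h>
    #include <linux/perf_event.h>

    #define MAX_CTRS 64

    static struct perf_event *first_active_event(const unsigned long *used_ctrs,
                                                 struct perf_event **events)
    {
            unsigned long idx = find_first_bit(used_ctrs, MAX_CTRS);

            if (idx == MAX_CTRS)    /* no bit set: result == bitmap size */
                    return NULL;    /* caller acks the IRQ and bails */

            return events[idx];
    }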
+diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
+index d1670bbe6d6bc..e4502958fd62d 100644
+--- a/drivers/phy/Kconfig
++++ b/drivers/phy/Kconfig
+@@ -87,7 +87,6 @@ source "drivers/phy/motorola/Kconfig"
+ source "drivers/phy/mscc/Kconfig"
+ source "drivers/phy/qualcomm/Kconfig"
+ source "drivers/phy/ralink/Kconfig"
+-source "drivers/phy/realtek/Kconfig"
+ source "drivers/phy/renesas/Kconfig"
+ source "drivers/phy/rockchip/Kconfig"
+ source "drivers/phy/samsung/Kconfig"
+diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
+index 868a220ed0f6d..fb3dc9de61115 100644
+--- a/drivers/phy/Makefile
++++ b/drivers/phy/Makefile
+@@ -26,7 +26,6 @@ obj-y += allwinner/ \
+ mscc/ \
+ qualcomm/ \
+ ralink/ \
+- realtek/ \
+ renesas/ \
+ rockchip/ \
+ samsung/ \
+diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+index 52c275fbb2a1c..d4fb85c20eb0f 100644
+--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
++++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+@@ -24,23 +24,73 @@
+ #define EUSB2_FORCE_VAL_5 0xeD
+ #define V_CLK_19P2M_EN BIT(6)
+
++#define EUSB2_TUNE_USB2_CROSSOVER 0x50
+ #define EUSB2_TUNE_IUSB2 0x51
++#define EUSB2_TUNE_RES_FSDIF 0x52
++#define EUSB2_TUNE_HSDISC 0x53
+ #define EUSB2_TUNE_SQUELCH_U 0x54
++#define EUSB2_TUNE_USB2_SLEW 0x55
++#define EUSB2_TUNE_USB2_EQU 0x56
+ #define EUSB2_TUNE_USB2_PREEM 0x57
++#define EUSB2_TUNE_USB2_HS_COMP_CUR 0x58
++#define EUSB2_TUNE_EUSB_SLEW 0x59
++#define EUSB2_TUNE_EUSB_EQU 0x5A
++#define EUSB2_TUNE_EUSB_HS_COMP_CUR 0x5B
+
+-#define QCOM_EUSB2_REPEATER_INIT_CFG(o, v) \
++#define QCOM_EUSB2_REPEATER_INIT_CFG(r, v) \
+ { \
+- .offset = o, \
++ .reg = r, \
+ .val = v, \
+ }
+
+-struct eusb2_repeater_init_tbl {
+- unsigned int offset;
+- unsigned int val;
++enum reg_fields {
++ F_TUNE_EUSB_HS_COMP_CUR,
++ F_TUNE_EUSB_EQU,
++ F_TUNE_EUSB_SLEW,
++ F_TUNE_USB2_HS_COMP_CUR,
++ F_TUNE_USB2_PREEM,
++ F_TUNE_USB2_EQU,
++ F_TUNE_USB2_SLEW,
++ F_TUNE_SQUELCH_U,
++ F_TUNE_HSDISC,
++ F_TUNE_RES_FSDIF,
++ F_TUNE_IUSB2,
++ F_TUNE_USB2_CROSSOVER,
++ F_NUM_TUNE_FIELDS,
++
++ F_FORCE_VAL_5 = F_NUM_TUNE_FIELDS,
++ F_FORCE_EN_5,
++
++ F_EN_CTL1,
++
++ F_RPTR_STATUS,
++ F_NUM_FIELDS,
++};
++
++static struct reg_field eusb2_repeater_tune_reg_fields[F_NUM_FIELDS] = {
++ [F_TUNE_EUSB_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_EUSB_HS_COMP_CUR, 0, 1),
++ [F_TUNE_EUSB_EQU] = REG_FIELD(EUSB2_TUNE_EUSB_EQU, 0, 1),
++ [F_TUNE_EUSB_SLEW] = REG_FIELD(EUSB2_TUNE_EUSB_SLEW, 0, 1),
++ [F_TUNE_USB2_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_USB2_HS_COMP_CUR, 0, 1),
++ [F_TUNE_USB2_PREEM] = REG_FIELD(EUSB2_TUNE_USB2_PREEM, 0, 2),
++ [F_TUNE_USB2_EQU] = REG_FIELD(EUSB2_TUNE_USB2_EQU, 0, 1),
++ [F_TUNE_USB2_SLEW] = REG_FIELD(EUSB2_TUNE_USB2_SLEW, 0, 1),
++ [F_TUNE_SQUELCH_U] = REG_FIELD(EUSB2_TUNE_SQUELCH_U, 0, 2),
++ [F_TUNE_HSDISC] = REG_FIELD(EUSB2_TUNE_HSDISC, 0, 2),
++ [F_TUNE_RES_FSDIF] = REG_FIELD(EUSB2_TUNE_RES_FSDIF, 0, 2),
++ [F_TUNE_IUSB2] = REG_FIELD(EUSB2_TUNE_IUSB2, 0, 3),
++ [F_TUNE_USB2_CROSSOVER] = REG_FIELD(EUSB2_TUNE_USB2_CROSSOVER, 0, 2),
++
++ [F_FORCE_VAL_5] = REG_FIELD(EUSB2_FORCE_VAL_5, 0, 7),
++ [F_FORCE_EN_5] = REG_FIELD(EUSB2_FORCE_EN_5, 0, 7),
++
++ [F_EN_CTL1] = REG_FIELD(EUSB2_EN_CTL1, 0, 7),
++
++ [F_RPTR_STATUS] = REG_FIELD(EUSB2_RPTR_STATUS, 0, 7),
+ };
+
+ struct eusb2_repeater_cfg {
+- const struct eusb2_repeater_init_tbl *init_tbl;
++ const u32 *init_tbl;
+ int init_tbl_num;
+ const char * const *vreg_list;
+ int num_vregs;
+@@ -48,11 +98,10 @@ struct eusb2_repeater_cfg {
+
+ struct eusb2_repeater {
+ struct device *dev;
+- struct regmap *regmap;
++ struct regmap_field *regs[F_NUM_FIELDS];
+ struct phy *phy;
+ struct regulator_bulk_data *vregs;
+ const struct eusb2_repeater_cfg *cfg;
+- u16 base;
+ enum phy_mode mode;
+ };
+
+@@ -60,10 +109,10 @@ static const char * const pm8550b_vreg_l[] = {
+ "vdd18", "vdd3",
+ };
+
+-static const struct eusb2_repeater_init_tbl pm8550b_init_tbl[] = {
+- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_IUSB2, 0x8),
+- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_SQUELCH_U, 0x3),
+- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_USB2_PREEM, 0x5),
++static const u32 pm8550b_init_tbl[F_NUM_TUNE_FIELDS] = {
++ [F_TUNE_IUSB2] = 0x8,
++ [F_TUNE_SQUELCH_U] = 0x3,
++ [F_TUNE_USB2_PREEM] = 0x5,
+ };
+
+ static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
+@@ -91,9 +140,9 @@ static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr)
+
+ static int eusb2_repeater_init(struct phy *phy)
+ {
++ struct reg_field *regfields = eusb2_repeater_tune_reg_fields;
+ struct eusb2_repeater *rptr = phy_get_drvdata(phy);
+- const struct eusb2_repeater_init_tbl *init_tbl = rptr->cfg->init_tbl;
+- int num = rptr->cfg->init_tbl_num;
++ const u32 *init_tbl = rptr->cfg->init_tbl;
+ u32 val;
+ int ret;
+ int i;
+@@ -102,17 +151,21 @@ static int eusb2_repeater_init(struct phy *phy)
+ if (ret)
+ return ret;
+
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_EN_CTL1,
+- EUSB2_RPTR_EN, EUSB2_RPTR_EN);
++ regmap_field_update_bits(rptr->regs[F_EN_CTL1], EUSB2_RPTR_EN, EUSB2_RPTR_EN);
+
+- for (i = 0; i < num; i++)
+- regmap_update_bits(rptr->regmap,
+- rptr->base + init_tbl[i].offset,
+- init_tbl[i].val, init_tbl[i].val);
++ for (i = 0; i < F_NUM_TUNE_FIELDS; i++) {
++ if (init_tbl[i]) {
++ regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
++ } else {
++ /* Write 0 if there's no value set */
++ u32 mask = GENMASK(regfields[i].msb, regfields[i].lsb);
++
++ regmap_field_update_bits(rptr->regs[i], mask, 0);
++ }
++ }
+
+- ret = regmap_read_poll_timeout(rptr->regmap,
+- rptr->base + EUSB2_RPTR_STATUS, val,
+- val & RPTR_OK, 10, 5);
++ ret = regmap_field_read_poll_timeout(rptr->regs[F_RPTR_STATUS],
++ val, val & RPTR_OK, 10, 5);
+ if (ret)
+ dev_err(rptr->dev, "initialization timed-out\n");
+
+@@ -131,10 +184,10 @@ static int eusb2_repeater_set_mode(struct phy *phy,
+ * per eUSB 1.2 Spec. Below implement software workaround until
+ * PHY and controller is fixing seen observation.
+ */
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_EN_5,
+- F_CLK_19P2M_EN, F_CLK_19P2M_EN);
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_VAL_5,
+- V_CLK_19P2M_EN, V_CLK_19P2M_EN);
++ regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
++ F_CLK_19P2M_EN, F_CLK_19P2M_EN);
++ regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
++ V_CLK_19P2M_EN, V_CLK_19P2M_EN);
+ break;
+ case PHY_MODE_USB_DEVICE:
+ /*
+@@ -143,10 +196,10 @@ static int eusb2_repeater_set_mode(struct phy *phy,
+ * repeater doesn't clear previous value due to shared
+ * regulators (say host <-> device mode switch).
+ */
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_EN_5,
+- F_CLK_19P2M_EN, 0);
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_VAL_5,
+- V_CLK_19P2M_EN, 0);
++ regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
++ F_CLK_19P2M_EN, 0);
++ regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
++ V_CLK_19P2M_EN, 0);
+ break;
+ default:
+ return -EINVAL;
+@@ -175,8 +228,9 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct device_node *np = dev->of_node;
++ struct regmap *regmap;
++ int i, ret;
+ u32 res;
+- int ret;
+
+ rptr = devm_kzalloc(dev, sizeof(*rptr), GFP_KERNEL);
+ if (!rptr)
+@@ -189,15 +243,22 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
+ if (!rptr->cfg)
+ return -EINVAL;
+
+- rptr->regmap = dev_get_regmap(dev->parent, NULL);
+- if (!rptr->regmap)
++ regmap = dev_get_regmap(dev->parent, NULL);
++ if (!regmap)
+ return -ENODEV;
+
+ ret = of_property_read_u32(np, "reg", &res);
+ if (ret < 0)
+ return ret;
+
+- rptr->base = res;
++ for (i = 0; i < F_NUM_FIELDS; i++)
++ eusb2_repeater_tune_reg_fields[i].reg += res;
++
++ ret = devm_regmap_field_bulk_alloc(dev, regmap, rptr->regs,
++ eusb2_repeater_tune_reg_fields,
++ F_NUM_FIELDS);
++ if (ret)
++ return ret;
+
+ ret = eusb2_repeater_init_vregs(rptr);
+ if (ret < 0) {
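
The repeater conversion describes each register field once with REG_FIELD(reg, lsb, msb), allocates all the regmap_fields in bulk, and then accesses them without repeating base+offset arithmetic or hand-rolled masks. A stripped-down sketch of the same pattern (registers and names hypothetical):

    #include <linux/device.h>
    #include <linux/regmap.h>

    enum { F_TUNE, F_STATUS, F_NUM };

    static const struct reg_field example_fields[F_NUM] = {
            [F_TUNE]   = REG_FIELD(0x51, 0, 3),     /* 4-bit tuning field */
            [F_STATUS] = REG_FIELD(0x5c, 0, 7),     /* full status byte */
    };

    static int example_fields_init(struct device *dev, struct regmap *regmap,
                                   struct regmap_field *regs[F_NUM], u32 base)
    {
            struct reg_field fields[F_NUM];
            int i, ret;

            /* Rebase the template onto this instance's "reg" offset; the
             * copy avoids mutating the shared array in place. */
            for (i = 0; i < F_NUM; i++) {
                    fields[i] = example_fields[i];
                    fields[i].reg += base;
            }

            ret = devm_regmap_field_bulk_alloc(dev, regmap, regs,
                                               fields, F_NUM);
            if (ret)
                    return ret;

            /* Field helpers shift and mask internally. */
            return regmap_field_write(regs[F_TUNE], 0x8);
    }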
+diff --git a/drivers/phy/realtek/Kconfig b/drivers/phy/realtek/Kconfig
+deleted file mode 100644
+index 75ac7e7c31aec..0000000000000
+--- a/drivers/phy/realtek/Kconfig
++++ /dev/null
+@@ -1,32 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0
+-#
+-# Phy drivers for Realtek platforms
+-#
+-
+-if ARCH_REALTEK || COMPILE_TEST
+-
+-config PHY_RTK_RTD_USB2PHY
+- tristate "Realtek RTD USB2 PHY Transceiver Driver"
+- depends on USB_SUPPORT
+- select GENERIC_PHY
+- select USB_PHY
+- select USB_COMMON
+- help
+- Enable this to support Realtek SoC USB2 phy transceiver.
+- The DHC (digital home center) RTD series SoCs used the Synopsys
+- DWC3 USB IP. This driver will do the PHY initialization
+- of the parameters.
+-
+-config PHY_RTK_RTD_USB3PHY
+- tristate "Realtek RTD USB3 PHY Transceiver Driver"
+- depends on USB_SUPPORT
+- select GENERIC_PHY
+- select USB_PHY
+- select USB_COMMON
+- help
+- Enable this to support Realtek SoC USB3 phy transceiver.
+- The DHC (digital home center) RTD series SoCs used the Synopsys
+- DWC3 USB IP. This driver will do the PHY initialization
+- of the parameters.
+-
+-endif # ARCH_REALTEK || COMPILE_TEST
+diff --git a/drivers/phy/realtek/Makefile b/drivers/phy/realtek/Makefile
+deleted file mode 100644
+index ed7b47ff8a268..0000000000000
+--- a/drivers/phy/realtek/Makefile
++++ /dev/null
+@@ -1,3 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0
+-obj-$(CONFIG_PHY_RTK_RTD_USB2PHY) += phy-rtk-usb2.o
+-obj-$(CONFIG_PHY_RTK_RTD_USB3PHY) += phy-rtk-usb3.o
+diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c
+deleted file mode 100644
+index aedc78bd37f73..0000000000000
+--- a/drivers/phy/realtek/phy-rtk-usb2.c
++++ /dev/null
+@@ -1,1325 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * phy-rtk-usb2.c RTK usb2.0 PHY driver
+- *
+- * Copyright (C) 2023 Realtek Semiconductor Corporation
+- *
+- */
+-
+-#include <linux/module.h>
+-#include <linux/of.h>
+-#include <linux/of_device.h>
+-#include <linux/of_address.h>
+-#include <linux/uaccess.h>
+-#include <linux/debugfs.h>
+-#include <linux/nvmem-consumer.h>
+-#include <linux/regmap.h>
+-#include <linux/sys_soc.h>
+-#include <linux/mfd/syscon.h>
+-#include <linux/phy/phy.h>
+-#include <linux/usb.h>
+-#include <linux/usb/phy.h>
+-#include <linux/usb/hcd.h>
+-
+-/* GUSB2PHYACCn register */
+-#define PHY_NEW_REG_REQ BIT(25)
+-#define PHY_VSTS_BUSY BIT(23)
+-#define PHY_VCTRL_SHIFT 8
+-#define PHY_REG_DATA_MASK 0xff
+-
+-#define GET_LOW_NIBBLE(addr) ((addr) & 0x0f)
+-#define GET_HIGH_NIBBLE(addr) (((addr) & 0xf0) >> 4)
+-
+-#define EFUS_USB_DC_CAL_RATE 2
+-#define EFUS_USB_DC_CAL_MAX 7
+-
+-#define EFUS_USB_DC_DIS_RATE 1
+-#define EFUS_USB_DC_DIS_MAX 7
+-
+-#define MAX_PHY_DATA_SIZE 20
+-#define OFFEST_PHY_READ 0x20
+-
+-#define MAX_USB_PHY_NUM 4
+-#define MAX_USB_PHY_PAGE0_DATA_SIZE 16
+-#define MAX_USB_PHY_PAGE1_DATA_SIZE 16
+-#define MAX_USB_PHY_PAGE2_DATA_SIZE 8
+-
+-#define SET_PAGE_OFFSET 0xf4
+-#define SET_PAGE_0 0x9b
+-#define SET_PAGE_1 0xbb
+-#define SET_PAGE_2 0xdb
+-
+-#define PAGE_START 0xe0
+-#define PAGE0_0XE4 0xe4
+-#define PAGE0_0XE6 0xe6
+-#define PAGE0_0XE7 0xe7
+-#define PAGE1_0XE0 0xe0
+-#define PAGE1_0XE2 0xe2
+-
+-#define SENSITIVITY_CTRL (BIT(4) | BIT(5) | BIT(6))
+-#define ENABLE_AUTO_SENSITIVITY_CALIBRATION BIT(2)
+-#define DEFAULT_DC_DRIVING_VALUE (0x8)
+-#define DEFAULT_DC_DISCONNECTION_VALUE (0x6)
+-#define HS_CLK_SELECT BIT(6)
+-
+-struct phy_reg {
+- void __iomem *reg_wrap_vstatus;
+- void __iomem *reg_gusb2phyacc0;
+- int vstatus_index;
+-};
+-
+-struct phy_data {
+- u8 addr;
+- u8 data;
+-};
+-
+-struct phy_cfg {
+- int page0_size;
+- struct phy_data page0[MAX_USB_PHY_PAGE0_DATA_SIZE];
+- int page1_size;
+- struct phy_data page1[MAX_USB_PHY_PAGE1_DATA_SIZE];
+- int page2_size;
+- struct phy_data page2[MAX_USB_PHY_PAGE2_DATA_SIZE];
+-
+- int num_phy;
+-
+- bool check_efuse;
+- int check_efuse_version;
+-#define CHECK_EFUSE_V1 1
+-#define CHECK_EFUSE_V2 2
+- int efuse_dc_driving_rate;
+- int efuse_dc_disconnect_rate;
+- int dc_driving_mask;
+- int dc_disconnect_mask;
+- bool usb_dc_disconnect_at_page0;
+- int driving_updated_for_dev_dis;
+-
+- bool do_toggle;
+- bool do_toggle_driving;
+- bool use_default_parameter;
+- bool is_double_sensitivity_mode;
+-};
+-
+-struct phy_parameter {
+- struct phy_reg phy_reg;
+-
+- /* Get from efuse */
+- s8 efuse_usb_dc_cal;
+- s8 efuse_usb_dc_dis;
+-
+- /* Get from dts */
+- bool inverse_hstx_sync_clock;
+- u32 driving_level;
+- s32 driving_level_compensate;
+- s32 disconnection_compensate;
+-};
+-
+-struct rtk_phy {
+- struct usb_phy phy;
+- struct device *dev;
+-
+- struct phy_cfg *phy_cfg;
+- int num_phy;
+- struct phy_parameter *phy_parameter;
+-
+- struct dentry *debug_dir;
+-};
+-
+-/* mapping 0xE0 to 0 ... 0xE7 to 7, 0xF0 to 8 ,,, 0xF7 to 15 */
+-static inline int page_addr_to_array_index(u8 addr)
+-{
+- return (int)((((addr) - PAGE_START) & 0x7) +
+- ((((addr) - PAGE_START) & 0x10) >> 1));
+-}
+-
+-static inline u8 array_index_to_page_addr(int index)
+-{
+- return ((((index) + PAGE_START) & 0x7) +
+- ((((index) & 0x8) << 1) + PAGE_START));
+-}
+-
+-#define PHY_IO_TIMEOUT_USEC (50000)
+-#define PHY_IO_DELAY_US (100)
+-
+-static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
+-{
+- int ret;
+- unsigned int val;
+-
+- ret = read_poll_timeout(readl, val, ((val & mask) == result),
+- PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
+- if (ret) {
+- pr_err("%s can't program USB phy\n", __func__);
+- return -ETIMEDOUT;
+- }
+-
+- return 0;
+-}
+-
+-static char rtk_phy_read(struct phy_reg *phy_reg, char addr)
+-{
+- void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
+- unsigned int val;
+- int ret = 0;
+-
+- addr -= OFFEST_PHY_READ;
+-
+- /* polling until VBusy == 0 */
+- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+- if (ret)
+- return (char)ret;
+-
+- /* VCtrl = low nibble of addr, and set PHY_NEW_REG_REQ */
+- val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+- writel(val, reg_gusb2phyacc0);
+- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+- if (ret)
+- return (char)ret;
+-
+- /* VCtrl = high nibble of addr, and set PHY_NEW_REG_REQ */
+- val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+- writel(val, reg_gusb2phyacc0);
+- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+- if (ret)
+- return (char)ret;
+-
+- val = readl(reg_gusb2phyacc0);
+-
+- return (char)(val & PHY_REG_DATA_MASK);
+-}
+-
+-static int rtk_phy_write(struct phy_reg *phy_reg, char addr, char data)
+-{
+- unsigned int val;
+- void __iomem *reg_wrap_vstatus = phy_reg->reg_wrap_vstatus;
+- void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
+- int shift_bits = phy_reg->vstatus_index * 8;
+- int ret = 0;
+-
+- /* write data to VStatusOut2 (data output to phy) */
+- writel((u32)data << shift_bits, reg_wrap_vstatus);
+-
+- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+- if (ret)
+- return ret;
+-
+- /* VCtrl = low nibble of addr, set PHY_NEW_REG_REQ */
+- val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+-
+- writel(val, reg_gusb2phyacc0);
+- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+- if (ret)
+- return ret;
+-
+- /* VCtrl = high nibble of addr, set PHY_NEW_REG_REQ */
+- val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+-
+- writel(val, reg_gusb2phyacc0);
+- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+- if (ret)
+- return ret;
+-
+- return 0;
+-}
+-
+-static int rtk_phy_set_page(struct phy_reg *phy_reg, int page)
+-{
+- switch (page) {
+- case 0:
+- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_0);
+- case 1:
+- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_1);
+- case 2:
+- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_2);
+- default:
+- pr_err("%s error page=%d\n", __func__, page);
+- }
+-
+- return -EINVAL;
+-}
+-
+-static u8 __updated_dc_disconnect_level_page0_0xe4(struct phy_cfg *phy_cfg,
+- struct phy_parameter *phy_parameter, u8 data)
+-{
+- u8 ret;
+- s32 val;
+- s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+- int offset = 4;
+-
+- val = (s32)((data >> offset) & dc_disconnect_mask)
+- + phy_parameter->efuse_usb_dc_dis
+- + phy_parameter->disconnection_compensate;
+-
+- if (val > dc_disconnect_mask)
+- val = dc_disconnect_mask;
+- else if (val < 0)
+- val = 0;
+-
+- ret = (data & (~(dc_disconnect_mask << offset))) |
+- (val & dc_disconnect_mask) << offset;
+-
+- return ret;
+-}
+-
+-/* updated disconnect level at page0 */
+-static void update_dc_disconnect_level_at_page0(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter, bool update)
+-{
+- struct phy_cfg *phy_cfg;
+- struct phy_reg *phy_reg;
+- struct phy_data *phy_data_page;
+- struct phy_data *phy_data;
+- u8 addr, data;
+- int offset = 4;
+- s32 dc_disconnect_mask;
+- int i;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+- phy_reg = &phy_parameter->phy_reg;
+-
+- /* Set page 0 */
+- phy_data_page = phy_cfg->page0;
+- rtk_phy_set_page(phy_reg, 0);
+-
+- i = page_addr_to_array_index(PAGE0_0XE4);
+- phy_data = phy_data_page + i;
+- if (!phy_data->addr) {
+- phy_data->addr = PAGE0_0XE4;
+- phy_data->data = rtk_phy_read(phy_reg, PAGE0_0XE4);
+- }
+-
+- addr = phy_data->addr;
+- data = phy_data->data;
+- dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+-
+- if (update)
+- data = __updated_dc_disconnect_level_page0_0xe4(phy_cfg, phy_parameter, data);
+- else
+- data = (data & ~(dc_disconnect_mask << offset)) |
+- (DEFAULT_DC_DISCONNECTION_VALUE << offset);
+-
+- if (rtk_phy_write(phy_reg, addr, data))
+- dev_err(rtk_phy->dev,
+- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+- __func__, addr, data);
+-}
+-
+-static u8 __updated_dc_disconnect_level_page1_0xe2(struct phy_cfg *phy_cfg,
+- struct phy_parameter *phy_parameter, u8 data)
+-{
+- u8 ret;
+- s32 val;
+- s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+-
+- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+- val = (s32)(data & dc_disconnect_mask)
+- + phy_parameter->efuse_usb_dc_dis
+- + phy_parameter->disconnection_compensate;
+- } else { /* for CHECK_EFUSE_V2 or no efuse */
+- if (phy_parameter->efuse_usb_dc_dis)
+- val = (s32)(phy_parameter->efuse_usb_dc_dis +
+- phy_parameter->disconnection_compensate);
+- else
+- val = (s32)((data & dc_disconnect_mask) +
+- phy_parameter->disconnection_compensate);
+- }
+-
+- if (val > dc_disconnect_mask)
+- val = dc_disconnect_mask;
+- else if (val < 0)
+- val = 0;
+-
+- ret = (data & (~dc_disconnect_mask)) | (val & dc_disconnect_mask);
+-
+- return ret;
+-}
+-
+-/* updated disconnect level at page1 */
+-static void update_dc_disconnect_level_at_page1(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter, bool update)
+-{
+- struct phy_cfg *phy_cfg;
+- struct phy_data *phy_data_page;
+- struct phy_data *phy_data;
+- struct phy_reg *phy_reg;
+- u8 addr, data;
+- s32 dc_disconnect_mask;
+- int i;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+- phy_reg = &phy_parameter->phy_reg;
+-
+- /* Set page 1 */
+- phy_data_page = phy_cfg->page1;
+- rtk_phy_set_page(phy_reg, 1);
+-
+- i = page_addr_to_array_index(PAGE1_0XE2);
+- phy_data = phy_data_page + i;
+- if (!phy_data->addr) {
+- phy_data->addr = PAGE1_0XE2;
+- phy_data->data = rtk_phy_read(phy_reg, PAGE1_0XE2);
+- }
+-
+- addr = phy_data->addr;
+- data = phy_data->data;
+- dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+-
+- if (update)
+- data = __updated_dc_disconnect_level_page1_0xe2(phy_cfg, phy_parameter, data);
+- else
+- data = (data & ~dc_disconnect_mask) | DEFAULT_DC_DISCONNECTION_VALUE;
+-
+- if (rtk_phy_write(phy_reg, addr, data))
+- dev_err(rtk_phy->dev,
+- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+- __func__, addr, data);
+-}
+-
+-static void update_dc_disconnect_level(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter, bool update)
+-{
+- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+-
+- if (phy_cfg->usb_dc_disconnect_at_page0)
+- update_dc_disconnect_level_at_page0(rtk_phy, phy_parameter, update);
+- else
+- update_dc_disconnect_level_at_page1(rtk_phy, phy_parameter, update);
+-}
+-
+-static u8 __update_dc_driving_page0_0xe4(struct phy_cfg *phy_cfg,
+- struct phy_parameter *phy_parameter, u8 data)
+-{
+- s32 driving_level_compensate = phy_parameter->driving_level_compensate;
+- s32 dc_driving_mask = phy_cfg->dc_driving_mask;
+- s32 val;
+- u8 ret;
+-
+- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+- val = (s32)(data & dc_driving_mask) + driving_level_compensate
+- + phy_parameter->efuse_usb_dc_cal;
+- } else { /* for CHECK_EFUSE_V2 or no efuse */
+- if (phy_parameter->efuse_usb_dc_cal)
+- val = (s32)((phy_parameter->efuse_usb_dc_cal & dc_driving_mask)
+- + driving_level_compensate);
+- else
+- val = (s32)(data & dc_driving_mask);
+- }
+-
+- if (val > dc_driving_mask)
+- val = dc_driving_mask;
+- else if (val < 0)
+- val = 0;
+-
+- ret = (data & (~dc_driving_mask)) | (val & dc_driving_mask);
+-
+- return ret;
+-}
+-
+-static void update_dc_driving_level(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter)
+-{
+- struct phy_cfg *phy_cfg;
+- struct phy_reg *phy_reg;
+-
+- phy_reg = &phy_parameter->phy_reg;
+- phy_cfg = rtk_phy->phy_cfg;
+- if (!phy_cfg->page0[4].addr) {
+- rtk_phy_set_page(phy_reg, 0);
+- phy_cfg->page0[4].addr = PAGE0_0XE4;
+- phy_cfg->page0[4].data = rtk_phy_read(phy_reg, PAGE0_0XE4);
+- }
+-
+- if (phy_parameter->driving_level != DEFAULT_DC_DRIVING_VALUE) {
+- u32 dc_driving_mask;
+- u8 driving_level;
+- u8 data;
+-
+- data = phy_cfg->page0[4].data;
+- dc_driving_mask = phy_cfg->dc_driving_mask;
+- driving_level = data & dc_driving_mask;
+-
+- dev_dbg(rtk_phy->dev, "%s driving_level=%d => dts driving_level=%d\n",
+- __func__, driving_level, phy_parameter->driving_level);
+-
+- phy_cfg->page0[4].data = (data & (~dc_driving_mask)) |
+- (phy_parameter->driving_level & dc_driving_mask);
+- }
+-
+- phy_cfg->page0[4].data = __update_dc_driving_page0_0xe4(phy_cfg,
+- phy_parameter,
+- phy_cfg->page0[4].data);
+-}
+-
+-static void update_hs_clk_select(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter)
+-{
+- struct phy_cfg *phy_cfg;
+- struct phy_reg *phy_reg;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+- phy_reg = &phy_parameter->phy_reg;
+-
+- if (phy_parameter->inverse_hstx_sync_clock) {
+- if (!phy_cfg->page0[6].addr) {
+- rtk_phy_set_page(phy_reg, 0);
+- phy_cfg->page0[6].addr = PAGE0_0XE6;
+- phy_cfg->page0[6].data = rtk_phy_read(phy_reg, PAGE0_0XE6);
+- }
+-
+- phy_cfg->page0[6].data = phy_cfg->page0[6].data | HS_CLK_SELECT;
+- }
+-}
+-
+-static void do_rtk_phy_toggle(struct rtk_phy *rtk_phy,
+- int index, bool connect)
+-{
+- struct phy_parameter *phy_parameter;
+- struct phy_cfg *phy_cfg;
+- struct phy_reg *phy_reg;
+- struct phy_data *phy_data_page;
+- u8 addr, data;
+- int i;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+- phy_reg = &phy_parameter->phy_reg;
+-
+- if (!phy_cfg->do_toggle)
+- goto out;
+-
+- if (phy_cfg->is_double_sensitivity_mode)
+- goto do_toggle_driving;
+-
+- /* Set page 0 */
+- rtk_phy_set_page(phy_reg, 0);
+-
+- addr = PAGE0_0XE7;
+- data = rtk_phy_read(phy_reg, addr);
+-
+- if (connect)
+- rtk_phy_write(phy_reg, addr, data & (~SENSITIVITY_CTRL));
+- else
+- rtk_phy_write(phy_reg, addr, data | (SENSITIVITY_CTRL));
+-
+-do_toggle_driving:
+-
+- if (!phy_cfg->do_toggle_driving)
+- goto do_toggle;
+-
+- /* Page 0 addr 0xE4 driving capability */
+-
+- /* Set page 0 */
+- phy_data_page = phy_cfg->page0;
+- rtk_phy_set_page(phy_reg, 0);
+-
+- i = page_addr_to_array_index(PAGE0_0XE4);
+- addr = phy_data_page[i].addr;
+- data = phy_data_page[i].data;
+-
+- if (connect) {
+- rtk_phy_write(phy_reg, addr, data);
+- } else {
+- u8 value;
+- s32 tmp;
+- s32 driving_updated =
+- phy_cfg->driving_updated_for_dev_dis;
+- s32 dc_driving_mask = phy_cfg->dc_driving_mask;
+-
+- tmp = (s32)(data & dc_driving_mask) + driving_updated;
+-
+- if (tmp > dc_driving_mask)
+- tmp = dc_driving_mask;
+- else if (tmp < 0)
+- tmp = 0;
+-
+- value = (data & (~dc_driving_mask)) | (tmp & dc_driving_mask);
+-
+- rtk_phy_write(phy_reg, addr, value);
+- }
+-
+-do_toggle:
+- /* restore dc disconnect level before toggle */
+- update_dc_disconnect_level(rtk_phy, phy_parameter, false);
+-
+- /* Set page 1 */
+- rtk_phy_set_page(phy_reg, 1);
+-
+- addr = PAGE1_0XE0;
+- data = rtk_phy_read(phy_reg, addr);
+-
+- rtk_phy_write(phy_reg, addr, data &
+- (~ENABLE_AUTO_SENSITIVITY_CALIBRATION));
+- mdelay(1);
+- rtk_phy_write(phy_reg, addr, data |
+- (ENABLE_AUTO_SENSITIVITY_CALIBRATION));
+-
+- /* update dc disconnect level after toggle */
+- update_dc_disconnect_level(rtk_phy, phy_parameter, true);
+-
+-out:
+- return;
+-}
+-
+-static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
+-{
+- struct phy_parameter *phy_parameter;
+- struct phy_cfg *phy_cfg;
+- struct phy_data *phy_data_page;
+- struct phy_reg *phy_reg;
+- int i;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+- phy_reg = &phy_parameter->phy_reg;
+-
+- if (phy_cfg->use_default_parameter) {
+- dev_dbg(rtk_phy->dev, "%s phy#%d use default parameter\n",
+- __func__, index);
+- goto do_toggle;
+- }
+-
+- /* Set page 0 */
+- phy_data_page = phy_cfg->page0;
+- rtk_phy_set_page(phy_reg, 0);
+-
+- for (i = 0; i < phy_cfg->page0_size; i++) {
+- struct phy_data *phy_data = phy_data_page + i;
+- u8 addr = phy_data->addr;
+- u8 data = phy_data->data;
+-
+- if (!addr)
+- continue;
+-
+- if (rtk_phy_write(phy_reg, addr, data)) {
+- dev_err(rtk_phy->dev,
+- "%s: Error to set page0 parameter addr=0x%x value=0x%x\n",
+- __func__, addr, data);
+- return -EINVAL;
+- }
+- }
+-
+- /* Set page 1 */
+- phy_data_page = phy_cfg->page1;
+- rtk_phy_set_page(phy_reg, 1);
+-
+- for (i = 0; i < phy_cfg->page1_size; i++) {
+- struct phy_data *phy_data = phy_data_page + i;
+- u8 addr = phy_data->addr;
+- u8 data = phy_data->data;
+-
+- if (!addr)
+- continue;
+-
+- if (rtk_phy_write(phy_reg, addr, data)) {
+- dev_err(rtk_phy->dev,
+- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+- __func__, addr, data);
+- return -EINVAL;
+- }
+- }
+-
+- if (phy_cfg->page2_size == 0)
+- goto do_toggle;
+-
+- /* Set page 2 */
+- phy_data_page = phy_cfg->page2;
+- rtk_phy_set_page(phy_reg, 2);
+-
+- for (i = 0; i < phy_cfg->page2_size; i++) {
+- struct phy_data *phy_data = phy_data_page + i;
+- u8 addr = phy_data->addr;
+- u8 data = phy_data->data;
+-
+- if (!addr)
+- continue;
+-
+- if (rtk_phy_write(phy_reg, addr, data)) {
+- dev_err(rtk_phy->dev,
+- "%s: Error to set page2 parameter addr=0x%x value=0x%x\n",
+- __func__, addr, data);
+- return -EINVAL;
+- }
+- }
+-
+-do_toggle:
+- do_rtk_phy_toggle(rtk_phy, index, false);
+-
+- return 0;
+-}
+-
+-static int rtk_phy_init(struct phy *phy)
+-{
+- struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+- unsigned long phy_init_time = jiffies;
+- int i, ret = 0;
+-
+- if (!rtk_phy)
+- return -EINVAL;
+-
+- for (i = 0; i < rtk_phy->num_phy; i++)
+- ret = do_rtk_phy_init(rtk_phy, i);
+-
+- dev_dbg(rtk_phy->dev, "Initialized RTK USB 2.0 PHY (take %dms)\n",
+- jiffies_to_msecs(jiffies - phy_init_time));
+- return ret;
+-}
+-
+-static int rtk_phy_exit(struct phy *phy)
+-{
+- return 0;
+-}
+-
+-static const struct phy_ops ops = {
+- .init = rtk_phy_init,
+- .exit = rtk_phy_exit,
+- .owner = THIS_MODULE,
+-};
+-
+-static void rtk_phy_toggle(struct usb_phy *usb2_phy, bool connect, int port)
+-{
+- int index = port;
+- struct rtk_phy *rtk_phy = NULL;
+-
+- rtk_phy = dev_get_drvdata(usb2_phy->dev);
+-
+- if (index > rtk_phy->num_phy) {
+- dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n",
+- __func__, index, rtk_phy->num_phy);
+- return;
+- }
+-
+- do_rtk_phy_toggle(rtk_phy, index, connect);
+-}
+-
+-static int rtk_phy_notify_port_status(struct usb_phy *x, int port,
+- u16 portstatus, u16 portchange)
+-{
+- bool connect = false;
+-
+- pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n",
+- __func__, port, (int)portstatus, (int)portchange);
+- if (portstatus & USB_PORT_STAT_CONNECTION)
+- connect = true;
+-
+- if (portchange & USB_PORT_STAT_C_CONNECTION)
+- rtk_phy_toggle(x, connect, port);
+-
+- return 0;
+-}
+-
+-#ifdef CONFIG_DEBUG_FS
+-static struct dentry *create_phy_debug_root(void)
+-{
+- struct dentry *phy_debug_root;
+-
+- phy_debug_root = debugfs_lookup("phy", usb_debug_root);
+- if (!phy_debug_root)
+- phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
+-
+- return phy_debug_root;
+-}
+-
+-static int rtk_usb2_parameter_show(struct seq_file *s, void *unused)
+-{
+- struct rtk_phy *rtk_phy = s->private;
+- struct phy_cfg *phy_cfg;
+- int i, index;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+-
+- seq_puts(s, "Property:\n");
+- seq_printf(s, " check_efuse: %s\n",
+- phy_cfg->check_efuse ? "Enable" : "Disable");
+- seq_printf(s, " check_efuse_version: %d\n",
+- phy_cfg->check_efuse_version);
+- seq_printf(s, " efuse_dc_driving_rate: %d\n",
+- phy_cfg->efuse_dc_driving_rate);
+- seq_printf(s, " dc_driving_mask: 0x%x\n",
+- phy_cfg->dc_driving_mask);
+- seq_printf(s, " efuse_dc_disconnect_rate: %d\n",
+- phy_cfg->efuse_dc_disconnect_rate);
+- seq_printf(s, " dc_disconnect_mask: 0x%x\n",
+- phy_cfg->dc_disconnect_mask);
+- seq_printf(s, " usb_dc_disconnect_at_page0: %s\n",
+- phy_cfg->usb_dc_disconnect_at_page0 ? "true" : "false");
+- seq_printf(s, " do_toggle: %s\n",
+- phy_cfg->do_toggle ? "Enable" : "Disable");
+- seq_printf(s, " do_toggle_driving: %s\n",
+- phy_cfg->do_toggle_driving ? "Enable" : "Disable");
+- seq_printf(s, " driving_updated_for_dev_dis: 0x%x\n",
+- phy_cfg->driving_updated_for_dev_dis);
+- seq_printf(s, " use_default_parameter: %s\n",
+- phy_cfg->use_default_parameter ? "Enable" : "Disable");
+- seq_printf(s, " is_double_sensitivity_mode: %s\n",
+- phy_cfg->is_double_sensitivity_mode ? "Enable" : "Disable");
+-
+- for (index = 0; index < rtk_phy->num_phy; index++) {
+- struct phy_parameter *phy_parameter;
+- struct phy_reg *phy_reg;
+- struct phy_data *phy_data_page;
+-
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+- phy_reg = &phy_parameter->phy_reg;
+-
+- seq_printf(s, "PHY %d:\n", index);
+-
+- seq_puts(s, "Page 0:\n");
+- /* Set page 0 */
+- phy_data_page = phy_cfg->page0;
+- rtk_phy_set_page(phy_reg, 0);
+-
+- for (i = 0; i < phy_cfg->page0_size; i++) {
+- struct phy_data *phy_data = phy_data_page + i;
+- u8 addr = array_index_to_page_addr(i);
+- u8 data = phy_data->data;
+- u8 value = rtk_phy_read(phy_reg, addr);
+-
+- if (phy_data->addr)
+- seq_printf(s, " Page 0: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+- addr, data, value);
+- else
+- seq_printf(s, " Page 0: addr=0x%x data=none ==> read value=0x%02x\n",
+- addr, value);
+- }
+-
+- seq_puts(s, "Page 1:\n");
+- /* Set page 1 */
+- phy_data_page = phy_cfg->page1;
+- rtk_phy_set_page(phy_reg, 1);
+-
+- for (i = 0; i < phy_cfg->page1_size; i++) {
+- struct phy_data *phy_data = phy_data_page + i;
+- u8 addr = array_index_to_page_addr(i);
+- u8 data = phy_data->data;
+- u8 value = rtk_phy_read(phy_reg, addr);
+-
+- if (phy_data->addr)
+- seq_printf(s, " Page 1: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+- addr, data, value);
+- else
+- seq_printf(s, " Page 1: addr=0x%x data=none ==> read value=0x%02x\n",
+- addr, value);
+- }
+-
+- if (phy_cfg->page2_size == 0)
+- goto out;
+-
+- seq_puts(s, "Page 2:\n");
+- /* Set page 2 */
+- phy_data_page = phy_cfg->page2;
+- rtk_phy_set_page(phy_reg, 2);
+-
+- for (i = 0; i < phy_cfg->page2_size; i++) {
+- struct phy_data *phy_data = phy_data_page + i;
+- u8 addr = array_index_to_page_addr(i);
+- u8 data = phy_data->data;
+- u8 value = rtk_phy_read(phy_reg, addr);
+-
+- if (phy_data->addr)
+- seq_printf(s, " Page 2: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+- addr, data, value);
+- else
+- seq_printf(s, " Page 2: addr=0x%x data=none ==> read value=0x%02x\n",
+- addr, value);
+- }
+-
+-out:
+- seq_puts(s, "PHY Property:\n");
+- seq_printf(s, " efuse_usb_dc_cal: %d\n",
+- (int)phy_parameter->efuse_usb_dc_cal);
+- seq_printf(s, " efuse_usb_dc_dis: %d\n",
+- (int)phy_parameter->efuse_usb_dc_dis);
+- seq_printf(s, " inverse_hstx_sync_clock: %s\n",
+- phy_parameter->inverse_hstx_sync_clock ? "Enable" : "Disable");
+- seq_printf(s, " driving_level: %d\n",
+- phy_parameter->driving_level);
+- seq_printf(s, " driving_level_compensate: %d\n",
+- phy_parameter->driving_level_compensate);
+- seq_printf(s, " disconnection_compensate: %d\n",
+- phy_parameter->disconnection_compensate);
+- }
+-
+- return 0;
+-}
+-DEFINE_SHOW_ATTRIBUTE(rtk_usb2_parameter);
+-
+-static inline void create_debug_files(struct rtk_phy *rtk_phy)
+-{
+- struct dentry *phy_debug_root = NULL;
+-
+- phy_debug_root = create_phy_debug_root();
+- if (!phy_debug_root)
+- return;
+-
+- rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev),
+- phy_debug_root);
+-
+- debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+- &rtk_usb2_parameter_fops);
+-
+- return;
+-}
+-
+-static inline void remove_debug_files(struct rtk_phy *rtk_phy)
+-{
+- debugfs_remove_recursive(rtk_phy->debug_dir);
+-}
+-#else
+-static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
+-static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
+-#endif /* CONFIG_DEBUG_FS */
+-
+-static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter, int index)
+-{
+- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+- u8 value = 0;
+- struct nvmem_cell *cell;
+- struct soc_device_attribute rtk_soc_groot[] = {
+- { .family = "Realtek Groot",},
+- { /* empty */ } };
+-
+- if (!phy_cfg->check_efuse)
+- goto out;
+-
+- /* Read efuse for usb dc cal */
+- cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-cal");
+- if (IS_ERR(cell)) {
+- dev_dbg(rtk_phy->dev, "%s no usb-dc-cal: %ld\n",
+- __func__, PTR_ERR(cell));
+- } else {
+- unsigned char *buf;
+- size_t buf_size;
+-
+- buf = nvmem_cell_read(cell, &buf_size);
+- if (!IS_ERR(buf)) {
+- value = buf[0] & phy_cfg->dc_driving_mask;
+- kfree(buf);
+- }
+- nvmem_cell_put(cell);
+- }
+-
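+-	/*
+-	 * Descriptive note (inferred from the code below): CHECK_EFUSE_V1
+-	 * cells appear to use a sign/magnitude encoding — values up to
+-	 * EFUS_USB_DC_CAL_MAX scale to a positive calibration, while larger
+-	 * values encode the negative of their masked magnitude.
+-	 */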
+- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+- int rate = phy_cfg->efuse_dc_driving_rate;
+-
+- if (value <= EFUS_USB_DC_CAL_MAX)
+- phy_parameter->efuse_usb_dc_cal = (int8_t)(value * rate);
+- else
+- phy_parameter->efuse_usb_dc_cal = -(int8_t)
+- ((EFUS_USB_DC_CAL_MAX & value) * rate);
+-
+- if (soc_device_match(rtk_soc_groot)) {
+- dev_dbg(rtk_phy->dev, "For groot IC we need a workaround to adjust efuse_usb_dc_cal\n");
+-
+-			/* We don't multiply by dc_cal_rate=2 for positive dc cal compensation */
+- if (value <= EFUS_USB_DC_CAL_MAX)
+- phy_parameter->efuse_usb_dc_cal = (int8_t)(value);
+-
+-			/* We set the max dc cal compensation to 0x8 if the OTP value is 0x7 */
+- if (value == 0x7)
+- phy_parameter->efuse_usb_dc_cal = (int8_t)(value + 1);
+- }
+- } else { /* for CHECK_EFUSE_V2 */
+- phy_parameter->efuse_usb_dc_cal = value & phy_cfg->dc_driving_mask;
+- }
+-
+- /* Read efuse for usb dc disconnect level */
+- value = 0;
+- cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-dis");
+- if (IS_ERR(cell)) {
+- dev_dbg(rtk_phy->dev, "%s no usb-dc-dis: %ld\n",
+- __func__, PTR_ERR(cell));
+- } else {
+- unsigned char *buf;
+- size_t buf_size;
+-
+- buf = nvmem_cell_read(cell, &buf_size);
+- if (!IS_ERR(buf)) {
+- value = buf[0] & phy_cfg->dc_disconnect_mask;
+- kfree(buf);
+- }
+- nvmem_cell_put(cell);
+- }
+-
+- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+- int rate = phy_cfg->efuse_dc_disconnect_rate;
+-
+- if (value <= EFUS_USB_DC_DIS_MAX)
+- phy_parameter->efuse_usb_dc_dis = (int8_t)(value * rate);
+- else
+- phy_parameter->efuse_usb_dc_dis = -(int8_t)
+- ((EFUS_USB_DC_DIS_MAX & value) * rate);
+- } else { /* for CHECK_EFUSE_V2 */
+- phy_parameter->efuse_usb_dc_dis = value & phy_cfg->dc_disconnect_mask;
+- }
+-
+-out:
+- return 0;
+-}
+-
+-static int parse_phy_data(struct rtk_phy *rtk_phy)
+-{
+- struct device *dev = rtk_phy->dev;
+- struct device_node *np = dev->of_node;
+- struct phy_parameter *phy_parameter;
+- int ret = 0;
+- int index;
+-
+- rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
+- rtk_phy->num_phy, GFP_KERNEL);
+- if (!rtk_phy->phy_parameter)
+- return -ENOMEM;
+-
+- for (index = 0; index < rtk_phy->num_phy; index++) {
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+-
+- phy_parameter->phy_reg.reg_wrap_vstatus = of_iomap(np, 0);
+- phy_parameter->phy_reg.reg_gusb2phyacc0 = of_iomap(np, 1) + index;
+- phy_parameter->phy_reg.vstatus_index = index;
+-
+- if (of_property_read_bool(np, "realtek,inverse-hstx-sync-clock"))
+- phy_parameter->inverse_hstx_sync_clock = true;
+- else
+- phy_parameter->inverse_hstx_sync_clock = false;
+-
+- if (of_property_read_u32_index(np, "realtek,driving-level",
+- index, &phy_parameter->driving_level))
+- phy_parameter->driving_level = DEFAULT_DC_DRIVING_VALUE;
+-
+- if (of_property_read_u32_index(np, "realtek,driving-level-compensate",
+- index, &phy_parameter->driving_level_compensate))
+- phy_parameter->driving_level_compensate = 0;
+-
+- if (of_property_read_u32_index(np, "realtek,disconnection-compensate",
+- index, &phy_parameter->disconnection_compensate))
+- phy_parameter->disconnection_compensate = 0;
+-
+- get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
+-
+- update_dc_driving_level(rtk_phy, phy_parameter);
+-
+- update_hs_clk_select(rtk_phy, phy_parameter);
+- }
+-
+- return ret;
+-}
+-
+-static int rtk_usb2phy_probe(struct platform_device *pdev)
+-{
+- struct rtk_phy *rtk_phy;
+- struct device *dev = &pdev->dev;
+- struct phy *generic_phy;
+- struct phy_provider *phy_provider;
+- const struct phy_cfg *phy_cfg;
+- int ret = 0;
+-
+- phy_cfg = of_device_get_match_data(dev);
+- if (!phy_cfg) {
+-		dev_err(dev, "phy config is not assigned!\n");
+- return -EINVAL;
+- }
+-
+- rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
+- if (!rtk_phy)
+- return -ENOMEM;
+-
+- rtk_phy->dev = &pdev->dev;
+- rtk_phy->phy.dev = rtk_phy->dev;
+- rtk_phy->phy.label = "rtk-usb2phy";
+- rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status;
+-
+- rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
+-
+- memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
+-
+- rtk_phy->num_phy = phy_cfg->num_phy;
+-
+- ret = parse_phy_data(rtk_phy);
+- if (ret)
+- goto err;
+-
+- platform_set_drvdata(pdev, rtk_phy);
+-
+- generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
+- if (IS_ERR(generic_phy))
+- return PTR_ERR(generic_phy);
+-
+- phy_set_drvdata(generic_phy, rtk_phy);
+-
+- phy_provider = devm_of_phy_provider_register(rtk_phy->dev,
+- of_phy_simple_xlate);
+- if (IS_ERR(phy_provider))
+- return PTR_ERR(phy_provider);
+-
+- ret = usb_add_phy_dev(&rtk_phy->phy);
+- if (ret)
+- goto err;
+-
+- create_debug_files(rtk_phy);
+-
+-err:
+- return ret;
+-}
+-
+-static void rtk_usb2phy_remove(struct platform_device *pdev)
+-{
+- struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
+-
+- remove_debug_files(rtk_phy);
+-
+- usb_remove_phy(&rtk_phy->phy);
+-}
+-
+-static const struct phy_cfg rtd1295_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [0] = {0xe0, 0x90},
+- [3] = {0xe3, 0x3a},
+- [4] = {0xe4, 0x68},
+- [6] = {0xe6, 0x91},
+- [13] = {0xf5, 0x81},
+- [15] = {0xf7, 0x02}, },
+- .page1_size = 8,
+- .page1 = { /* default parameter */ },
+- .page2_size = 0,
+- .page2 = { /* no parameter */ },
+- .num_phy = 1,
+- .check_efuse = false,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = 1,
+- .dc_driving_mask = 0xf,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = true,
+- .do_toggle = true,
+- .do_toggle_driving = false,
+- .driving_updated_for_dev_dis = 0xf,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = false,
+-};
+-
+-static const struct phy_cfg rtd1395_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [4] = {0xe4, 0xac},
+- [13] = {0xf5, 0x00},
+- [15] = {0xf7, 0x02}, },
+- .page1_size = 8,
+- .page1 = { /* default parameter */ },
+- .page2_size = 0,
+- .page2 = { /* no parameter */ },
+- .num_phy = 1,
+- .check_efuse = false,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = 1,
+- .dc_driving_mask = 0xf,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = true,
+- .do_toggle = true,
+- .do_toggle_driving = false,
+- .driving_updated_for_dev_dis = 0xf,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = false,
+-};
+-
+-static const struct phy_cfg rtd1395_phy_cfg_2port = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [4] = {0xe4, 0xac},
+- [13] = {0xf5, 0x00},
+- [15] = {0xf7, 0x02}, },
+- .page1_size = 8,
+- .page1 = { /* default parameter */ },
+- .page2_size = 0,
+- .page2 = { /* no parameter */ },
+- .num_phy = 2,
+- .check_efuse = false,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = 1,
+- .dc_driving_mask = 0xf,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = true,
+- .do_toggle = true,
+- .do_toggle_driving = false,
+- .driving_updated_for_dev_dis = 0xf,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = false,
+-};
+-
+-static const struct phy_cfg rtd1619_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [4] = {0xe4, 0x68}, },
+- .page1_size = 8,
+- .page1 = { /* default parameter */ },
+- .page2_size = 0,
+- .page2 = { /* no parameter */ },
+- .num_phy = 1,
+- .check_efuse = true,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = 1,
+- .dc_driving_mask = 0xf,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = true,
+- .do_toggle = true,
+- .do_toggle_driving = false,
+- .driving_updated_for_dev_dis = 0xf,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = false,
+-};
+-
+-static const struct phy_cfg rtd1319_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [0] = {0xe0, 0x18},
+- [4] = {0xe4, 0x6a},
+- [7] = {0xe7, 0x71},
+- [13] = {0xf5, 0x15},
+- [15] = {0xf7, 0x32}, },
+- .page1_size = 8,
+- .page1 = { [3] = {0xe3, 0x44}, },
+- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+- .page2 = { [0] = {0xe0, 0x01}, },
+- .num_phy = 1,
+- .check_efuse = true,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = 1,
+- .dc_driving_mask = 0xf,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = true,
+- .do_toggle = true,
+- .do_toggle_driving = true,
+- .driving_updated_for_dev_dis = 0xf,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = true,
+-};
+-
+-static const struct phy_cfg rtd1312c_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [0] = {0xe0, 0x14},
+- [4] = {0xe4, 0x67},
+- [5] = {0xe5, 0x55}, },
+- .page1_size = 8,
+- .page1 = { [3] = {0xe3, 0x23},
+- [6] = {0xe6, 0x58}, },
+- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+- .page2 = { /* default parameter */ },
+- .num_phy = 1,
+- .check_efuse = true,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = 1,
+- .dc_driving_mask = 0xf,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = true,
+- .do_toggle = true,
+- .do_toggle_driving = true,
+- .driving_updated_for_dev_dis = 0xf,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = true,
+-};
+-
+-static const struct phy_cfg rtd1619b_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [0] = {0xe0, 0xa3},
+- [4] = {0xe4, 0x88},
+- [5] = {0xe5, 0x4f},
+- [6] = {0xe6, 0x02}, },
+- .page1_size = 8,
+- .page1 = { [3] = {0xe3, 0x64}, },
+- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+- .page2 = { [7] = {0xe7, 0x45}, },
+- .num_phy = 1,
+- .check_efuse = true,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+- .dc_driving_mask = 0x1f,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = false,
+- .do_toggle = true,
+- .do_toggle_driving = true,
+- .driving_updated_for_dev_dis = 0x8,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = true,
+-};
+-
+-static const struct phy_cfg rtd1319d_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [0] = {0xe0, 0xa3},
+- [4] = {0xe4, 0x8e},
+- [5] = {0xe5, 0x4f},
+- [6] = {0xe6, 0x02}, },
+- .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
+- .page1 = { [14] = {0xf5, 0x1}, },
+- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+- .page2 = { [7] = {0xe7, 0x44}, },
+- .check_efuse = true,
+- .num_phy = 1,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+- .dc_driving_mask = 0x1f,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = false,
+- .do_toggle = true,
+- .do_toggle_driving = false,
+- .driving_updated_for_dev_dis = 0x8,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = true,
+-};
+-
+-static const struct phy_cfg rtd1315e_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [0] = {0xe0, 0xa3},
+- [4] = {0xe4, 0x8c},
+- [5] = {0xe5, 0x4f},
+- [6] = {0xe6, 0x02}, },
+- .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
+- .page1 = { [3] = {0xe3, 0x7f},
+- [14] = {0xf5, 0x01}, },
+- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+- .page2 = { [7] = {0xe7, 0x44}, },
+- .num_phy = 1,
+- .check_efuse = true,
+- .check_efuse_version = CHECK_EFUSE_V2,
+- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+- .dc_driving_mask = 0x1f,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = false,
+- .do_toggle = true,
+- .do_toggle_driving = false,
+- .driving_updated_for_dev_dis = 0x8,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = true,
+-};
+-
+-static const struct of_device_id usbphy_rtk_dt_match[] = {
+- { .compatible = "realtek,rtd1295-usb2phy", .data = &rtd1295_phy_cfg },
+- { .compatible = "realtek,rtd1312c-usb2phy", .data = &rtd1312c_phy_cfg },
+- { .compatible = "realtek,rtd1315e-usb2phy", .data = &rtd1315e_phy_cfg },
+- { .compatible = "realtek,rtd1319-usb2phy", .data = &rtd1319_phy_cfg },
+- { .compatible = "realtek,rtd1319d-usb2phy", .data = &rtd1319d_phy_cfg },
+- { .compatible = "realtek,rtd1395-usb2phy", .data = &rtd1395_phy_cfg },
+- { .compatible = "realtek,rtd1395-usb2phy-2port", .data = &rtd1395_phy_cfg_2port },
+- { .compatible = "realtek,rtd1619-usb2phy", .data = &rtd1619_phy_cfg },
+- { .compatible = "realtek,rtd1619b-usb2phy", .data = &rtd1619b_phy_cfg },
+- {},
+-};
+-MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
+-
+-static struct platform_driver rtk_usb2phy_driver = {
+- .probe = rtk_usb2phy_probe,
+- .remove_new = rtk_usb2phy_remove,
+- .driver = {
+- .name = "rtk-usb2phy",
+- .of_match_table = usbphy_rtk_dt_match,
+- },
+-};
+-
+-module_platform_driver(rtk_usb2phy_driver);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_ALIAS("platform: rtk-usb2phy");
+-MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
+-MODULE_DESCRIPTION("Realtek usb 2.0 phy driver");
+diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c
+deleted file mode 100644
+index dfb3122f3f114..0000000000000
+--- a/drivers/phy/realtek/phy-rtk-usb3.c
++++ /dev/null
+@@ -1,761 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * phy-rtk-usb3.c RTK USB 3.0 PHY driver
+- *
+- * Copyright (c) 2023 Realtek Semiconductor Corporation
+- *
+- */
+-
+-#include <linux/module.h>
+-#include <linux/of.h>
+-#include <linux/of_device.h>
+-#include <linux/of_address.h>
+-#include <linux/uaccess.h>
+-#include <linux/debugfs.h>
+-#include <linux/nvmem-consumer.h>
+-#include <linux/regmap.h>
+-#include <linux/sys_soc.h>
+-#include <linux/mfd/syscon.h>
+-#include <linux/phy/phy.h>
+-#include <linux/usb.h>
+-#include <linux/usb/hcd.h>
+-#include <linux/usb/phy.h>
+-
+-#define USB_MDIO_CTRL_PHY_BUSY BIT(7)
+-#define USB_MDIO_CTRL_PHY_WRITE BIT(0)
+-#define USB_MDIO_CTRL_PHY_ADDR_SHIFT 8
+-#define USB_MDIO_CTRL_PHY_DATA_SHIFT 16
+-
+-#define MAX_USB_PHY_DATA_SIZE 0x30
+-#define PHY_ADDR_0X09 0x09
+-#define PHY_ADDR_0X0B 0x0b
+-#define PHY_ADDR_0X0D 0x0d
+-#define PHY_ADDR_0X10 0x10
+-#define PHY_ADDR_0X1F 0x1f
+-#define PHY_ADDR_0X20 0x20
+-#define PHY_ADDR_0X21 0x21
+-#define PHY_ADDR_0X30 0x30
+-
+-#define REG_0X09_FORCE_CALIBRATION BIT(9)
+-#define REG_0X0B_RX_OFFSET_RANGE_MASK 0xc
+-#define REG_0X0D_RX_DEBUG_TEST_EN BIT(6)
+-#define REG_0X10_DEBUG_MODE_SETTING 0x3c0
+-#define REG_0X10_DEBUG_MODE_SETTING_MASK 0x3f8
+-#define REG_0X1F_RX_OFFSET_CODE_MASK 0x1e
+-
+-#define USB_U3_TX_LFPS_SWING_TRIM_SHIFT 4
+-#define USB_U3_TX_LFPS_SWING_TRIM_MASK 0xf
+-#define AMPLITUDE_CONTROL_COARSE_MASK 0xff
+-#define AMPLITUDE_CONTROL_FINE_MASK 0xffff
+-#define AMPLITUDE_CONTROL_COARSE_DEFAULT 0xff
+-#define AMPLITUDE_CONTROL_FINE_DEFAULT 0xffff
+-
+-#define PHY_ADDR_MAP_ARRAY_INDEX(addr) (addr)
+-#define ARRAY_INDEX_MAP_PHY_ADDR(index) (index)
+-
+-struct phy_reg {
+- void __iomem *reg_mdio_ctl;
+-};
+-
+-struct phy_data {
+- u8 addr;
+- u16 data;
+-};
+-
+-struct phy_cfg {
+- int param_size;
+- struct phy_data param[MAX_USB_PHY_DATA_SIZE];
+-
+- bool check_efuse;
+- bool do_toggle;
+- bool do_toggle_once;
+- bool use_default_parameter;
+- bool check_rx_front_end_offset;
+-};
+-
+-struct phy_parameter {
+- struct phy_reg phy_reg;
+-
+- /* Get from efuse */
+- u8 efuse_usb_u3_tx_lfps_swing_trim;
+-
+- /* Get from dts */
+- u32 amplitude_control_coarse;
+- u32 amplitude_control_fine;
+-};
+-
+-struct rtk_phy {
+- struct usb_phy phy;
+- struct device *dev;
+-
+- struct phy_cfg *phy_cfg;
+- int num_phy;
+- struct phy_parameter *phy_parameter;
+-
+- struct dentry *debug_dir;
+-};
+-
+-#define PHY_IO_TIMEOUT_USEC (50000)
+-#define PHY_IO_DELAY_US (100)
+-
+-static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
+-{
+- int ret;
+- unsigned int val;
+-
+- ret = read_poll_timeout(readl, val, ((val & mask) == result),
+- PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
+- if (ret) {
+- pr_err("%s can't program USB phy\n", __func__);
+- return -ETIMEDOUT;
+- }
+-
+- return 0;
+-}
+-
+-static int rtk_phy3_wait_vbusy(struct phy_reg *phy_reg)
+-{
+- return utmi_wait_register(phy_reg->reg_mdio_ctl, USB_MDIO_CTRL_PHY_BUSY, 0);
+-}
+-
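+-/*
+- * Descriptive note (from the defines above): MDIO-style access — the PHY
+- * address is shifted in at USB_MDIO_CTRL_PHY_ADDR_SHIFT (bit 8), the 16-bit
+- * data at USB_MDIO_CTRL_PHY_DATA_SHIFT (bit 16), and USB_MDIO_CTRL_PHY_BUSY
+- * (bit 7) is polled until the access completes.
+- */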
+-static u16 rtk_phy_read(struct phy_reg *phy_reg, char addr)
+-{
+- unsigned int tmp;
+- u32 value;
+-
+- tmp = (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT);
+-
+- writel(tmp, phy_reg->reg_mdio_ctl);
+-
+- rtk_phy3_wait_vbusy(phy_reg);
+-
+- value = readl(phy_reg->reg_mdio_ctl);
+- value = value >> USB_MDIO_CTRL_PHY_DATA_SHIFT;
+-
+- return (u16)value;
+-}
+-
+-static int rtk_phy_write(struct phy_reg *phy_reg, char addr, u16 data)
+-{
+- unsigned int val;
+-
+- val = USB_MDIO_CTRL_PHY_WRITE |
+- (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT) |
+- (data << USB_MDIO_CTRL_PHY_DATA_SHIFT);
+-
+- writel(val, phy_reg->reg_mdio_ctl);
+-
+- rtk_phy3_wait_vbusy(phy_reg);
+-
+- return 0;
+-}
+-
+-static void do_rtk_usb3_phy_toggle(struct rtk_phy *rtk_phy, int index, bool connect)
+-{
+- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+- struct phy_reg *phy_reg;
+- struct phy_parameter *phy_parameter;
+- struct phy_data *phy_data;
+- u8 addr;
+- u16 data;
+- int i;
+-
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+- phy_reg = &phy_parameter->phy_reg;
+-
+- if (!phy_cfg->do_toggle)
+- return;
+-
+- i = PHY_ADDR_MAP_ARRAY_INDEX(PHY_ADDR_0X09);
+- phy_data = phy_cfg->param + i;
+- addr = phy_data->addr;
+- data = phy_data->data;
+-
+- if (!addr && !data) {
+- addr = PHY_ADDR_0X09;
+- data = rtk_phy_read(phy_reg, addr);
+- phy_data->addr = addr;
+- phy_data->data = data;
+- }
+-
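+-	/* Pulse REG_0X09_FORCE_CALIBRATION low, then high, to retrigger calibration */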
+- rtk_phy_write(phy_reg, addr, data & (~REG_0X09_FORCE_CALIBRATION));
+- mdelay(1);
+- rtk_phy_write(phy_reg, addr, data | REG_0X09_FORCE_CALIBRATION);
+-}
+-
+-static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
+-{
+- struct phy_cfg *phy_cfg;
+- struct phy_reg *phy_reg;
+- struct phy_parameter *phy_parameter;
+- int i = 0;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+- phy_reg = &phy_parameter->phy_reg;
+-
+- if (phy_cfg->use_default_parameter)
+- goto do_toggle;
+-
+- for (i = 0; i < phy_cfg->param_size; i++) {
+- struct phy_data *phy_data = phy_cfg->param + i;
+- u8 addr = phy_data->addr;
+- u16 data = phy_data->data;
+-
+- if (!addr && !data)
+- continue;
+-
+- rtk_phy_write(phy_reg, addr, data);
+- }
+-
+-do_toggle:
+- if (phy_cfg->do_toggle_once)
+- phy_cfg->do_toggle = true;
+-
+- do_rtk_usb3_phy_toggle(rtk_phy, index, false);
+-
+- if (phy_cfg->do_toggle_once) {
+- u16 check_value = 0;
+- int count = 10;
+- u16 value_0x0d, value_0x10;
+-
+-		/* Enable debug mode by setting 0x0D and 0x10 */
+- value_0x0d = rtk_phy_read(phy_reg, PHY_ADDR_0X0D);
+- value_0x10 = rtk_phy_read(phy_reg, PHY_ADDR_0X10);
+-
+- rtk_phy_write(phy_reg, PHY_ADDR_0X0D,
+- value_0x0d | REG_0X0D_RX_DEBUG_TEST_EN);
+- rtk_phy_write(phy_reg, PHY_ADDR_0X10,
+- (value_0x10 & ~REG_0X10_DEBUG_MODE_SETTING_MASK) |
+- REG_0X10_DEBUG_MODE_SETTING);
+-
+- check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
+-
+- while (!(check_value & BIT(15))) {
+- check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
+- mdelay(1);
+- if (count-- < 0)
+- break;
+- }
+-
+- if (!(check_value & BIT(15)))
+-			dev_info(rtk_phy->dev, "toggle failed: addr=0x%02x, data=0x%04x\n",
+- PHY_ADDR_0X30, check_value);
+-
+-		/* Disable debug mode by setting 0x0D and 0x10 back to defaults */
+- rtk_phy_write(phy_reg, PHY_ADDR_0X0D, value_0x0d);
+- rtk_phy_write(phy_reg, PHY_ADDR_0X10, value_0x10);
+-
+- phy_cfg->do_toggle = false;
+- }
+-
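+-	/*
+-	 * Descriptive note (inferred from the code below): if the RX offset
+-	 * code has saturated at either end of its range, step the offset
+-	 * range field up once and redo the toggle above.
+-	 */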
+- if (phy_cfg->check_rx_front_end_offset) {
+- u16 rx_offset_code, rx_offset_range;
+- u16 code_mask = REG_0X1F_RX_OFFSET_CODE_MASK;
+- u16 range_mask = REG_0X0B_RX_OFFSET_RANGE_MASK;
+- bool do_update = false;
+-
+- rx_offset_code = rtk_phy_read(phy_reg, PHY_ADDR_0X1F);
+- if (((rx_offset_code & code_mask) == 0x0) ||
+- ((rx_offset_code & code_mask) == code_mask))
+- do_update = true;
+-
+- rx_offset_range = rtk_phy_read(phy_reg, PHY_ADDR_0X0B);
+- if (((rx_offset_range & range_mask) == range_mask) && do_update) {
+- dev_warn(rtk_phy->dev, "Don't update rx_offset_range (rx_offset_code=0x%x, rx_offset_range=0x%x)\n",
+- rx_offset_code, rx_offset_range);
+- do_update = false;
+- }
+-
+- if (do_update) {
+- u16 tmp1, tmp2;
+-
+- tmp1 = rx_offset_range & (~range_mask);
+- tmp2 = rx_offset_range & range_mask;
+- tmp2 += (1 << 2);
+- rx_offset_range = tmp1 | (tmp2 & range_mask);
+- rtk_phy_write(phy_reg, PHY_ADDR_0X0B, rx_offset_range);
+- goto do_toggle;
+- }
+- }
+-
+- return 0;
+-}
+-
+-static int rtk_phy_init(struct phy *phy)
+-{
+- struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+- int ret = 0;
+- int i;
+- unsigned long phy_init_time = jiffies;
+-
+- for (i = 0; i < rtk_phy->num_phy; i++)
+- ret = do_rtk_phy_init(rtk_phy, i);
+-
+-	dev_dbg(rtk_phy->dev, "Initialized RTK USB 3.0 PHY (took %dms)\n",
+- jiffies_to_msecs(jiffies - phy_init_time));
+-
+- return ret;
+-}
+-
+-static int rtk_phy_exit(struct phy *phy)
+-{
+- return 0;
+-}
+-
+-static const struct phy_ops ops = {
+- .init = rtk_phy_init,
+- .exit = rtk_phy_exit,
+- .owner = THIS_MODULE,
+-};
+-
+-static void rtk_phy_toggle(struct usb_phy *usb3_phy, bool connect, int port)
+-{
+- int index = port;
+- struct rtk_phy *rtk_phy = NULL;
+-
+- rtk_phy = dev_get_drvdata(usb3_phy->dev);
+-
+- if (index > rtk_phy->num_phy) {
+-		dev_err(rtk_phy->dev, "%s: port %d is not a valid usb phy port (num_phy=%d)\n",
+-			__func__, index, rtk_phy->num_phy);
+- return;
+- }
+-
+- do_rtk_usb3_phy_toggle(rtk_phy, index, connect);
+-}
+-
+-static int rtk_phy_notify_port_status(struct usb_phy *x, int port,
+- u16 portstatus, u16 portchange)
+-{
+- bool connect = false;
+-
+- pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n",
+- __func__, port, (int)portstatus, (int)portchange);
+- if (portstatus & USB_PORT_STAT_CONNECTION)
+- connect = true;
+-
+- if (portchange & USB_PORT_STAT_C_CONNECTION)
+- rtk_phy_toggle(x, connect, port);
+-
+- return 0;
+-}
+-
+-#ifdef CONFIG_DEBUG_FS
+-static struct dentry *create_phy_debug_root(void)
+-{
+- struct dentry *phy_debug_root;
+-
+- phy_debug_root = debugfs_lookup("phy", usb_debug_root);
+- if (!phy_debug_root)
+- phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
+-
+- return phy_debug_root;
+-}
+-
+-static int rtk_usb3_parameter_show(struct seq_file *s, void *unused)
+-{
+- struct rtk_phy *rtk_phy = s->private;
+- struct phy_cfg *phy_cfg;
+- int i, index;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+-
+- seq_puts(s, "Property:\n");
+- seq_printf(s, " check_efuse: %s\n",
+- phy_cfg->check_efuse ? "Enable" : "Disable");
+- seq_printf(s, " do_toggle: %s\n",
+- phy_cfg->do_toggle ? "Enable" : "Disable");
+- seq_printf(s, " do_toggle_once: %s\n",
+- phy_cfg->do_toggle_once ? "Enable" : "Disable");
+- seq_printf(s, " use_default_parameter: %s\n",
+- phy_cfg->use_default_parameter ? "Enable" : "Disable");
+-
+- for (index = 0; index < rtk_phy->num_phy; index++) {
+- struct phy_reg *phy_reg;
+- struct phy_parameter *phy_parameter;
+-
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+- phy_reg = &phy_parameter->phy_reg;
+-
+- seq_printf(s, "PHY %d:\n", index);
+-
+- for (i = 0; i < phy_cfg->param_size; i++) {
+- struct phy_data *phy_data = phy_cfg->param + i;
+- u8 addr = ARRAY_INDEX_MAP_PHY_ADDR(i);
+- u16 data = phy_data->data;
+-
+- if (!phy_data->addr && !data)
+- seq_printf(s, " addr = 0x%02x, data = none ==> read value = 0x%04x\n",
+- addr, rtk_phy_read(phy_reg, addr));
+- else
+- seq_printf(s, " addr = 0x%02x, data = 0x%04x ==> read value = 0x%04x\n",
+- addr, data, rtk_phy_read(phy_reg, addr));
+- }
+-
+- seq_puts(s, "PHY Property:\n");
+- seq_printf(s, " efuse_usb_u3_tx_lfps_swing_trim: 0x%x\n",
+- (int)phy_parameter->efuse_usb_u3_tx_lfps_swing_trim);
+- seq_printf(s, " amplitude_control_coarse: 0x%x\n",
+- (int)phy_parameter->amplitude_control_coarse);
+- seq_printf(s, " amplitude_control_fine: 0x%x\n",
+- (int)phy_parameter->amplitude_control_fine);
+- }
+-
+- return 0;
+-}
+-DEFINE_SHOW_ATTRIBUTE(rtk_usb3_parameter);
+-
+-static inline void create_debug_files(struct rtk_phy *rtk_phy)
+-{
+- struct dentry *phy_debug_root = NULL;
+-
+- phy_debug_root = create_phy_debug_root();
+-
+- if (!phy_debug_root)
+- return;
+-
+- rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), phy_debug_root);
+-
+- debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+- &rtk_usb3_parameter_fops);
+-
+- return;
+-}
+-
+-static inline void remove_debug_files(struct rtk_phy *rtk_phy)
+-{
+- debugfs_remove_recursive(rtk_phy->debug_dir);
+-}
+-#else
+-static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
+-static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
+-#endif /* CONFIG_DEBUG_FS */
+-
+-static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter, int index)
+-{
+- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+- u8 value = 0;
+- struct nvmem_cell *cell;
+-
+- if (!phy_cfg->check_efuse)
+- goto out;
+-
+- cell = nvmem_cell_get(rtk_phy->dev, "usb_u3_tx_lfps_swing_trim");
+- if (IS_ERR(cell)) {
+- dev_dbg(rtk_phy->dev, "%s no usb_u3_tx_lfps_swing_trim: %ld\n",
+- __func__, PTR_ERR(cell));
+- } else {
+- unsigned char *buf;
+- size_t buf_size;
+-
+- buf = nvmem_cell_read(cell, &buf_size);
+- if (!IS_ERR(buf)) {
+- value = buf[0] & USB_U3_TX_LFPS_SWING_TRIM_MASK;
+- kfree(buf);
+- }
+- nvmem_cell_put(cell);
+- }
+-
+- if (value > 0 && value < 0x8)
+- phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = 0x8;
+- else
+- phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = (u8)value;
+-
+-out:
+- return 0;
+-}
+-
+-static void update_amplitude_control_value(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter)
+-{
+- struct phy_cfg *phy_cfg;
+- struct phy_reg *phy_reg;
+-
+- phy_reg = &phy_parameter->phy_reg;
+- phy_cfg = rtk_phy->phy_cfg;
+-
+- if (phy_parameter->amplitude_control_coarse != AMPLITUDE_CONTROL_COARSE_DEFAULT) {
+- u16 val_mask = AMPLITUDE_CONTROL_COARSE_MASK;
+- u16 data;
+-
+- if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
+- phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
+- data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
+- } else {
+- data = phy_cfg->param[PHY_ADDR_0X20].data;
+- }
+-
+- data &= (~val_mask);
+- data |= (phy_parameter->amplitude_control_coarse & val_mask);
+-
+- phy_cfg->param[PHY_ADDR_0X20].data = data;
+- }
+-
+- if (phy_parameter->efuse_usb_u3_tx_lfps_swing_trim) {
+- u8 efuse_val = phy_parameter->efuse_usb_u3_tx_lfps_swing_trim;
+- u16 val_mask = USB_U3_TX_LFPS_SWING_TRIM_MASK;
+- int val_shift = USB_U3_TX_LFPS_SWING_TRIM_SHIFT;
+- u16 data;
+-
+- if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
+- phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
+- data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
+- } else {
+- data = phy_cfg->param[PHY_ADDR_0X20].data;
+- }
+-
+- data &= ~(val_mask << val_shift);
+- data |= ((efuse_val & val_mask) << val_shift);
+-
+- phy_cfg->param[PHY_ADDR_0X20].data = data;
+- }
+-
+- if (phy_parameter->amplitude_control_fine != AMPLITUDE_CONTROL_FINE_DEFAULT) {
+- u16 val_mask = AMPLITUDE_CONTROL_FINE_MASK;
+-
+- if (!phy_cfg->param[PHY_ADDR_0X21].addr && !phy_cfg->param[PHY_ADDR_0X21].data)
+- phy_cfg->param[PHY_ADDR_0X21].addr = PHY_ADDR_0X21;
+-
+- phy_cfg->param[PHY_ADDR_0X21].data =
+- phy_parameter->amplitude_control_fine & val_mask;
+- }
+-}
+-
+-static int parse_phy_data(struct rtk_phy *rtk_phy)
+-{
+- struct device *dev = rtk_phy->dev;
+- struct phy_parameter *phy_parameter;
+- int ret = 0;
+- int index;
+-
+- rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
+- rtk_phy->num_phy, GFP_KERNEL);
+- if (!rtk_phy->phy_parameter)
+- return -ENOMEM;
+-
+- for (index = 0; index < rtk_phy->num_phy; index++) {
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+-
+- phy_parameter->phy_reg.reg_mdio_ctl = of_iomap(dev->of_node, 0) + index;
+-
+- /* Amplitude control address 0x20 bit 0 to bit 7 */
+- if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-coarse-tuning",
+- &phy_parameter->amplitude_control_coarse))
+- phy_parameter->amplitude_control_coarse = AMPLITUDE_CONTROL_COARSE_DEFAULT;
+-
+-		/* Amplitude control address 0x21 bit 0 to bit 15 */
+- if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-fine-tuning",
+- &phy_parameter->amplitude_control_fine))
+- phy_parameter->amplitude_control_fine = AMPLITUDE_CONTROL_FINE_DEFAULT;
+-
+- get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
+-
+- update_amplitude_control_value(rtk_phy, phy_parameter);
+- }
+-
+- return ret;
+-}
+-
+-static int rtk_usb3phy_probe(struct platform_device *pdev)
+-{
+- struct rtk_phy *rtk_phy;
+- struct device *dev = &pdev->dev;
+- struct phy *generic_phy;
+- struct phy_provider *phy_provider;
+- const struct phy_cfg *phy_cfg;
+- int ret;
+-
+- phy_cfg = of_device_get_match_data(dev);
+- if (!phy_cfg) {
+-		dev_err(dev, "phy config is not assigned!\n");
+- return -EINVAL;
+- }
+-
+- rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
+- if (!rtk_phy)
+- return -ENOMEM;
+-
+- rtk_phy->dev = &pdev->dev;
+- rtk_phy->phy.dev = rtk_phy->dev;
+- rtk_phy->phy.label = "rtk-usb3phy";
+- rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status;
+-
+- rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
+-
+- memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
+-
+- rtk_phy->num_phy = 1;
+-
+- ret = parse_phy_data(rtk_phy);
+- if (ret)
+- goto err;
+-
+- platform_set_drvdata(pdev, rtk_phy);
+-
+- generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
+- if (IS_ERR(generic_phy))
+- return PTR_ERR(generic_phy);
+-
+- phy_set_drvdata(generic_phy, rtk_phy);
+-
+- phy_provider = devm_of_phy_provider_register(rtk_phy->dev, of_phy_simple_xlate);
+- if (IS_ERR(phy_provider))
+- return PTR_ERR(phy_provider);
+-
+- ret = usb_add_phy_dev(&rtk_phy->phy);
+- if (ret)
+- goto err;
+-
+- create_debug_files(rtk_phy);
+-
+-err:
+- return ret;
+-}
+-
+-static void rtk_usb3phy_remove(struct platform_device *pdev)
+-{
+- struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
+-
+- remove_debug_files(rtk_phy);
+-
+- usb_remove_phy(&rtk_phy->phy);
+-}
+-
+-static const struct phy_cfg rtd1295_phy_cfg = {
+- .param_size = MAX_USB_PHY_DATA_SIZE,
+- .param = { [0] = {0x01, 0x4008}, [1] = {0x01, 0xe046},
+- [2] = {0x02, 0x6046}, [3] = {0x03, 0x2779},
+- [4] = {0x04, 0x72f5}, [5] = {0x05, 0x2ad3},
+- [6] = {0x06, 0x000e}, [7] = {0x07, 0x2e00},
+- [8] = {0x08, 0x3591}, [9] = {0x09, 0x525c},
+- [10] = {0x0a, 0xa600}, [11] = {0x0b, 0xa904},
+- [12] = {0x0c, 0xc000}, [13] = {0x0d, 0xef1c},
+- [14] = {0x0e, 0x2000}, [15] = {0x0f, 0x0000},
+- [16] = {0x10, 0x000c}, [17] = {0x11, 0x4c00},
+- [18] = {0x12, 0xfc00}, [19] = {0x13, 0x0c81},
+- [20] = {0x14, 0xde01}, [21] = {0x15, 0x0000},
+- [22] = {0x16, 0x0000}, [23] = {0x17, 0x0000},
+- [24] = {0x18, 0x0000}, [25] = {0x19, 0x4004},
+- [26] = {0x1a, 0x1260}, [27] = {0x1b, 0xff00},
+- [28] = {0x1c, 0xcb00}, [29] = {0x1d, 0xa03f},
+- [30] = {0x1e, 0xc2e0}, [31] = {0x1f, 0x2807},
+- [32] = {0x20, 0x947a}, [33] = {0x21, 0x88aa},
+- [34] = {0x22, 0x0057}, [35] = {0x23, 0xab66},
+- [36] = {0x24, 0x0800}, [37] = {0x25, 0x0000},
+- [38] = {0x26, 0x040a}, [39] = {0x27, 0x01d6},
+- [40] = {0x28, 0xf8c2}, [41] = {0x29, 0x3080},
+- [42] = {0x2a, 0x3082}, [43] = {0x2b, 0x2078},
+- [44] = {0x2c, 0xffff}, [45] = {0x2d, 0xffff},
+- [46] = {0x2e, 0x0000}, [47] = {0x2f, 0x0040}, },
+- .check_efuse = false,
+- .do_toggle = true,
+- .do_toggle_once = false,
+- .use_default_parameter = false,
+- .check_rx_front_end_offset = false,
+-};
+-
+-static const struct phy_cfg rtd1619_phy_cfg = {
+- .param_size = MAX_USB_PHY_DATA_SIZE,
+- .param = { [8] = {0x08, 0x3591},
+- [38] = {0x26, 0x840b},
+- [40] = {0x28, 0xf842}, },
+- .check_efuse = false,
+- .do_toggle = true,
+- .do_toggle_once = false,
+- .use_default_parameter = false,
+- .check_rx_front_end_offset = false,
+-};
+-
+-static const struct phy_cfg rtd1319_phy_cfg = {
+- .param_size = MAX_USB_PHY_DATA_SIZE,
+- .param = { [1] = {0x01, 0xac86},
+- [6] = {0x06, 0x0003},
+- [9] = {0x09, 0x924c},
+- [10] = {0x0a, 0xa608},
+- [11] = {0x0b, 0xb905},
+- [14] = {0x0e, 0x2010},
+- [32] = {0x20, 0x705a},
+- [33] = {0x21, 0xf645},
+- [34] = {0x22, 0x0013},
+- [35] = {0x23, 0xcb66},
+- [41] = {0x29, 0xff00}, },
+- .check_efuse = true,
+- .do_toggle = true,
+- .do_toggle_once = false,
+- .use_default_parameter = false,
+- .check_rx_front_end_offset = false,
+-};
+-
+-static const struct phy_cfg rtd1619b_phy_cfg = {
+- .param_size = MAX_USB_PHY_DATA_SIZE,
+- .param = { [1] = {0x01, 0xac8c},
+- [6] = {0x06, 0x0017},
+- [9] = {0x09, 0x724c},
+- [10] = {0x0a, 0xb610},
+- [11] = {0x0b, 0xb90d},
+- [13] = {0x0d, 0xef2a},
+- [15] = {0x0f, 0x9050},
+- [16] = {0x10, 0x000c},
+- [32] = {0x20, 0x70ff},
+- [34] = {0x22, 0x0013},
+- [35] = {0x23, 0xdb66},
+- [38] = {0x26, 0x8609},
+- [41] = {0x29, 0xff13},
+- [42] = {0x2a, 0x3070}, },
+- .check_efuse = true,
+- .do_toggle = false,
+- .do_toggle_once = true,
+- .use_default_parameter = false,
+- .check_rx_front_end_offset = false,
+-};
+-
+-static const struct phy_cfg rtd1319d_phy_cfg = {
+- .param_size = MAX_USB_PHY_DATA_SIZE,
+- .param = { [1] = {0x01, 0xac89},
+- [4] = {0x04, 0xf2f5},
+- [6] = {0x06, 0x0017},
+- [9] = {0x09, 0x424c},
+- [10] = {0x0a, 0x9610},
+- [11] = {0x0b, 0x9901},
+- [12] = {0x0c, 0xf000},
+- [13] = {0x0d, 0xef2a},
+- [14] = {0x0e, 0x1000},
+- [15] = {0x0f, 0x9050},
+- [32] = {0x20, 0x7077},
+- [35] = {0x23, 0x0b62},
+- [37] = {0x25, 0x10ec},
+- [42] = {0x2a, 0x3070}, },
+- .check_efuse = true,
+- .do_toggle = false,
+- .do_toggle_once = true,
+- .use_default_parameter = false,
+- .check_rx_front_end_offset = true,
+-};
+-
+-static const struct of_device_id usbphy_rtk_dt_match[] = {
+- { .compatible = "realtek,rtd1295-usb3phy", .data = &rtd1295_phy_cfg },
+- { .compatible = "realtek,rtd1319-usb3phy", .data = &rtd1319_phy_cfg },
+- { .compatible = "realtek,rtd1319d-usb3phy", .data = &rtd1319d_phy_cfg },
+- { .compatible = "realtek,rtd1619-usb3phy", .data = &rtd1619_phy_cfg },
+- { .compatible = "realtek,rtd1619b-usb3phy", .data = &rtd1619b_phy_cfg },
+- {},
+-};
+-MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
+-
+-static struct platform_driver rtk_usb3phy_driver = {
+- .probe = rtk_usb3phy_probe,
+- .remove_new = rtk_usb3phy_remove,
+- .driver = {
+- .name = "rtk-usb3phy",
+- .of_match_table = usbphy_rtk_dt_match,
+- },
+-};
+-
+-module_platform_driver(rtk_usb3phy_driver);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_ALIAS("platform: rtk-usb3phy");
+-MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
+-MODULE_DESCRIPTION("Realtek usb 3.0 phy driver");
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index e9dc9638120a5..184ec92241ca8 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1253,17 +1253,17 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
+ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
+ {
+ struct pinctrl_setting *setting, *setting2;
+- struct pinctrl_state *old_state = p->state;
++ struct pinctrl_state *old_state = READ_ONCE(p->state);
+ int ret;
+
+- if (p->state) {
++ if (old_state) {
+ /*
+ * For each pinmux setting in the old state, forget SW's record
+ * of mux owner for that pingroup. Any pingroups which are
+ * still owned by the new state will be re-acquired by the call
+ * to pinmux_enable_setting() in the loop below.
+ */
+- list_for_each_entry(setting, &p->state->settings, node) {
++ list_for_each_entry(setting, &old_state->settings, node) {
+ if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
+ continue;
+ pinmux_disable_setting(setting);
+diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
+index faa8b7ff5bcf3..ec76e43527c5c 100644
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -983,11 +983,18 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
+
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+- if (arg)
++ if (arg) {
+ conf |= BYT_DEBOUNCE_EN;
+- else
++ } else {
+ conf &= ~BYT_DEBOUNCE_EN;
+
++ /*
++ * No need to update the pulse value.
++ * Debounce is going to be disabled.
++ */
++ break;
++ }
++
+ switch (arg) {
+ case 375:
+ db_pulse = BYT_DEBOUNCE_PULSE_375US;
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index 37cdfe4b04f9a..2ea6ef99cc70b 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -1175,6 +1175,8 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
+ u32 port;
+ u8 bit;
+
++ irq_chip_disable_parent(d);
++
+ port = RZG2L_PIN_ID_TO_PORT(hwirq);
+ bit = RZG2L_PIN_ID_TO_PIN(hwirq);
+
+@@ -1189,7 +1191,6 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ gpiochip_disable_irq(gc, hwirq);
+- irq_chip_disable_parent(d);
+ }
+
+ static void rzg2l_gpio_irq_enable(struct irq_data *d)
+diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
+index a73385a431de9..346a31f31bba8 100644
+--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
+@@ -1283,9 +1283,11 @@ static struct stm32_desc_pin *stm32_pctrl_get_desc_pin_from_gpio(struct stm32_pi
+ int i;
+
+ /* With few exceptions (e.g. bank 'Z'), pin number matches with pin index in array */
+- pin_desc = pctl->pins + stm32_pin_nb;
+- if (pin_desc->pin.number == stm32_pin_nb)
+- return pin_desc;
++ if (stm32_pin_nb < pctl->npins) {
++ pin_desc = pctl->pins + stm32_pin_nb;
++ if (pin_desc->pin.number == stm32_pin_nb)
++ return pin_desc;
++ }
+
+ /* Otherwise, loop all array to find the pin with the right number */
+ for (i = 0; i < pctl->npins; i++) {
+@@ -1378,6 +1380,11 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
+ }
+
+ names = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL);
++ if (!names) {
++ err = -ENOMEM;
++ goto err_clk;
++ }
++
+ for (i = 0; i < npins; i++) {
+ stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i);
+ if (stm32_pin && stm32_pin->pin.name)
+diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
+index 5d36fbc75e1bb..badc68bbae8cc 100644
+--- a/drivers/platform/chrome/cros_ec.c
++++ b/drivers/platform/chrome/cros_ec.c
+@@ -321,17 +321,8 @@ void cros_ec_unregister(struct cros_ec_device *ec_dev)
+ EXPORT_SYMBOL(cros_ec_unregister);
+
+ #ifdef CONFIG_PM_SLEEP
+-/**
+- * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
+- * @ec_dev: Device to suspend.
+- *
+- * This can be called by drivers to handle a suspend event.
+- *
+- * Return: 0 on success or negative error code.
+- */
+-int cros_ec_suspend(struct cros_ec_device *ec_dev)
++static void cros_ec_send_suspend_event(struct cros_ec_device *ec_dev)
+ {
+- struct device *dev = ec_dev->dev;
+ int ret;
+ u8 sleep_event;
+
+@@ -343,7 +334,26 @@ int cros_ec_suspend(struct cros_ec_device *ec_dev)
+ if (ret < 0)
+ dev_dbg(ec_dev->dev, "Error %d sending suspend event to ec\n",
+ ret);
++}
+
++/**
++ * cros_ec_suspend_prepare() - Handle a suspend prepare operation for the ChromeOS EC device.
++ * @ec_dev: Device to suspend.
++ *
++ * This can be called by drivers to handle a suspend prepare stage of suspend.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_suspend_prepare(struct cros_ec_device *ec_dev)
++{
++ cros_ec_send_suspend_event(ec_dev);
++ return 0;
++}
++EXPORT_SYMBOL(cros_ec_suspend_prepare);
++
++static void cros_ec_disable_irq(struct cros_ec_device *ec_dev)
++{
++ struct device *dev = ec_dev->dev;
+ if (device_may_wakeup(dev))
+ ec_dev->wake_enabled = !enable_irq_wake(ec_dev->irq);
+ else
+@@ -351,7 +361,35 @@ int cros_ec_suspend(struct cros_ec_device *ec_dev)
+
+ disable_irq(ec_dev->irq);
+ ec_dev->suspended = true;
++}
+
++/**
++ * cros_ec_suspend_late() - Handle a suspend late operation for the ChromeOS EC device.
++ * @ec_dev: Device to suspend.
++ *
++ * This can be called by drivers to handle a suspend late stage of suspend.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_suspend_late(struct cros_ec_device *ec_dev)
++{
++ cros_ec_disable_irq(ec_dev);
++ return 0;
++}
++EXPORT_SYMBOL(cros_ec_suspend_late);
++
++/**
++ * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
++ * @ec_dev: Device to suspend.
++ *
++ * This can be called by drivers to handle a suspend event.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_suspend(struct cros_ec_device *ec_dev)
++{
++ cros_ec_send_suspend_event(ec_dev);
++ cros_ec_disable_irq(ec_dev);
+ return 0;
+ }
+ EXPORT_SYMBOL(cros_ec_suspend);
+@@ -370,22 +408,11 @@ static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
+ }
+ }
+
+-/**
+- * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
+- * @ec_dev: Device to resume.
+- *
+- * This can be called by drivers to handle a resume event.
+- *
+- * Return: 0 on success or negative error code.
+- */
+-int cros_ec_resume(struct cros_ec_device *ec_dev)
++static void cros_ec_send_resume_event(struct cros_ec_device *ec_dev)
+ {
+ int ret;
+ u8 sleep_event;
+
+- ec_dev->suspended = false;
+- enable_irq(ec_dev->irq);
+-
+ sleep_event = (!IS_ENABLED(CONFIG_ACPI) || pm_suspend_via_firmware()) ?
+ HOST_SLEEP_EVENT_S3_RESUME :
+ HOST_SLEEP_EVENT_S0IX_RESUME;
+@@ -394,6 +421,24 @@ int cros_ec_resume(struct cros_ec_device *ec_dev)
+ if (ret < 0)
+ dev_dbg(ec_dev->dev, "Error %d sending resume event to ec\n",
+ ret);
++}
++
++/**
++ * cros_ec_resume_complete() - Handle a resume complete operation for the ChromeOS EC device.
++ * @ec_dev: Device to resume.
++ *
++ * This can be called by drivers to handle a resume complete stage of resume.
++ */
++void cros_ec_resume_complete(struct cros_ec_device *ec_dev)
++{
++ cros_ec_send_resume_event(ec_dev);
++}
++EXPORT_SYMBOL(cros_ec_resume_complete);
++
++static void cros_ec_enable_irq(struct cros_ec_device *ec_dev)
++{
++ ec_dev->suspended = false;
++ enable_irq(ec_dev->irq);
+
+ if (ec_dev->wake_enabled)
+ disable_irq_wake(ec_dev->irq);
+@@ -403,8 +448,35 @@ int cros_ec_resume(struct cros_ec_device *ec_dev)
+ * suspend. This way the clients know what to do with them.
+ */
+ cros_ec_report_events_during_suspend(ec_dev);
++}
+
++/**
++ * cros_ec_resume_early() - Handle a resume early operation for the ChromeOS EC device.
++ * @ec_dev: Device to resume.
++ *
++ * This can be called by drivers to handle a resume early stage of resume.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_resume_early(struct cros_ec_device *ec_dev)
++{
++ cros_ec_enable_irq(ec_dev);
++ return 0;
++}
++EXPORT_SYMBOL(cros_ec_resume_early);
+
++/**
++ * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
++ * @ec_dev: Device to resume.
++ *
++ * This can be called by drivers to handle a resume event.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_resume(struct cros_ec_device *ec_dev)
++{
++ cros_ec_enable_irq(ec_dev);
++ cros_ec_send_resume_event(ec_dev);
+ return 0;
+ }
+ EXPORT_SYMBOL(cros_ec_resume);
+diff --git a/drivers/platform/chrome/cros_ec.h b/drivers/platform/chrome/cros_ec.h
+index bbca0096868ac..566332f487892 100644
+--- a/drivers/platform/chrome/cros_ec.h
++++ b/drivers/platform/chrome/cros_ec.h
+@@ -14,7 +14,11 @@ int cros_ec_register(struct cros_ec_device *ec_dev);
+ void cros_ec_unregister(struct cros_ec_device *ec_dev);
+
+ int cros_ec_suspend(struct cros_ec_device *ec_dev);
++int cros_ec_suspend_late(struct cros_ec_device *ec_dev);
++int cros_ec_suspend_prepare(struct cros_ec_device *ec_dev);
+ int cros_ec_resume(struct cros_ec_device *ec_dev);
++int cros_ec_resume_early(struct cros_ec_device *ec_dev);
++void cros_ec_resume_complete(struct cros_ec_device *ec_dev);
+
+ irqreturn_t cros_ec_irq_thread(int irq, void *data);
+
+diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
+index 356572452898d..42e1770887fb0 100644
+--- a/drivers/platform/chrome/cros_ec_lpc.c
++++ b/drivers/platform/chrome/cros_ec_lpc.c
+@@ -549,22 +549,36 @@ MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table);
+ static int cros_ec_lpc_prepare(struct device *dev)
+ {
+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+-
+- return cros_ec_suspend(ec_dev);
++ return cros_ec_suspend_prepare(ec_dev);
+ }
+
+ static void cros_ec_lpc_complete(struct device *dev)
+ {
+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+- cros_ec_resume(ec_dev);
++ cros_ec_resume_complete(ec_dev);
++}
++
++static int cros_ec_lpc_suspend_late(struct device *dev)
++{
++ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
++
++ return cros_ec_suspend_late(ec_dev);
++}
++
++static int cros_ec_lpc_resume_early(struct device *dev)
++{
++ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
++
++ return cros_ec_resume_early(ec_dev);
+ }
+ #endif
+
+ static const struct dev_pm_ops cros_ec_lpc_pm_ops = {
+ #ifdef CONFIG_PM_SLEEP
+ .prepare = cros_ec_lpc_prepare,
+- .complete = cros_ec_lpc_complete
++ .complete = cros_ec_lpc_complete,
+ #endif
++ SET_LATE_SYSTEM_SLEEP_PM_OPS(cros_ec_lpc_suspend_late, cros_ec_lpc_resume_early)
+ };
+
+ static struct platform_driver cros_ec_lpc_driver = {
+diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
+index 5b9748e0463bc..63e38671e95a6 100644
+--- a/drivers/platform/chrome/cros_ec_proto_test.c
++++ b/drivers/platform/chrome/cros_ec_proto_test.c
+@@ -2668,6 +2668,7 @@ static int cros_ec_proto_test_init(struct kunit *test)
+ ec_dev->dev->release = cros_ec_proto_test_release;
+ ec_dev->cmd_xfer = cros_kunit_ec_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_xfer_mock;
++ mutex_init(&ec_dev->lock);
+
+ priv->msg = (struct cros_ec_command *)priv->_msg;
+
+diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
+index c1e788b67a748..212f164bc3dba 100644
+--- a/drivers/platform/x86/amd/pmc/pmc.c
++++ b/drivers/platform/x86/amd/pmc/pmc.c
+@@ -912,33 +912,6 @@ static const struct pci_device_id pmc_pci_ids[] = {
+ { }
+ };
+
+-static int amd_pmc_get_dram_size(struct amd_pmc_dev *dev)
+-{
+- int ret;
+-
+- switch (dev->cpu_id) {
+- case AMD_CPU_ID_YC:
+- if (!(dev->major > 90 || (dev->major == 90 && dev->minor > 39))) {
+- ret = -EINVAL;
+- goto err_dram_size;
+- }
+- break;
+- default:
+- ret = -EINVAL;
+- goto err_dram_size;
+- }
+-
+- ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
+- if (ret || !dev->dram_size)
+- goto err_dram_size;
+-
+- return 0;
+-
+-err_dram_size:
+- dev_err(dev->dev, "DRAM size command not supported for this platform\n");
+- return ret;
+-}
+-
+ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
+ {
+ u32 phys_addr_low, phys_addr_hi;
+@@ -957,8 +930,8 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
+ return -EIO;
+
+ /* Get DRAM size */
+- ret = amd_pmc_get_dram_size(dev);
+- if (ret)
++ ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
++ if (ret || !dev->dram_size)
+ dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;
+
+ /* Get STB DRAM address */
+diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+index 5798b49ddaba9..6ddca857cc4d1 100644
+--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
++++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+@@ -592,13 +592,11 @@ static int hp_add_other_attributes(int attr_type)
+ int ret;
+ char *attr_name;
+
+- mutex_lock(&bioscfg_drv.mutex);
+-
+ attr_name_kobj = kzalloc(sizeof(*attr_name_kobj), GFP_KERNEL);
+- if (!attr_name_kobj) {
+- ret = -ENOMEM;
+- goto err_other_attr_init;
+- }
++ if (!attr_name_kobj)
++ return -ENOMEM;
++
++ mutex_lock(&bioscfg_drv.mutex);
+
+ /* Check if attribute type is supported */
+ switch (attr_type) {
+@@ -615,14 +613,14 @@ static int hp_add_other_attributes(int attr_type)
+ default:
+ pr_err("Error: Unknown attr_type: %d\n", attr_type);
+ ret = -EINVAL;
+- goto err_other_attr_init;
++ kfree(attr_name_kobj);
++ goto unlock_drv_mutex;
+ }
+
+ ret = kobject_init_and_add(attr_name_kobj, &attr_name_ktype,
+ NULL, "%s", attr_name);
+ if (ret) {
+ pr_err("Error encountered [%d]\n", ret);
+- kobject_put(attr_name_kobj);
+ goto err_other_attr_init;
+ }
+
+@@ -630,25 +628,25 @@ static int hp_add_other_attributes(int attr_type)
+ switch (attr_type) {
+ case HPWMI_SECURE_PLATFORM_TYPE:
+ ret = hp_populate_secure_platform_data(attr_name_kobj);
+- if (ret)
+- goto err_other_attr_init;
+ break;
+
+ case HPWMI_SURE_START_TYPE:
+ ret = hp_populate_sure_start_data(attr_name_kobj);
+- if (ret)
+- goto err_other_attr_init;
+ break;
+
+ default:
+ ret = -EINVAL;
+- goto err_other_attr_init;
+ }
+
++ if (ret)
++ goto err_other_attr_init;
++
+ mutex_unlock(&bioscfg_drv.mutex);
+ return 0;
+
+ err_other_attr_init:
++ kobject_put(attr_name_kobj);
++unlock_drv_mutex:
+ mutex_unlock(&bioscfg_drv.mutex);
+ kfree(obj);
+ return ret;
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index ac037540acfc6..88eefccb6ed27 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -1425,18 +1425,17 @@ static int ideapad_kbd_bl_init(struct ideapad_private *priv)
+ if (WARN_ON(priv->kbd_bl.initialized))
+ return -EEXIST;
+
+- brightness = ideapad_kbd_bl_brightness_get(priv);
+- if (brightness < 0)
+- return brightness;
+-
+- priv->kbd_bl.last_brightness = brightness;
+-
+ if (ideapad_kbd_bl_check_tristate(priv->kbd_bl.type)) {
+ priv->kbd_bl.led.max_brightness = 2;
+ } else {
+ priv->kbd_bl.led.max_brightness = 1;
+ }
+
++ brightness = ideapad_kbd_bl_brightness_get(priv);
++ if (brightness < 0)
++ return brightness;
++
++ priv->kbd_bl.last_brightness = brightness;
+ priv->kbd_bl.led.name = "platform::" LED_FUNCTION_KBD_BACKLIGHT;
+ priv->kbd_bl.led.brightness_get = ideapad_kbd_bl_led_cdev_brightness_get;
+ priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set;
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 41584427dc323..a46fc417cb200 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -9816,6 +9816,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
+ * Individual addressing is broken on models that expose the
+ * primary battery as BAT1.
+ */
++ TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */
+ TPACPI_Q_LNV('J', '7', true), /* B5400 */
+ TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */
+ TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index a78ddd83cda02..317c907304149 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -911,21 +911,13 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
+ }
+ static int wmi_char_open(struct inode *inode, struct file *filp)
+ {
+- const char *driver_name = filp->f_path.dentry->d_iname;
+- struct wmi_block *wblock;
+- struct wmi_block *next;
+-
+- list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
+- if (!wblock->dev.dev.driver)
+- continue;
+- if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) {
+- filp->private_data = wblock;
+- break;
+- }
+- }
++ /*
++ * The miscdevice already stores a pointer to itself
++ * inside filp->private_data
++ */
++ struct wmi_block *wblock = container_of(filp->private_data, struct wmi_block, char_dev);
+
+- if (!filp->private_data)
+- return -ENODEV;
++ filp->private_data = wblock;
+
+ return nonseekable_open(inode, filp);
+ }
+@@ -1270,8 +1262,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ struct wmi_block *wblock, *next;
+ union acpi_object *obj;
+ acpi_status status;
+- int retval = 0;
+ u32 i, total;
++ int retval;
+
+ status = acpi_evaluate_object(device->handle, "_WDG", NULL, &out);
+ if (ACPI_FAILURE(status))
+@@ -1282,8 +1274,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ return -ENXIO;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+- retval = -ENXIO;
+- goto out_free_pointer;
++ kfree(obj);
++ return -ENXIO;
+ }
+
+ gblock = (const struct guid_block *)obj->buffer.pointer;
+@@ -1298,8 +1290,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+
+ wblock = kzalloc(sizeof(*wblock), GFP_KERNEL);
+ if (!wblock) {
+- retval = -ENOMEM;
+- break;
++ dev_err(wmi_bus_dev, "Failed to allocate %pUL\n", &gblock[i].guid);
++ continue;
+ }
+
+ wblock->acpi_device = device;
+@@ -1338,9 +1330,9 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ }
+ }
+
+-out_free_pointer:
+- kfree(out.pointer);
+- return retval;
++ kfree(obj);
++
++ return 0;
+ }
+
+ /*
+diff --git a/drivers/pmdomain/amlogic/meson-ee-pwrc.c b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
+index cfb796d40d9d2..0dd71cd814c52 100644
+--- a/drivers/pmdomain/amlogic/meson-ee-pwrc.c
++++ b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
+@@ -228,7 +228,7 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
+
+ static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_nna[] = {
+ { G12A_HHI_NANOQ_MEM_PD_REG0, GENMASK(31, 0) },
+- { G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(23, 0) },
++ { G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(31, 0) },
+ };
+
+ #define VPU_PD(__name, __top_pd, __mem, __is_pwr_off, __resets, __clks) \
+diff --git a/drivers/pmdomain/bcm/bcm2835-power.c b/drivers/pmdomain/bcm/bcm2835-power.c
+index 1a179d4e011cf..d2f0233cb6206 100644
+--- a/drivers/pmdomain/bcm/bcm2835-power.c
++++ b/drivers/pmdomain/bcm/bcm2835-power.c
+@@ -175,7 +175,7 @@ static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable
+ }
+ writel(PM_PASSWORD | val, base + reg);
+
+- while (readl(base + reg) & ASB_ACK) {
++ while (!!(readl(base + reg) & ASB_ACK) == enable) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 1000)
+ return -ETIMEDOUT;
+diff --git a/drivers/pmdomain/imx/gpc.c b/drivers/pmdomain/imx/gpc.c
+index 90a8b2c0676ff..419ed15cc10c4 100644
+--- a/drivers/pmdomain/imx/gpc.c
++++ b/drivers/pmdomain/imx/gpc.c
+@@ -498,6 +498,7 @@ static int imx_gpc_probe(struct platform_device *pdev)
+
+ pd_pdev->dev.parent = &pdev->dev;
+ pd_pdev->dev.of_node = np;
++ pd_pdev->dev.fwnode = of_fwnode_handle(np);
+
+ ret = platform_device_add(pd_pdev);
+ if (ret) {
+diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
+index 0b69fb7bafd85..416409e2fd6da 100644
+--- a/drivers/power/supply/power_supply_core.c
++++ b/drivers/power/supply/power_supply_core.c
+@@ -29,7 +29,7 @@
+ struct class *power_supply_class;
+ EXPORT_SYMBOL_GPL(power_supply_class);
+
+-ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
++BLOCKING_NOTIFIER_HEAD(power_supply_notifier);
+ EXPORT_SYMBOL_GPL(power_supply_notifier);
+
+ static struct device_type power_supply_dev_type;
+@@ -97,7 +97,7 @@ static void power_supply_changed_work(struct work_struct *work)
+ class_for_each_device(power_supply_class, NULL, psy,
+ __power_supply_changed_work);
+ power_supply_update_leds(psy);
+- atomic_notifier_call_chain(&power_supply_notifier,
++ blocking_notifier_call_chain(&power_supply_notifier,
+ PSY_EVENT_PROP_CHANGED, psy);
+ kobject_uevent(&psy->dev.kobj, KOBJ_CHANGE);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+@@ -1262,13 +1262,13 @@ static void power_supply_dev_release(struct device *dev)
+
+ int power_supply_reg_notifier(struct notifier_block *nb)
+ {
+- return atomic_notifier_chain_register(&power_supply_notifier, nb);
++ return blocking_notifier_chain_register(&power_supply_notifier, nb);
+ }
+ EXPORT_SYMBOL_GPL(power_supply_reg_notifier);
+
+ void power_supply_unreg_notifier(struct notifier_block *nb)
+ {
+- atomic_notifier_chain_unregister(&power_supply_notifier, nb);
++ blocking_notifier_chain_unregister(&power_supply_notifier, nb);
+ }
+ EXPORT_SYMBOL_GPL(power_supply_unreg_notifier);
+
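[Editor's sketch] Switching the chain from atomic to blocking means the callbacks now run in process context (from power_supply_changed_work) and are allowed to sleep, while registration itself must no longer happen from atomic context. A hypothetical consumer, showing which side of the API is affected:

#include <linux/notifier.h>
#include <linux/power_supply.h>

static int my_psy_notify(struct notifier_block *nb,
			 unsigned long event, void *data)
{
	struct power_supply *psy = data;

	/* with a blocking chain this callback may sleep,
	 * e.g. take a mutex or do I2C I/O */
	if (event == PSY_EVENT_PROP_CHANGED)
		pr_info("%s changed\n", psy->desc->name);

	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_psy_notify,
};

/* probe:  power_supply_reg_notifier(&my_nb);
 * remove: power_supply_unreg_notifier(&my_nb); */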
+diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
+index 2ff7717530bf8..8a2f18fa3faf5 100644
+--- a/drivers/powercap/dtpm_cpu.c
++++ b/drivers/powercap/dtpm_cpu.c
+@@ -24,7 +24,6 @@
+ #include <linux/of.h>
+ #include <linux/pm_qos.h>
+ #include <linux/slab.h>
+-#include <linux/units.h>
+
+ struct dtpm_cpu {
+ struct dtpm dtpm;
+@@ -104,8 +103,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
+ if (pd->table[i].frequency < freq)
+ continue;
+
+- return scale_pd_power_uw(pd_mask, pd->table[i].power *
+- MICROWATT_PER_MILLIWATT);
++ return scale_pd_power_uw(pd_mask, pd->table[i].power);
+ }
+
+ return 0;
+@@ -122,11 +120,9 @@ static int update_pd_power_uw(struct dtpm *dtpm)
+ nr_cpus = cpumask_weight(&cpus);
+
+ dtpm->power_min = em->table[0].power;
+- dtpm->power_min *= MICROWATT_PER_MILLIWATT;
+ dtpm->power_min *= nr_cpus;
+
+ dtpm->power_max = em->table[em->nr_perf_states - 1].power;
+- dtpm->power_max *= MICROWATT_PER_MILLIWATT;
+ dtpm->power_max *= nr_cpus;
+
+ return 0;
+diff --git a/drivers/powercap/dtpm_devfreq.c b/drivers/powercap/dtpm_devfreq.c
+index 91276761a31d9..612c3b59dd5be 100644
+--- a/drivers/powercap/dtpm_devfreq.c
++++ b/drivers/powercap/dtpm_devfreq.c
+@@ -39,10 +39,8 @@ static int update_pd_power_uw(struct dtpm *dtpm)
+ struct em_perf_domain *pd = em_pd_get(dev);
+
+ dtpm->power_min = pd->table[0].power;
+- dtpm->power_min *= MICROWATT_PER_MILLIWATT;
+
+ dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;
+- dtpm->power_max *= MICROWATT_PER_MILLIWATT;
+
+ return 0;
+ }
+@@ -54,13 +52,10 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
+ struct device *dev = devfreq->dev.parent;
+ struct em_perf_domain *pd = em_pd_get(dev);
+ unsigned long freq;
+- u64 power;
+ int i;
+
+ for (i = 0; i < pd->nr_perf_states; i++) {
+-
+- power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
+- if (power > power_limit)
++ if (pd->table[i].power > power_limit)
+ break;
+ }
+
+@@ -68,7 +63,7 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
+
+ dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);
+
+- power_limit = pd->table[i - 1].power * MICROWATT_PER_MILLIWATT;
++ power_limit = pd->table[i - 1].power;
+
+ return power_limit;
+ }
+@@ -110,7 +105,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
+ if (pd->table[i].frequency < freq)
+ continue;
+
+- power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
++ power = pd->table[i].power;
+ power *= status.busy_time;
+ power >>= 10;
+
+diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
+index 40a2cc649c79b..2feed036c1cd4 100644
+--- a/drivers/powercap/intel_rapl_common.c
++++ b/drivers/powercap/intel_rapl_common.c
+@@ -892,7 +892,7 @@ static int rapl_write_pl_data(struct rapl_domain *rd, int pl,
+ return -EINVAL;
+
+ if (rd->rpl[pl].locked) {
+- pr_warn("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
++ pr_debug("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
+ return -EACCES;
+ }
+
+diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
+index 362bf756e6b78..5a3a4cc0bec82 100644
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -490,7 +490,8 @@ ssize_t ptp_read(struct posix_clock *pc,
+
+ for (i = 0; i < cnt; i++) {
+ event[i] = queue->buf[queue->head];
+- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++ /* Paired with READ_ONCE() in queue_cnt() */
++ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+ }
+
+ spin_unlock_irqrestore(&queue->lock, flags);
+diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
+index 80f74e38c2da4..9a50bfb56453c 100644
+--- a/drivers/ptp/ptp_clock.c
++++ b/drivers/ptp/ptp_clock.c
+@@ -56,10 +56,11 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
+ dst->t.sec = seconds;
+ dst->t.nsec = remainder;
+
++ /* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
+ if (!queue_free(queue))
+- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+
+- queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;
++ WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);
+
+ spin_unlock_irqrestore(&queue->lock, flags);
+ }
+diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
+index 75f58fc468a71..b8d4f61f14be4 100644
+--- a/drivers/ptp/ptp_private.h
++++ b/drivers/ptp/ptp_private.h
+@@ -76,9 +76,13 @@ struct ptp_vclock {
+ * that a writer might concurrently increment the tail does not
+ * matter, since the queue remains nonempty nonetheless.
+ */
+-static inline int queue_cnt(struct timestamp_event_queue *q)
++static inline int queue_cnt(const struct timestamp_event_queue *q)
+ {
+- int cnt = q->tail - q->head;
++ /*
++ * Paired with WRITE_ONCE() in enqueue_external_timestamp(),
++ * ptp_read(), extts_fifo_show().
++ */
++ int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);
+ return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
+ }
+
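[Editor's sketch] queue_cnt() can run without the spinlock because there is one producer and one consumer and the count only has to be conservative; the READ_ONCE()/WRITE_ONCE() pairs merely stop the compiler from tearing or refetching the indices. The scheme in isolation, with RING_SZ and struct ring standing in for PTP_MAX_TIMESTAMPS and the timestamp queue:

#include <linux/compiler.h>

#define RING_SZ 128

struct ring {
	int head;	/* consumer index */
	int tail;	/* producer index */
};

static inline int ring_cnt(const struct ring *q)
{
	/* pairs with the WRITE_ONCE() in the producer and consumer */
	int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);

	return cnt < 0 ? RING_SZ + cnt : cnt;
}

static inline void ring_pop(struct ring *q)
{
	/* pairs with the READ_ONCE() in ring_cnt() */
	WRITE_ONCE(q->head, (q->head + 1) % RING_SZ);
}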
+diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
+index 6e4d5456a8851..34ea5c16123a1 100644
+--- a/drivers/ptp/ptp_sysfs.c
++++ b/drivers/ptp/ptp_sysfs.c
+@@ -90,7 +90,8 @@ static ssize_t extts_fifo_show(struct device *dev,
+ qcnt = queue_cnt(queue);
+ if (qcnt) {
+ event = queue->buf[queue->head];
+- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++ /* Paired with READ_ONCE() in queue_cnt() */
++ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+ }
+ spin_unlock_irqrestore(&queue->lock, flags);
+
+diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
+index a3faa9a3de7cc..a7d529bf76adc 100644
+--- a/drivers/pwm/pwm-brcmstb.c
++++ b/drivers/pwm/pwm-brcmstb.c
+@@ -288,7 +288,7 @@ static int brcmstb_pwm_suspend(struct device *dev)
+ {
+ struct brcmstb_pwm *p = dev_get_drvdata(dev);
+
+- clk_disable(p->clk);
++ clk_disable_unprepare(p->clk);
+
+ return 0;
+ }
+@@ -297,7 +297,7 @@ static int brcmstb_pwm_resume(struct device *dev)
+ {
+ struct brcmstb_pwm *p = dev_get_drvdata(dev);
+
+- clk_enable(p->clk);
++ clk_prepare_enable(p->clk);
+
+ return 0;
+ }
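[Editor's sketch] clk_disable() drops only the enable count: the clock stays prepared, so on some platforms it never truly gates, and the resume-side clk_enable() leaves the prepare count unbalanced. The symmetric pairing the hunk restores, reduced to a sketch (my_priv and the function names are hypothetical):

#include <linux/clk.h>
#include <linux/device.h>

struct my_priv { struct clk *clk; };

static int my_suspend(struct device *dev)
{
	struct my_priv *p = dev_get_drvdata(dev);

	/* drop both the enable count and the prepare count */
	clk_disable_unprepare(p->clk);

	return 0;
}

static int my_resume(struct device *dev)
{
	struct my_priv *p = dev_get_drvdata(dev);

	/* retake both counts, exactly mirroring the suspend path */
	return clk_prepare_enable(p->clk);
}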
+diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
+index b1d1373648a38..c8800f84b917f 100644
+--- a/drivers/pwm/pwm-sti.c
++++ b/drivers/pwm/pwm-sti.c
+@@ -79,6 +79,7 @@ struct sti_pwm_compat_data {
+ unsigned int cpt_num_devs;
+ unsigned int max_pwm_cnt;
+ unsigned int max_prescale;
++ struct sti_cpt_ddata *ddata;
+ };
+
+ struct sti_pwm_chip {
+@@ -314,7 +315,7 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
+ {
+ struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+ struct sti_pwm_compat_data *cdata = pc->cdata;
+- struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm);
++ struct sti_cpt_ddata *ddata = &cdata->ddata[pwm->hwpwm];
+ struct device *dev = pc->dev;
+ unsigned int effective_ticks;
+ unsigned long long high, low;
+@@ -440,7 +441,7 @@ static irqreturn_t sti_pwm_interrupt(int irq, void *data)
+ while (cpt_int_stat) {
+ devicenum = ffs(cpt_int_stat) - 1;
+
+- ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]);
++ ddata = &pc->cdata->ddata[devicenum];
+
+ /*
+ * Capture input:
+@@ -638,30 +639,28 @@ static int sti_pwm_probe(struct platform_device *pdev)
+ dev_err(dev, "failed to prepare clock\n");
+ return ret;
+ }
++
++ cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL);
++ if (!cdata->ddata)
++ return -ENOMEM;
+ }
+
+ pc->chip.dev = dev;
+ pc->chip.ops = &sti_pwm_ops;
+ pc->chip.npwm = pc->cdata->pwm_num_devs;
+
+- ret = pwmchip_add(&pc->chip);
+- if (ret < 0) {
+- clk_unprepare(pc->pwm_clk);
+- clk_unprepare(pc->cpt_clk);
+- return ret;
+- }
+-
+ for (i = 0; i < cdata->cpt_num_devs; i++) {
+- struct sti_cpt_ddata *ddata;
+-
+- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+- if (!ddata)
+- return -ENOMEM;
++ struct sti_cpt_ddata *ddata = &cdata->ddata[i];
+
+ init_waitqueue_head(&ddata->wait);
+ mutex_init(&ddata->lock);
++ }
+
+- pwm_set_chip_data(&pc->chip.pwms[i], ddata);
++ ret = pwmchip_add(&pc->chip);
++ if (ret < 0) {
++ clk_unprepare(pc->pwm_clk);
++ clk_unprepare(pc->cpt_clk);
++ return ret;
+ }
+
+ platform_set_drvdata(pdev, pc);
+diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
+index 65fbd95f1dbb0..4ca8fbf4b3e2e 100644
+--- a/drivers/regulator/mt6358-regulator.c
++++ b/drivers/regulator/mt6358-regulator.c
+@@ -688,12 +688,18 @@ static int mt6358_regulator_probe(struct platform_device *pdev)
+ const struct mt6358_regulator_info *mt6358_info;
+ int i, max_regulator, ret;
+
+- if (mt6397->chip_id == MT6366_CHIP_ID) {
+- max_regulator = MT6366_MAX_REGULATOR;
+- mt6358_info = mt6366_regulators;
+- } else {
++ switch (mt6397->chip_id) {
++ case MT6358_CHIP_ID:
+ max_regulator = MT6358_MAX_REGULATOR;
+ mt6358_info = mt6358_regulators;
++ break;
++ case MT6366_CHIP_ID:
++ max_regulator = MT6366_MAX_REGULATOR;
++ mt6358_info = mt6366_regulators;
++ break;
++ default:
++ dev_err(&pdev->dev, "unsupported chip ID: %d\n", mt6397->chip_id);
++ return -EINVAL;
+ }
+
+ ret = mt6358_sync_vcn33_setting(&pdev->dev);
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index d990ba19c50eb..b2e359ac31693 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -1095,7 +1095,7 @@ static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"),
+- RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_lv, "vdd-s4"),
++ RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"),
+diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
+index 3cdc015692ca6..1a65a4e0dc003 100644
+--- a/drivers/rtc/rtc-brcmstb-waketimer.c
++++ b/drivers/rtc/rtc-brcmstb-waketimer.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright © 2014-2017 Broadcom
++ * Copyright © 2014-2023 Broadcom
+ */
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+@@ -34,6 +34,7 @@ struct brcmstb_waketmr {
+ u32 rate;
+ unsigned long rtc_alarm;
+ bool alarm_en;
++ bool alarm_expired;
+ };
+
+ #define BRCMSTB_WKTMR_EVENT 0x00
+@@ -64,6 +65,11 @@ static inline void brcmstb_waketmr_clear_alarm(struct brcmstb_waketmr *timer)
+ writel_relaxed(reg - 1, timer->base + BRCMSTB_WKTMR_ALARM);
+ writel_relaxed(WKTMR_ALARM_EVENT, timer->base + BRCMSTB_WKTMR_EVENT);
+ (void)readl_relaxed(timer->base + BRCMSTB_WKTMR_EVENT);
++ if (timer->alarm_expired) {
++ timer->alarm_expired = false;
++ /* maintain call balance */
++ enable_irq(timer->alarm_irq);
++ }
+ }
+
+ static void brcmstb_waketmr_set_alarm(struct brcmstb_waketmr *timer,
+@@ -105,10 +111,17 @@ static irqreturn_t brcmstb_alarm_irq(int irq, void *data)
+ return IRQ_HANDLED;
+
+ if (timer->alarm_en) {
+- if (!device_may_wakeup(timer->dev))
++ if (device_may_wakeup(timer->dev)) {
++ disable_irq_nosync(irq);
++ timer->alarm_expired = true;
++ } else {
+ writel_relaxed(WKTMR_ALARM_EVENT,
+ timer->base + BRCMSTB_WKTMR_EVENT);
++ }
+ rtc_update_irq(timer->rtc, 1, RTC_IRQF | RTC_AF);
++ } else {
++ writel_relaxed(WKTMR_ALARM_EVENT,
++ timer->base + BRCMSTB_WKTMR_EVENT);
+ }
+
+ return IRQ_HANDLED;
+@@ -221,8 +234,14 @@ static int brcmstb_waketmr_alarm_enable(struct device *dev,
+ !brcmstb_waketmr_is_pending(timer))
+ return -EINVAL;
+ timer->alarm_en = true;
+- if (timer->alarm_irq)
++ if (timer->alarm_irq) {
++ if (timer->alarm_expired) {
++ timer->alarm_expired = false;
++ /* maintain call balance */
++ enable_irq(timer->alarm_irq);
++ }
+ enable_irq(timer->alarm_irq);
++ }
+ } else if (!enabled && timer->alarm_en) {
+ if (timer->alarm_irq)
+ disable_irq(timer->alarm_irq);
+@@ -352,6 +371,17 @@ static int brcmstb_waketmr_suspend(struct device *dev)
+ return brcmstb_waketmr_prepare_suspend(timer);
+ }
+
++static int brcmstb_waketmr_suspend_noirq(struct device *dev)
++{
++ struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
++
++ /* Catch any alarms occurring prior to noirq */
++ if (timer->alarm_expired && device_may_wakeup(dev))
++ return -EBUSY;
++
++ return 0;
++}
++
+ static int brcmstb_waketmr_resume(struct device *dev)
+ {
+ struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
+@@ -368,10 +398,17 @@ static int brcmstb_waketmr_resume(struct device *dev)
+
+ return ret;
+ }
++#else
++#define brcmstb_waketmr_suspend NULL
++#define brcmstb_waketmr_suspend_noirq NULL
++#define brcmstb_waketmr_resume NULL
+ #endif /* CONFIG_PM_SLEEP */
+
+-static SIMPLE_DEV_PM_OPS(brcmstb_waketmr_pm_ops,
+- brcmstb_waketmr_suspend, brcmstb_waketmr_resume);
++static const struct dev_pm_ops brcmstb_waketmr_pm_ops = {
++ .suspend = brcmstb_waketmr_suspend,
++ .suspend_noirq = brcmstb_waketmr_suspend_noirq,
++ .resume = brcmstb_waketmr_resume,
++};
+
+ static const __maybe_unused struct of_device_id brcmstb_waketmr_of_match[] = {
+ { .compatible = "brcm,brcmstb-waketimer" },
+diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
+index 06194674d71c5..540042b9eec8f 100644
+--- a/drivers/rtc/rtc-pcf85363.c
++++ b/drivers/rtc/rtc-pcf85363.c
+@@ -438,7 +438,7 @@ static int pcf85363_probe(struct i2c_client *client)
+ if (client->irq > 0 || wakeup_source) {
+ regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
+ regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
+- PIN_IO_INTA_OUT, PIN_IO_INTAPM);
++ PIN_IO_INTAPM, PIN_IO_INTA_OUT);
+ }
+
+ if (client->irq > 0) {
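[Editor's note] The pcf85363 hunk is a pure argument-order fix: the prototype is regmap_update_bits(map, reg, mask, val), and the driver had been passing the value where the mask belongs, so the INTA pin mode was never actually switched. The fixed call, annotated (the reading of the two macros follows from the fixed line):

/* int regmap_update_bits(struct regmap *map, unsigned int reg,
 *                        unsigned int mask, unsigned int val); */
regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
		   PIN_IO_INTAPM,	/* mask: the INTA pin-mode field */
		   PIN_IO_INTA_OUT);	/* val:  drive INTA as an interrupt output */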
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index 215597f73be4f..5b11ee9234573 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -674,18 +674,20 @@ static void dasd_profile_start(struct dasd_block *block,
+ * we count each request only once.
+ */
+ device = cqr->startdev;
+- if (device->profile.data) {
+- counter = 1; /* request is not yet queued on the start device */
+- list_for_each(l, &device->ccw_queue)
+- if (++counter >= 31)
+- break;
+- }
++ if (!device->profile.data)
++ return;
++
++ spin_lock(get_ccwdev_lock(device->cdev));
++ counter = 1; /* request is not yet queued on the start device */
++ list_for_each(l, &device->ccw_queue)
++ if (++counter >= 31)
++ break;
++ spin_unlock(get_ccwdev_lock(device->cdev));
++
+ spin_lock(&device->profile.lock);
+- if (device->profile.data) {
+- device->profile.data->dasd_io_nr_req[counter]++;
+- if (rq_data_dir(req) == READ)
+- device->profile.data->dasd_read_nr_req[counter]++;
+- }
++ device->profile.data->dasd_io_nr_req[counter]++;
++ if (rq_data_dir(req) == READ)
++ device->profile.data->dasd_read_nr_req[counter]++;
+ spin_unlock(&device->profile.lock);
+ }
+
+diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
+index 339812efe8221..d6ad437883fad 100644
+--- a/drivers/s390/crypto/ap_bus.c
++++ b/drivers/s390/crypto/ap_bus.c
+@@ -1022,6 +1022,10 @@ EXPORT_SYMBOL(ap_driver_unregister);
+
+ void ap_bus_force_rescan(void)
+ {
++ /* Only trigger AP bus scans after the initial scan is done */
++ if (atomic64_read(&ap_scan_bus_count) <= 0)
++ return;
++
+ /* processing an asynchronous bus rescan */
+ del_timer(&ap_config_timer);
+ queue_work(system_long_wq, &ap_scan_work);
+@@ -1865,15 +1869,18 @@ static inline void ap_scan_domains(struct ap_card *ac)
+ }
+ /* get it and thus adjust reference counter */
+ get_device(dev);
+- if (decfg)
++ if (decfg) {
+ AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n",
+ __func__, ac->id, dom);
+- else if (chkstop)
++ } else if (chkstop) {
+ AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n",
+ __func__, ac->id, dom);
+- else
++ } else {
++ /* nudge the queue's state machine */
++ ap_queue_init_state(aq);
+ AP_DBF_INFO("%s(%d,%d) new queue dev created\n",
+ __func__, ac->id, dom);
++ }
+ goto put_dev_and_continue;
+ }
+ /* handle state changes on already existing queue device */
+@@ -1895,10 +1902,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
+ } else if (!chkstop && aq->chkstop) {
+ /* checkstop off */
+ aq->chkstop = false;
+- if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+- aq->dev_state = AP_DEV_STATE_OPERATING;
+- aq->sm_state = AP_SM_STATE_RESET_START;
+- }
++ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
++ _ap_queue_init_state(aq);
+ spin_unlock_bh(&aq->lock);
+ AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
+ __func__, ac->id, dom);
+@@ -1922,10 +1927,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
+ } else if (!decfg && !aq->config) {
+ /* config on this queue device */
+ aq->config = true;
+- if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+- aq->dev_state = AP_DEV_STATE_OPERATING;
+- aq->sm_state = AP_SM_STATE_RESET_START;
+- }
++ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
++ _ap_queue_init_state(aq);
+ spin_unlock_bh(&aq->lock);
+ AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
+ __func__, ac->id, dom);
+diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
+index be54b070c0316..3e34912a60506 100644
+--- a/drivers/s390/crypto/ap_bus.h
++++ b/drivers/s390/crypto/ap_bus.h
+@@ -287,6 +287,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+ void ap_queue_prepare_remove(struct ap_queue *aq);
+ void ap_queue_remove(struct ap_queue *aq);
+ void ap_queue_init_state(struct ap_queue *aq);
++void _ap_queue_init_state(struct ap_queue *aq);
+
+ struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
+ int comp_type, unsigned int functions, int ml);
+diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
+index 1336e632adc4a..2943b2529d3a0 100644
+--- a/drivers/s390/crypto/ap_queue.c
++++ b/drivers/s390/crypto/ap_queue.c
+@@ -1160,14 +1160,19 @@ void ap_queue_remove(struct ap_queue *aq)
+ spin_unlock_bh(&aq->lock);
+ }
+
+-void ap_queue_init_state(struct ap_queue *aq)
++void _ap_queue_init_state(struct ap_queue *aq)
+ {
+- spin_lock_bh(&aq->lock);
+ aq->dev_state = AP_DEV_STATE_OPERATING;
+ aq->sm_state = AP_SM_STATE_RESET_START;
+ aq->last_err_rc = 0;
+ aq->assoc_idx = ASSOC_IDX_INVALID;
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
++}
++
++void ap_queue_init_state(struct ap_queue *aq)
++{
++ spin_lock_bh(&aq->lock);
++ _ap_queue_init_state(aq);
+ spin_unlock_bh(&aq->lock);
+ }
+ EXPORT_SYMBOL(ap_queue_init_state);
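[Editor's sketch] The split follows the usual locked/unlocked-variant convention: the underscore-prefixed helper assumes the caller already holds aq->lock (as ap_scan_domains() does at its call sites), while the public wrapper takes the lock itself. The shape in miniature, with hypothetical names:

#include <linux/spinlock.h>

struct obj {
	spinlock_t lock;
	int state;
};

/* caller must hold o->lock */
static void _obj_reset(struct obj *o)
{
	o->state = 0;
}

/* public entry point: takes the lock itself */
void obj_reset(struct obj *o)
{
	spin_lock_bh(&o->lock);
	_obj_reset(o);
	spin_unlock_bh(&o->lock);
}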
+diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
+index 4902d45e929ce..c61e6427384c3 100644
+--- a/drivers/s390/net/Kconfig
++++ b/drivers/s390/net/Kconfig
+@@ -103,10 +103,11 @@ config CCWGROUP
+ config ISM
+ tristate "Support for ISM vPCI Adapter"
+ depends on PCI
++ imply SMC
+ default n
+ help
+ Select this option if you want to use the Internal Shared Memory
+- vPCI Adapter.
++ vPCI Adapter. The adapter can be used with the SMC network protocol.
+
+ To compile as a module choose M. The module name is ism.
+ If unsure, choose N.
+diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
+index 6df7f377d2f90..81aabbfbbe2ca 100644
+--- a/drivers/s390/net/ism_drv.c
++++ b/drivers/s390/net/ism_drv.c
+@@ -30,7 +30,6 @@ static const struct pci_device_id ism_device_table[] = {
+ MODULE_DEVICE_TABLE(pci, ism_device_table);
+
+ static debug_info_t *ism_debug_info;
+-static const struct smcd_ops ism_ops;
+
+ #define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */
+ static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
+@@ -289,22 +288,6 @@ out:
+ return ret;
+ }
+
+-static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
+- u32 vid)
+-{
+- union ism_query_rgid cmd;
+-
+- memset(&cmd, 0, sizeof(cmd));
+- cmd.request.hdr.cmd = ISM_QUERY_RGID;
+- cmd.request.hdr.len = sizeof(cmd.request);
+-
+- cmd.request.rgid = rgid;
+- cmd.request.vlan_valid = vid_valid;
+- cmd.request.vlan_id = vid;
+-
+- return ism_cmd(ism, &cmd);
+-}
+-
+ static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
+ {
+ clear_bit(dmb->sba_idx, ism->sba_bitmap);
+@@ -429,23 +412,6 @@ static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
+ return ism_cmd(ism, &cmd);
+ }
+
+-static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
+- u32 event_code, u64 info)
+-{
+- union ism_sig_ieq cmd;
+-
+- memset(&cmd, 0, sizeof(cmd));
+- cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
+- cmd.request.hdr.len = sizeof(cmd.request);
+-
+- cmd.request.rgid = rgid;
+- cmd.request.trigger_irq = trigger_irq;
+- cmd.request.event_code = event_code;
+- cmd.request.info = info;
+-
+- return ism_cmd(ism, &cmd);
+-}
+-
+ static unsigned int max_bytes(unsigned int start, unsigned int len,
+ unsigned int boundary)
+ {
+@@ -503,14 +469,6 @@ u8 *ism_get_seid(void)
+ }
+ EXPORT_SYMBOL_GPL(ism_get_seid);
+
+-static u16 ism_get_chid(struct ism_dev *ism)
+-{
+- if (!ism || !ism->pdev)
+- return 0;
+-
+- return to_zpci(ism->pdev)->pchid;
+-}
+-
+ static void ism_handle_event(struct ism_dev *ism)
+ {
+ struct ism_event *entry;
+@@ -569,11 +527,6 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
+-static u64 ism_get_local_gid(struct ism_dev *ism)
+-{
+- return ism->local_gid;
+-}
+-
+ static int ism_dev_init(struct ism_dev *ism)
+ {
+ struct pci_dev *pdev = ism->pdev;
+@@ -774,6 +727,22 @@ module_exit(ism_exit);
+ /*************************** SMC-D Implementation *****************************/
+
+ #if IS_ENABLED(CONFIG_SMC)
++static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
++ u32 vid)
++{
++ union ism_query_rgid cmd;
++
++ memset(&cmd, 0, sizeof(cmd));
++ cmd.request.hdr.cmd = ISM_QUERY_RGID;
++ cmd.request.hdr.len = sizeof(cmd.request);
++
++ cmd.request.rgid = rgid;
++ cmd.request.vlan_valid = vid_valid;
++ cmd.request.vlan_id = vid;
++
++ return ism_cmd(ism, &cmd);
++}
++
+ static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
+ u32 vid)
+ {
+@@ -811,6 +780,23 @@ static int smcd_reset_vlan_required(struct smcd_dev *smcd)
+ return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
+ }
+
++static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
++ u32 event_code, u64 info)
++{
++ union ism_sig_ieq cmd;
++
++ memset(&cmd, 0, sizeof(cmd));
++ cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
++ cmd.request.hdr.len = sizeof(cmd.request);
++
++ cmd.request.rgid = rgid;
++ cmd.request.trigger_irq = trigger_irq;
++ cmd.request.event_code = event_code;
++ cmd.request.info = info;
++
++ return ism_cmd(ism, &cmd);
++}
++
+ static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
+ u32 event_code, u64 info)
+ {
+@@ -830,11 +816,24 @@ static int smcd_supports_v2(void)
+ SYSTEM_EID.type[0] != '0';
+ }
+
++static u64 ism_get_local_gid(struct ism_dev *ism)
++{
++ return ism->local_gid;
++}
++
+ static u64 smcd_get_local_gid(struct smcd_dev *smcd)
+ {
+ return ism_get_local_gid(smcd->priv);
+ }
+
++static u16 ism_get_chid(struct ism_dev *ism)
++{
++ if (!ism || !ism->pdev)
++ return 0;
++
++ return to_zpci(ism->pdev)->pchid;
++}
++
+ static u16 smcd_get_chid(struct smcd_dev *smcd)
+ {
+ return ism_get_chid(smcd->priv);
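[Editor's note] Every ism_* move in this file is the same refactor: helpers used only by the SMC-D glue migrate under the existing #if IS_ENABLED(CONFIG_SMC) block, so builds without SMC (neither =y nor =m) no longer see defined-but-unused statics. The guard pattern, reduced to an illustrative shape:

#if IS_ENABLED(CONFIG_SMC)	/* true for CONFIG_SMC=y and =m */
static int ism_helper_used_only_by_smcd(struct ism_dev *ism)
{
	/* ... referenced solely by the smcd_ops glue in this block ... */
	return 0;
}

/* the smcd_ops wrappers that call the helper live here too */
#endif /* CONFIG_SMC */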
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index bbb64ee6afd7c..089186fe17915 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -4865,6 +4865,12 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
+ hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ }
+
++static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
++{
++ debugfs_remove_recursive(hisi_hba->debugfs_dir);
++ hisi_hba->debugfs_dir = NULL;
++}
++
+ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+ {
+ struct device *dev = hisi_hba->dev;
+@@ -4888,18 +4894,13 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
+ if (debugfs_alloc_v3_hw(hisi_hba, i)) {
+- debugfs_remove_recursive(hisi_hba->debugfs_dir);
++ debugfs_exit_v3_hw(hisi_hba);
+ dev_dbg(dev, "failed to init debugfs!\n");
+ break;
+ }
+ }
+ }
+
+-static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
+-{
+- debugfs_remove_recursive(hisi_hba->debugfs_dir);
+-}
+-
+ static int
+ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index ce9eb00e2ca04..c98346e464b48 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -22,7 +22,6 @@
+ #include <linux/bsg-lib.h>
+ #include <asm/firmware.h>
+ #include <asm/irq.h>
+-#include <asm/rtas.h>
+ #include <asm/vio.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_cmnd.h>
+@@ -1519,7 +1518,11 @@ static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->l_lock, flags);
+- BUG_ON(list_empty(&queue->free));
++ if (list_empty(&queue->free)) {
++ ibmvfc_log(queue->vhost, 4, "empty event pool on queue:%ld\n", queue->hwq_id);
++ spin_unlock_irqrestore(&queue->l_lock, flags);
++ return NULL;
++ }
+ evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+ atomic_set(&evt->free, 0);
+ list_del(&evt->queue_list);
+@@ -1948,9 +1951,15 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+ if (vhost->using_channels) {
+ scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
+ evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
++ if (!evt)
++ return SCSI_MLQUEUE_HOST_BUSY;
++
+ evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
+- } else
++ } else {
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt)
++ return SCSI_MLQUEUE_HOST_BUSY;
++ }
+
+ ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
+ evt->cmnd = cmnd;
+@@ -2038,6 +2047,11 @@ static int ibmvfc_bsg_timeout(struct bsg_job *job)
+
+ vhost->aborting_passthru = 1;
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return -ENOMEM;
++ }
++
+ ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
+
+ tmf = &evt->iu.tmf;
+@@ -2096,6 +2110,10 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
+ goto unlock_out;
+
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ rc = -ENOMEM;
++ goto unlock_out;
++ }
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+ plogi = &evt->iu.plogi;
+ memset(plogi, 0, sizeof(*plogi));
+@@ -2214,6 +2232,11 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
+ }
+
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ rc = -ENOMEM;
++ goto out;
++ }
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.passthru;
+
+@@ -2302,6 +2325,11 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
+ else
+ evt = ibmvfc_get_event(&vhost->crq);
+
++ if (!evt) {
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return -ENOMEM;
++ }
++
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ iu = ibmvfc_get_fcp_iu(vhost, tmf);
+@@ -2505,6 +2533,8 @@ static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
+ struct ibmvfc_tmf *tmf;
+
+ evt = ibmvfc_get_event(queue);
++ if (!evt)
++ return NULL;
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+
+ tmf = &evt->iu.tmf;
+@@ -2561,6 +2591,11 @@ static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
+
+ if (found_evt && vhost->logged_in) {
+ evt = ibmvfc_init_tmf(&queues[i], sdev, type);
++ if (!evt) {
++ spin_unlock(queues[i].q_lock);
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return -ENOMEM;
++ }
+ evt->sync_iu = &queues[i].cancel_rsp;
+ ibmvfc_send_event(evt, vhost, default_timeout);
+ list_add_tail(&evt->cancel, &cancelq);
+@@ -2774,6 +2809,10 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
+
+ if (vhost->state == IBMVFC_ACTIVE) {
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return -ENOMEM;
++ }
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ iu = ibmvfc_get_fcp_iu(vhost, tmf);
+@@ -4032,6 +4071,12 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+@@ -4139,6 +4184,12 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
+ kref_get(&tgt->kref);
+ tgt->logo_rcvd = 0;
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
+@@ -4215,6 +4266,8 @@ static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_t
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt)
++ return NULL;
+ ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+ mad = &evt->iu.implicit_logout;
+@@ -4242,6 +4295,13 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+ vhost->discovery_threads++;
+ evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
+ ibmvfc_tgt_implicit_logout_done);
++ if (!evt) {
++ vhost->discovery_threads--;
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+@@ -4381,6 +4441,12 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
+@@ -4547,6 +4613,14 @@ static void ibmvfc_adisc_timeout(struct timer_list *t)
+ vhost->abort_threads++;
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ tgt_err(tgt, "Failed to get cancel event for ADISC.\n");
++ vhost->abort_threads--;
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return;
++ }
+ ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
+
+ evt->tgt = tgt;
+@@ -4597,6 +4671,12 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+@@ -4700,6 +4780,12 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ evt->tgt = tgt;
+ ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
+@@ -4872,6 +4958,13 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
+ {
+ struct ibmvfc_discover_targets *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
++ int level = IBMVFC_DEFAULT_LOG_LEVEL;
++
++ if (!evt) {
++ ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
+
+ ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.discover_targets;
+@@ -4949,8 +5042,15 @@ static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
+ struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
+ unsigned int num_channels =
+ min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
++ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+ int i;
+
++ if (!evt) {
++ ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
++
+ memset(setup_buf, 0, sizeof(*setup_buf));
+ if (num_channels == 0)
+ setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
+@@ -5012,6 +5112,13 @@ static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
+ {
+ struct ibmvfc_channel_enquiry *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
++ int level = IBMVFC_DEFAULT_LOG_LEVEL;
++
++ if (!evt) {
++ ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
+
+ ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.channel_enquiry;
+@@ -5134,6 +5241,12 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
+ struct ibmvfc_npiv_login_mad *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+
++ if (!evt) {
++ ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
++
+ ibmvfc_gather_partition_info(vhost);
+ ibmvfc_set_login_info(vhost);
+ ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
+@@ -5198,6 +5311,12 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
+ struct ibmvfc_event *evt;
+
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
++
+ ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
+
+ mad = &evt->iu.npiv_logout;
+@@ -5804,7 +5923,7 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
+ irq_failed:
+ do {
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
+- } while (rtas_busy_delay(rc));
++ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+ reg_failed:
+ LEAVE;
+ return rc;
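[Editor's sketch] Every hunk in this file is one conversion: ibmvfc_get_event() used to BUG_ON() an empty free list, and now returns NULL so each caller can back off instead (SCSI_MLQUEUE_HOST_BUSY from queuecommand, -ENOMEM from the bsg paths, a host reset from discovery). The getter side of that change, as a reduced sketch of the driver's own function:

static struct ibmvfc_event *pool_get(struct ibmvfc_queue *queue)
{
	struct ibmvfc_event *evt = NULL;
	unsigned long flags;

	spin_lock_irqsave(&queue->l_lock, flags);
	if (!list_empty(&queue->free)) {	/* was: BUG_ON(list_empty(...)) */
		evt = list_first_entry(&queue->free,
				       struct ibmvfc_event, queue_list);
		list_del(&evt->queue_list);
	}
	spin_unlock_irqrestore(&queue->l_lock, flags);

	return evt;	/* NULL means "pool exhausted, retry later" */
}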
+diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
+index 9c02c9523c4d4..ab06e9aeb613e 100644
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -241,6 +241,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
+ }
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->ptp_rdata = fc_rport_create(lport, remote_fid);
++ if (!lport->ptp_rdata) {
++ printk(KERN_WARNING "libfc: Failed to setup lport 0x%x\n",
++ lport->port_id);
++ mutex_unlock(&lport->disc.disc_mutex);
++ return;
++ }
+ kref_get(&lport->ptp_rdata->kref);
+ lport->ptp_rdata->ids.port_name = remote_wwpn;
+ lport->ptp_rdata->ids.node_name = remote_wwnn;
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index e1aa667dae662..3d4f13da1ae87 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -263,13 +263,13 @@ u32 megasas_readl(struct megasas_instance *instance,
+ * Fusion registers could intermittently return all zeroes.
+ * This behavior is transient in nature and subsequent reads will
+ * return valid value. As a workaround in driver, retry readl for
+- * upto three times until a non-zero value is read.
++ * up to thirty times until a non-zero value is read.
+ */
+ if (instance->adapter_type == AERO_SERIES) {
+ do {
+ ret_val = readl(addr);
+ i++;
+- } while (ret_val == 0 && i < 3);
++ } while (ret_val == 0 && i < 30);
+ return ret_val;
+ } else {
+ return readl(addr);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 61a32bf00747e..a75f670bf5519 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -223,8 +223,8 @@ _base_readl_ext_retry(const void __iomem *addr)
+
+ for (i = 0 ; i < 30 ; i++) {
+ ret_val = readl(addr);
+- if (ret_val == 0)
+- continue;
++ if (ret_val != 0)
++ break;
+ }
+
+ return ret_val;
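[Editor's sketch] The mpt3sas hunk fixes an ineffective loop: a `continue` as the last statement of a for body is a no-op, so the old code always performed all 30 reads and could return a later, stale zero even after a good read. The corrected bounded-retry shape as a standalone helper (name hypothetical):

static u32 my_readl_retry(const void __iomem *addr)
{
	u32 val = 0;
	int i;

	for (i = 0; i < 30; i++) {
		val = readl(addr);
		if (val != 0)
			break;	/* first good read wins */
	}

	return val;	/* still 0 only if all 30 reads returned 0 */
}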
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index dcae09a37d498..c45eef743c457 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1836,8 +1836,16 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
+ }
+
+ spin_lock_irqsave(qp->qp_lock_ptr, *flags);
+- if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
+- sp->done(sp, res);
++ switch (sp->type) {
++ case SRB_SCSI_CMD:
++ if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
++ sp->done(sp, res);
++ break;
++ default:
++ if (ret_cmd)
++ sp->done(sp, res);
++ break;
++ }
+ } else {
+ sp->done(sp, res);
+ }
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 6effa13039f39..e17509f0b3fa8 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3953,8 +3953,15 @@ static int sd_resume(struct device *dev, bool runtime)
+
+ static int sd_resume_system(struct device *dev)
+ {
+- if (pm_runtime_suspended(dev))
++ if (pm_runtime_suspended(dev)) {
++ struct scsi_disk *sdkp = dev_get_drvdata(dev);
++ struct scsi_device *sdp = sdkp ? sdkp->device : NULL;
++
++ if (sdp && sdp->force_runtime_start_on_system_start)
++ pm_request_resume(dev);
++
+ return 0;
++ }
+
+ return sd_resume(dev, false);
+ }
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index e32a4161a8d02..c61848595da06 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -944,6 +944,9 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ u32 version;
+ struct regmap *regmap;
+
++ if (!IS_ERR(drv_data))
++ return -EBUSY;
++
+ drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data) {
+ ret = -ENOMEM;
+diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
+index d05e0d6edf493..6f8b2f7ae3cc1 100644
+--- a/drivers/soc/qcom/pmic_glink_altmode.c
++++ b/drivers/soc/qcom/pmic_glink_altmode.c
+@@ -444,6 +444,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
+ ret = fwnode_property_read_u32(fwnode, "reg", &port);
+ if (ret < 0) {
+ dev_err(dev, "missing reg property of %pOFn\n", fwnode);
++ fwnode_handle_put(fwnode);
+ return ret;
+ }
+
+@@ -454,6 +455,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
+
+ if (altmode->ports[port].altmode) {
+ dev_err(dev, "multiple connector definition for port %u\n", port);
++ fwnode_handle_put(fwnode);
+ return -EINVAL;
+ }
+
+@@ -465,48 +467,62 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
+ alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs;
+ alt_port->bridge.of_node = to_of_node(fwnode);
+ alt_port->bridge.ops = DRM_BRIDGE_OP_HPD;
+- alt_port->bridge.type = DRM_MODE_CONNECTOR_USB;
++ alt_port->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+
+ ret = devm_drm_bridge_add(dev, &alt_port->bridge);
+- if (ret)
++ if (ret) {
++ fwnode_handle_put(fwnode);
+ return ret;
++ }
+
+ alt_port->dp_alt.svid = USB_TYPEC_DP_SID;
+ alt_port->dp_alt.mode = USB_TYPEC_DP_MODE;
+ alt_port->dp_alt.active = 1;
+
+ alt_port->typec_mux = fwnode_typec_mux_get(fwnode);
+- if (IS_ERR(alt_port->typec_mux))
++ if (IS_ERR(alt_port->typec_mux)) {
++ fwnode_handle_put(fwnode);
+ return dev_err_probe(dev, PTR_ERR(alt_port->typec_mux),
+ "failed to acquire mode-switch for port: %d\n",
+ port);
++ }
+
+ ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_mux,
+ alt_port->typec_mux);
+- if (ret)
++ if (ret) {
++ fwnode_handle_put(fwnode);
+ return ret;
++ }
+
+ alt_port->typec_retimer = fwnode_typec_retimer_get(fwnode);
+- if (IS_ERR(alt_port->typec_retimer))
++ if (IS_ERR(alt_port->typec_retimer)) {
++ fwnode_handle_put(fwnode);
+ return dev_err_probe(dev, PTR_ERR(alt_port->typec_retimer),
+ "failed to acquire retimer-switch for port: %d\n",
+ port);
++ }
+
+ ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_retimer,
+ alt_port->typec_retimer);
+- if (ret)
++ if (ret) {
++ fwnode_handle_put(fwnode);
+ return ret;
++ }
+
+ alt_port->typec_switch = fwnode_typec_switch_get(fwnode);
+- if (IS_ERR(alt_port->typec_switch))
++ if (IS_ERR(alt_port->typec_switch)) {
++ fwnode_handle_put(fwnode);
+ return dev_err_probe(dev, PTR_ERR(alt_port->typec_switch),
+ "failed to acquire orientation-switch for port: %d\n",
+ port);
++ }
+
+ ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_switch,
+ alt_port->typec_switch);
+- if (ret)
++ if (ret) {
++ fwnode_handle_put(fwnode);
+ return ret;
++ }
+ }
+
+ altmode->client = devm_pmic_glink_register_client(dev,
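[Editor's sketch] All of the added fwnode_handle_put() calls enforce one rule: device_for_each_child_node() holds a reference on the node it hands you and drops it only when advancing to the next iteration, so any early return from the loop body must drop it by hand. The discipline in a reduced, hypothetical walk:

#include <linux/property.h>

static int my_walk(struct device *dev)
{
	struct fwnode_handle *child;
	u32 port;
	int ret;

	device_for_each_child_node(dev, child) {
		ret = fwnode_property_read_u32(child, "reg", &port);
		if (ret < 0) {
			/* the iterator holds a ref on 'child': drop it */
			fwnode_handle_put(child);
			return ret;
		}
		/* ... set up this port ... */
	}

	return 0;
}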
+diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
+index 2a1096dab63d3..9ebdd0cd0b1cf 100644
+--- a/drivers/soundwire/dmi-quirks.c
++++ b/drivers/soundwire/dmi-quirks.c
+@@ -141,7 +141,7 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16-k0xxx"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16"),
+ },
+ .driver_data = (void *)hp_omen_16,
+ },
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index 2c21d5b96fdce..bcbf840cd41c8 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -1157,6 +1157,7 @@ config SPI_XTENSA_XTFPGA
+ config SPI_ZYNQ_QSPI
+ tristate "Xilinx Zynq QSPI controller"
+ depends on ARCH_ZYNQ || COMPILE_TEST
++ depends on SPI_MEM
+ help
+ This enables support for the Zynq Quad SPI controller
+ in master mode.
+diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
+index c964f41dcc428..168eff721ed37 100644
+--- a/drivers/spi/spi-nxp-fspi.c
++++ b/drivers/spi/spi-nxp-fspi.c
+@@ -759,7 +759,7 @@ static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
+ f->memmap_len = len > NXP_FSPI_MIN_IOMAP ?
+ len : NXP_FSPI_MIN_IOMAP;
+
+- f->ahb_addr = ioremap_wc(f->memmap_phy + f->memmap_start,
++ f->ahb_addr = ioremap(f->memmap_phy + f->memmap_start,
+ f->memmap_len);
+
+ if (!f->ahb_addr) {
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index e5cd82eb9e549..ddf1c684bcc7d 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -117,7 +117,7 @@ struct omap2_mcspi_regs {
+
+ struct omap2_mcspi {
+ struct completion txdone;
+- struct spi_master *master;
++ struct spi_controller *ctlr;
+ /* Virtual base address of the controller */
+ void __iomem *base;
+ unsigned long phys;
+@@ -125,10 +125,12 @@ struct omap2_mcspi {
+ struct omap2_mcspi_dma *dma_channels;
+ struct device *dev;
+ struct omap2_mcspi_regs ctx;
++ struct clk *ref_clk;
+ int fifo_depth;
+- bool slave_aborted;
++ bool target_aborted;
+ unsigned int pin_dir:1;
+ size_t max_xfer_len;
++ u32 ref_clk_hz;
+ };
+
+ struct omap2_mcspi_cs {
+@@ -141,17 +143,17 @@ struct omap2_mcspi_cs {
+ u32 chconf0, chctrl0;
+ };
+
+-static inline void mcspi_write_reg(struct spi_master *master,
++static inline void mcspi_write_reg(struct spi_controller *ctlr,
+ int idx, u32 val)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+
+ writel_relaxed(val, mcspi->base + idx);
+ }
+
+-static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
++static inline u32 mcspi_read_reg(struct spi_controller *ctlr, int idx)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+
+ return readl_relaxed(mcspi->base + idx);
+ }
+@@ -235,7 +237,7 @@ static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
+
+ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ u32 l;
+
+ /* The controller handles the inverted chip selects
+@@ -266,24 +268,24 @@ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
+ }
+ }
+
+-static void omap2_mcspi_set_mode(struct spi_master *master)
++static void omap2_mcspi_set_mode(struct spi_controller *ctlr)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ u32 l;
+
+ /*
+- * Choose master or slave mode
++ * Choose host or target mode
+ */
+- l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
++ l = mcspi_read_reg(ctlr, OMAP2_MCSPI_MODULCTRL);
+ l &= ~(OMAP2_MCSPI_MODULCTRL_STEST);
+- if (spi_controller_is_slave(master)) {
++ if (spi_controller_is_target(ctlr)) {
+ l |= (OMAP2_MCSPI_MODULCTRL_MS);
+ } else {
+ l &= ~(OMAP2_MCSPI_MODULCTRL_MS);
+ l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+ }
+- mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
++ mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, l);
+
+ ctx->modulctrl = l;
+ }
+@@ -291,14 +293,14 @@ static void omap2_mcspi_set_mode(struct spi_master *master)
+ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
+ struct spi_transfer *t, int enable)
+ {
+- struct spi_master *master = spi->master;
++ struct spi_controller *ctlr = spi->controller;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi *mcspi;
+ unsigned int wcnt;
+ int max_fifo_depth, bytes_per_word;
+ u32 chconf, xferlevel;
+
+- mcspi = spi_master_get_devdata(master);
++ mcspi = spi_controller_get_devdata(ctlr);
+
+ chconf = mcspi_cached_chconf0(spi);
+ if (enable) {
+@@ -326,7 +328,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
+ xferlevel |= bytes_per_word - 1;
+ }
+
+- mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
++ mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel);
+ mcspi_write_chconf0(spi, chconf);
+ mcspi->fifo_depth = max_fifo_depth;
+
+@@ -364,9 +366,9 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
+ static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
+ struct completion *x)
+ {
+- if (spi_controller_is_slave(mcspi->master)) {
++ if (spi_controller_is_target(mcspi->ctlr)) {
+ if (wait_for_completion_interruptible(x) ||
+- mcspi->slave_aborted)
++ mcspi->target_aborted)
+ return -EINTR;
+ } else {
+ wait_for_completion(x);
+@@ -378,7 +380,7 @@ static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
+ static void omap2_mcspi_rx_callback(void *data)
+ {
+ struct spi_device *spi = data;
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+
+ /* We must disable the DMA RX request */
+@@ -390,7 +392,7 @@ static void omap2_mcspi_rx_callback(void *data)
+ static void omap2_mcspi_tx_callback(void *data)
+ {
+ struct spi_device *spi = data;
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+
+ /* We must disable the DMA TX request */
+@@ -407,7 +409,7 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
+ struct omap2_mcspi_dma *mcspi_dma;
+ struct dma_async_tx_descriptor *tx;
+
+- mcspi = spi_master_get_devdata(spi->master);
++ mcspi = spi_controller_get_devdata(spi->controller);
+ mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+
+ dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
+@@ -445,13 +447,13 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
+ void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+ struct dma_async_tx_descriptor *tx;
+
+- mcspi = spi_master_get_devdata(spi->master);
++ mcspi = spi_controller_get_devdata(spi->controller);
+ mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+ count = xfer->len;
+
+ /*
+ * In the "End-of-Transfer Procedure" section for DMA RX in OMAP35x TRM
+- * it mentions reducing DMA transfer length by one element in master
++ * it mentions reducing DMA transfer length by one element in host
+ * normal mode.
+ */
+ if (mcspi->fifo_depth == 0)
+@@ -514,7 +516,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
+ omap2_mcspi_set_dma_req(spi, 1, 1);
+
+ ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion);
+- if (ret || mcspi->slave_aborted) {
++ if (ret || mcspi->target_aborted) {
+ dmaengine_terminate_sync(mcspi_dma->dma_rx);
+ omap2_mcspi_set_dma_req(spi, 1, 0);
+ return 0;
+@@ -590,7 +592,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ void __iomem *irqstat_reg;
+ int wait_res;
+
+- mcspi = spi_master_get_devdata(spi->master);
++ mcspi = spi_controller_get_devdata(spi->controller);
+ mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+
+ if (cs->word_len <= 8) {
+@@ -617,14 +619,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+
+- mcspi->slave_aborted = false;
++ mcspi->target_aborted = false;
+ reinit_completion(&mcspi_dma->dma_tx_completion);
+ reinit_completion(&mcspi_dma->dma_rx_completion);
+ reinit_completion(&mcspi->txdone);
+ if (tx) {
+- /* Enable EOW IRQ to know end of tx in slave mode */
+- if (spi_controller_is_slave(spi->master))
+- mcspi_write_reg(spi->master,
++ /* Enable EOW IRQ to know end of tx in target mode */
++ if (spi_controller_is_target(spi->controller))
++ mcspi_write_reg(spi->controller,
+ OMAP2_MCSPI_IRQENABLE,
+ OMAP2_MCSPI_IRQSTATUS_EOW);
+ omap2_mcspi_tx_dma(spi, xfer, cfg);
+@@ -637,15 +639,15 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ int ret;
+
+ ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_tx_completion);
+- if (ret || mcspi->slave_aborted) {
++ if (ret || mcspi->target_aborted) {
+ dmaengine_terminate_sync(mcspi_dma->dma_tx);
+ omap2_mcspi_set_dma_req(spi, 0, 0);
+ return 0;
+ }
+
+- if (spi_controller_is_slave(mcspi->master)) {
++ if (spi_controller_is_target(mcspi->ctlr)) {
+ ret = mcspi_wait_for_completion(mcspi, &mcspi->txdone);
+- if (ret || mcspi->slave_aborted)
++ if (ret || mcspi->target_aborted)
+ return 0;
+ }
+
+@@ -656,7 +658,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
+ dev_err(&spi->dev, "EOW timed out\n");
+
+- mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
++ mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS,
+ OMAP2_MCSPI_IRQSTATUS_EOW);
+ }
+
+@@ -880,12 +882,12 @@ out:
+ return count - c;
+ }
+
+-static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
++static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz)
+ {
+ u32 div;
+
+ for (div = 0; div < 15; div++)
+- if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
++ if (speed_hz >= (ref_clk_hz >> div))
+ return div;
+
+ return 15;
+@@ -897,11 +899,11 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+ {
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi *mcspi;
+- u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
++ u32 ref_clk_hz, l = 0, clkd = 0, div, extclk = 0, clkg = 0;
+ u8 word_len = spi->bits_per_word;
+ u32 speed_hz = spi->max_speed_hz;
+
+- mcspi = spi_master_get_devdata(spi->master);
++ mcspi = spi_controller_get_devdata(spi->controller);
+
+ if (t != NULL && t->bits_per_word)
+ word_len = t->bits_per_word;
+@@ -911,14 +913,15 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+ if (t && t->speed_hz)
+ speed_hz = t->speed_hz;
+
+- speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
+- if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
+- clkd = omap2_mcspi_calc_divisor(speed_hz);
+- speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
++ ref_clk_hz = mcspi->ref_clk_hz;
++ speed_hz = min_t(u32, speed_hz, ref_clk_hz);
++ if (speed_hz < (ref_clk_hz / OMAP2_MCSPI_MAX_DIVIDER)) {
++ clkd = omap2_mcspi_calc_divisor(speed_hz, ref_clk_hz);
++ speed_hz = ref_clk_hz >> clkd;
+ clkg = 0;
+ } else {
+- div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
+- speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
++ div = (ref_clk_hz + speed_hz - 1) / speed_hz;
++ speed_hz = ref_clk_hz / div;
+ clkd = (div - 1) & 0xf;
+ extclk = (div - 1) >> 4;
+ clkg = OMAP2_MCSPI_CHCONF_CLKG;
+@@ -926,7 +929,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+
+ l = mcspi_cached_chconf0(spi);
+
+- /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
++ /* standard 4-wire host mode: SCK, MOSI/out, MISO/in, nCS
+ * REVISIT: this controller could support SPI_3WIRE mode.
+ */
+ if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
+@@ -1017,13 +1020,13 @@ no_dma:
+ return ret;
+ }
+
+-static void omap2_mcspi_release_dma(struct spi_master *master)
++static void omap2_mcspi_release_dma(struct spi_controller *ctlr)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ struct omap2_mcspi_dma *mcspi_dma;
+ int i;
+
+- for (i = 0; i < master->num_chipselect; i++) {
++ for (i = 0; i < ctlr->num_chipselect; i++) {
+ mcspi_dma = &mcspi->dma_channels[i];
+
+ if (mcspi_dma->dma_rx) {
+@@ -1054,7 +1057,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
+ {
+ bool initial_setup = false;
+ int ret;
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+@@ -1096,24 +1099,24 @@ static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
+ struct omap2_mcspi *mcspi = data;
+ u32 irqstat;
+
+- irqstat = mcspi_read_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS);
++ irqstat = mcspi_read_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS);
+ if (!irqstat)
+ return IRQ_NONE;
+
+- /* Disable IRQ and wakeup slave xfer task */
+- mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQENABLE, 0);
++ /* Disable IRQ and wakeup target xfer task */
++ mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQENABLE, 0);
+ if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW)
+ complete(&mcspi->txdone);
+
+ return IRQ_HANDLED;
+ }
+
+-static int omap2_mcspi_slave_abort(struct spi_master *master)
++static int omap2_mcspi_target_abort(struct spi_controller *ctlr)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ struct omap2_mcspi_dma *mcspi_dma = mcspi->dma_channels;
+
+- mcspi->slave_aborted = true;
++ mcspi->target_aborted = true;
+ complete(&mcspi_dma->dma_rx_completion);
+ complete(&mcspi_dma->dma_tx_completion);
+ complete(&mcspi->txdone);
+@@ -1121,7 +1124,7 @@ static int omap2_mcspi_slave_abort(struct spi_master *master)
+ return 0;
+ }
+
+-static int omap2_mcspi_transfer_one(struct spi_master *master,
++static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+ {
+@@ -1129,7 +1132,7 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ /* We only enable one channel at a time -- the one whose message is
+ * at the head of the queue -- although this controller would gladly
+ * arbitrate among multiple channels. This corresponds to "single
+- * channel" master mode. As a side effect, we need to manage the
++ * channel" host mode. As a side effect, we need to manage the
+ * chipselect with the FORCE bit ... CS != channel enable.
+ */
+
+@@ -1141,13 +1144,13 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ int status = 0;
+ u32 chconf;
+
+- mcspi = spi_master_get_devdata(master);
++ mcspi = spi_controller_get_devdata(ctlr);
+ mcspi_dma = mcspi->dma_channels + spi_get_chipselect(spi, 0);
+ cs = spi->controller_state;
+ cd = spi->controller_data;
+
+ /*
+- * The slave driver could have changed spi->mode in which case
++ * The target driver could have changed spi->mode in which case
+ * it will be different from cs->mode (the current hardware setup).
+ * If so, set par_override (even though it's not a parity issue) so
+ * omap2_mcspi_setup_transfer will be called to configure the hardware
+@@ -1175,7 +1178,7 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ if (cd && cd->cs_per_word) {
+ chconf = mcspi->ctx.modulctrl;
+ chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
+- mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
++ mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf);
+ mcspi->ctx.modulctrl =
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ }
+@@ -1201,8 +1204,8 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ unsigned count;
+
+ if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+- master->cur_msg_mapped &&
+- master->can_dma(master, spi, t))
++ ctlr->cur_msg_mapped &&
++ ctlr->can_dma(ctlr, spi, t))
+ omap2_mcspi_set_fifo(spi, t, 1);
+
+ omap2_mcspi_set_enable(spi, 1);
+@@ -1213,8 +1216,8 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ + OMAP2_MCSPI_TX0);
+
+ if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+- master->cur_msg_mapped &&
+- master->can_dma(master, spi, t))
++ ctlr->cur_msg_mapped &&
++ ctlr->can_dma(ctlr, spi, t))
+ count = omap2_mcspi_txrx_dma(spi, t);
+ else
+ count = omap2_mcspi_txrx_pio(spi, t);
+@@ -1240,7 +1243,7 @@ out:
+ if (cd && cd->cs_per_word) {
+ chconf = mcspi->ctx.modulctrl;
+ chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+- mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
++ mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf);
+ mcspi->ctx.modulctrl =
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ }
+@@ -1256,10 +1259,10 @@ out:
+ return status;
+ }
+
+-static int omap2_mcspi_prepare_message(struct spi_master *master,
++static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *msg)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs;
+
+@@ -1283,29 +1286,29 @@ static int omap2_mcspi_prepare_message(struct spi_master *master,
+ return 0;
+ }
+
+-static bool omap2_mcspi_can_dma(struct spi_master *master,
++static bool omap2_mcspi_can_dma(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ struct omap2_mcspi_dma *mcspi_dma =
+ &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+
+ if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx)
+ return false;
+
+- if (spi_controller_is_slave(master))
++ if (spi_controller_is_target(ctlr))
+ return true;
+
+- master->dma_rx = mcspi_dma->dma_rx;
+- master->dma_tx = mcspi_dma->dma_tx;
++ ctlr->dma_rx = mcspi_dma->dma_rx;
++ ctlr->dma_tx = mcspi_dma->dma_tx;
+
+ return (xfer->len >= DMA_MIN_BYTES);
+ }
+
+ static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ struct omap2_mcspi_dma *mcspi_dma =
+ &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+
+@@ -1317,7 +1320,7 @@ static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
+
+ static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
+ {
+- struct spi_master *master = mcspi->master;
++ struct spi_controller *ctlr = mcspi->ctlr;
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ int ret = 0;
+
+@@ -1325,11 +1328,11 @@ static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
+ if (ret < 0)
+ return ret;
+
+- mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
++ mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE,
+ OMAP2_MCSPI_WAKEUPENABLE_WKEN);
+ ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
+
+- omap2_mcspi_set_mode(master);
++ omap2_mcspi_set_mode(ctlr);
+ pm_runtime_mark_last_busy(mcspi->dev);
+ pm_runtime_put_autosuspend(mcspi->dev);
+ return 0;
+@@ -1353,8 +1356,8 @@ static int omap_mcspi_runtime_suspend(struct device *dev)
+ */
+ static int omap_mcspi_runtime_resume(struct device *dev)
+ {
+- struct spi_master *master = dev_get_drvdata(dev);
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct spi_controller *ctlr = dev_get_drvdata(dev);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs;
+ int error;
+@@ -1364,8 +1367,8 @@ static int omap_mcspi_runtime_resume(struct device *dev)
+ dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);
+
+ /* McSPI: context restore */
+- mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
+- mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
++ mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
++ mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
+
+ list_for_each_entry(cs, &ctx->cs, node) {
+ /*
+@@ -1420,7 +1423,7 @@ MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
+
+ static int omap2_mcspi_probe(struct platform_device *pdev)
+ {
+- struct spi_master *master;
++ struct spi_controller *ctlr;
+ const struct omap2_mcspi_platform_config *pdata;
+ struct omap2_mcspi *mcspi;
+ struct resource *r;
+@@ -1430,32 +1433,30 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ const struct of_device_id *match;
+
+ if (of_property_read_bool(node, "spi-slave"))
+- master = spi_alloc_slave(&pdev->dev, sizeof(*mcspi));
++ ctlr = spi_alloc_target(&pdev->dev, sizeof(*mcspi));
+ else
+- master = spi_alloc_master(&pdev->dev, sizeof(*mcspi));
+- if (!master)
++ ctlr = spi_alloc_host(&pdev->dev, sizeof(*mcspi));
++ if (!ctlr)
+ return -ENOMEM;
+
+ /* the spi->mode bits understood by this driver: */
+- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+- master->setup = omap2_mcspi_setup;
+- master->auto_runtime_pm = true;
+- master->prepare_message = omap2_mcspi_prepare_message;
+- master->can_dma = omap2_mcspi_can_dma;
+- master->transfer_one = omap2_mcspi_transfer_one;
+- master->set_cs = omap2_mcspi_set_cs;
+- master->cleanup = omap2_mcspi_cleanup;
+- master->slave_abort = omap2_mcspi_slave_abort;
+- master->dev.of_node = node;
+- master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
+- master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
+- master->use_gpio_descriptors = true;
+-
+- platform_set_drvdata(pdev, master);
+-
+- mcspi = spi_master_get_devdata(master);
+- mcspi->master = master;
++ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
++ ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
++ ctlr->setup = omap2_mcspi_setup;
++ ctlr->auto_runtime_pm = true;
++ ctlr->prepare_message = omap2_mcspi_prepare_message;
++ ctlr->can_dma = omap2_mcspi_can_dma;
++ ctlr->transfer_one = omap2_mcspi_transfer_one;
++ ctlr->set_cs = omap2_mcspi_set_cs;
++ ctlr->cleanup = omap2_mcspi_cleanup;
++ ctlr->target_abort = omap2_mcspi_target_abort;
++ ctlr->dev.of_node = node;
++ ctlr->use_gpio_descriptors = true;
++
++ platform_set_drvdata(pdev, ctlr);
++
++ mcspi = spi_controller_get_devdata(ctlr);
++ mcspi->ctlr = ctlr;
+
+ match = of_match_device(omap_mcspi_of_match, &pdev->dev);
+ if (match) {
+@@ -1463,24 +1464,24 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ pdata = match->data;
+
+ of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
+- master->num_chipselect = num_cs;
++ ctlr->num_chipselect = num_cs;
+ if (of_property_read_bool(node, "ti,pindir-d0-out-d1-in"))
+ mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
+ } else {
+ pdata = dev_get_platdata(&pdev->dev);
+- master->num_chipselect = pdata->num_cs;
++ ctlr->num_chipselect = pdata->num_cs;
+ mcspi->pin_dir = pdata->pin_dir;
+ }
+ regs_offset = pdata->regs_offset;
+ if (pdata->max_xfer_len) {
+ mcspi->max_xfer_len = pdata->max_xfer_len;
+- master->max_transfer_size = omap2_mcspi_max_xfer_size;
++ ctlr->max_transfer_size = omap2_mcspi_max_xfer_size;
+ }
+
+ mcspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
+ if (IS_ERR(mcspi->base)) {
+ status = PTR_ERR(mcspi->base);
+- goto free_master;
++ goto free_ctlr;
+ }
+ mcspi->phys = r->start + regs_offset;
+ mcspi->base += regs_offset;
+@@ -1489,36 +1490,44 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+
+ INIT_LIST_HEAD(&mcspi->ctx.cs);
+
+- mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect,
++ mcspi->dma_channels = devm_kcalloc(&pdev->dev, ctlr->num_chipselect,
+ sizeof(struct omap2_mcspi_dma),
+ GFP_KERNEL);
+ if (mcspi->dma_channels == NULL) {
+ status = -ENOMEM;
+- goto free_master;
++ goto free_ctlr;
+ }
+
+- for (i = 0; i < master->num_chipselect; i++) {
++ for (i = 0; i < ctlr->num_chipselect; i++) {
+ sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
+ sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
+
+ status = omap2_mcspi_request_dma(mcspi,
+ &mcspi->dma_channels[i]);
+ if (status == -EPROBE_DEFER)
+- goto free_master;
++ goto free_ctlr;
+ }
+
+ status = platform_get_irq(pdev, 0);
+ if (status < 0)
+- goto free_master;
++ goto free_ctlr;
+ init_completion(&mcspi->txdone);
+ status = devm_request_irq(&pdev->dev, status,
+ omap2_mcspi_irq_handler, 0, pdev->name,
+ mcspi);
+ if (status) {
+ dev_err(&pdev->dev, "Cannot request IRQ");
+- goto free_master;
++ goto free_ctlr;
+ }
+
++ mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
++ if (mcspi->ref_clk)
++ mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
++ else
++ mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
++ ctlr->max_speed_hz = mcspi->ref_clk_hz;
++ ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;
++
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_enable(&pdev->dev);
+@@ -1527,7 +1536,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ if (status < 0)
+ goto disable_pm;
+
+- status = devm_spi_register_controller(&pdev->dev, master);
++ status = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (status < 0)
+ goto disable_pm;
+
+@@ -1537,18 +1546,18 @@ disable_pm:
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+-free_master:
+- omap2_mcspi_release_dma(master);
+- spi_master_put(master);
++free_ctlr:
++ omap2_mcspi_release_dma(ctlr);
++ spi_controller_put(ctlr);
+ return status;
+ }
+
+ static void omap2_mcspi_remove(struct platform_device *pdev)
+ {
+- struct spi_master *master = platform_get_drvdata(pdev);
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct spi_controller *ctlr = platform_get_drvdata(pdev);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+
+- omap2_mcspi_release_dma(master);
++ omap2_mcspi_release_dma(ctlr);
+
+ pm_runtime_dont_use_autosuspend(mcspi->dev);
+ pm_runtime_put_sync(mcspi->dev);
+@@ -1560,8 +1569,8 @@ MODULE_ALIAS("platform:omap2_mcspi");
+
+ static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
+ {
+- struct spi_master *master = dev_get_drvdata(dev);
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct spi_controller *ctlr = dev_get_drvdata(dev);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ int error;
+
+ error = pinctrl_pm_select_sleep_state(dev);
+@@ -1569,9 +1578,9 @@ static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
+ dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
+ __func__, error);
+
+- error = spi_master_suspend(master);
++ error = spi_controller_suspend(ctlr);
+ if (error)
+- dev_warn(mcspi->dev, "%s: master suspend failed: %i\n",
++ dev_warn(mcspi->dev, "%s: controller suspend failed: %i\n",
+ __func__, error);
+
+ return pm_runtime_force_suspend(dev);
+@@ -1579,13 +1588,13 @@ static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
+
+ static int __maybe_unused omap2_mcspi_resume(struct device *dev)
+ {
+- struct spi_master *master = dev_get_drvdata(dev);
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct spi_controller *ctlr = dev_get_drvdata(dev);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ int error;
+
+- error = spi_master_resume(master);
++ error = spi_controller_resume(ctlr);
+ if (error)
+- dev_warn(mcspi->dev, "%s: master resume failed: %i\n",
++ dev_warn(mcspi->dev, "%s: controller resume failed: %i\n",
+ __func__, error);
+
+ return pm_runtime_force_resume(dev);
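
For readers following the omap2-mcspi hunks above: the driver now derives its timing from a runtime reference clock instead of the fixed OMAP2_MCSPI_MAX_FREQ, choosing between a power-of-two divisor for slow rates and a linear clkd/extclk divisor otherwise. Below is a minimal user-space sketch of that selection, assuming a 48 MHz reference clock and the 4-bit clkd / upper extclk field split shown in the hunk; the names here are illustrative, not the driver's.

#include <stdio.h>
#include <stdint.h>

#define MAX_DIVIDER 4096	/* assumed: largest power-of-two divider */

static int calc_pow2_divisor(uint32_t speed_hz, uint32_t ref_clk_hz)
{
	int div;

	for (div = 0; div < 15; div++)
		if (speed_hz >= (ref_clk_hz >> div))
			return div;
	return 15;
}

int main(void)
{
	uint32_t ref_clk_hz = 48000000;	/* assumed reference clock rate */
	uint32_t speed_hz = 10000;	/* requested bus rate */

	if (speed_hz > ref_clk_hz)
		speed_hz = ref_clk_hz;

	if (speed_hz < ref_clk_hz / MAX_DIVIDER) {
		int clkd = calc_pow2_divisor(speed_hz, ref_clk_hz);

		printf("pow2 divider: clkd=%d -> %u Hz\n",
		       clkd, ref_clk_hz >> clkd);
	} else {
		uint32_t div = (ref_clk_hz + speed_hz - 1) / speed_hz;

		printf("linear divider: clkd=%u extclk=%u -> %u Hz\n",
		       (div - 1) & 0xf, (div - 1) >> 4, ref_clk_hz / div);
	}
	return 0;
}

With the 10 kHz request in the sketch the power-of-two path is taken and the achieved rate rounds down, mirroring the driver's behaviour.
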
+diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
+index 4d6db6182c5ed..f5cd365c913a8 100644
+--- a/drivers/spi/spi-tegra20-slink.c
++++ b/drivers/spi/spi-tegra20-slink.c
+@@ -1086,6 +1086,8 @@ static int tegra_slink_probe(struct platform_device *pdev)
+ reset_control_deassert(tspi->rst);
+
+ spi_irq = platform_get_irq(pdev, 0);
++ if (spi_irq < 0)
++ return spi_irq;
+ tspi->irq = spi_irq;
+ ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
+ tegra_slink_isr_thread, IRQF_ONESHOT,
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 8d6304cb061ec..399e81d37b3ba 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -3323,33 +3323,52 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_controller);
+
++static inline int __spi_check_suspended(const struct spi_controller *ctlr)
++{
++ return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
++}
++
++static inline void __spi_mark_suspended(struct spi_controller *ctlr)
++{
++ mutex_lock(&ctlr->bus_lock_mutex);
++ ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
++ mutex_unlock(&ctlr->bus_lock_mutex);
++}
++
++static inline void __spi_mark_resumed(struct spi_controller *ctlr)
++{
++ mutex_lock(&ctlr->bus_lock_mutex);
++ ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
++ mutex_unlock(&ctlr->bus_lock_mutex);
++}
++
+ int spi_controller_suspend(struct spi_controller *ctlr)
+ {
+- int ret;
++ int ret = 0;
+
+ /* Basically no-ops for non-queued controllers */
+- if (!ctlr->queued)
+- return 0;
+-
+- ret = spi_stop_queue(ctlr);
+- if (ret)
+- dev_err(&ctlr->dev, "queue stop failed\n");
++ if (ctlr->queued) {
++ ret = spi_stop_queue(ctlr);
++ if (ret)
++ dev_err(&ctlr->dev, "queue stop failed\n");
++ }
+
++ __spi_mark_suspended(ctlr);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(spi_controller_suspend);
+
+ int spi_controller_resume(struct spi_controller *ctlr)
+ {
+- int ret;
+-
+- if (!ctlr->queued)
+- return 0;
++ int ret = 0;
+
+- ret = spi_start_queue(ctlr);
+- if (ret)
+- dev_err(&ctlr->dev, "queue restart failed\n");
++ __spi_mark_resumed(ctlr);
+
++ if (ctlr->queued) {
++ ret = spi_start_queue(ctlr);
++ if (ret)
++ dev_err(&ctlr->dev, "queue restart failed\n");
++ }
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(spi_controller_resume);
+@@ -4153,8 +4172,7 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
+ ctlr->cur_msg = msg;
+ ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+ if (ret)
+- goto out;
+-
++ dev_err(&ctlr->dev, "noqueue transfer failed\n");
+ ctlr->cur_msg = NULL;
+ ctlr->fallback = false;
+
+@@ -4170,7 +4188,6 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
+ spi_idle_runtime_pm(ctlr);
+ }
+
+-out:
+ mutex_unlock(&ctlr->io_mutex);
+ }
+
+@@ -4193,6 +4210,11 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
+ int status;
+ struct spi_controller *ctlr = spi->controller;
+
++ if (__spi_check_suspended(ctlr)) {
++ dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
++ return -ESHUTDOWN;
++ }
++
+ status = __spi_validate(spi, message);
+ if (status != 0)
+ return status;
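
The spi.c changes above add a SPI_CONTROLLER_SUSPENDED flag that suspend/resume toggle under bus_lock_mutex, so __spi_sync() can fail fast with -ESHUTDOWN instead of touching suspended hardware. A small user-space sketch of the same handshake, with illustrative names (the real check in __spi_sync() is likewise made without taking the lock):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define CTLR_SUSPENDED 0x1	/* illustrative flag bit */

struct ctlr {
	pthread_mutex_t lock;	/* plays the role of bus_lock_mutex */
	unsigned int flags;
};

static void mark_suspended(struct ctlr *c)
{
	pthread_mutex_lock(&c->lock);
	c->flags |= CTLR_SUSPENDED;
	pthread_mutex_unlock(&c->lock);
}

static void mark_resumed(struct ctlr *c)
{
	pthread_mutex_lock(&c->lock);
	c->flags &= ~CTLR_SUSPENDED;
	pthread_mutex_unlock(&c->lock);
}

static int submit_sync(struct ctlr *c)
{
	/* Like __spi_sync(), check the flag without holding the lock. */
	if (c->flags & CTLR_SUSPENDED)
		return -ESHUTDOWN;
	return 0;	/* the transfer would run here */
}

int main(void)
{
	struct ctlr c = { PTHREAD_MUTEX_INITIALIZER, 0 };

	mark_suspended(&c);
	printf("while suspended: %d\n", submit_sync(&c));
	mark_resumed(&c);
	printf("after resume:    %d\n", submit_sync(&c));
	return 0;
}
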
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+index b696bf884cbd6..32af0e96e762b 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+@@ -172,12 +172,12 @@ int cedrus_hw_suspend(struct device *device)
+ {
+ struct cedrus_dev *dev = dev_get_drvdata(device);
+
+- reset_control_assert(dev->rstc);
+-
+ clk_disable_unprepare(dev->ram_clk);
+ clk_disable_unprepare(dev->mod_clk);
+ clk_disable_unprepare(dev->ahb_clk);
+
++ reset_control_assert(dev->rstc);
++
+ return 0;
+ }
+
+@@ -186,11 +186,18 @@ int cedrus_hw_resume(struct device *device)
+ struct cedrus_dev *dev = dev_get_drvdata(device);
+ int ret;
+
++ ret = reset_control_reset(dev->rstc);
++ if (ret) {
++ dev_err(dev->dev, "Failed to apply reset\n");
++
++ return ret;
++ }
++
+ ret = clk_prepare_enable(dev->ahb_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable AHB clock\n");
+
+- return ret;
++ goto err_rst;
+ }
+
+ ret = clk_prepare_enable(dev->mod_clk);
+@@ -207,21 +214,14 @@ int cedrus_hw_resume(struct device *device)
+ goto err_mod_clk;
+ }
+
+- ret = reset_control_reset(dev->rstc);
+- if (ret) {
+- dev_err(dev->dev, "Failed to apply reset\n");
+-
+- goto err_ram_clk;
+- }
+-
+ return 0;
+
+-err_ram_clk:
+- clk_disable_unprepare(dev->ram_clk);
+ err_mod_clk:
+ clk_disable_unprepare(dev->mod_clk);
+ err_ahb_clk:
+ clk_disable_unprepare(dev->ahb_clk);
++err_rst:
++ reset_control_assert(dev->rstc);
+
+ return ret;
+ }
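
The cedrus reordering above takes the block out of reset before enabling clocks and unwinds in strictly reverse order on failure, which is what the new err_rst label encodes. A stand-alone sketch of that goto-unwind shape, with stand-in enable/disable helpers in place of the clk and reset_control calls:

#include <stdio.h>

static int enable(const char *what, int fail)
{
	if (fail) {
		printf("failed to enable %s\n", what);
		return -1;
	}
	printf("enabled %s\n", what);
	return 0;
}

static void disable(const char *what)
{
	printf("disabled %s\n", what);
}

static int resume(int fail_mod_clk)
{
	int ret;

	ret = enable("reset (deassert)", 0);	/* reset comes out first */
	if (ret)
		return ret;
	ret = enable("ahb clk", 0);
	if (ret)
		goto err_rst;
	ret = enable("mod clk", fail_mod_clk);
	if (ret)
		goto err_ahb_clk;
	return 0;

err_ahb_clk:				/* unwind in reverse order */
	disable("ahb clk");
err_rst:
	disable("reset (assert)");
	return ret;
}

int main(void)
{
	resume(1);	/* force the mod clk to fail to show the unwind */
	return 0;
}
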
+diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
+index 36243a3972fd7..5ac5cb60bae67 100644
+--- a/drivers/thermal/intel/intel_powerclamp.c
++++ b/drivers/thermal/intel/intel_powerclamp.c
+@@ -256,7 +256,7 @@ skip_limit_set:
+
+ static const struct kernel_param_ops max_idle_ops = {
+ .set = max_idle_set,
+- .get = param_get_int,
++ .get = param_get_byte,
+ };
+
+ module_param_cb(max_idle, &max_idle_ops, &max_idle, 0644);
+diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c
+index 843214d30bd8b..8b0edb2048443 100644
+--- a/drivers/thermal/mediatek/auxadc_thermal.c
++++ b/drivers/thermal/mediatek/auxadc_thermal.c
+@@ -1267,7 +1267,7 @@ static int mtk_thermal_probe(struct platform_device *pdev)
+
+ mtk_thermal_turn_on_buffer(mt, apmixed_base);
+
+- if (mt->conf->version != MTK_THERMAL_V2)
++ if (mt->conf->version != MTK_THERMAL_V1)
+ mtk_thermal_release_periodic_ts(mt, auxadc_base);
+
+ if (mt->conf->version == MTK_THERMAL_V1)
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 58533ea75cd92..e6f3166a9208f 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -689,7 +689,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ if (result)
+ goto release_ida;
+
+- sprintf(dev->attr_name, "cdev%d_trip_point", dev->id);
++ snprintf(dev->attr_name, sizeof(dev->attr_name), "cdev%d_trip_point",
++ dev->id);
+ sysfs_attr_init(&dev->attr.attr);
+ dev->attr.attr.name = dev->attr_name;
+ dev->attr.attr.mode = 0444;
+@@ -698,7 +699,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ if (result)
+ goto remove_symbol_link;
+
+- sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id);
++ snprintf(dev->weight_attr_name, sizeof(dev->weight_attr_name),
++ "cdev%d_weight", dev->id);
+ sysfs_attr_init(&dev->weight_attr.attr);
+ dev->weight_attr.attr.name = dev->weight_attr_name;
+ dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO;
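
The thermal_core change above swaps sprintf() for snprintf() when building the cdev attribute names, bounding the write to the attr_name buffer. A quick user-space demonstration of why the bound matters, assuming a 20-byte buffer in the spirit of THERMAL_NAME_LENGTH (the exact size here is illustrative):

#include <stdio.h>

int main(void)
{
	char name[20];		/* assumed buffer size for illustration */
	int id = 1234567;	/* a large id no longer fits the buffer */
	int n;

	n = snprintf(name, sizeof(name), "cdev%d_trip_point", id);
	if (n >= (int)sizeof(name))
		printf("truncated (would need %d chars): %s\n", n, name);
	else
		printf("ok: %s\n", name);
	return 0;
}
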
+diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
+index 024e2e365a26b..597ac4144e331 100644
+--- a/drivers/thermal/thermal_trip.c
++++ b/drivers/thermal/thermal_trip.c
+@@ -55,6 +55,7 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
+ {
+ struct thermal_trip trip;
+ int low = -INT_MAX, high = INT_MAX;
++ bool same_trip = false;
+ int i, ret;
+
+ lockdep_assert_held(&tz->lock);
+@@ -63,6 +64,7 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
+ return;
+
+ for (i = 0; i < tz->num_trips; i++) {
++ bool low_set = false;
+ int trip_low;
+
+ ret = __thermal_zone_get_trip(tz, i, &trip);
+@@ -71,18 +73,31 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
+
+ trip_low = trip.temperature - trip.hysteresis;
+
+- if (trip_low < tz->temperature && trip_low > low)
++ if (trip_low < tz->temperature && trip_low > low) {
+ low = trip_low;
++ low_set = true;
++ same_trip = false;
++ }
+
+ if (trip.temperature > tz->temperature &&
+- trip.temperature < high)
++ trip.temperature < high) {
+ high = trip.temperature;
++ same_trip = low_set;
++ }
+ }
+
+ /* No need to change trip points */
+ if (tz->prev_low_trip == low && tz->prev_high_trip == high)
+ return;
+
++ /*
++ * If "high" and "low" are the same, skip the change unless this is the
++ * first time.
++ */
++ if (same_trip && (tz->prev_low_trip != -INT_MAX ||
++ tz->prev_high_trip != INT_MAX))
++ return;
++
+ tz->prev_low_trip = low;
+ tz->prev_high_trip = high;
+
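
The thermal_trip hunk above tracks whether the chosen low and high boundaries came from the same trip so that a degenerate window is only programmed once. A compilable sketch of that window computation with two made-up trips:

#include <limits.h>
#include <stdio.h>

struct trip { int temperature, hysteresis; };

int main(void)
{
	struct trip trips[] = { { 40000, 2000 }, { 60000, 2000 } };
	int temp = 50000;	/* current zone temperature, millicelsius */
	int low = -INT_MAX, high = INT_MAX;
	int same_trip = 0;

	for (size_t i = 0; i < sizeof(trips) / sizeof(trips[0]); i++) {
		int low_set = 0;
		int trip_low = trips[i].temperature - trips[i].hysteresis;

		if (trip_low < temp && trip_low > low) {
			low = trip_low;
			low_set = 1;
			same_trip = 0;
		}
		if (trips[i].temperature > temp &&
		    trips[i].temperature < high) {
			high = trips[i].temperature;
			same_trip = low_set;	/* both from this trip? */
		}
	}
	printf("window: [%d, %d] same_trip=%d\n", low, high, same_trip);
	return 0;
}
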
+diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
+index 488138a28ae13..e6bfa63b40aee 100644
+--- a/drivers/thunderbolt/quirks.c
++++ b/drivers/thunderbolt/quirks.c
+@@ -31,6 +31,9 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
+ {
+ struct tb_port *port;
+
++ if (tb_switch_is_icm(sw))
++ return;
++
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_usb3_down(port))
+ continue;
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index bd5815f8f23bd..509b99af5087b 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -1082,7 +1082,7 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
+ * Only set bonding if the link was not already bonded. This
+ * avoids the lane adapter to re-enter bonding state.
+ */
+- if (width == TB_LINK_WIDTH_SINGLE) {
++ if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) {
+ ret = tb_port_set_lane_bonding(port, true);
+ if (ret)
+ goto err_lane1;
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index 98764e740c078..34c01874f45be 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -377,18 +377,21 @@ void xen_console_resume(void)
+ #ifdef CONFIG_HVC_XEN_FRONTEND
+ static void xencons_disconnect_backend(struct xencons_info *info)
+ {
+- if (info->irq > 0)
+- unbind_from_irqhandler(info->irq, NULL);
+- info->irq = 0;
++ if (info->hvc != NULL)
++ hvc_remove(info->hvc);
++ info->hvc = NULL;
++ if (info->irq > 0) {
++ evtchn_put(info->evtchn);
++ info->irq = 0;
++ info->evtchn = 0;
++ }
++ /* evtchn_put() will also close it so this is only an error path */
+ if (info->evtchn > 0)
+ xenbus_free_evtchn(info->xbdev, info->evtchn);
+ info->evtchn = 0;
+ if (info->gntref > 0)
+ gnttab_free_grant_references(info->gntref);
+ info->gntref = 0;
+- if (info->hvc != NULL)
+- hvc_remove(info->hvc);
+- info->hvc = NULL;
+ }
+
+ static void xencons_free(struct xencons_info *info)
+@@ -433,7 +436,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
+ if (ret)
+ return ret;
+ info->evtchn = evtchn;
+- irq = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
++ irq = bind_evtchn_to_irq_lateeoi(evtchn);
+ if (irq < 0)
+ return irq;
+ info->irq = irq;
+@@ -553,10 +556,23 @@ static void xencons_backend_changed(struct xenbus_device *dev,
+ if (dev->state == XenbusStateClosed)
+ break;
+ fallthrough; /* Missed the backend's CLOSING state */
+- case XenbusStateClosing:
++ case XenbusStateClosing: {
++ struct xencons_info *info = dev_get_drvdata(&dev->dev);
++
++ /*
++ * Don't tear down the evtchn and grant ref before the other
++ * end has disconnected, but do stop userspace from trying
++ * to use the device before we allow the backend to close.
++ */
++ if (info->hvc) {
++ hvc_remove(info->hvc);
++ info->hvc = NULL;
++ }
++
+ xenbus_frontend_closed(dev);
+ break;
+ }
++ }
+ }
+
+ static const struct xenbus_device_id xencons_ids[] = {
+@@ -588,7 +604,7 @@ static int __init xen_hvc_init(void)
+ ops = &dom0_hvc_ops;
+ r = xen_initial_domain_console_init();
+ if (r < 0)
+- return r;
++ goto register_fe;
+ info = vtermno_to_xencons(HVC_COOKIE);
+ } else {
+ ops = &domU_hvc_ops;
+@@ -597,7 +613,7 @@ static int __init xen_hvc_init(void)
+ else
+ r = xen_pv_console_init();
+ if (r < 0)
+- return r;
++ goto register_fe;
+
+ info = vtermno_to_xencons(HVC_COOKIE);
+ info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
+@@ -616,12 +632,13 @@ static int __init xen_hvc_init(void)
+ list_del(&info->list);
+ spin_unlock_irqrestore(&xencons_lock, flags);
+ if (info->irq)
+- unbind_from_irqhandler(info->irq, NULL);
++ evtchn_put(info->evtchn);
+ kfree(info);
+ return r;
+ }
+
+ r = 0;
++ register_fe:
+ #ifdef CONFIG_HVC_XEN_FRONTEND
+ r = xenbus_register_frontend(&xencons_driver);
+ #endif
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 1f3aba607cd51..0ee7531c92017 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -4108,6 +4108,8 @@ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
+
+ static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk)
+ {
++ if (dlci->gsm->dead)
++ return -EL2HLT;
+ if (dlci->adaption == 2) {
+ /* Send convergence layer type 2 empty data frame. */
+ gsm_modem_upd_via_data(dlci, brk);
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 62a9bd30b4db5..bbd7914ddc9ad 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -2429,6 +2429,153 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
++ /*
++ * Brainboxes devices - all Oxsemi based
++ */
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4027,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4028,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4029,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4019,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4016,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4015,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x400A,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x400E,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x400C,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x400B,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x400F,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4010,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4011,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x401D,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x401E,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4013,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4017,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4018,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8811,
+@@ -4913,6 +5060,12 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 0, 0,
+ pbn_b1_bt_1_115200 },
+
++ /*
++ * IntaShield IS-100
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0D60,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_b2_1_115200 },
+ /*
+ * IntaShield IS-200
+ */
+@@ -4925,6 +5078,27 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
+ pbn_b2_4_115200 },
++ /*
++ * IntaShield IX-100
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x4027,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_oxsemi_1_15625000 },
++ /*
++ * IntaShield IX-200
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x4028,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_oxsemi_2_15625000 },
++ /*
++ * IntaShield IX-400
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x4029,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_oxsemi_4_15625000 },
+ /* Brainboxes Devices */
+ /*
+ * Brainboxes UC-101
+@@ -4940,10 +5114,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_1_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0AA2,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_1_115200 },
+ /*
+- * Brainboxes UC-257
++ * Brainboxes UC-253/UC-734
+ */
+- { PCI_VENDOR_ID_INTASHIELD, 0x0861,
++ { PCI_VENDOR_ID_INTASHIELD, 0x0CA1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+@@ -4979,6 +5157,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x08E2,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x08E3,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-310
+ */
+@@ -4989,6 +5175,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ /*
+ * Brainboxes UC-313
+ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x08A1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x08A2,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x08A3,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+@@ -5003,6 +5197,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ /*
+ * Brainboxes UC-346
+ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0B01,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0B02,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+@@ -5014,6 +5212,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0A82,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0A83,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+@@ -5026,12 +5228,94 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 0, 0,
+ pbn_b2_4_115200 },
+ /*
+- * Brainboxes UC-420/431
++ * Brainboxes UC-420
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0921,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
++ /*
++ * Brainboxes UC-607
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x09A1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x09A2,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x09A3,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UC-836
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0D41,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_4_115200 },
++ /*
++ * Brainboxes UP-189
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0AC1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0AC2,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0AC3,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UP-200
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0B21,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0B22,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0B23,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UP-869
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0C01,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0C02,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0C03,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UP-880
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0C21,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0C22,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0C23,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
+ /*
+ * Brainboxes PX-101
+ */
+@@ -5064,7 +5348,7 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_INTASHIELD, 0x4015,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+- pbn_oxsemi_4_15625000 },
++ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-260/PX-701
+ */
+@@ -5072,6 +5356,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
++ /*
++ * Brainboxes PX-275/279
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0E41,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_8_115200 },
+ /*
+ * Brainboxes PX-310
+ */
+@@ -5119,16 +5410,38 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+- * Brainboxes PX-803
++ * Brainboxes PX-475
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x401D,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_oxsemi_1_15625000 },
++ /*
++ * Brainboxes PX-803/PX-857
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4009,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+- pbn_b0_1_115200 },
++ pbn_b0_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x4018,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_oxsemi_2_15625000 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x401E,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+- pbn_oxsemi_1_15625000 },
++ pbn_oxsemi_2_15625000 },
++ /*
++ * Brainboxes PX-820
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x4002,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b0_4_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x4013,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-846
+ */
+diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
+index 790d910dafa5d..9388b9ddea3bd 100644
+--- a/drivers/tty/serial/meson_uart.c
++++ b/drivers/tty/serial/meson_uart.c
+@@ -380,10 +380,14 @@ static void meson_uart_set_termios(struct uart_port *port,
+ else
+ val |= AML_UART_STOP_BIT_1SB;
+
+- if (cflags & CRTSCTS)
+- val &= ~AML_UART_TWO_WIRE_EN;
+- else
++ if (cflags & CRTSCTS) {
++ if (port->flags & UPF_HARD_FLOW)
++ val &= ~AML_UART_TWO_WIRE_EN;
++ else
++ termios->c_cflag &= ~CRTSCTS;
++ } else {
+ val |= AML_UART_TWO_WIRE_EN;
++ }
+
+ writel(val, port->membase + AML_UART_CONTROL);
+
+@@ -705,6 +709,7 @@ static int meson_uart_probe(struct platform_device *pdev)
+ u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */
+ int ret = 0;
+ int irq;
++ bool has_rtscts;
+
+ if (pdev->dev.of_node)
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
+@@ -732,6 +737,7 @@ static int meson_uart_probe(struct platform_device *pdev)
+ return irq;
+
+ of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize);
++ has_rtscts = of_property_read_bool(pdev->dev.of_node, "uart-has-rtscts");
+
+ if (meson_ports[pdev->id]) {
+ return dev_err_probe(&pdev->dev, -EBUSY,
+@@ -762,6 +768,8 @@ static int meson_uart_probe(struct platform_device *pdev)
+ port->mapsize = resource_size(res_mem);
+ port->irq = irq;
+ port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
++ if (has_rtscts)
++ port->flags |= UPF_HARD_FLOW;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MESON_CONSOLE);
+ port->dev = &pdev->dev;
+ port->line = pdev->id;
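
The meson_uart hunks above only honour CRTSCTS when the device tree declared uart-has-rtscts (mapped to UPF_HARD_FLOW), and otherwise clear the bit from the termios so userspace can see the request was not granted. The negotiation reduces to this shape (flag values here are illustrative, not termios.h's):

#include <stdio.h>

#define CRTSCTS       0x1	/* illustrative bit values */
#define UPF_HARD_FLOW 0x2

struct port { unsigned int flags; };

/* Returns the c_cflag to store back: CRTSCTS survives only if the
 * port actually has the RTS/CTS lines wired up. */
static unsigned int apply_cflag(const struct port *p, unsigned int cflag)
{
	if ((cflag & CRTSCTS) && !(p->flags & UPF_HARD_FLOW))
		cflag &= ~CRTSCTS;
	return cflag;
}

int main(void)
{
	struct port no_flow = { 0 };
	struct port flow = { UPF_HARD_FLOW };

	printf("no wiring: %#x\n", apply_cflag(&no_flow, CRTSCTS));
	printf("wired:     %#x\n", apply_cflag(&flow, CRTSCTS));
	return 0;
}
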
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index d5ba6e90bd95f..f912f8bf1e633 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -146,7 +146,7 @@ static void __uart_start(struct uart_state *state)
+
+ /* Increment the runtime PM usage count for the active check below */
+ err = pm_runtime_get(&port_dev->dev);
+- if (err < 0) {
++ if (err < 0 && err != -EINPROGRESS) {
+ pm_runtime_put_noidle(&port_dev->dev);
+ return;
+ }
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 23198e3f1461a..6b4a28bcf2f5f 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -262,13 +262,14 @@ static void sysrq_handle_showallcpus(u8 key)
+ if (in_hardirq())
+ regs = get_irq_regs();
+
+- pr_info("CPU%d:\n", smp_processor_id());
++ pr_info("CPU%d:\n", get_cpu());
+ if (regs)
+ show_regs(regs);
+ else
+ show_stack(NULL, NULL, KERN_INFO);
+
+ schedule_work(&sysrq_showallcpus);
++ put_cpu();
+ }
+ }
+
+diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
+index 0d04287da0984..ef8741c3e6629 100644
+--- a/drivers/tty/tty_jobctrl.c
++++ b/drivers/tty/tty_jobctrl.c
+@@ -300,12 +300,7 @@ void disassociate_ctty(int on_exit)
+ return;
+ }
+
+- spin_lock_irq(&current->sighand->siglock);
+- put_pid(current->signal->tty_old_pgrp);
+- current->signal->tty_old_pgrp = NULL;
+- tty = tty_kref_get(current->signal->tty);
+- spin_unlock_irq(&current->sighand->siglock);
+-
++ tty = get_current_tty();
+ if (tty) {
+ unsigned long flags;
+
+@@ -320,6 +315,16 @@ void disassociate_ctty(int on_exit)
+ tty_kref_put(tty);
+ }
+
++ /* If tty->ctrl.pgrp is not NULL, it may be assigned to
++ * current->signal->tty_old_pgrp in a race condition, causing
++ * a pid memleak. Release current->signal->tty_old_pgrp only
++ * after tty->ctrl.pgrp has been set to NULL.
++ */
++ spin_lock_irq(&current->sighand->siglock);
++ put_pid(current->signal->tty_old_pgrp);
++ current->signal->tty_old_pgrp = NULL;
++ spin_unlock_irq(&current->sighand->siglock);
++
+ /* Now clear signal->tty under the lock */
+ read_lock(&tasklist_lock);
+ session_clear_tty(task_session(current));
+diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
+index a39ed981bfd3e..5b625f20233b4 100644
+--- a/drivers/tty/vcc.c
++++ b/drivers/tty/vcc.c
+@@ -579,18 +579,22 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ return -ENOMEM;
+
+ name = kstrdup(dev_name(&vdev->dev), GFP_KERNEL);
++ if (!name) {
++ rv = -ENOMEM;
++ goto free_port;
++ }
+
+ rv = vio_driver_init(&port->vio, vdev, VDEV_CONSOLE_CON, vcc_versions,
+ ARRAY_SIZE(vcc_versions), NULL, name);
+ if (rv)
+- goto free_port;
++ goto free_name;
+
+ port->vio.debug = vcc_dbg_vio;
+ vcc_ldc_cfg.debug = vcc_dbg_ldc;
+
+ rv = vio_ldc_alloc(&port->vio, &vcc_ldc_cfg, port);
+ if (rv)
+- goto free_port;
++ goto free_name;
+
+ spin_lock_init(&port->lock);
+
+@@ -624,6 +628,11 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ goto unreg_tty;
+ }
+ port->domain = kstrdup(domain, GFP_KERNEL);
++ if (!port->domain) {
++ rv = -ENOMEM;
++ goto unreg_tty;
++ }
++
+
+ mdesc_release(hp);
+
+@@ -653,8 +662,9 @@ free_table:
+ vcc_table_remove(port->index);
+ free_ldc:
+ vio_ldc_free(&port->vio);
+-free_port:
++free_name:
+ kfree(name);
++free_port:
+ kfree(port);
+
+ return rv;
+diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
+index 2ba8ec254dcee..0787456c2b892 100644
+--- a/drivers/ufs/core/ufs-mcq.c
++++ b/drivers/ufs/core/ufs-mcq.c
+@@ -436,7 +436,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+- hwq->max_entries = hba->nutrs;
++ hwq->max_entries = hba->nutrs + 1;
+ spin_lock_init(&hwq->sq_lock);
+ spin_lock_init(&hwq->cq_lock);
+ mutex_init(&hwq->sq_mutex);
+@@ -630,6 +630,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ int tag = scsi_cmd_to_rq(cmd)->tag;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct ufs_hw_queue *hwq;
++ unsigned long flags;
+ int err = FAILED;
+
+ if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+@@ -670,8 +671,10 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ }
+
+ err = SUCCESS;
++ spin_lock_irqsave(&hwq->cq_lock, flags);
+ if (ufshcd_cmd_inflight(lrbp->cmd))
+ ufshcd_release_scsi_cmd(hba, lrbp);
++ spin_unlock_irqrestore(&hwq->cq_lock, flags);
+
+ out:
+ return err;
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 8382e8cfa414a..170fbd5715b21 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -3632,7 +3632,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
+ */
+ ret = utf16s_to_utf8s(uc_str->uc,
+ uc_str->len - QUERY_DESC_HDR_SIZE,
+- UTF16_BIG_ENDIAN, str, ascii_len);
++ UTF16_BIG_ENDIAN, str, ascii_len - 1);
+
+ /* replace non-printable or non-ASCII characters with spaces */
+ for (i = 0; i < ret; i++)
+@@ -6347,11 +6347,24 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
+ struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ struct ufs_hba *hba = shost_priv(shost);
++ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
++ struct ufs_hw_queue *hwq;
++ unsigned long flags;
+
+ *ret = ufshcd_try_to_abort_task(hba, tag);
+ dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+ hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+ *ret ? "failed" : "succeeded");
++
++ /* Release cmd in MCQ mode if abort succeeds */
++ if (is_mcq_enabled(hba) && (*ret == 0)) {
++ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
++ spin_lock_irqsave(&hwq->cq_lock, flags);
++ if (ufshcd_cmd_inflight(lrbp->cmd))
++ ufshcd_release_scsi_cmd(hba, lrbp);
++ spin_unlock_irqrestore(&hwq->cq_lock, flags);
++ }
++
+ return *ret == 0;
+ }
+
+@@ -8723,7 +8736,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
+ if (ret)
+ goto out;
+
+- if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
++ if (!hba->pm_op_in_progress &&
++ (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
+ /* Reset the device and controller before doing reinit */
+ ufshcd_device_reset(hba);
+ ufshcd_hba_stop(hba);
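
The ufshcd_read_string_desc() fix above passes ascii_len - 1 to utf16s_to_utf8s() so a terminating NUL still fits: the converter reports how many bytes it wrote but does not terminate. The same caller-side pattern, with a trivial byte-copy standing in for the converter:

#include <stdio.h>
#include <string.h>

/* Byte-copy stand-in for utf16s_to_utf8s(): returns bytes written,
 * never writes a terminating NUL. */
static int convert(const char *src, int srclen, char *dst, int dstlen)
{
	int n = srclen < dstlen ? srclen : dstlen;

	memcpy(dst, src, n);
	return n;
}

int main(void)
{
	char out[8];
	int ret = convert("HELLOWORLD", 10, out, sizeof(out) - 1);

	out[ret] = '\0';	/* safe: at most sizeof(out) - 1 written */
	printf("%s\n", out);	/* prints "HELLOWO" */
	return 0;
}
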
+diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
+index d1149b1c3ed50..b1d720031251e 100644
+--- a/drivers/ufs/host/ufs-qcom.c
++++ b/drivers/ufs/host/ufs-qcom.c
+@@ -909,8 +909,13 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
+ return ret;
+ }
+
+- /* Use the agreed gear */
+- host->hs_gear = dev_req_params->gear_tx;
++ /*
++ * Update hs_gear only when the gears are scaled to a higher value. This is because
++ * the PHY gear settings are backwards compatible and we only need to change the PHY
++ * settings while scaling to higher gears.
++ */
++ if (dev_req_params->gear_tx > host->hs_gear)
++ host->hs_gear = dev_req_params->gear_tx;
+
+ /* enable the device ref clock before changing to HS mode */
+ if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
+diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
+index 07f6068342d46..275a6a2fa671e 100644
+--- a/drivers/usb/cdns3/cdnsp-ring.c
++++ b/drivers/usb/cdns3/cdnsp-ring.c
+@@ -1529,6 +1529,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
+ unsigned long flags;
+ int counter = 0;
+
++ local_bh_disable();
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
+@@ -1541,6 +1542,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
+ cdnsp_died(pdev);
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
++ local_bh_enable();
+ return IRQ_HANDLED;
+ }
+
+@@ -1557,6 +1559,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
+ cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
++ local_bh_enable();
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
+index 08af26b762a2d..0cce192083701 100644
+--- a/drivers/usb/chipidea/host.c
++++ b/drivers/usb/chipidea/host.c
+@@ -30,8 +30,7 @@ struct ehci_ci_priv {
+ };
+
+ struct ci_hdrc_dma_aligned_buffer {
+- void *kmalloc_ptr;
+- void *old_xfer_buffer;
++ void *original_buffer;
+ u8 data[];
+ };
+
+@@ -380,59 +379,52 @@ static int ci_ehci_bus_suspend(struct usb_hcd *hcd)
+ return 0;
+ }
+
+-static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb)
++static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb, bool copy_back)
+ {
+ struct ci_hdrc_dma_aligned_buffer *temp;
+- size_t length;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
++ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+
+ temp = container_of(urb->transfer_buffer,
+ struct ci_hdrc_dma_aligned_buffer, data);
++ urb->transfer_buffer = temp->original_buffer;
++
++ if (copy_back && usb_urb_dir_in(urb)) {
++ size_t length;
+
+- if (usb_urb_dir_in(urb)) {
+ if (usb_pipeisoc(urb->pipe))
+ length = urb->transfer_buffer_length;
+ else
+ length = urb->actual_length;
+
+- memcpy(temp->old_xfer_buffer, temp->data, length);
++ memcpy(temp->original_buffer, temp->data, length);
+ }
+- urb->transfer_buffer = temp->old_xfer_buffer;
+- kfree(temp->kmalloc_ptr);
+
+- urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
++ kfree(temp);
+ }
+
+ static int ci_hdrc_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
+ {
+- struct ci_hdrc_dma_aligned_buffer *temp, *kmalloc_ptr;
+- const unsigned int ci_hdrc_usb_dma_align = 32;
+- size_t kmalloc_size;
++ struct ci_hdrc_dma_aligned_buffer *temp;
+
+- if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0 ||
+- !((uintptr_t)urb->transfer_buffer & (ci_hdrc_usb_dma_align - 1)))
++ if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0)
++ return 0;
++ if (IS_ALIGNED((uintptr_t)urb->transfer_buffer, 4) &&
++ IS_ALIGNED(urb->transfer_buffer_length, 4))
+ return 0;
+
+- /* Allocate a buffer with enough padding for alignment */
+- kmalloc_size = urb->transfer_buffer_length +
+- sizeof(struct ci_hdrc_dma_aligned_buffer) +
+- ci_hdrc_usb_dma_align - 1;
+-
+- kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+- if (!kmalloc_ptr)
++ temp = kmalloc(sizeof(*temp) + ALIGN(urb->transfer_buffer_length, 4), mem_flags);
++ if (!temp)
+ return -ENOMEM;
+
+- /* Position our struct dma_aligned_buffer such that data is aligned */
+- temp = PTR_ALIGN(kmalloc_ptr + 1, ci_hdrc_usb_dma_align) - 1;
+- temp->kmalloc_ptr = kmalloc_ptr;
+- temp->old_xfer_buffer = urb->transfer_buffer;
+ if (usb_urb_dir_out(urb))
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+- urb->transfer_buffer = temp->data;
+
++ temp->original_buffer = urb->transfer_buffer;
++ urb->transfer_buffer = temp->data;
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+ return 0;
+@@ -449,7 +441,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+- ci_hdrc_free_dma_aligned_buffer(urb);
++ ci_hdrc_free_dma_aligned_buffer(urb, false);
+
+ return ret;
+ }
+@@ -457,7 +449,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ static void ci_hdrc_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+ {
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+- ci_hdrc_free_dma_aligned_buffer(urb);
++ ci_hdrc_free_dma_aligned_buffer(urb, true);
+ }
+
+ #ifdef CONFIG_PM_SLEEP
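
The chipidea rework above simplifies the bounce buffer to a flexible-array struct: a bounce is taken only when the URB buffer or its length is not 4-byte aligned, the original pointer is stashed, and IN data is copied back on unmap. A user-space sketch of that scheme, with IS_ALIGNED/ALIGN4 as stand-ins for the kernel macros:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define IS_ALIGNED(x, a) (((uintptr_t)(x) & ((a) - 1)) == 0)
#define ALIGN4(len)      (((len) + 3u) & ~3u)

struct aligned_buffer {
	void *original_buffer;	/* caller's pointer, for copy-back/free */
	uint8_t data[];		/* payload lives here, naturally aligned */
};

/* Returns a 4-byte-aligned transfer buffer, bouncing if needed. */
static void *maybe_bounce(void *buf, size_t len)
{
	struct aligned_buffer *tmp;

	if (IS_ALIGNED(buf, 4) && IS_ALIGNED(len, 4))
		return buf;		/* already usable as-is */

	tmp = malloc(sizeof(*tmp) + ALIGN4(len));
	if (!tmp)
		return NULL;
	memcpy(tmp->data, buf, len);	/* OUT direction: copy in */
	tmp->original_buffer = buf;
	return tmp->data;
}

int main(void)
{
	char odd[7] = "abcdef";		/* 7-byte length forces a bounce */
	void *xfer = maybe_bounce(odd, sizeof(odd));

	if (!xfer)
		return 1;
	printf("bounced: %s\n", xfer == (void *)odd ? "no" : "yes");
	if (xfer != (void *)odd)	/* container_of-style recovery */
		free((char *)xfer - offsetof(struct aligned_buffer, data));
	return 0;
}
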
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index b19e38d5fd10c..7f8d33f92ddb5 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -1047,7 +1047,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+
+ if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
+ dev_notice(ddev, "descriptor type invalid, skip\n");
+- continue;
++ goto skip_to_next_descriptor;
+ }
+
+ switch (cap_type) {
+@@ -1078,6 +1078,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ break;
+ }
+
++skip_to_next_descriptor:
+ total_len -= length;
+ buffer += length;
+ }
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 0ff47eeffb490..dfc30cebd4c4c 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -622,29 +622,6 @@ static int hub_ext_port_status(struct usb_hub *hub, int port1, int type,
+ ret = 0;
+ }
+ mutex_unlock(&hub->status_mutex);
+-
+- /*
+- * There is no need to lock status_mutex here, because status_mutex
+- * protects hub->status, and the phy driver only checks the port
+- * status without changing the status.
+- */
+- if (!ret) {
+- struct usb_device *hdev = hub->hdev;
+-
+- /*
+- * Only roothub will be notified of port state changes,
+- * since the USB PHY only cares about changes at the next
+- * level.
+- */
+- if (is_root_hub(hdev)) {
+- struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
+-
+- if (hcd->usb_phy)
+- usb_phy_notify_port_status(hcd->usb_phy,
+- port1 - 1, *status, *change);
+- }
+- }
+-
+ return ret;
+ }
+
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index 657f1f659ffaf..35c7a4df8e717 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -4769,8 +4769,8 @@ fail3:
+ if (qh_allocated && qh->channel && qh->channel->qh == qh)
+ qh->channel->qh = NULL;
+ fail2:
+- spin_unlock_irqrestore(&hsotg->lock, flags);
+ urb->hcpriv = NULL;
++ spin_unlock_irqrestore(&hsotg->lock, flags);
+ kfree(qtd);
+ fail1:
+ if (qh_allocated) {
+diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
+index 0144ca8350c31..5c7538d498dd1 100644
+--- a/drivers/usb/dwc2/hcd_intr.c
++++ b/drivers/usb/dwc2/hcd_intr.c
+@@ -2015,15 +2015,17 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
+ {
+ struct dwc2_qtd *qtd;
+ struct dwc2_host_chan *chan;
+- u32 hcint, hcintmsk;
++ u32 hcint, hcintraw, hcintmsk;
+
+ chan = hsotg->hc_ptr_array[chnum];
+
+- hcint = dwc2_readl(hsotg, HCINT(chnum));
++ hcintraw = dwc2_readl(hsotg, HCINT(chnum));
+ hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
++ hcint = hcintraw & hcintmsk;
++ dwc2_writel(hsotg, hcint, HCINT(chnum));
++
+ if (!chan) {
+ dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
+- dwc2_writel(hsotg, hcint, HCINT(chnum));
+ return;
+ }
+
+@@ -2032,11 +2034,9 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
+ chnum);
+ dev_vdbg(hsotg->dev,
+ " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
+- hcint, hcintmsk, hcint & hcintmsk);
++ hcintraw, hcintmsk, hcint);
+ }
+
+- dwc2_writel(hsotg, hcint, HCINT(chnum));
+-
+ /*
+ * If we got an interrupt after someone called
+ * dwc2_hcd_endpoint_disable() we don't want to crash below
+@@ -2046,8 +2046,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
+ return;
+ }
+
+- chan->hcint = hcint;
+- hcint &= hcintmsk;
++ chan->hcint = hcintraw;
+
+ /*
+ * If the channel was halted due to a dequeue, the qtd list might
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 343d2570189ff..8d5af9ccb6027 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1094,6 +1094,111 @@ static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
+ }
+ }
+
++static void dwc3_config_threshold(struct dwc3 *dwc)
++{
++ u32 reg;
++ u8 rx_thr_num;
++ u8 rx_maxburst;
++ u8 tx_thr_num;
++ u8 tx_maxburst;
++
++ /*
++ * Must config both number of packets and max burst settings to enable
++ * RX and/or TX threshold.
++ */
++ if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
++ rx_thr_num = dwc->rx_thr_num_pkt_prd;
++ rx_maxburst = dwc->rx_max_burst_prd;
++ tx_thr_num = dwc->tx_thr_num_pkt_prd;
++ tx_maxburst = dwc->tx_max_burst_prd;
++
++ if (rx_thr_num && rx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++ reg |= DWC31_RXTHRNUMPKTSEL_PRD;
++
++ reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
++ reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
++
++ reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
++ reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++ }
++
++ if (tx_thr_num && tx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++ reg |= DWC31_TXTHRNUMPKTSEL_PRD;
++
++ reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
++ reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
++
++ reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
++ reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++ }
++ }
++
++ rx_thr_num = dwc->rx_thr_num_pkt;
++ rx_maxburst = dwc->rx_max_burst;
++ tx_thr_num = dwc->tx_thr_num_pkt;
++ tx_maxburst = dwc->tx_max_burst;
++
++ if (DWC3_IP_IS(DWC3)) {
++ if (rx_thr_num && rx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++ reg |= DWC3_GRXTHRCFG_PKTCNTSEL;
++
++ reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
++ reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);
++
++ reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
++ reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++ }
++
++ if (tx_thr_num && tx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++ reg |= DWC3_GTXTHRCFG_PKTCNTSEL;
++
++ reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
++ reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);
++
++ reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
++ reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++ }
++ } else {
++ if (rx_thr_num && rx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++ reg |= DWC31_GRXTHRCFG_PKTCNTSEL;
++
++ reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
++ reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);
++
++ reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
++ reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++ }
++
++ if (tx_thr_num && tx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++ reg |= DWC31_GTXTHRCFG_PKTCNTSEL;
++
++ reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
++ reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);
++
++ reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
++ reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++ }
++ }
++}
++
+ /**
+ * dwc3_core_init - Low-level initialization of DWC3 Core
+ * @dwc: Pointer to our controller context structure
+@@ -1246,42 +1351,7 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
+- /*
+- * Must config both number of packets and max burst settings to enable
+- * RX and/or TX threshold.
+- */
+- if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
+- u8 rx_thr_num = dwc->rx_thr_num_pkt_prd;
+- u8 rx_maxburst = dwc->rx_max_burst_prd;
+- u8 tx_thr_num = dwc->tx_thr_num_pkt_prd;
+- u8 tx_maxburst = dwc->tx_max_burst_prd;
+-
+- if (rx_thr_num && rx_maxburst) {
+- reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+- reg |= DWC31_RXTHRNUMPKTSEL_PRD;
+-
+- reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
+- reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
+-
+- reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
+- reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
+-
+- dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+- }
+-
+- if (tx_thr_num && tx_maxburst) {
+- reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
+- reg |= DWC31_TXTHRNUMPKTSEL_PRD;
+-
+- reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
+- reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
+-
+- reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
+- reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
+-
+- dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
+- }
+- }
++ dwc3_config_threshold(dwc);
+
+ return 0;
+
+@@ -1417,6 +1487,10 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ u8 lpm_nyet_threshold;
+ u8 tx_de_emphasis;
+ u8 hird_threshold;
++ u8 rx_thr_num_pkt = 0;
++ u8 rx_max_burst = 0;
++ u8 tx_thr_num_pkt = 0;
++ u8 tx_max_burst = 0;
+ u8 rx_thr_num_pkt_prd = 0;
+ u8 rx_max_burst_prd = 0;
+ u8 tx_thr_num_pkt_prd = 0;
+@@ -1479,6 +1553,14 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ "snps,usb2-lpm-disable");
+ dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
+ "snps,usb2-gadget-lpm-disable");
++ device_property_read_u8(dev, "snps,rx-thr-num-pkt",
++ &rx_thr_num_pkt);
++ device_property_read_u8(dev, "snps,rx-max-burst",
++ &rx_max_burst);
++ device_property_read_u8(dev, "snps,tx-thr-num-pkt",
++ &tx_thr_num_pkt);
++ device_property_read_u8(dev, "snps,tx-max-burst",
++ &tx_max_burst);
+ device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
+ &rx_thr_num_pkt_prd);
+ device_property_read_u8(dev, "snps,rx-max-burst-prd",
+@@ -1560,6 +1642,12 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+
+ dwc->hird_threshold = hird_threshold;
+
++ dwc->rx_thr_num_pkt = rx_thr_num_pkt;
++ dwc->rx_max_burst = rx_max_burst;
++
++ dwc->tx_thr_num_pkt = tx_thr_num_pkt;
++ dwc->tx_max_burst = tx_max_burst;
++
+ dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
+ dwc->rx_max_burst_prd = rx_max_burst_prd;
+
+@@ -1918,6 +2006,8 @@ static int dwc3_probe(struct platform_device *pdev)
+
+ pm_runtime_put(dev);
+
++ dma_set_max_seg_size(dev, UINT_MAX);
++
+ return 0;
+
+ err_exit_debugfs:
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index a69ac67d89fe6..6782ec8bfd64c 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -211,6 +211,11 @@
+ #define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24)
+ #define DWC3_GRXTHRCFG_PKTCNTSEL BIT(29)
+
++/* Global TX Threshold Configuration Register */
++#define DWC3_GTXTHRCFG_MAXTXBURSTSIZE(n) (((n) & 0xff) << 16)
++#define DWC3_GTXTHRCFG_TXPKTCNT(n) (((n) & 0xf) << 24)
++#define DWC3_GTXTHRCFG_PKTCNTSEL BIT(29)
++
+ /* Global RX Threshold Configuration Register for DWC_usb31 only */
+ #define DWC31_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 16)
+ #define DWC31_GRXTHRCFG_RXPKTCNT(n) (((n) & 0x1f) << 21)
+@@ -1045,6 +1050,10 @@ struct dwc3_scratchpad_array {
+ * @test_mode_nr: test feature selector
+ * @lpm_nyet_threshold: LPM NYET response threshold
+ * @hird_threshold: HIRD threshold
++ * @rx_thr_num_pkt: USB receive packet count
++ * @rx_max_burst: max USB receive burst size
++ * @tx_thr_num_pkt: USB transmit packet count
++ * @tx_max_burst: max USB transmit burst size
+ * @rx_thr_num_pkt_prd: periodic ESS receive packet count
+ * @rx_max_burst_prd: max periodic ESS receive burst size
+ * @tx_thr_num_pkt_prd: periodic ESS transmit packet count
+@@ -1273,6 +1282,10 @@ struct dwc3 {
+ u8 test_mode_nr;
+ u8 lpm_nyet_threshold;
+ u8 hird_threshold;
++ u8 rx_thr_num_pkt;
++ u8 rx_max_burst;
++ u8 tx_thr_num_pkt;
++ u8 tx_max_burst;
+ u8 rx_thr_num_pkt_prd;
+ u8 rx_max_burst_prd;
+ u8 tx_thr_num_pkt_prd;
+diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
+index 039bf241769af..57ddd2e43022e 100644
+--- a/drivers/usb/dwc3/drd.c
++++ b/drivers/usb/dwc3/drd.c
+@@ -505,6 +505,7 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
+ dwc->role_switch_default_mode = USB_DR_MODE_PERIPHERAL;
+ mode = DWC3_GCTL_PRTCAP_DEVICE;
+ }
++ dwc3_set_mode(dwc, mode);
+
+ dwc3_role_switch.fwnode = dev_fwnode(dwc->dev);
+ dwc3_role_switch.set = dwc3_usb_role_switch_set;
+@@ -526,7 +527,6 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
+ }
+ }
+
+- dwc3_set_mode(dwc, mode);
+ return 0;
+ }
+ #else
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index 3de43df6bbe81..82544374110b0 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -549,7 +549,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ qcom_dwc3_resume_irq,
+- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
++ IRQF_ONESHOT,
+ "qcom_dwc3 HS", qcom);
+ if (ret) {
+ dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret);
+@@ -564,7 +564,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ qcom_dwc3_resume_irq,
+- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
++ IRQF_ONESHOT,
+ "qcom_dwc3 DP_HS", qcom);
+ if (ret) {
+ dev_err(qcom->dev, "dp_hs_phy_irq failed: %d\n", ret);
+@@ -579,7 +579,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ qcom_dwc3_resume_irq,
+- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
++ IRQF_ONESHOT,
+ "qcom_dwc3 DM_HS", qcom);
+ if (ret) {
+ dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret);
+@@ -594,7 +594,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ qcom_dwc3_resume_irq,
+- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
++ IRQF_ONESHOT,
+ "qcom_dwc3 SS", qcom);
+ if (ret) {
+ dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret);
+@@ -758,6 +758,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
+ if (!qcom->dwc3) {
+ ret = -ENODEV;
+ dev_err(dev, "failed to get dwc3 platform device\n");
++ of_platform_depopulate(dev);
+ }
+
+ node_put:
+@@ -766,9 +767,9 @@ node_put:
+ return ret;
+ }
+
+-static struct platform_device *
+-dwc3_qcom_create_urs_usb_platdev(struct device *dev)
++static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev)
+ {
++ struct platform_device *urs_usb = NULL;
+ struct fwnode_handle *fwh;
+ struct acpi_device *adev;
+ char name[8];
+@@ -788,9 +789,26 @@ dwc3_qcom_create_urs_usb_platdev(struct device *dev)
+
+ adev = to_acpi_device_node(fwh);
+ if (!adev)
+- return NULL;
++ goto err_put_handle;
++
++ urs_usb = acpi_create_platform_device(adev, NULL);
++ if (IS_ERR_OR_NULL(urs_usb))
++ goto err_put_handle;
++
++ return urs_usb;
++
++err_put_handle:
++ fwnode_handle_put(fwh);
++
++ return urs_usb;
++}
+
+- return acpi_create_platform_device(adev, NULL);
++static void dwc3_qcom_destroy_urs_usb_platdev(struct platform_device *urs_usb)
++{
++ struct fwnode_handle *fwh = urs_usb->dev.fwnode;
++
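++	/* Release the fwnode reference taken when the URS device was created. */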
++ platform_device_unregister(urs_usb);
++ fwnode_handle_put(fwh);
+ }
+
+ static int dwc3_qcom_probe(struct platform_device *pdev)
+@@ -874,13 +892,13 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
+ if (IS_ERR(qcom->qscratch_base)) {
+ ret = PTR_ERR(qcom->qscratch_base);
+- goto clk_disable;
++ goto free_urs;
+ }
+
+ ret = dwc3_qcom_setup_irq(pdev);
+ if (ret) {
+ dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
+- goto clk_disable;
++ goto free_urs;
+ }
+
+ /*
+@@ -899,7 +917,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+
+ if (ret) {
+ dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
+- goto depopulate;
++ goto free_urs;
+ }
+
+ ret = dwc3_qcom_interconnect_init(qcom);
+@@ -931,10 +949,16 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ interconnect_exit:
+ dwc3_qcom_interconnect_exit(qcom);
+ depopulate:
+- if (np)
++ if (np) {
+ of_platform_depopulate(&pdev->dev);
+- else
+- platform_device_put(pdev);
++ } else {
++ device_remove_software_node(&qcom->dwc3->dev);
++ platform_device_del(qcom->dwc3);
++ }
++ platform_device_put(qcom->dwc3);
++free_urs:
++ if (qcom->urs_usb)
++ dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
+ clk_disable:
+ for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ clk_disable_unprepare(qcom->clks[i]);
+@@ -953,11 +977,16 @@ static void dwc3_qcom_remove(struct platform_device *pdev)
+ struct device *dev = &pdev->dev;
+ int i;
+
+- device_remove_software_node(&qcom->dwc3->dev);
+- if (np)
++ if (np) {
+ of_platform_depopulate(&pdev->dev);
+- else
+- platform_device_put(pdev);
++ } else {
++ device_remove_software_node(&qcom->dwc3->dev);
++ platform_device_del(qcom->dwc3);
++ }
++ platform_device_put(qcom->dwc3);
++
++ if (qcom->urs_usb)
++ dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
+
+ for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ clk_disable_unprepare(qcom->clks[i]);
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index e6ab8cc225ffd..cc0ed29a4adc0 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -1410,7 +1410,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_ncm *ncm = func_to_ncm(f);
+ struct usb_string *us;
+- int status;
++ int status = 0;
+ struct usb_ep *ep;
+ struct f_ncm_opts *ncm_opts;
+
+@@ -1428,22 +1428,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
+ }
+
+- /*
+- * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+- * configurations are bound in sequence with list_for_each_entry,
+- * in each configuration its functions are bound in sequence
+- * with list_for_each_entry, so we assume no race condition
+- * with regard to ncm_opts->bound access
+- */
+- if (!ncm_opts->bound) {
+- mutex_lock(&ncm_opts->lock);
+- gether_set_gadget(ncm_opts->net, cdev->gadget);
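++	/*
++	 * Refresh the gadget on every bind (it can change across rebinds),
++	 * but register the netdev only once.
++	 */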
++ mutex_lock(&ncm_opts->lock);
++ gether_set_gadget(ncm_opts->net, cdev->gadget);
++ if (!ncm_opts->bound)
+ status = gether_register_netdev(ncm_opts->net);
+- mutex_unlock(&ncm_opts->lock);
+- if (status)
+- goto fail;
+- ncm_opts->bound = true;
+- }
++ mutex_unlock(&ncm_opts->lock);
++
++ if (status)
++ goto fail;
++
++ ncm_opts->bound = true;
++
+ us = usb_gstrings_attach(cdev, ncm_strings,
+ ARRAY_SIZE(ncm_string_defs));
+ if (IS_ERR(us)) {
+diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
+index e549022642e56..ea106ad665a1f 100644
+--- a/drivers/usb/gadget/legacy/raw_gadget.c
++++ b/drivers/usb/gadget/legacy/raw_gadget.c
+@@ -663,12 +663,12 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ if (WARN_ON(in && dev->ep0_out_pending)) {
+ ret = -ENODEV;
+ dev->state = STATE_DEV_FAILED;
+- goto out_done;
++ goto out_unlock;
+ }
+ if (WARN_ON(!in && dev->ep0_in_pending)) {
+ ret = -ENODEV;
+ dev->state = STATE_DEV_FAILED;
+- goto out_done;
++ goto out_unlock;
+ }
+
+ dev->req->buf = data;
+@@ -683,7 +683,7 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ "fail, usb_ep_queue returned %d\n", ret);
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->state = STATE_DEV_FAILED;
+- goto out_done;
++ goto out_queue_failed;
+ }
+
+ ret = wait_for_completion_interruptible(&dev->ep0_done);
+@@ -692,13 +692,16 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ usb_ep_dequeue(dev->gadget->ep0, dev->req);
+ wait_for_completion(&dev->ep0_done);
+ spin_lock_irqsave(&dev->lock, flags);
+- goto out_done;
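++		/*
++		 * usb_ep_dequeue() completes the request with -ECONNRESET;
++		 * report -EINTR instead, as the wait was interrupted.
++		 */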
++ if (dev->ep0_status == -ECONNRESET)
++ dev->ep0_status = -EINTR;
++ goto out_interrupted;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+- ret = dev->ep0_status;
+
+-out_done:
++out_interrupted:
++ ret = dev->ep0_status;
++out_queue_failed:
+ dev->ep0_urb_queued = false;
+ out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+@@ -1067,7 +1070,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ "fail, usb_ep_queue returned %d\n", ret);
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->state = STATE_DEV_FAILED;
+- goto out_done;
++ goto out_queue_failed;
+ }
+
+ ret = wait_for_completion_interruptible(&done);
+@@ -1076,13 +1079,16 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ usb_ep_dequeue(ep->ep, ep->req);
+ wait_for_completion(&done);
+ spin_lock_irqsave(&dev->lock, flags);
+- goto out_done;
++ if (ep->status == -ECONNRESET)
++ ep->status = -EINTR;
++ goto out_interrupted;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+- ret = ep->status;
+
+-out_done:
++out_interrupted:
++ ret = ep->status;
++out_queue_failed:
+ ep->urb_queued = false;
+ out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index b9ae5c2a25275..95ed9404f6f85 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -535,6 +535,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ /* xHC spec requires PCI devices to support D3hot and D3cold */
+ if (xhci->hci_version >= 0x120)
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
++ else if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version >= 0x110)
++ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+@@ -693,7 +695,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+ pm_runtime_put_noidle(&dev->dev);
+
+- if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
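++	/* Runtime suspend saves nothing if the device would stay in D0 anyway. */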
++ if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0)
++ pm_runtime_forbid(&dev->dev);
++ else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ pm_runtime_allow(&dev->dev);
+
+ dma_set_max_seg_size(&dev->dev, UINT_MAX);
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 28218c8f18376..732cdeb739202 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -13,6 +13,7 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/of.h>
++#include <linux/of_device.h>
+ #include <linux/platform_device.h>
+ #include <linux/usb/phy.h>
+ #include <linux/slab.h>
+@@ -148,7 +149,7 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
+ int ret;
+ int irq;
+ struct xhci_plat_priv *priv = NULL;
+-
++ bool of_match;
+
+ if (usb_disabled())
+ return -ENODEV;
+@@ -253,16 +254,23 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
+ &xhci->imod_interval);
+ }
+
+- hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
+- if (IS_ERR(hcd->usb_phy)) {
+- ret = PTR_ERR(hcd->usb_phy);
+- if (ret == -EPROBE_DEFER)
+- goto disable_clk;
+- hcd->usb_phy = NULL;
+- } else {
+- ret = usb_phy_init(hcd->usb_phy);
+- if (ret)
+- goto disable_clk;
++ /*
++	 * Drivers such as dwc3 manage PHYs themselves (and rely on driver name
++	 * matching for the xhci platform device).
++ */
++ of_match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev);
++ if (of_match) {
++ hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
++ if (IS_ERR(hcd->usb_phy)) {
++ ret = PTR_ERR(hcd->usb_phy);
++ if (ret == -EPROBE_DEFER)
++ goto disable_clk;
++ hcd->usb_phy = NULL;
++ } else {
++ ret = usb_phy_init(hcd->usb_phy);
++ if (ret)
++ goto disable_clk;
++ }
+ }
+
+ hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
+@@ -285,15 +293,17 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
+ goto dealloc_usb2_hcd;
+ }
+
+- xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev,
+- "usb-phy", 1);
+- if (IS_ERR(xhci->shared_hcd->usb_phy)) {
+- xhci->shared_hcd->usb_phy = NULL;
+- } else {
+- ret = usb_phy_init(xhci->shared_hcd->usb_phy);
+- if (ret)
+- dev_err(sysdev, "%s init usb3phy fail (ret=%d)\n",
+- __func__, ret);
++ if (of_match) {
++ xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev,
++ "usb-phy", 1);
++ if (IS_ERR(xhci->shared_hcd->usb_phy)) {
++ xhci->shared_hcd->usb_phy = NULL;
++ } else {
++ ret = usb_phy_init(xhci->shared_hcd->usb_phy);
++ if (ret)
++ dev_err(sysdev, "%s init usb3phy fail (ret=%d)\n",
++ __func__, ret);
++ }
+ }
+
+ xhci->shared_hcd->tpl_support = hcd->tpl_support;
+@@ -458,23 +468,38 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
+ int ret;
+
+ if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
+- clk_prepare_enable(xhci->clk);
+- clk_prepare_enable(xhci->reg_clk);
++ ret = clk_prepare_enable(xhci->clk);
++ if (ret)
++ return ret;
++
++ ret = clk_prepare_enable(xhci->reg_clk);
++ if (ret) {
++ clk_disable_unprepare(xhci->clk);
++ return ret;
++ }
+ }
+
+ ret = xhci_priv_resume_quirk(hcd);
+ if (ret)
+- return ret;
++ goto disable_clks;
+
+ ret = xhci_resume(xhci, PMSG_RESUME);
+ if (ret)
+- return ret;
++ goto disable_clks;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
++
++disable_clks:
++ if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
++ clk_disable_unprepare(xhci->clk);
++ clk_disable_unprepare(xhci->reg_clk);
++ }
++
++ return ret;
+ }
+
+ static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index e1b1b64a07232..132b76fa7ca60 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -968,6 +968,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+ int retval = 0;
+ bool comp_timer_running = false;
+ bool pending_portevent = false;
++ bool suspended_usb3_devs = false;
+ bool reinit_xhc = false;
+
+ if (!hcd->state)
+@@ -1115,10 +1116,17 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+ /*
+ * Resume roothubs only if there are pending events.
+ * USB 3 devices resend U3 LFPS wake after a 100ms delay if
+- * the first wake signalling failed, give it that chance.
++	 * the first wake signalling failed; give it that chance if
++ * there are suspended USB 3 devices.
+ */
++ if (xhci->usb3_rhub.bus_state.suspended_ports ||
++ xhci->usb3_rhub.bus_state.bus_suspended)
++ suspended_usb3_devs = true;
++
+ pending_portevent = xhci_pending_portevent(xhci);
+- if (!pending_portevent && msg.event == PM_EVENT_AUTO_RESUME) {
++
++ if (suspended_usb3_devs && !pending_portevent &&
++ msg.event == PM_EVENT_AUTO_RESUME) {
+ msleep(120);
+ pending_portevent = xhci_pending_portevent(xhci);
+ }
+diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
+index 57bbe13090948..d72130eda57d6 100644
+--- a/drivers/usb/misc/onboard_usb_hub.c
++++ b/drivers/usb/misc/onboard_usb_hub.c
+@@ -437,6 +437,8 @@ static const struct usb_device_id onboard_hub_id_table[] = {
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2412) }, /* USB2412 USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
++ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2744) }, /* USB5744 USB 2.0 */
++ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x5744) }, /* USB5744 USB 3.0 */
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
+diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
+index 2a4ab5ac0ebed..8af34e6d1afff 100644
+--- a/drivers/usb/misc/onboard_usb_hub.h
++++ b/drivers/usb/misc/onboard_usb_hub.h
+@@ -16,6 +16,11 @@ static const struct onboard_hub_pdata microchip_usb424_data = {
+ .num_supplies = 1,
+ };
+
++static const struct onboard_hub_pdata microchip_usb5744_data = {
++ .reset_us = 0,
++ .num_supplies = 2,
++};
++
+ static const struct onboard_hub_pdata realtek_rts5411_data = {
+ .reset_us = 0,
+ .num_supplies = 1,
+@@ -50,6 +55,8 @@ static const struct of_device_id onboard_hub_match[] = {
+ { .compatible = "usb424,2412", .data = &microchip_usb424_data, },
+ { .compatible = "usb424,2514", .data = &microchip_usb424_data, },
+ { .compatible = "usb424,2517", .data = &microchip_usb424_data, },
++ { .compatible = "usb424,2744", .data = &microchip_usb5744_data, },
++ { .compatible = "usb424,5744", .data = &microchip_usb5744_data, },
+ { .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
+ { .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
+ { .compatible = "usb4b4,6504", .data = &cypress_hx3_data, },
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 45dcfaadaf98e..4dffcfefd62da 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -203,8 +203,8 @@ static void option_instat_callback(struct urb *urb);
+ #define DELL_PRODUCT_5829E_ESIM 0x81e4
+ #define DELL_PRODUCT_5829E 0x81e6
+
+-#define DELL_PRODUCT_FM101R 0x8213
+-#define DELL_PRODUCT_FM101R_ESIM 0x8215
++#define DELL_PRODUCT_FM101R_ESIM 0x8213
++#define DELL_PRODUCT_FM101R 0x8215
+
+ #define KYOCERA_VENDOR_ID 0x0c88
+ #define KYOCERA_PRODUCT_KPC650 0x17da
+@@ -609,6 +609,8 @@ static void option_instat_callback(struct urb *urb);
+ #define UNISOC_VENDOR_ID 0x1782
+ /* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
+ #define TOZED_PRODUCT_LT70C 0x4055
++/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
++#define LUAT_PRODUCT_AIR720U 0x4e00
+
+ /* Device flags */
+
+@@ -1546,7 +1548,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(4) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff),
++ .driver_info = RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
+@@ -2249,6 +2252,7 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0001, 0xff, 0xff, 0xff) }, /* Fibocom L716-EU (ECM/RNDIS mode) */
+ { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
+@@ -2271,6 +2275,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
++ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
+index 0547daf116a26..5df40759d77ad 100644
+--- a/drivers/usb/storage/unusual_cypress.h
++++ b/drivers/usb/storage/unusual_cypress.h
+@@ -19,7 +19,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
+ "Cypress ISD-300LP",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+
+-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
++UNUSUAL_DEV( 0x14cd, 0x6116, 0x0150, 0x0160,
+ "Super Top",
+ "USB 2.0 SATA BRIDGE",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index d962f67c95ae6..6d455ca76125e 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1625,6 +1625,9 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
+ if (PD_VDO_VID(p[0]) != USB_SID_PD)
+ break;
+
++ if (IS_ERR_OR_NULL(port->partner))
++ break;
++
+ if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
+ typec_partner_set_svdm_version(port->partner,
+ PD_VDO_SVDM_VER(p[0]));
+@@ -3903,6 +3906,8 @@ static void run_state_machine(struct tcpm_port *port)
+ port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
+ port->state == SRC_UNATTACHED) ||
+ (port->enter_state == SNK_ATTACH_WAIT &&
++ port->state == SNK_UNATTACHED) ||
++ (port->enter_state == SNK_DEBOUNCED &&
+ port->state == SNK_UNATTACHED));
+
+ port->enter_state = port->state;
+@@ -4268,7 +4273,8 @@ static void run_state_machine(struct tcpm_port *port)
+ current_lim = PD_P_SNK_STDBY_MW / 5;
+ tcpm_set_current_limit(port, current_lim, 5000);
+ /* Not sink vbus if operational current is 0mA */
+- tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
++ tcpm_set_charge(port, !port->pd_supported ||
++ pdo_max_current(port->snk_pdo[0]));
+
+ if (!port->pd_supported)
+ tcpm_set_state(port, SNK_READY, 0);
+@@ -5386,6 +5392,15 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
+ if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
+ port->tcpc->set_bist_data(port->tcpc, false);
+
++ switch (port->state) {
++ case ERROR_RECOVERY:
++ case PORT_RESET:
++ case PORT_RESET_WAIT_OFF:
++ return;
++ default:
++ break;
++ }
++
+ if (port->ams != NONE_AMS)
+ port->ams = NONE_AMS;
+ if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
+diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
+index bb1854b3311dc..db6e248f82083 100644
+--- a/drivers/usb/typec/ucsi/ucsi_glink.c
++++ b/drivers/usb/typec/ucsi/ucsi_glink.c
+@@ -8,9 +8,13 @@
+ #include <linux/mutex.h>
+ #include <linux/property.h>
+ #include <linux/soc/qcom/pdr.h>
++#include <linux/usb/typec_mux.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/soc/qcom/pmic_glink.h>
+ #include "ucsi.h"
+
++#define PMIC_GLINK_MAX_PORTS 2
++
+ #define UCSI_BUF_SIZE 48
+
+ #define MSG_TYPE_REQ_RESP 1
+@@ -52,6 +56,9 @@ struct ucsi_notify_ind_msg {
+ struct pmic_glink_ucsi {
+ struct device *dev;
+
++ struct gpio_desc *port_orientation[PMIC_GLINK_MAX_PORTS];
++ struct typec_switch *port_switch[PMIC_GLINK_MAX_PORTS];
++
+ struct pmic_glink_client *client;
+
+ struct ucsi *ucsi;
+@@ -220,8 +227,20 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
+ }
+
+ con_num = UCSI_CCI_CONNECTOR(cci);
+- if (con_num)
++ if (con_num) {
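++		/*
++		 * When an orientation GPIO is available for this port, mirror
++		 * its level into the Type-C orientation switch before
++		 * forwarding the connector-change event.
++		 */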
++ if (con_num < PMIC_GLINK_MAX_PORTS &&
++ ucsi->port_orientation[con_num - 1]) {
++ int orientation = gpiod_get_value(ucsi->port_orientation[con_num - 1]);
++
++ if (orientation >= 0) {
++ typec_switch_set(ucsi->port_switch[con_num - 1],
++ orientation ? TYPEC_ORIENTATION_REVERSE
++ : TYPEC_ORIENTATION_NORMAL);
++ }
++ }
++
+ ucsi_connector_change(ucsi->ucsi, con_num);
++ }
+
+ if (ucsi->sync_pending && cci & UCSI_CCI_BUSY) {
+ ucsi->sync_val = -EBUSY;
+@@ -282,6 +301,7 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
+ {
+ struct pmic_glink_ucsi *ucsi;
+ struct device *dev = &adev->dev;
++ struct fwnode_handle *fwnode;
+ int ret;
+
+ ucsi = devm_kzalloc(dev, sizeof(*ucsi), GFP_KERNEL);
+@@ -309,6 +329,38 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
+
+ ucsi_set_drvdata(ucsi->ucsi, ucsi);
+
++ device_for_each_child_node(dev, fwnode) {
++ struct gpio_desc *desc;
++ u32 port;
++
++ ret = fwnode_property_read_u32(fwnode, "reg", &port);
++ if (ret < 0) {
++ dev_err(dev, "missing reg property of %pOFn\n", fwnode);
++ return ret;
++ }
++
++ if (port >= PMIC_GLINK_MAX_PORTS) {
++ dev_warn(dev, "invalid connector number, ignoring\n");
++ continue;
++ }
++
++ desc = devm_gpiod_get_index_optional(&adev->dev, "orientation", port, GPIOD_IN);
++
++ /* If GPIO isn't found, continue */
++ if (!desc)
++ continue;
++
++ if (IS_ERR(desc))
++ return dev_err_probe(dev, PTR_ERR(desc),
++ "unable to acquire orientation gpio\n");
++ ucsi->port_orientation[port] = desc;
++
++ ucsi->port_switch[port] = fwnode_typec_switch_get(fwnode);
++ if (IS_ERR(ucsi->port_switch[port]))
++ return dev_err_probe(dev, PTR_ERR(ucsi->port_switch[port]),
++ "failed to acquire orientation-switch\n");
++ }
++
+ ucsi->client = devm_pmic_glink_register_client(dev,
+ PMIC_GLINK_OWNER_USBC,
+ pmic_glink_ucsi_callback,
+diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
+index 9c6954aad6c88..ce625b1ce9a51 100644
+--- a/drivers/usb/usbip/stub_dev.c
++++ b/drivers/usb/usbip/stub_dev.c
+@@ -464,8 +464,13 @@ static void stub_disconnect(struct usb_device *udev)
+ /* release port */
+ rc = usb_hub_release_port(udev->parent, udev->portnum,
+ (struct usb_dev_state *) udev);
+- if (rc) {
+- dev_dbg(&udev->dev, "unable to release port\n");
++ /*
++	 * NOTE: If a HUB disconnect triggered the disconnect of the downstream
++	 * device, usb_hub_release_port() will return -ENODEV, so we can safely
++	 * ignore that error here.
++ */
++ if (rc && (rc != -ENODEV)) {
++ dev_dbg(&udev->dev, "unable to release port (%i)\n", rc);
+ return;
+ }
+
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+index b3a3cb1657955..b137f36793439 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+@@ -437,7 +437,7 @@ static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
+ if (blk->shared_backend) {
+ blk->buffer = shared_buffer;
+ } else {
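++		/* Zero the buffer so reads of unwritten sectors don't leak memory. */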
+- blk->buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
++ blk->buffer = kvzalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
+ GFP_KERNEL);
+ if (!blk->buffer) {
+ ret = -ENOMEM;
+@@ -495,7 +495,7 @@ static int __init vdpasim_blk_init(void)
+ goto parent_err;
+
+ if (shared_backend) {
+- shared_buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
++ shared_buffer = kvzalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
+ GFP_KERNEL);
+ if (!shared_buffer) {
+ ret = -ENOMEM;
+diff --git a/drivers/vfio/pci/pds/pci_drv.c b/drivers/vfio/pci/pds/pci_drv.c
+index ab4b5958e4131..caffa1a2cf591 100644
+--- a/drivers/vfio/pci/pds/pci_drv.c
++++ b/drivers/vfio/pci/pds/pci_drv.c
+@@ -55,10 +55,10 @@ static void pds_vfio_recovery(struct pds_vfio_pci_device *pds_vfio)
+ * VFIO_DEVICE_STATE_RUNNING.
+ */
+ if (deferred_reset_needed) {
+- spin_lock(&pds_vfio->reset_lock);
++ mutex_lock(&pds_vfio->reset_mutex);
+ pds_vfio->deferred_reset = true;
+ pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_ERROR;
+- spin_unlock(&pds_vfio->reset_lock);
++ mutex_unlock(&pds_vfio->reset_mutex);
+ }
+ }
+
+diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c
+index 649b18ee394bb..4c351c59d05a9 100644
+--- a/drivers/vfio/pci/pds/vfio_dev.c
++++ b/drivers/vfio/pci/pds/vfio_dev.c
+@@ -29,7 +29,7 @@ struct pds_vfio_pci_device *pds_vfio_pci_drvdata(struct pci_dev *pdev)
+ void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio)
+ {
+ again:
+- spin_lock(&pds_vfio->reset_lock);
++ mutex_lock(&pds_vfio->reset_mutex);
+ if (pds_vfio->deferred_reset) {
+ pds_vfio->deferred_reset = false;
+ if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR) {
+@@ -39,23 +39,23 @@ again:
+ }
+ pds_vfio->state = pds_vfio->deferred_reset_state;
+ pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
+- spin_unlock(&pds_vfio->reset_lock);
++ mutex_unlock(&pds_vfio->reset_mutex);
+ goto again;
+ }
+ mutex_unlock(&pds_vfio->state_mutex);
+- spin_unlock(&pds_vfio->reset_lock);
++ mutex_unlock(&pds_vfio->reset_mutex);
+ }
+
+ void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio)
+ {
+- spin_lock(&pds_vfio->reset_lock);
++ mutex_lock(&pds_vfio->reset_mutex);
+ pds_vfio->deferred_reset = true;
+ pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
+ if (!mutex_trylock(&pds_vfio->state_mutex)) {
+- spin_unlock(&pds_vfio->reset_lock);
++ mutex_unlock(&pds_vfio->reset_mutex);
+ return;
+ }
+- spin_unlock(&pds_vfio->reset_lock);
++ mutex_unlock(&pds_vfio->reset_mutex);
+ pds_vfio_state_mutex_unlock(pds_vfio);
+ }
+
+@@ -155,6 +155,9 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
+
+ pds_vfio->vf_id = vf_id;
+
++ mutex_init(&pds_vfio->state_mutex);
++ mutex_init(&pds_vfio->reset_mutex);
++
+ vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
+ vdev->mig_ops = &pds_vfio_lm_ops;
+ vdev->log_ops = &pds_vfio_log_ops;
+@@ -168,6 +171,17 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
+ return 0;
+ }
+
++static void pds_vfio_release_device(struct vfio_device *vdev)
++{
++ struct pds_vfio_pci_device *pds_vfio =
++ container_of(vdev, struct pds_vfio_pci_device,
++ vfio_coredev.vdev);
++
++ mutex_destroy(&pds_vfio->state_mutex);
++ mutex_destroy(&pds_vfio->reset_mutex);
++ vfio_pci_core_release_dev(vdev);
++}
++
+ static int pds_vfio_open_device(struct vfio_device *vdev)
+ {
+ struct pds_vfio_pci_device *pds_vfio =
+@@ -179,7 +193,6 @@ static int pds_vfio_open_device(struct vfio_device *vdev)
+ if (err)
+ return err;
+
+- mutex_init(&pds_vfio->state_mutex);
+ pds_vfio->state = VFIO_DEVICE_STATE_RUNNING;
+ pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
+
+@@ -199,14 +212,13 @@ static void pds_vfio_close_device(struct vfio_device *vdev)
+ pds_vfio_put_save_file(pds_vfio);
+ pds_vfio_dirty_disable(pds_vfio, true);
+ mutex_unlock(&pds_vfio->state_mutex);
+- mutex_destroy(&pds_vfio->state_mutex);
+ vfio_pci_core_close_device(vdev);
+ }
+
+ static const struct vfio_device_ops pds_vfio_ops = {
+ .name = "pds-vfio",
+ .init = pds_vfio_init_device,
+- .release = vfio_pci_core_release_dev,
++ .release = pds_vfio_release_device,
+ .open_device = pds_vfio_open_device,
+ .close_device = pds_vfio_close_device,
+ .ioctl = vfio_pci_core_ioctl,
+diff --git a/drivers/vfio/pci/pds/vfio_dev.h b/drivers/vfio/pci/pds/vfio_dev.h
+index b8f2d667608f3..e7b01080a1ec3 100644
+--- a/drivers/vfio/pci/pds/vfio_dev.h
++++ b/drivers/vfio/pci/pds/vfio_dev.h
+@@ -18,7 +18,7 @@ struct pds_vfio_pci_device {
+ struct pds_vfio_dirty dirty;
+ struct mutex state_mutex; /* protect migration state */
+ enum vfio_device_mig_state state;
+- spinlock_t reset_lock; /* protect reset_done flow */
++ struct mutex reset_mutex; /* protect reset_done flow */
+ u8 deferred_reset;
+ enum vfio_device_mig_state deferred_reset_state;
+ struct notifier_block nb;
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index 78379ffd23363..fb590e346e43d 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -1511,7 +1511,6 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
+
+ err:
+ put_device(&v->dev);
+- ida_simple_remove(&vhost_vdpa_ida, v->minor);
+ return r;
+ }
+
+diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
+index a51fbab963680..289bd9ce4d36d 100644
+--- a/drivers/video/backlight/pwm_bl.c
++++ b/drivers/video/backlight/pwm_bl.c
+@@ -626,9 +626,14 @@ static void pwm_backlight_remove(struct platform_device *pdev)
+ {
+ struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
++ struct pwm_state state;
+
+ backlight_device_unregister(bl);
+ pwm_backlight_power_off(pb);
++ pwm_get_state(pb->pwm, &state);
++ state.duty_cycle = 0;
++ state.enabled = false;
++ pwm_apply_state(pb->pwm, &state);
+
+ if (pb->exit)
+ pb->exit(&pdev->dev);
+@@ -638,8 +643,13 @@ static void pwm_backlight_shutdown(struct platform_device *pdev)
+ {
+ struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
++ struct pwm_state state;
+
+ pwm_backlight_power_off(pb);
++ pwm_get_state(pb->pwm, &state);
++ state.duty_cycle = 0;
++ state.enabled = false;
++ pwm_apply_state(pb->pwm, &state);
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+@@ -647,12 +657,24 @@ static int pwm_backlight_suspend(struct device *dev)
+ {
+ struct backlight_device *bl = dev_get_drvdata(dev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
++ struct pwm_state state;
+
+ if (pb->notify)
+ pb->notify(pb->dev, 0);
+
+ pwm_backlight_power_off(pb);
+
++ /*
++	 * Note that disabling the PWM doesn't guarantee that the output stays
++	 * in its inactive state. However, without the PWM disabled, the PWM
++	 * driver refuses to suspend. So disable it here even though this might
++ * enable the backlight on poorly designed boards.
++ */
++ pwm_get_state(pb->pwm, &state);
++ state.duty_cycle = 0;
++ state.enabled = false;
++ pwm_apply_state(pb->pwm, &state);
++
+ if (pb->notify_after)
+ pb->notify_after(pb->dev, 0);
+
+diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
+index 7fbd9f069ac2e..0bced82fa4940 100644
+--- a/drivers/video/fbdev/fsl-diu-fb.c
++++ b/drivers/video/fbdev/fsl-diu-fb.c
+@@ -490,7 +490,7 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
+ * Workaround for failed writing desc register of planes.
+ * Needed with MPC5121 DIU rev 2.0 silicon.
+ */
+-void wr_reg_wa(u32 *reg, u32 val)
++static void wr_reg_wa(u32 *reg, u32 val)
+ {
+ do {
+ out_be32(reg, val);
+diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
+index f4c8677488fb8..f5eaa58a808fb 100644
+--- a/drivers/video/fbdev/imsttfb.c
++++ b/drivers/video/fbdev/imsttfb.c
+@@ -1419,7 +1419,6 @@ static int init_imstt(struct fb_info *info)
+ if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len
+ || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
+ printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
+- framebuffer_release(info);
+ return -ENODEV;
+ }
+
+@@ -1451,14 +1450,11 @@ static int init_imstt(struct fb_info *info)
+ FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_YPAN;
+
+- if (fb_alloc_cmap(&info->cmap, 0, 0)) {
+- framebuffer_release(info);
++ if (fb_alloc_cmap(&info->cmap, 0, 0))
+ return -ENODEV;
+- }
+
+ if (register_framebuffer(info) < 0) {
+ fb_dealloc_cmap(&info->cmap);
+- framebuffer_release(info);
+ return -ENODEV;
+ }
+
+@@ -1498,8 +1494,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ if (!request_mem_region(addr, size, "imsttfb")) {
+ printk(KERN_ERR "imsttfb: Can't reserve memory region\n");
+- framebuffer_release(info);
+- return -ENODEV;
++ ret = -ENODEV;
++ goto release_info;
+ }
+
+ switch (pdev->device) {
+@@ -1516,36 +1512,39 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ printk(KERN_INFO "imsttfb: Device 0x%x unknown, "
+ "contact maintainer.\n", pdev->device);
+ ret = -ENODEV;
+- goto error;
++ goto release_mem_region;
+ }
+
+ info->fix.smem_start = addr;
+ info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
+ 0x400000 : 0x800000);
+ if (!info->screen_base)
+- goto error;
++ goto release_mem_region;
+ info->fix.mmio_start = addr + 0x800000;
+ par->dc_regs = ioremap(addr + 0x800000, 0x1000);
+ if (!par->dc_regs)
+- goto error;
++ goto unmap_screen_base;
+ par->cmap_regs_phys = addr + 0x840000;
+ par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
+ if (!par->cmap_regs)
+- goto error;
++ goto unmap_dc_regs;
+ info->pseudo_palette = par->palette;
+ ret = init_imstt(info);
+ if (ret)
+- goto error;
++ goto unmap_cmap_regs;
+
+ pci_set_drvdata(pdev, info);
+- return ret;
++ return 0;
+
+-error:
+- if (par->dc_regs)
+- iounmap(par->dc_regs);
+- if (info->screen_base)
+- iounmap(info->screen_base);
++unmap_cmap_regs:
++ iounmap(par->cmap_regs);
++unmap_dc_regs:
++ iounmap(par->dc_regs);
++unmap_screen_base:
++ iounmap(info->screen_base);
++release_mem_region:
+ release_mem_region(addr, size);
++release_info:
+ framebuffer_release(info);
+ return ret;
+ }
+diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
+index 97dbe715e96ad..5bee58ef5f1e3 100644
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -57,6 +57,11 @@ struct snp_guest_dev {
+
+ struct snp_secrets_page_layout *layout;
+ struct snp_req_data input;
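++	/*
++	 * Request structures live in the device context rather than on the
++	 * stack; requests are serialized by snp_cmd_mutex, so a single union
++	 * buffer suffices.
++	 */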
++ union {
++ struct snp_report_req report;
++ struct snp_derived_key_req derived_key;
++ struct snp_ext_report_req ext_report;
++ } req;
+ u32 *os_area_msg_seqno;
+ u8 *vmpck;
+ };
+@@ -473,8 +478,8 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
+ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
+ struct snp_guest_crypto *crypto = snp_dev->crypto;
++ struct snp_report_req *req = &snp_dev->req.report;
+ struct snp_report_resp *resp;
+- struct snp_report_req req;
+ int rc, resp_len;
+
+ lockdep_assert_held(&snp_cmd_mutex);
+@@ -482,7 +487,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
+ if (!arg->req_data || !arg->resp_data)
+ return -EINVAL;
+
+- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++ if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ return -EFAULT;
+
+ /*
+@@ -496,7 +501,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
+ return -ENOMEM;
+
+ rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
+- SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data,
++ SNP_MSG_REPORT_REQ, req, sizeof(*req), resp->data,
+ resp_len);
+ if (rc)
+ goto e_free;
+@@ -511,9 +516,9 @@ e_free:
+
+ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
++ struct snp_derived_key_req *req = &snp_dev->req.derived_key;
+ struct snp_guest_crypto *crypto = snp_dev->crypto;
+ struct snp_derived_key_resp resp = {0};
+- struct snp_derived_key_req req;
+ int rc, resp_len;
+ /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
+ u8 buf[64 + 16];
+@@ -532,11 +537,11 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
+ if (sizeof(buf) < resp_len)
+ return -ENOMEM;
+
+- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++ if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ return -EFAULT;
+
+ rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
+- SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len);
++ SNP_MSG_KEY_REQ, req, sizeof(*req), buf, resp_len);
+ if (rc)
+ return rc;
+
+@@ -552,8 +557,8 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
+
+ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
++ struct snp_ext_report_req *req = &snp_dev->req.ext_report;
+ struct snp_guest_crypto *crypto = snp_dev->crypto;
+- struct snp_ext_report_req req;
+ struct snp_report_resp *resp;
+ int ret, npages = 0, resp_len;
+
+@@ -562,18 +567,18 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
+ if (!arg->req_data || !arg->resp_data)
+ return -EINVAL;
+
+- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++ if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ return -EFAULT;
+
+ /* userspace does not want certificate data */
+- if (!req.certs_len || !req.certs_address)
++ if (!req->certs_len || !req->certs_address)
+ goto cmd;
+
+- if (req.certs_len > SEV_FW_BLOB_MAX_SIZE ||
+- !IS_ALIGNED(req.certs_len, PAGE_SIZE))
++ if (req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
++ !IS_ALIGNED(req->certs_len, PAGE_SIZE))
+ return -EINVAL;
+
+- if (!access_ok((const void __user *)req.certs_address, req.certs_len))
++ if (!access_ok((const void __user *)req->certs_address, req->certs_len))
+ return -EFAULT;
+
+ /*
+@@ -582,8 +587,8 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
+ * the host. If host does not supply any certs in it, then copy
+ * zeros to indicate that certificate data was not provided.
+ */
+- memset(snp_dev->certs_data, 0, req.certs_len);
+- npages = req.certs_len >> PAGE_SHIFT;
++ memset(snp_dev->certs_data, 0, req->certs_len);
++ npages = req->certs_len >> PAGE_SHIFT;
+ cmd:
+ /*
+ * The intermediate response buffer is used while decrypting the
+@@ -597,14 +602,14 @@ cmd:
+
+ snp_dev->input.data_npages = npages;
+ ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg,
+- SNP_MSG_REPORT_REQ, &req.data,
+- sizeof(req.data), resp->data, resp_len);
++ SNP_MSG_REPORT_REQ, &req->data,
++ sizeof(req->data), resp->data, resp_len);
+
+ /* If certs length is invalid then copy the returned length */
+ if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
+- req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
++ req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
+
+- if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req)))
++ if (copy_to_user((void __user *)arg->req_data, req, sizeof(*req)))
+ ret = -EFAULT;
+ }
+
+@@ -612,8 +617,8 @@ cmd:
+ goto e_free;
+
+ if (npages &&
+- copy_to_user((void __user *)req.certs_address, snp_dev->certs_data,
+- req.certs_len)) {
++ copy_to_user((void __user *)req->certs_address, snp_dev->certs_data,
++ req->certs_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
+index 607ce4b8df574..ec0c08652ec2f 100644
+--- a/drivers/watchdog/ixp4xx_wdt.c
++++ b/drivers/watchdog/ixp4xx_wdt.c
+@@ -105,6 +105,25 @@ static const struct watchdog_ops ixp4xx_wdt_ops = {
+ .owner = THIS_MODULE,
+ };
+
++/*
++ * The A0 version of the IXP422 had a bug in the watchdog making
++ * it useless, but we still need to use it to restart the system
++ * as it is the only way, so in this special case we register a
++ * "dummy" watchdog that doesn't really work, but will support
++ * the restart operation.
++ */
++static int ixp4xx_wdt_dummy(struct watchdog_device *wdd)
++{
++ return 0;
++}
++
++static const struct watchdog_ops ixp4xx_wdt_restart_only_ops = {
++ .start = ixp4xx_wdt_dummy,
++ .stop = ixp4xx_wdt_dummy,
++ .restart = ixp4xx_wdt_restart,
++ .owner = THIS_MODULE,
++};
++
+ static const struct watchdog_info ixp4xx_wdt_info = {
+ .options = WDIOF_KEEPALIVEPING
+ | WDIOF_MAGICCLOSE
+@@ -114,14 +133,17 @@ static const struct watchdog_info ixp4xx_wdt_info = {
+
+ static int ixp4xx_wdt_probe(struct platform_device *pdev)
+ {
++ static const struct watchdog_ops *iwdt_ops;
+ struct device *dev = &pdev->dev;
+ struct ixp4xx_wdt *iwdt;
+ struct clk *clk;
+ int ret;
+
+ if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
+- dev_err(dev, "Rev. A0 IXP42x CPU detected - watchdog disabled\n");
+- return -ENODEV;
++ dev_info(dev, "Rev. A0 IXP42x CPU detected - only restart supported\n");
++ iwdt_ops = &ixp4xx_wdt_restart_only_ops;
++ } else {
++ iwdt_ops = &ixp4xx_wdt_ops;
+ }
+
+ iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
+@@ -141,7 +163,7 @@ static int ixp4xx_wdt_probe(struct platform_device *pdev)
+ iwdt->rate = IXP4XX_TIMER_FREQ;
+
+ iwdt->wdd.info = &ixp4xx_wdt_info;
+- iwdt->wdd.ops = &ixp4xx_wdt_ops;
++ iwdt->wdd.ops = iwdt_ops;
+ iwdt->wdd.min_timeout = 1;
+ iwdt->wdd.max_timeout = U32_MAX / iwdt->rate;
+ iwdt->wdd.parent = dev;
+diff --git a/drivers/watchdog/marvell_gti_wdt.c b/drivers/watchdog/marvell_gti_wdt.c
+index d7eb8286e11ec..1ec1e014ba831 100644
+--- a/drivers/watchdog/marvell_gti_wdt.c
++++ b/drivers/watchdog/marvell_gti_wdt.c
+@@ -271,7 +271,7 @@ static int gti_wdt_probe(struct platform_device *pdev)
+ &wdt_idx);
+ if (!err) {
+ if (wdt_idx >= priv->data->gti_num_timers)
+- return dev_err_probe(&pdev->dev, err,
++ return dev_err_probe(&pdev->dev, -EINVAL,
+ "GTI wdog timer index not valid");
+
+ priv->wdt_timer_idx = wdt_idx;
+diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
+index 421ebcda62e64..5f23913ce3b49 100644
+--- a/drivers/watchdog/sbsa_gwdt.c
++++ b/drivers/watchdog/sbsa_gwdt.c
+@@ -152,14 +152,14 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
+ timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000);
+
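++	/* Widen to u64 before multiplying: clk * timeout can overflow 32 bits. */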
+ if (action)
+- sbsa_gwdt_reg_write(gwdt->clk * timeout, gwdt);
++ sbsa_gwdt_reg_write((u64)gwdt->clk * timeout, gwdt);
+ else
+ /*
+ * In the single stage mode, The first signal (WS0) is ignored,
+ * the timeout is (WOR * 2), so the WOR should be configured
+ * to half value of timeout.
+ */
+- sbsa_gwdt_reg_write(gwdt->clk / 2 * timeout, gwdt);
++ sbsa_gwdt_reg_write(((u64)gwdt->clk / 2) * timeout, gwdt);
+
+ return 0;
+ }
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 1b2136fe0fa51..c50419638ac0a 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -164,6 +164,8 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
+
+ /* IRQ <-> IPI mapping */
+ static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
++/* Cache for IPI event channels - needed for hot cpu unplug (avoid RCU usage). */
++static DEFINE_PER_CPU(evtchn_port_t [XEN_NR_IPIS], ipi_to_evtchn) = {[0 ... XEN_NR_IPIS-1] = 0};
+
+ /* Event channel distribution data */
+ static atomic_t channels_on_cpu[NR_CPUS];
+@@ -366,6 +368,7 @@ static int xen_irq_info_ipi_setup(unsigned cpu,
+ info->u.ipi = ipi;
+
+ per_cpu(ipi_to_irq, cpu)[ipi] = irq;
++ per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+
+ return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
+ }
+@@ -601,7 +604,9 @@ static void lateeoi_list_add(struct irq_info *info)
+
+ spin_lock_irqsave(&eoi->eoi_list_lock, flags);
+
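++	/*
++	 * Add to the front and re-arm the delayed work only if this entry
++	 * expires before the current head (or the list is empty).
++	 */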
+- if (list_empty(&eoi->eoi_list)) {
++ elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
++ eoi_list);
++ if (!elem || info->eoi_time < elem->eoi_time) {
+ list_add(&info->eoi_list, &eoi->eoi_list);
+ mod_delayed_work_on(info->eoi_cpu, system_wq,
+ &eoi->delayed, delay);
+@@ -981,6 +986,7 @@ static void __unbind_from_irq(unsigned int irq)
+ break;
+ case IRQT_IPI:
+ per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
++ per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(irq)] = 0;
+ break;
+ case IRQT_EVTCHN:
+ dev = info->u.interdomain;
+@@ -1631,7 +1637,7 @@ EXPORT_SYMBOL_GPL(evtchn_put);
+
+ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
+ {
+- int irq;
++ evtchn_port_t evtchn;
+
+ #ifdef CONFIG_X86
+ if (unlikely(vector == XEN_NMI_VECTOR)) {
+@@ -1642,9 +1648,9 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
+ return;
+ }
+ #endif
+- irq = per_cpu(ipi_to_irq, cpu)[vector];
+- BUG_ON(irq < 0);
+- notify_remote_via_irq(irq);
++ evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
++ BUG_ON(evtchn == 0);
++ notify_remote_via_evtchn(evtchn);
+ }
+
+ struct evtchn_loop_ctrl {
+diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
+index b3e3d1bb37f3e..5086552731453 100644
+--- a/drivers/xen/pcpu.c
++++ b/drivers/xen/pcpu.c
+@@ -47,6 +47,9 @@
+ #include <asm/xen/hypervisor.h>
+ #include <asm/xen/hypercall.h>
+
++#ifdef CONFIG_ACPI
++#include <acpi/processor.h>
++#endif
+
+ /*
+ * @cpu_id: Xen physical cpu logic number
+@@ -400,4 +403,23 @@ bool __init xen_processor_present(uint32_t acpi_id)
+
+ return online;
+ }
++
++void xen_sanitize_proc_cap_bits(uint32_t *cap)
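++/*
++ * Ask the hypervisor which ACPI processor capability (_PDC) bits it
++ * supports and filter the caller's mask accordingly.
++ */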
++{
++ struct xen_platform_op op = {
++ .cmd = XENPF_set_processor_pminfo,
++ .u.set_pminfo.id = -1,
++ .u.set_pminfo.type = XEN_PM_PDC,
++ };
++ u32 buf[3] = { ACPI_PDC_REVISION_ID, 1, *cap };
++ int ret;
++
++ set_xen_guest_handle(op.u.set_pminfo.pdc, buf);
++ ret = HYPERVISOR_platform_op(&op);
++ if (ret)
++ pr_err("sanitize of _PDC buffer bits from Xen failed: %d\n",
++ ret);
++ else
++ *cap = buf[2];
++}
+ #endif
+diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
+index f00ad5f5f1d4a..da88173bac432 100644
+--- a/drivers/xen/privcmd.c
++++ b/drivers/xen/privcmd.c
+@@ -935,7 +935,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
+ return -ENOMEM;
+ dm_op = kirqfd + 1;
+
+- if (copy_from_user(dm_op, irqfd->dm_op, irqfd->size)) {
++ if (copy_from_user(dm_op, u64_to_user_ptr(irqfd->dm_op), irqfd->size)) {
+ ret = -EFAULT;
+ goto error_kfree;
+ }
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 946bd56f0ac53..0e6c6c25d154f 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -405,4 +405,5 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
+ .get_sgtable = dma_common_get_sgtable,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
++ .max_mapping_size = swiotlb_max_mapping_size,
+ };
+diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
+index 059de92aea7d0..d47eee6c51435 100644
+--- a/drivers/xen/xen-pciback/conf_space.c
++++ b/drivers/xen/xen-pciback/conf_space.c
+@@ -288,12 +288,6 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
+ u16 val;
+ int ret = 0;
+
+- err = pci_read_config_word(dev, PCI_COMMAND, &val);
+- if (err)
+- return err;
+- if (!(val & PCI_COMMAND_INTX_DISABLE))
+- ret |= INTERRUPT_TYPE_INTX;
+-
+ /*
+ * Do not trust dev->msi(x)_enabled here, as enabling could be done
+ * bypassing the pci_*msi* functions, by the qemu.
+@@ -316,6 +310,19 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
+ if (val & PCI_MSIX_FLAGS_ENABLE)
+ ret |= INTERRUPT_TYPE_MSIX;
+ }
++
++ /*
++	 * The PCIe spec says a device cannot use INTx if MSI/MSI-X is enabled,
++ * so check for INTx only when both are disabled.
++ */
++ if (!ret) {
++ err = pci_read_config_word(dev, PCI_COMMAND, &val);
++ if (err)
++ return err;
++ if (!(val & PCI_COMMAND_INTX_DISABLE))
++ ret |= INTERRUPT_TYPE_INTX;
++ }
++
+ return ret ?: INTERRUPT_TYPE_NONE;
+ }
+
+diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
+index 097316a741268..1948a9700c8fa 100644
+--- a/drivers/xen/xen-pciback/conf_space_capability.c
++++ b/drivers/xen/xen-pciback/conf_space_capability.c
+@@ -236,10 +236,16 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value,
+ return PCIBIOS_SET_FAILED;
+
+ if (new_value & field_config->enable_bit) {
+- /* don't allow enabling together with other interrupt types */
++ /*
++	 * Don't allow enabling together with another interrupt type, but do
++	 * allow enabling MSI(-X) while INTx is still active to please Linux's
++	 * MSI(-X) startup sequence. It is safe to do so, as according to the
++	 * PCI spec, a device with MSI(-X) enabled shouldn't use INTx.
++ */
+ int int_type = xen_pcibk_get_interrupt_type(dev);
+
+ if (int_type == INTERRUPT_TYPE_NONE ||
++ int_type == INTERRUPT_TYPE_INTX ||
+ int_type == field_config->int_type)
+ goto write;
+ return PCIBIOS_SET_FAILED;
+diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
+index 981435103af1a..fc03326459664 100644
+--- a/drivers/xen/xen-pciback/conf_space_header.c
++++ b/drivers/xen/xen-pciback/conf_space_header.c
+@@ -104,24 +104,9 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+ pci_clear_mwi(dev);
+ }
+
+- if (dev_data && dev_data->allow_interrupt_control) {
+- if ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE) {
+- if (value & PCI_COMMAND_INTX_DISABLE) {
+- pci_intx(dev, 0);
+- } else {
+- /* Do not allow enabling INTx together with MSI or MSI-X. */
+- switch (xen_pcibk_get_interrupt_type(dev)) {
+- case INTERRUPT_TYPE_NONE:
+- pci_intx(dev, 1);
+- break;
+- case INTERRUPT_TYPE_INTX:
+- break;
+- default:
+- return PCIBIOS_SET_FAILED;
+- }
+- }
+- }
+- }
++ if (dev_data && dev_data->allow_interrupt_control &&
++ ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE))
++ pci_intx(dev, !(value & PCI_COMMAND_INTX_DISABLE));
+
+ cmd->val = value;
+
+diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+index 639bf628389ba..3205e5d724c8c 100644
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -1025,7 +1025,7 @@ static int __init xenbus_init(void)
+ if (err < 0) {
+ pr_err("xenstore_late_init couldn't bind irq err=%d\n",
+ err);
+- return err;
++ goto out_error;
+ }
+
+ xs_init_irq = err;
+diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
+index e00cf8109b3f3..3c4572ef3a488 100644
+--- a/fs/9p/xattr.c
++++ b/fs/9p/xattr.c
+@@ -68,7 +68,7 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
+ struct p9_fid *fid;
+ int ret;
+
+- p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n",
++ p9_debug(P9_DEBUG_VFS, "name = '%s' value_len = %zu\n",
+ name, buffer_size);
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid))
+@@ -139,7 +139,8 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
+
+ ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+ {
+- return v9fs_xattr_get(dentry, NULL, buffer, buffer_size);
++ /* Txattrwalk with an empty string lists xattrs instead */
++ return v9fs_xattr_get(dentry, "", buffer, buffer_size);
+ }
+
+ static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
+diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
+index 95bcbd7654d1b..8081d68004d05 100644
+--- a/fs/afs/dynroot.c
++++ b/fs/afs/dynroot.c
+@@ -132,8 +132,8 @@ static int afs_probe_cell_name(struct dentry *dentry)
+
+ ret = dns_query(net->net, "afsdb", name, len, "srv=1",
+ NULL, NULL, false);
+- if (ret == -ENODATA)
+- ret = -EDESTADDRREQ;
++ if (ret == -ENODATA || ret == -ENOKEY)
++ ret = -ENOENT;
+ return ret;
+ }
+
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index da73b97e19a9a..5041eae64423a 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -553,6 +553,7 @@ struct afs_server_entry {
+ };
+
+ struct afs_server_list {
++ struct rcu_head rcu;
+ afs_volid_t vids[AFS_MAXTYPES]; /* Volume IDs */
+ refcount_t usage;
+ unsigned char nr_servers;
+diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
+index ed9056703505f..b59896b1de0af 100644
+--- a/fs/afs/server_list.c
++++ b/fs/afs/server_list.c
+@@ -17,7 +17,7 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
+ for (i = 0; i < slist->nr_servers; i++)
+ afs_unuse_server(net, slist->servers[i].server,
+ afs_server_trace_put_slist);
+- kfree(slist);
++ kfree_rcu(slist, rcu);
+ }
+ }
+
+diff --git a/fs/afs/super.c b/fs/afs/super.c
+index 95d713074dc81..e95fb4cb4fcd2 100644
+--- a/fs/afs/super.c
++++ b/fs/afs/super.c
+@@ -407,6 +407,8 @@ static int afs_validate_fc(struct fs_context *fc)
+ return PTR_ERR(volume);
+
+ ctx->volume = volume;
++ if (volume->type != AFSVL_RWVOL)
++ ctx->flock_mode = afs_flock_mode_local;
+ }
+
+ return 0;
+diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
+index 488e58490b16e..eb415ce563600 100644
+--- a/fs/afs/vl_rotate.c
++++ b/fs/afs/vl_rotate.c
+@@ -58,6 +58,12 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
+ }
+
+ /* Status load is ordered after lookup counter load */
++ if (cell->dns_status == DNS_LOOKUP_GOT_NOT_FOUND) {
++ pr_warn("No record of cell %s\n", cell->name);
++ vc->error = -ENOENT;
++ return false;
++ }
++
+ if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
+ vc->error = -EDESTADDRREQ;
+ return false;
+@@ -285,6 +291,7 @@ failed:
+ */
+ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
+ {
++ struct afs_cell *cell = vc->cell;
+ static int count;
+ int i;
+
+@@ -294,6 +301,9 @@ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
+
+ rcu_read_lock();
+ pr_notice("EDESTADDR occurred\n");
++ pr_notice("CELL: %s err=%d\n", cell->name, cell->error);
++ pr_notice("DNS: src=%u st=%u lc=%x\n",
++ cell->dns_source, cell->dns_status, cell->dns_lookup_count);
+ pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n",
+ vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error);
+
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index b2e5107b7cecc..5a97db9888107 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -2601,7 +2601,7 @@ static int insert_dev_extent(struct btrfs_trans_handle *trans,
+ btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
+
+ btrfs_set_dev_extent_length(leaf, extent, num_bytes);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ return ret;
+@@ -3025,7 +3025,7 @@ static int update_block_group_item(struct btrfs_trans_handle *trans,
+ cache->global_root_id);
+ btrfs_set_stack_block_group_flags(&bgi, cache->flags);
+ write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ fail:
+ btrfs_release_path(path);
+ /*
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 617d4827eec26..118ad4d2cbbe2 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -359,7 +359,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
+ return ret;
+ }
+
+- btrfs_mark_buffer_dirty(cow);
++ btrfs_mark_buffer_dirty(trans, cow);
+ *cow_ret = cow;
+ return 0;
+ }
+@@ -627,7 +627,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ cow->start);
+ btrfs_set_node_ptr_generation(parent, parent_slot,
+ trans->transid);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ if (last_ref) {
+ ret = btrfs_tree_mod_log_free_eb(buf);
+ if (ret) {
+@@ -643,7 +643,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ if (unlock_orig)
+ btrfs_tree_unlock(buf);
+ free_extent_buffer_stale(buf);
+- btrfs_mark_buffer_dirty(cow);
++ btrfs_mark_buffer_dirty(trans, cow);
+ *cow_ret = cow;
+ return 0;
+ }
+@@ -1197,7 +1197,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
+ goto out;
+ }
+ btrfs_set_node_key(parent, &right_key, pslot + 1);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ }
+ }
+ if (btrfs_header_nritems(mid) == 1) {
+@@ -1255,7 +1255,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
+ goto out;
+ }
+ btrfs_set_node_key(parent, &mid_key, pslot);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ }
+
+ /* update the path */
+@@ -1362,7 +1362,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
+ return ret;
+ }
+ btrfs_set_node_key(parent, &disk_key, pslot);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ if (btrfs_header_nritems(left) > orig_slot) {
+ path->nodes[level] = left;
+ path->slots[level + 1] -= 1;
+@@ -1422,7 +1422,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
+ return ret;
+ }
+ btrfs_set_node_key(parent, &disk_key, pslot + 1);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+
+ if (btrfs_header_nritems(mid) <= orig_slot) {
+ path->nodes[level] = right;
+@@ -2678,7 +2678,8 @@ int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
+ * higher levels
+ *
+ */
+-static void fixup_low_keys(struct btrfs_path *path,
++static void fixup_low_keys(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path,
+ struct btrfs_disk_key *key, int level)
+ {
+ int i;
+@@ -2695,7 +2696,7 @@ static void fixup_low_keys(struct btrfs_path *path,
+ BTRFS_MOD_LOG_KEY_REPLACE);
+ BUG_ON(ret < 0);
+ btrfs_set_node_key(t, key, tslot);
+- btrfs_mark_buffer_dirty(path->nodes[i]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[i]);
+ if (tslot != 0)
+ break;
+ }
+@@ -2707,10 +2708,11 @@ static void fixup_low_keys(struct btrfs_path *path,
+ * This function isn't completely safe. It's the caller's responsibility
+ * that the new key won't break the order
+ */
+-void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
++void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ const struct btrfs_key *new_key)
+ {
++ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_disk_key disk_key;
+ struct extent_buffer *eb;
+ int slot;
+@@ -2748,9 +2750,9 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
+
+ btrfs_cpu_key_to_disk(&disk_key, new_key);
+ btrfs_set_item_key(eb, &disk_key, slot);
+- btrfs_mark_buffer_dirty(eb);
++ btrfs_mark_buffer_dirty(trans, eb);
+ if (slot == 0)
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+
+ /*
+@@ -2881,8 +2883,8 @@ static int push_node_left(struct btrfs_trans_handle *trans,
+ }
+ btrfs_set_header_nritems(src, src_nritems - push_items);
+ btrfs_set_header_nritems(dst, dst_nritems + push_items);
+- btrfs_mark_buffer_dirty(src);
+- btrfs_mark_buffer_dirty(dst);
++ btrfs_mark_buffer_dirty(trans, src);
++ btrfs_mark_buffer_dirty(trans, dst);
+
+ return ret;
+ }
+@@ -2957,8 +2959,8 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
+ btrfs_set_header_nritems(src, src_nritems - push_items);
+ btrfs_set_header_nritems(dst, dst_nritems + push_items);
+
+- btrfs_mark_buffer_dirty(src);
+- btrfs_mark_buffer_dirty(dst);
++ btrfs_mark_buffer_dirty(trans, src);
++ btrfs_mark_buffer_dirty(trans, dst);
+
+ return ret;
+ }
+@@ -3007,7 +3009,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
+
+ btrfs_set_node_ptr_generation(c, 0, lower_gen);
+
+- btrfs_mark_buffer_dirty(c);
++ btrfs_mark_buffer_dirty(trans, c);
+
+ old = root->node;
+ ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
+@@ -3079,7 +3081,7 @@ static int insert_ptr(struct btrfs_trans_handle *trans,
+ WARN_ON(trans->transid == 0);
+ btrfs_set_node_ptr_generation(lower, slot, trans->transid);
+ btrfs_set_header_nritems(lower, nritems + 1);
+- btrfs_mark_buffer_dirty(lower);
++ btrfs_mark_buffer_dirty(trans, lower);
+
+ return 0;
+ }
+@@ -3158,8 +3160,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
+ btrfs_set_header_nritems(split, c_nritems - mid);
+ btrfs_set_header_nritems(c, mid);
+
+- btrfs_mark_buffer_dirty(c);
+- btrfs_mark_buffer_dirty(split);
++ btrfs_mark_buffer_dirty(trans, c);
++ btrfs_mark_buffer_dirty(trans, split);
+
+ ret = insert_ptr(trans, path, &disk_key, split->start,
+ path->slots[level + 1] + 1, level + 1);
+@@ -3325,15 +3327,15 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
+ btrfs_set_header_nritems(left, left_nritems);
+
+ if (left_nritems)
+- btrfs_mark_buffer_dirty(left);
++ btrfs_mark_buffer_dirty(trans, left);
+ else
+ btrfs_clear_buffer_dirty(trans, left);
+
+- btrfs_mark_buffer_dirty(right);
++ btrfs_mark_buffer_dirty(trans, right);
+
+ btrfs_item_key(right, &disk_key, 0);
+ btrfs_set_node_key(upper, &disk_key, slot + 1);
+- btrfs_mark_buffer_dirty(upper);
++ btrfs_mark_buffer_dirty(trans, upper);
+
+ /* then fixup the leaf pointer in the path */
+ if (path->slots[0] >= left_nritems) {
+@@ -3545,14 +3547,14 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
+ btrfs_set_token_item_offset(&token, i, push_space);
+ }
+
+- btrfs_mark_buffer_dirty(left);
++ btrfs_mark_buffer_dirty(trans, left);
+ if (right_nritems)
+- btrfs_mark_buffer_dirty(right);
++ btrfs_mark_buffer_dirty(trans, right);
+ else
+ btrfs_clear_buffer_dirty(trans, right);
+
+ btrfs_item_key(right, &disk_key, 0);
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+
+ /* then fixup the leaf pointer in the path */
+ if (path->slots[0] < push_items) {
+@@ -3683,8 +3685,8 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
+ if (ret < 0)
+ return ret;
+
+- btrfs_mark_buffer_dirty(right);
+- btrfs_mark_buffer_dirty(l);
++ btrfs_mark_buffer_dirty(trans, right);
++ btrfs_mark_buffer_dirty(trans, l);
+ BUG_ON(path->slots[0] != slot);
+
+ if (mid <= slot) {
+@@ -3925,7 +3927,7 @@ again:
+ path->nodes[0] = right;
+ path->slots[0] = 0;
+ if (path->slots[1] == 0)
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+ /*
+ * We create a new leaf 'right' for the required ins_len and
+@@ -4024,7 +4026,8 @@ err:
+ return ret;
+ }
+
+-static noinline int split_item(struct btrfs_path *path,
++static noinline int split_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path,
+ const struct btrfs_key *new_key,
+ unsigned long split_offset)
+ {
+@@ -4083,7 +4086,7 @@ static noinline int split_item(struct btrfs_path *path,
+ write_extent_buffer(leaf, buf + split_offset,
+ btrfs_item_ptr_offset(leaf, slot),
+ item_size - split_offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ BUG_ON(btrfs_leaf_free_space(leaf) < 0);
+ kfree(buf);
+@@ -4117,7 +4120,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
+ if (ret)
+ return ret;
+
+- ret = split_item(path, new_key, split_offset);
++ ret = split_item(trans, path, new_key, split_offset);
+ return ret;
+ }
+
+@@ -4127,7 +4130,8 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
+ * off the end of the item or if we shift the item to chop bytes off
+ * the front.
+ */
+-void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
++void btrfs_truncate_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path, u32 new_size, int from_end)
+ {
+ int slot;
+ struct extent_buffer *leaf;
+@@ -4203,11 +4207,11 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
+ btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
+ btrfs_set_item_key(leaf, &disk_key, slot);
+ if (slot == 0)
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+
+ btrfs_set_item_size(leaf, slot, new_size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (btrfs_leaf_free_space(leaf) < 0) {
+ btrfs_print_leaf(leaf);
+@@ -4218,7 +4222,8 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
+ /*
+ * make the item pointed to by the path bigger, data_size is the added size.
+ */
+-void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
++void btrfs_extend_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path, u32 data_size)
+ {
+ int slot;
+ struct extent_buffer *leaf;
+@@ -4268,7 +4273,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
+ data_end = old_data;
+ old_size = btrfs_item_size(leaf, slot);
+ btrfs_set_item_size(leaf, slot, old_size + data_size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (btrfs_leaf_free_space(leaf) < 0) {
+ btrfs_print_leaf(leaf);
+@@ -4279,6 +4284,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
+ /*
+ * Make space in the node before inserting one or more items.
+ *
++ * @trans: transaction handle
+ * @root: root we are inserting items to
+ * @path: points to the leaf/slot where we are going to insert new items
+ * @batch: information about the batch of items to insert
+@@ -4286,7 +4292,8 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
+ * Main purpose is to save stack depth by doing the bulk of the work in a
+ * function that doesn't call btrfs_search_slot
+ */
+-static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
++static void setup_items_for_insert(struct btrfs_trans_handle *trans,
++ struct btrfs_root *root, struct btrfs_path *path,
+ const struct btrfs_item_batch *batch)
+ {
+ struct btrfs_fs_info *fs_info = root->fs_info;
+@@ -4306,7 +4313,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
+ */
+ if (path->slots[0] == 0) {
+ btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+ btrfs_unlock_up_safe(path, 1);
+
+@@ -4365,7 +4372,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
+ }
+
+ btrfs_set_header_nritems(leaf, nritems + batch->nr);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (btrfs_leaf_free_space(leaf) < 0) {
+ btrfs_print_leaf(leaf);
+@@ -4376,12 +4383,14 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
+ /*
+ * Insert a new item into a leaf.
+ *
++ * @trans: Transaction handle.
+ * @root: The root of the btree.
+ * @path: A path pointing to the target leaf and slot.
+ * @key: The key of the new item.
+ * @data_size: The size of the data associated with the new key.
+ */
+-void btrfs_setup_item_for_insert(struct btrfs_root *root,
++void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
++ struct btrfs_root *root,
+ struct btrfs_path *path,
+ const struct btrfs_key *key,
+ u32 data_size)
+@@ -4393,7 +4402,7 @@ void btrfs_setup_item_for_insert(struct btrfs_root *root,
+ batch.total_data_size = data_size;
+ batch.nr = 1;
+
+- setup_items_for_insert(root, path, &batch);
++ setup_items_for_insert(trans, root, path, &batch);
+ }
+
+ /*
+@@ -4419,7 +4428,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
+ slot = path->slots[0];
+ BUG_ON(slot < 0);
+
+- setup_items_for_insert(root, path, batch);
++ setup_items_for_insert(trans, root, path, batch);
+ return 0;
+ }
+
+@@ -4444,7 +4453,7 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ leaf = path->nodes[0];
+ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ write_extent_buffer(leaf, data, ptr, data_size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+ btrfs_free_path(path);
+ return ret;
+@@ -4475,7 +4484,7 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
+ return ret;
+
+ path->slots[0]++;
+- btrfs_setup_item_for_insert(root, path, new_key, item_size);
++ btrfs_setup_item_for_insert(trans, root, path, new_key, item_size);
+ leaf = path->nodes[0];
+ memcpy_extent_buffer(leaf,
+ btrfs_item_ptr_offset(leaf, path->slots[0]),
+@@ -4533,9 +4542,9 @@ int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_disk_key disk_key;
+
+ btrfs_node_key(parent, &disk_key, 0);
+- fixup_low_keys(path, &disk_key, level + 1);
++ fixup_low_keys(trans, path, &disk_key, level + 1);
+ }
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ return 0;
+ }
+
+@@ -4632,7 +4641,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_disk_key disk_key;
+
+ btrfs_item_key(leaf, &disk_key, 0);
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+
+ /*
+@@ -4697,11 +4706,11 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ * dirtied this buffer
+ */
+ if (path->nodes[0] == leaf)
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ free_extent_buffer(leaf);
+ }
+ } else {
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+ }
+ return ret;
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index ff40acd63a374..06333a74d6c4c 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -518,7 +518,7 @@ int btrfs_previous_item(struct btrfs_root *root,
+ int type);
+ int btrfs_previous_extent_item(struct btrfs_root *root,
+ struct btrfs_path *path, u64 min_objectid);
+-void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
++void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ const struct btrfs_key *new_key);
+ struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
+@@ -545,8 +545,10 @@ int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
+ struct extent_buffer *buf);
+ int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_path *path, int level, int slot);
+-void btrfs_extend_item(struct btrfs_path *path, u32 data_size);
+-void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end);
++void btrfs_extend_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path, u32 data_size);
++void btrfs_truncate_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path, u32 new_size, int from_end);
+ int btrfs_split_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+@@ -610,7 +612,8 @@ struct btrfs_item_batch {
+ int nr;
+ };
+
+-void btrfs_setup_item_for_insert(struct btrfs_root *root,
++void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
++ struct btrfs_root *root,
+ struct btrfs_path *path,
+ const struct btrfs_key *key,
+ u32 data_size);
+diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
+index 427abaf608b8c..0d105ed1b8def 100644
+--- a/fs/btrfs/delalloc-space.c
++++ b/fs/btrfs/delalloc-space.c
+@@ -322,9 +322,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
+ } else {
+ if (current->journal_info)
+ flush = BTRFS_RESERVE_FLUSH_LIMIT;
+-
+- if (btrfs_transaction_in_commit(fs_info))
+- schedule_timeout(1);
+ }
+
+ num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 90aaedce1548a..16f9e5f474cca 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1030,7 +1030,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_inode_item);
+ write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
+ sizeof(struct btrfs_inode_item));
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
+ goto out;
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index fff22ed55c428..fe6ba17a05099 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -442,7 +442,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
+ dev_replace->item_needs_writeback = 0;
+ up_write(&dev_replace->rwsem);
+
+- btrfs_mark_buffer_dirty(eb);
++ btrfs_mark_buffer_dirty(trans, eb);
+
+ out:
+ btrfs_free_path(path);
+diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
+index 082eb0e195981..9c07d5c3e5ad2 100644
+--- a/fs/btrfs/dir-item.c
++++ b/fs/btrfs/dir-item.c
+@@ -38,7 +38,7 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
+ di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
+ if (di)
+ return ERR_PTR(-EEXIST);
+- btrfs_extend_item(path, data_size);
++ btrfs_extend_item(trans, path, data_size);
+ } else if (ret < 0)
+ return ERR_PTR(ret);
+ WARN_ON(ret > 0);
+@@ -93,7 +93,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
+
+ write_extent_buffer(leaf, name, name_ptr, name_len);
+ write_extent_buffer(leaf, data, data_ptr, data_len);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+
+ return ret;
+ }
+@@ -153,7 +153,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
+ name_ptr = (unsigned long)(dir_item + 1);
+
+ write_extent_buffer(leaf, name->name, name_ptr, name->len);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ second_insert:
+ /* FIXME, use some real flag for selecting the extra index */
+@@ -439,7 +439,7 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
+ start = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
+ item_len - (ptr + sub_item_len - start));
+- btrfs_truncate_item(path, item_len - sub_item_len, 1);
++ btrfs_truncate_item(trans, path, item_len - sub_item_len, 1);
+ }
+ return ret;
+ }
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 68f60d50e1fd0..71efb6883f307 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -867,7 +867,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
+ }
+
+ root->node = leaf;
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ root->commit_root = btrfs_root_node(root);
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+@@ -942,7 +942,7 @@ int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
+
+ root->node = leaf;
+
+- btrfs_mark_buffer_dirty(root->node);
++ btrfs_mark_buffer_dirty(trans, root->node);
+ btrfs_tree_unlock(root->node);
+
+ return 0;
+@@ -3197,6 +3197,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
+ goto fail_alloc;
+ }
+
++ btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
+ /*
+ * Verify the type first, if that or the checksum value are
+ * corrupted, we'll find out
+@@ -4423,7 +4424,8 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ btrfs_close_devices(fs_info->fs_devices);
+ }
+
+-void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
++void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
++ struct extent_buffer *buf)
+ {
+ struct btrfs_fs_info *fs_info = buf->fs_info;
+ u64 transid = btrfs_header_generation(buf);
+@@ -4437,10 +4439,14 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
+ if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
+ return;
+ #endif
++ /* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
++ ASSERT(trans->transid == fs_info->generation);
+ btrfs_assert_tree_write_locked(buf);
+- if (transid != fs_info->generation)
++ if (transid != fs_info->generation) {
+ WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
+ buf->start, transid, fs_info->generation);
++ btrfs_abort_transaction(trans, -EUCLEAN);
++ }
+ set_extent_buffer_dirty(buf);
+ #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+ /*
+diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
+index 02b645744a822..50dab8f639dcc 100644
+--- a/fs/btrfs/disk-io.h
++++ b/fs/btrfs/disk-io.h
+@@ -104,7 +104,8 @@ static inline struct btrfs_root *btrfs_grab_root(struct btrfs_root *root)
+ }
+
+ void btrfs_put_root(struct btrfs_root *root);
+-void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
++void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
++ struct extent_buffer *buf);
+ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
+ int atomic);
+ int btrfs_read_extent_buffer(struct extent_buffer *buf,
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index fc313fce5bbdc..91fe57e87583c 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -575,7 +575,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
+ }
+ }
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ ret = 0;
+ fail:
+ btrfs_release_path(path);
+@@ -623,7 +623,7 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
+ else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
+ btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+ return ret;
+ }
+@@ -976,7 +976,7 @@ out:
+ * helper to add new inline back ref
+ */
+ static noinline_for_stack
+-void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
++void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_extent_inline_ref *iref,
+ u64 parent, u64 root_objectid,
+@@ -999,7 +999,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
+ type = extent_ref_type(parent, owner);
+ size = btrfs_extent_inline_ref_size(type);
+
+- btrfs_extend_item(path, size);
++ btrfs_extend_item(trans, path, size);
+
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ refs = btrfs_extent_refs(leaf, ei);
+@@ -1033,7 +1033,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
+ } else {
+ btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
+ }
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+
+ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
+@@ -1066,7 +1066,9 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
+ /*
+ * helper to update/remove inline back ref
+ */
+-static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *path,
++static noinline_for_stack int update_inline_extent_backref(
++ struct btrfs_trans_handle *trans,
++ struct btrfs_path *path,
+ struct btrfs_extent_inline_ref *iref,
+ int refs_to_mod,
+ struct btrfs_delayed_extent_op *extent_op)
+@@ -1174,9 +1176,9 @@ static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *pa
+ memmove_extent_buffer(leaf, ptr, ptr + size,
+ end - ptr - size);
+ item_size -= size;
+- btrfs_truncate_item(path, item_size, 1);
++ btrfs_truncate_item(trans, path, item_size, 1);
+ }
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ return 0;
+ }
+
+@@ -1206,9 +1208,10 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
+ bytenr, num_bytes, root_objectid, path->slots[0]);
+ return -EUCLEAN;
+ }
+- ret = update_inline_extent_backref(path, iref, refs_to_add, extent_op);
++ ret = update_inline_extent_backref(trans, path, iref,
++ refs_to_add, extent_op);
+ } else if (ret == -ENOENT) {
+- setup_inline_extent_backref(trans->fs_info, path, iref, parent,
++ setup_inline_extent_backref(trans, path, iref, parent,
+ root_objectid, owner, offset,
+ refs_to_add, extent_op);
+ ret = 0;
+@@ -1226,7 +1229,8 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
+
+ BUG_ON(!is_data && refs_to_drop != 1);
+ if (iref)
+- ret = update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
++ ret = update_inline_extent_backref(trans, path, iref,
++ -refs_to_drop, NULL);
+ else if (is_data)
+ ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
+ else
+@@ -1510,7 +1514,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ if (extent_op)
+ __run_delayed_extent_op(extent_op, leaf, item);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ /* now insert the actual backref */
+@@ -1678,7 +1682,7 @@ again:
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ __run_delayed_extent_op(extent_op, leaf, ei);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ return err;
+@@ -3151,7 +3155,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+ }
+ } else {
+ btrfs_set_extent_refs(leaf, ei, refs);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+ if (found_extent) {
+ ret = remove_extent_backref(trans, extent_root, path,
+@@ -4659,7 +4663,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
+ btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
+ }
+
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_free_path(path);
+
+ return alloc_reserved_extent(trans, ins->objectid, ins->offset);
+@@ -4734,7 +4738,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
+ btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
+ }
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_free_path(path);
+
+ return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index caccd0376342b..1530df88370ce 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -675,8 +675,8 @@ static void end_bio_extent_readpage(struct btrfs_bio *bbio)
+ * the array will be skipped
+ *
+ * Return: 0 if all pages were able to be allocated;
+- * -ENOMEM otherwise, and the caller is responsible for freeing all
+- * non-null page pointers in the array.
++ * -ENOMEM otherwise; the partially allocated pages are freed and
++ * the array slots zeroed.
+ */
+ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
+ {
+@@ -695,8 +695,13 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
+ * though alloc_pages_bulk_array() falls back to alloc_page()
+ * if it could not bulk-allocate. So we must be out of memory.
+ */
+- if (allocated == last)
++ if (allocated == last) {
++ for (int i = 0; i < allocated; i++) {
++ __free_page(page_array[i]);
++ page_array[i] = NULL;
++ }
+ return -ENOMEM;
++ }
+
+ memalloc_retry_wait(GFP_NOFS);
+ }
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 1ce5dd1544995..45cae356e89ba 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -194,7 +194,7 @@ int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_encryption(leaf, item, 0);
+ btrfs_set_file_extent_other_encoding(leaf, item, 0);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ return ret;
+@@ -811,11 +811,12 @@ blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio)
+ * This calls btrfs_truncate_item with the correct args based on the overlap,
+ * and fixes up the key as required.
+ */
+-static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
++static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_key *key,
+ u64 bytenr, u64 len)
+ {
++ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct extent_buffer *leaf;
+ const u32 csum_size = fs_info->csum_size;
+ u64 csum_end;
+@@ -836,7 +837,7 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
+ */
+ u32 new_size = (bytenr - key->offset) >> blocksize_bits;
+ new_size *= csum_size;
+- btrfs_truncate_item(path, new_size, 1);
++ btrfs_truncate_item(trans, path, new_size, 1);
+ } else if (key->offset >= bytenr && csum_end > end_byte &&
+ end_byte > key->offset) {
+ /*
+@@ -848,10 +849,10 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
+ u32 new_size = (csum_end - end_byte) >> blocksize_bits;
+ new_size *= csum_size;
+
+- btrfs_truncate_item(path, new_size, 0);
++ btrfs_truncate_item(trans, path, new_size, 0);
+
+ key->offset = end_byte;
+- btrfs_set_item_key_safe(fs_info, path, key);
++ btrfs_set_item_key_safe(trans, path, key);
+ } else {
+ BUG();
+ }
+@@ -994,7 +995,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+
+ key.offset = end_byte - 1;
+ } else {
+- truncate_one_csum(fs_info, path, &key, bytenr, len);
++ truncate_one_csum(trans, path, &key, bytenr, len);
+ if (key.offset < bytenr)
+ break;
+ }
+@@ -1202,7 +1203,7 @@ extend_csum:
+ diff /= csum_size;
+ diff *= csum_size;
+
+- btrfs_extend_item(path, diff);
++ btrfs_extend_item(trans, path, diff);
+ ret = 0;
+ goto csum;
+ }
+@@ -1249,7 +1250,7 @@ found:
+ ins_size /= csum_size;
+ total_bytes += ins_size * fs_info->sectorsize;
+
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ if (total_bytes < sums->len) {
+ btrfs_release_path(path);
+ cond_resched();
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 361535c71c0f5..23a145ca94573 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -368,7 +368,7 @@ next_slot:
+ btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - args->start);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (update_refs && disk_bytenr > 0) {
+ btrfs_init_generic_ref(&ref,
+@@ -405,13 +405,13 @@ next_slot:
+
+ memcpy(&new_key, &key, sizeof(new_key));
+ new_key.offset = args->end;
+- btrfs_set_item_key_safe(fs_info, path, &new_key);
++ btrfs_set_item_key_safe(trans, path, &new_key);
+
+ extent_offset += args->end - key.offset;
+ btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - args->end);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ if (update_refs && disk_bytenr > 0)
+ args->bytes_found += args->end - key.offset;
+ break;
+@@ -431,7 +431,7 @@ next_slot:
+
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ args->start - key.offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ if (update_refs && disk_bytenr > 0)
+ args->bytes_found += extent_end - args->start;
+ if (args->end == extent_end)
+@@ -536,7 +536,8 @@ delete_extent_item:
+ if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
+ path->slots[0]++;
+ }
+- btrfs_setup_item_for_insert(root, path, &key, args->extent_item_size);
++ btrfs_setup_item_for_insert(trans, root, path, &key,
++ args->extent_item_size);
+ args->extent_inserted = true;
+ }
+
+@@ -593,7 +594,6 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
+ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode, u64 start, u64 end)
+ {
+- struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *root = inode->root;
+ struct extent_buffer *leaf;
+ struct btrfs_path *path;
+@@ -664,7 +664,7 @@ again:
+ ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ new_key.offset = end;
+- btrfs_set_item_key_safe(fs_info, path, &new_key);
++ btrfs_set_item_key_safe(trans, path, &new_key);
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_generation(leaf, fi,
+@@ -679,7 +679,7 @@ again:
+ trans->transid);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ end - other_start);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ goto out;
+ }
+ }
+@@ -698,7 +698,7 @@ again:
+ trans->transid);
+ path->slots[0]++;
+ new_key.offset = start;
+- btrfs_set_item_key_safe(fs_info, path, &new_key);
++ btrfs_set_item_key_safe(trans, path, &new_key);
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+@@ -708,7 +708,7 @@ again:
+ other_end - start);
+ btrfs_set_file_extent_offset(leaf, fi,
+ start - orig_offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ goto out;
+ }
+ }
+@@ -742,7 +742,7 @@ again:
+ btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - split);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
+ num_bytes, 0);
+@@ -814,7 +814,7 @@ again:
+ btrfs_set_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_REG);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ } else {
+ fi = btrfs_item_ptr(leaf, del_slot - 1,
+ struct btrfs_file_extent_item);
+@@ -823,7 +823,7 @@ again:
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - key.offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+ if (ret < 0) {
+@@ -2104,7 +2104,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
+ btrfs_set_file_extent_offset(leaf, fi, 0);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ goto out;
+ }
+
+@@ -2112,7 +2112,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
+ u64 num_bytes;
+
+ key.offset = offset;
+- btrfs_set_item_key_safe(fs_info, path, &key);
++ btrfs_set_item_key_safe(trans, path, &key);
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
+@@ -2121,7 +2121,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
+ btrfs_set_file_extent_offset(leaf, fi, 0);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ goto out;
+ }
+ btrfs_release_path(path);
+@@ -2273,7 +2273,7 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
+ if (extent_info->is_new_extent)
+ btrfs_set_file_extent_generation(leaf, extent, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 27fad70451aad..8dd8ef760321e 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -195,7 +195,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
+ btrfs_set_inode_nlink(leaf, inode_item, 1);
+ btrfs_set_inode_transid(leaf, inode_item, trans->transid);
+ btrfs_set_inode_block_group(leaf, inode_item, offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ key.objectid = BTRFS_FREE_SPACE_OBJECTID;
+@@ -213,7 +213,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
+ struct btrfs_free_space_header);
+ memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
+ btrfs_set_free_space_key(leaf, header, &disk_key);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ return 0;
+@@ -1185,7 +1185,7 @@ update_cache_item(struct btrfs_trans_handle *trans,
+ btrfs_set_free_space_entries(leaf, header, entries);
+ btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
+ btrfs_set_free_space_generation(leaf, header, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ return 0;
+diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
+index c0e734082dcc4..7b598b070700e 100644
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -89,7 +89,7 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans,
+ struct btrfs_free_space_info);
+ btrfs_set_free_space_extent_count(leaf, info, 0);
+ btrfs_set_free_space_flags(leaf, info, 0);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ ret = 0;
+ out:
+@@ -287,7 +287,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
+ flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
+ btrfs_set_free_space_flags(leaf, info, flags);
+ expected_extent_count = btrfs_free_space_extent_count(leaf, info);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ if (extent_count != expected_extent_count) {
+@@ -324,7 +324,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
+ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ write_extent_buffer(leaf, bitmap_cursor, ptr,
+ data_size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ i += extent_size;
+@@ -430,7 +430,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
+ flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
+ btrfs_set_free_space_flags(leaf, info, flags);
+ expected_extent_count = btrfs_free_space_extent_count(leaf, info);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ nrbits = block_group->length >> block_group->fs_info->sectorsize_bits;
+@@ -495,7 +495,7 @@ static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
+
+ extent_count += new_extents;
+ btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_release_path(path);
+
+ if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
+@@ -533,7 +533,8 @@ int free_space_test_bit(struct btrfs_block_group *block_group,
+ return !!extent_buffer_test_bit(leaf, ptr, i);
+ }
+
+-static void free_space_set_bits(struct btrfs_block_group *block_group,
++static void free_space_set_bits(struct btrfs_trans_handle *trans,
++ struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 *start, u64 *size,
+ int bit)
+ {
+@@ -563,7 +564,7 @@ static void free_space_set_bits(struct btrfs_block_group *block_group,
+ extent_buffer_bitmap_set(leaf, ptr, first, last - first);
+ else
+ extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ *size -= end - *start;
+ *start = end;
+@@ -656,7 +657,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
+ cur_start = start;
+ cur_size = size;
+ while (1) {
+- free_space_set_bits(block_group, path, &cur_start, &cur_size,
++ free_space_set_bits(trans, block_group, path, &cur_start, &cur_size,
+ !remove);
+ if (cur_size == 0)
+ break;
+diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
+index 4c322b720a80a..d3ff97374d48a 100644
+--- a/fs/btrfs/inode-item.c
++++ b/fs/btrfs/inode-item.c
+@@ -167,7 +167,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
+ memmove_extent_buffer(leaf, ptr, ptr + del_len,
+ item_size - (ptr + del_len - item_start));
+
+- btrfs_truncate_item(path, item_size - del_len, 1);
++ btrfs_truncate_item(trans, path, item_size - del_len, 1);
+
+ out:
+ btrfs_free_path(path);
+@@ -229,7 +229,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
+ item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
+ item_size - (ptr + sub_item_len - item_start));
+- btrfs_truncate_item(path, item_size - sub_item_len, 1);
++ btrfs_truncate_item(trans, path, item_size - sub_item_len, 1);
+ out:
+ btrfs_free_path(path);
+
+@@ -282,7 +282,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
+ name))
+ goto out;
+
+- btrfs_extend_item(path, ins_len);
++ btrfs_extend_item(trans, path, ins_len);
+ ret = 0;
+ }
+ if (ret < 0)
+@@ -299,7 +299,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
+
+ ptr = (unsigned long)&extref->name;
+ write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+
+ out:
+ btrfs_free_path(path);
+@@ -338,7 +338,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+ goto out;
+
+ old_size = btrfs_item_size(path->nodes[0], path->slots[0]);
+- btrfs_extend_item(path, ins_len);
++ btrfs_extend_item(trans, path, ins_len);
+ ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_ref);
+ ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
+@@ -364,7 +364,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+ ptr = (unsigned long)(ref + 1);
+ }
+ write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+
+ out:
+ btrfs_free_path(path);
+@@ -591,7 +591,7 @@ search_again:
+ num_dec = (orig_num_bytes - extent_num_bytes);
+ if (extent_start != 0)
+ control->sub_bytes += num_dec;
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ } else {
+ extent_num_bytes =
+ btrfs_file_extent_disk_num_bytes(leaf, fi);
+@@ -617,7 +617,7 @@ search_again:
+
+ btrfs_set_file_extent_ram_bytes(leaf, fi, size);
+ size = btrfs_file_extent_calc_inline_size(size);
+- btrfs_truncate_item(path, size, 1);
++ btrfs_truncate_item(trans, path, size, 1);
+ } else if (!del_item) {
+ /*
+ * We have to bail so the last_size is set to
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 7814b9d654ce1..c92c589b454d8 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -573,7 +573,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
+ kunmap_local(kaddr);
+ put_page(page);
+ }
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ /*
+@@ -2912,7 +2912,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
+ btrfs_item_ptr_offset(leaf, path->slots[0]),
+ sizeof(struct btrfs_file_extent_item));
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ /*
+@@ -3981,7 +3981,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
+ struct btrfs_inode_item);
+
+ fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_set_inode_last_trans(trans, inode);
+ ret = 0;
+ failed:
+@@ -6310,7 +6310,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
+ }
+ }
+
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ /*
+ * We don't need the path anymore, plus inheriting properties, adding
+ * ACLs, security xattrs, orphan item or adding the link, will result in
+@@ -6974,8 +6974,15 @@ static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
+ int ret;
+
+ alloc_hint = get_extent_allocation_hint(inode, start, len);
++again:
+ ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
+ 0, alloc_hint, &ins, 1, 1);
++ if (ret == -EAGAIN) {
++ ASSERT(btrfs_is_zoned(fs_info));
++ wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
++ TASK_UNINTERRUPTIBLE);
++ goto again;
++ }
+ if (ret)
+ return ERR_PTR(ret);
+
+@@ -9446,7 +9453,7 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
+
+ ptr = btrfs_file_extent_inline_start(ei);
+ write_extent_buffer(leaf, symname, ptr, name_len);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_free_path(path);
+
+ d_instantiate_new(dentry, inode);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 8e7d03bc1b565..0b120716aeb9c 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -663,7 +663,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
+ goto out;
+ }
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ inode_item = &root_item->inode;
+ btrfs_set_stack_inode_generation(inode_item, 1);
+@@ -1528,7 +1528,7 @@ static noinline int key_in_sk(struct btrfs_key *key,
+ static noinline int copy_to_sk(struct btrfs_path *path,
+ struct btrfs_key *key,
+ struct btrfs_ioctl_search_key *sk,
+- size_t *buf_size,
++ u64 *buf_size,
+ char __user *ubuf,
+ unsigned long *sk_offset,
+ int *num_found)
+@@ -1660,7 +1660,7 @@ out:
+
+ static noinline int search_ioctl(struct inode *inode,
+ struct btrfs_ioctl_search_key *sk,
+- size_t *buf_size,
++ u64 *buf_size,
+ char __user *ubuf)
+ {
+ struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
+@@ -1733,7 +1733,7 @@ static noinline int btrfs_ioctl_tree_search(struct inode *inode,
+ struct btrfs_ioctl_search_args __user *uargs = argp;
+ struct btrfs_ioctl_search_key sk;
+ int ret;
+- size_t buf_size;
++ u64 buf_size;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+@@ -1763,8 +1763,8 @@ static noinline int btrfs_ioctl_tree_search_v2(struct inode *inode,
+ struct btrfs_ioctl_search_args_v2 __user *uarg = argp;
+ struct btrfs_ioctl_search_args_v2 args;
+ int ret;
+- size_t buf_size;
+- const size_t buf_limit = SZ_16M;
++ u64 buf_size;
++ const u64 buf_limit = SZ_16M;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+@@ -2947,7 +2947,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
+
+ btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
+ btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_release_path(path);
+
+ btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
+@@ -4351,6 +4351,7 @@ static int _btrfs_ioctl_send(struct inode *inode, void __user *argp, bool compat
+ arg->clone_sources = compat_ptr(args32.clone_sources);
+ arg->parent_root = args32.parent_root;
+ arg->flags = args32.flags;
++ arg->version = args32.version;
+ memcpy(arg->reserved, args32.reserved,
+ sizeof(args32.reserved));
+ #else
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index b99230db3c820..bdaebb9fc6899 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -622,7 +622,7 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
+
+ ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
+
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+
+ btrfs_free_path(path);
+ return ret;
+@@ -700,7 +700,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
+ btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
+ btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ btrfs_release_path(path);
+
+@@ -719,7 +719,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
+ btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
+ btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ ret = 0;
+ out:
+@@ -808,7 +808,7 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
+ btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
+ btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
+
+- btrfs_mark_buffer_dirty(l);
++ btrfs_mark_buffer_dirty(trans, l);
+
+ out:
+ btrfs_free_path(path);
+@@ -854,7 +854,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
+ btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
+ btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
+
+- btrfs_mark_buffer_dirty(l);
++ btrfs_mark_buffer_dirty(trans, l);
+
+ out:
+ btrfs_free_path(path);
+@@ -896,7 +896,7 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
+ btrfs_set_qgroup_status_rescan(l, ptr,
+ fs_info->qgroup_rescan_progress.objectid);
+
+- btrfs_mark_buffer_dirty(l);
++ btrfs_mark_buffer_dirty(trans, l);
+
+ out:
+ btrfs_free_path(path);
+@@ -1069,7 +1069,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ BTRFS_QGROUP_STATUS_FLAGS_MASK);
+ btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ key.objectid = 0;
+ key.type = BTRFS_ROOT_REF_KEY;
+diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
+index 95d28497de7c2..e646662e61c6b 100644
+--- a/fs/btrfs/ref-verify.c
++++ b/fs/btrfs/ref-verify.c
+@@ -791,6 +791,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ dump_ref_action(fs_info, ra);
+ kfree(ref);
+ kfree(ra);
++ kfree(re);
+ goto out_unlock;
+ } else if (be->num_refs == 0) {
+ btrfs_err(fs_info,
+@@ -800,6 +801,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ dump_ref_action(fs_info, ra);
+ kfree(ref);
+ kfree(ra);
++ kfree(re);
+ goto out_unlock;
+ }
+
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index c6d4bb8cbe299..4eaac3ae5c365 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1181,7 +1181,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
+ }
+ }
+ if (dirty)
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ if (inode)
+ btrfs_add_delayed_iput(BTRFS_I(inode));
+ return ret;
+@@ -1374,13 +1374,13 @@ again:
+ */
+ btrfs_set_node_blockptr(parent, slot, new_bytenr);
+ btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+
+ btrfs_set_node_blockptr(path->nodes[level],
+ path->slots[level], old_bytenr);
+ btrfs_set_node_ptr_generation(path->nodes[level],
+ path->slots[level], old_ptr_gen);
+- btrfs_mark_buffer_dirty(path->nodes[level]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[level]);
+
+ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
+ blocksize, path->nodes[level]->start);
+@@ -2517,7 +2517,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
+ node->eb->start);
+ btrfs_set_node_ptr_generation(upper->eb, slot,
+ trans->transid);
+- btrfs_mark_buffer_dirty(upper->eb);
++ btrfs_mark_buffer_dirty(trans, upper->eb);
+
+ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
+ node->eb->start, blocksize,
+@@ -3835,7 +3835,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
+ btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
+ btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
+ BTRFS_INODE_PREALLOC);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ return ret;
+diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
+index 859874579456f..5b0f1bccc409c 100644
+--- a/fs/btrfs/root-tree.c
++++ b/fs/btrfs/root-tree.c
+@@ -191,7 +191,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
+ btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
+
+ write_extent_buffer(l, item, ptr, sizeof(*item));
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ out:
+ btrfs_free_path(path);
+ return ret;
+@@ -438,7 +438,7 @@ again:
+ btrfs_set_root_ref_name_len(leaf, ref, name->len);
+ ptr = (unsigned long)(ref + 1);
+ write_extent_buffer(leaf, name->name, ptr, name->len);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (key.type == BTRFS_ROOT_BACKREF_KEY) {
+ btrfs_release_path(path);
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index b877203f1dc5a..4445a52a07076 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -1798,6 +1798,9 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *
+ */
+ ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
+
++ /* @found_logical_ret must be specified. */
++ ASSERT(found_logical_ret);
++
+ stripe = &sctx->stripes[sctx->cur_stripe];
+ scrub_reset_stripe(stripe);
+ ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
+@@ -1806,8 +1809,7 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *
+ /* Either >0 as no more extents or <0 for error. */
+ if (ret)
+ return ret;
+- if (found_logical_ret)
+- *found_logical_ret = stripe->logical;
++ *found_logical_ret = stripe->logical;
+ sctx->cur_stripe++;
+
+ /* We filled one group, submit it. */
+@@ -2010,7 +2012,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
+
+ /* Go through each extent items inside the logical range */
+ while (cur_logical < logical_end) {
+- u64 found_logical;
++ u64 found_logical = U64_MAX;
+ u64 cur_physical = physical + cur_logical - logical_start;
+
+ /* Canceled? */
+@@ -2045,6 +2047,8 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
+ if (ret < 0)
+ break;
+
++ /* queue_scrub_stripe() returned 0, so @found_logical must be updated. */
++ ASSERT(found_logical != U64_MAX);
+ cur_logical = found_logical + BTRFS_STRIPE_LEN;
+
+ /* Don't hold CPU for too long time */
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 3a566150c531a..db94eefda27e2 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -8158,7 +8158,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
+ }
+
+ sctx->send_filp = fget(arg->send_fd);
+- if (!sctx->send_filp) {
++ if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
+ ret = -EBADF;
+ goto out;
+ }
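
The send.c hunk above rejects a send_fd that was not opened for writing instead of failing later when the stream is written. A user-space analogue of the same validation, using only the standard fcntl(2) interface (illustrative, not kernel code):

    #include <fcntl.h>
    #include <errno.h>

    /* Verify an fd handed in by a caller is actually open for writing. */
    static int check_writable_fd(int fd)
    {
        int flags = fcntl(fd, F_GETFL);

        if (flags < 0)
            return -errno;            /* EBADF for a bogus descriptor */
        if ((flags & O_ACCMODE) != O_WRONLY &&
            (flags & O_ACCMODE) != O_RDWR)
            return -EBADF;            /* mirrors the patch's -EBADF */
        return 0;
    }
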
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 1a093ec0f7e36..de0bfebce1269 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -79,7 +79,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data);
+
+ static void btrfs_put_super(struct super_block *sb)
+ {
+- close_ctree(btrfs_sb(sb));
++ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
++
++ btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid);
++ close_ctree(fs_info);
+ }
+
+ enum {
+diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
+index 5ef0b90e25c3b..6a43a64ba55ad 100644
+--- a/fs/btrfs/tests/extent-buffer-tests.c
++++ b/fs/btrfs/tests/extent-buffer-tests.c
+@@ -61,7 +61,11 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
+ key.type = BTRFS_EXTENT_CSUM_KEY;
+ key.offset = 0;
+
+- btrfs_setup_item_for_insert(root, path, &key, value_len);
++ /*
++	 * Passing a NULL trans handle is fine here since we have a dummy
++	 * root eb and the tree is a single node (level 0).
++ */
++ btrfs_setup_item_for_insert(NULL, root, path, &key, value_len);
+ write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0),
+ value_len);
+
+diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
+index 05b03f5eab83b..492d69d2fa737 100644
+--- a/fs/btrfs/tests/inode-tests.c
++++ b/fs/btrfs/tests/inode-tests.c
+@@ -34,7 +34,11 @@ static void insert_extent(struct btrfs_root *root, u64 start, u64 len,
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = start;
+
+- btrfs_setup_item_for_insert(root, &path, &key, value_len);
++ /*
++	 * Passing a NULL trans handle is fine here since we have a dummy
++	 * root eb and the tree is a single node (level 0).
++ */
++ btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
+ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+ btrfs_set_file_extent_generation(leaf, fi, 1);
+ btrfs_set_file_extent_type(leaf, fi, type);
+@@ -64,7 +68,11 @@ static void insert_inode_item_key(struct btrfs_root *root)
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+
+- btrfs_setup_item_for_insert(root, &path, &key, value_len);
++ /*
++	 * Passing a NULL trans handle is fine here since we have a dummy
++	 * root eb and the tree is a single node (level 0).
++ */
++ btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
+ }
+
+ /*
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index cbb17b5421317..9fb64af608d12 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -504,9 +504,9 @@ insert:
+ found_size = btrfs_item_size(path->nodes[0],
+ path->slots[0]);
+ if (found_size > item_size)
+- btrfs_truncate_item(path, item_size, 1);
++ btrfs_truncate_item(trans, path, item_size, 1);
+ else if (found_size < item_size)
+- btrfs_extend_item(path, item_size - found_size);
++ btrfs_extend_item(trans, path, item_size - found_size);
+ } else if (ret) {
+ return ret;
+ }
+@@ -574,7 +574,7 @@ insert:
+ }
+ }
+ no_copy:
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_release_path(path);
+ return 0;
+ }
+@@ -3530,7 +3530,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
+ last_offset = max(last_offset, curr_end);
+ }
+ btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_release_path(path);
+ return 0;
+ }
+@@ -4488,7 +4488,7 @@ copy_item:
+ dst_index++;
+ }
+
+- btrfs_mark_buffer_dirty(dst_path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, dst_path->nodes[0]);
+ btrfs_release_path(dst_path);
+ out:
+ kfree(ins_data);
+@@ -4693,7 +4693,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
+ write_extent_buffer(leaf, &fi,
+ btrfs_item_ptr_offset(leaf, path->slots[0]),
+ sizeof(fi));
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ btrfs_release_path(path);
+
+diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
+index 7c7001f42b14c..5be74f9e47ebf 100644
+--- a/fs/btrfs/uuid-tree.c
++++ b/fs/btrfs/uuid-tree.c
+@@ -124,7 +124,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+ * An item with that type already exists.
+ * Extend the item and store the new subid at the end.
+ */
+- btrfs_extend_item(path, sizeof(subid_le));
++ btrfs_extend_item(trans, path, sizeof(subid_le));
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ offset = btrfs_item_ptr_offset(eb, slot);
+@@ -139,7 +139,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+ ret = 0;
+ subid_le = cpu_to_le64(subid_cpu);
+ write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
+- btrfs_mark_buffer_dirty(eb);
++ btrfs_mark_buffer_dirty(trans, eb);
+
+ out:
+ btrfs_free_path(path);
+@@ -221,7 +221,7 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+ move_src = offset + sizeof(subid);
+ move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
+ memmove_extent_buffer(eb, move_dst, move_src, move_len);
+- btrfs_truncate_item(path, item_size - sizeof(subid), 1);
++ btrfs_truncate_item(trans, path, item_size - sizeof(subid), 1);
+
+ out:
+ btrfs_free_path(path);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index b9ef6f54635ca..722a1dde75636 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1894,7 +1894,7 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
+ ptr = btrfs_device_fsid(dev_item);
+ write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
+ ptr, BTRFS_FSID_SIZE);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ ret = 0;
+ out:
+@@ -2597,7 +2597,7 @@ next_slot:
+ if (device->fs_devices->seeding) {
+ btrfs_set_device_generation(leaf, dev_item,
+ device->generation);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+
+ path->slots[0]++;
+@@ -2895,7 +2895,7 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
+ btrfs_device_get_disk_total_bytes(device));
+ btrfs_set_device_bytes_used(leaf, dev_item,
+ btrfs_device_get_bytes_used(device));
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ out:
+ btrfs_free_path(path);
+@@ -3045,15 +3045,16 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
+ read_unlock(&em_tree->lock);
+
+ if (!em) {
+- btrfs_crit(fs_info, "unable to find logical %llu length %llu",
++ btrfs_crit(fs_info,
++ "unable to find chunk map for logical %llu length %llu",
+ logical, length);
+ return ERR_PTR(-EINVAL);
+ }
+
+- if (em->start > logical || em->start + em->len < logical) {
++ if (em->start > logical || em->start + em->len <= logical) {
+ btrfs_crit(fs_info,
+- "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
+- logical, length, em->start, em->start + em->len);
++ "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
++ logical, logical + length, em->start, em->start + em->len);
+ free_extent_map(em);
+ return ERR_PTR(-EINVAL);
+ }
+@@ -3483,7 +3484,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info,
+
+ btrfs_set_balance_flags(leaf, item, bctl->flags);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ err = btrfs_commit_transaction(trans);
+@@ -7534,7 +7535,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
+ for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+ btrfs_set_dev_stats_value(eb, ptr, i,
+ btrfs_dev_stat_read(device, i));
+- btrfs_mark_buffer_dirty(eb);
++ btrfs_mark_buffer_dirty(trans, eb);
+
+ out:
+ btrfs_free_path(path);
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index 96828a13dd43d..b906f809650ef 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -188,15 +188,15 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
+ if (old_data_len + name_len + sizeof(*di) == item_size) {
+ /* No other xattrs packed in the same leaf item. */
+ if (size > old_data_len)
+- btrfs_extend_item(path, size - old_data_len);
++ btrfs_extend_item(trans, path, size - old_data_len);
+ else if (size < old_data_len)
+- btrfs_truncate_item(path, data_size, 1);
++ btrfs_truncate_item(trans, path, data_size, 1);
+ } else {
+ /* There are other xattrs packed in the same item. */
+ ret = btrfs_delete_one_dir_name(trans, root, path, di);
+ if (ret)
+ goto out;
+- btrfs_extend_item(path, data_size);
++ btrfs_extend_item(trans, path, data_size);
+ }
+
+ ptr = btrfs_item_ptr(leaf, slot, char);
+@@ -205,7 +205,7 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
+ btrfs_set_dir_data_len(leaf, di, size);
+ data_ptr = ((unsigned long)(di + 1)) + name_len;
+ write_extent_buffer(leaf, value, data_ptr, size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ } else {
+ /*
+ * Insert, and we had space for the xattr, so path->slots[0] is
+diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
+index 87b3753aa4b1e..c45e8c2d62e11 100644
+--- a/fs/debugfs/file.c
++++ b/fs/debugfs/file.c
+@@ -939,7 +939,7 @@ static ssize_t debugfs_write_file_str(struct file *file, const char __user *user
+ new[pos + count] = '\0';
+ strim(new);
+
+- rcu_assign_pointer(*(char **)file->private_data, new);
++ rcu_assign_pointer(*(char __rcu **)file->private_data, new);
+ synchronize_rcu();
+ kfree(old);
+
+diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
+index 5aabcb6f0f157..c93359ceaae61 100644
+--- a/fs/dlm/debug_fs.c
++++ b/fs/dlm/debug_fs.c
+@@ -973,7 +973,8 @@ void dlm_delete_debug_comms_file(void *ctx)
+
+ void dlm_create_debug_file(struct dlm_ls *ls)
+ {
+- char name[DLM_LOCKSPACE_LEN + 8];
++ /* Reserve enough space for the longest file name */
++ char name[DLM_LOCKSPACE_LEN + sizeof("_queued_asts")];
+
+ /* format 1 */
+
+@@ -986,7 +987,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
+ /* format 2 */
+
+ memset(name, 0, sizeof(name));
+- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_locks", ls->ls_name);
++ snprintf(name, sizeof(name), "%s_locks", ls->ls_name);
+
+ ls->ls_debug_locks_dentry = debugfs_create_file(name,
+ 0644,
+@@ -997,7 +998,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
+ /* format 3 */
+
+ memset(name, 0, sizeof(name));
+- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_all", ls->ls_name);
++ snprintf(name, sizeof(name), "%s_all", ls->ls_name);
+
+ ls->ls_debug_all_dentry = debugfs_create_file(name,
+ S_IFREG | S_IRUGO,
+@@ -1008,7 +1009,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
+ /* format 4 */
+
+ memset(name, 0, sizeof(name));
+- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_toss", ls->ls_name);
++ snprintf(name, sizeof(name), "%s_toss", ls->ls_name);
+
+ ls->ls_debug_toss_dentry = debugfs_create_file(name,
+ S_IFREG | S_IRUGO,
+@@ -1017,7 +1018,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
+ &format4_fops);
+
+ memset(name, 0, sizeof(name));
+- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_waiters", ls->ls_name);
++ snprintf(name, sizeof(name), "%s_waiters", ls->ls_name);
+
+ ls->ls_debug_waiters_dentry = debugfs_create_file(name,
+ 0644,
+@@ -1028,7 +1029,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
+ /* format 5 */
+
+ memset(name, 0, sizeof(name));
+- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_queued_asts", ls->ls_name);
++ snprintf(name, sizeof(name), "%s_queued_asts", ls->ls_name);
+
+ ls->ls_debug_queued_asts_dentry = debugfs_create_file(name,
+ 0644,
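
The debug_fs.c hunks replace the hand-counted `DLM_LOCKSPACE_LEN + 8` with a buffer sized by `sizeof()` on the longest suffix literal, and pass `sizeof(name)` to snprintf() so the bound can never drift from the declaration; `sizeof("_queued_asts")` counts the terminating NUL. A stand-alone sketch of the idiom (the 64-byte cap and the names are assumed for the demo):

    #include <stdio.h>

    #define LOCKSPACE_LEN 64                 /* assumed cap, for the demo */

    static void build_name(char *out, size_t outsz, const char *ls_name)
    {
        snprintf(out, outsz, "%s_queued_asts", ls_name);
    }

    int main(void)
    {
        /* Holds any name up to LOCKSPACE_LEN chars plus suffix and NUL. */
        char name[LOCKSPACE_LEN + sizeof("_queued_asts")];

        build_name(name, sizeof(name), "mylockspace");
        puts(name);
        return 0;
    }
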
+diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
+index f641b36a36db0..2247ebb61be1e 100644
+--- a/fs/dlm/midcomms.c
++++ b/fs/dlm/midcomms.c
+@@ -337,13 +337,21 @@ static struct midcomms_node *nodeid2node(int nodeid)
+
+ int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
+ {
+- int ret, r = nodeid_hash(nodeid);
++ int ret, idx, r = nodeid_hash(nodeid);
+ struct midcomms_node *node;
+
+ ret = dlm_lowcomms_addr(nodeid, addr, len);
+ if (ret)
+ return ret;
+
++ idx = srcu_read_lock(&nodes_srcu);
++ node = __find_node(nodeid, r);
++ if (node) {
++ srcu_read_unlock(&nodes_srcu, idx);
++ return 0;
++ }
++ srcu_read_unlock(&nodes_srcu, idx);
++
+ node = kmalloc(sizeof(*node), GFP_NOFS);
+ if (!node)
+ return -ENOMEM;
+@@ -1030,15 +1038,15 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
+
+ break;
+ case DLM_VERSION_3_2:
++ /* send ack back if necessary */
++ dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD);
++
+ msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, allocation,
+ ppc);
+ if (!msg) {
+ dlm_free_mhandle(mh);
+ goto err;
+ }
+-
+- /* send ack back if necessary */
+- dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD);
+ break;
+ default:
+ dlm_free_mhandle(mh);
+@@ -1260,12 +1268,23 @@ void dlm_midcomms_remove_member(int nodeid)
+
+ idx = srcu_read_lock(&nodes_srcu);
+ node = nodeid2node(nodeid);
+- if (WARN_ON_ONCE(!node)) {
++	/* in case dlm_midcomms_close() already removed the node */
++ if (!node) {
+ srcu_read_unlock(&nodes_srcu, idx);
+ return;
+ }
+
+ spin_lock(&node->state_lock);
++	/* Handle the case where dlm_midcomms_addr() created the node
++	 * but it was never added because dlm_midcomms_close() removed
++	 * it in the meantime.
++	 */
++ if (!node->users) {
++ spin_unlock(&node->state_lock);
++ srcu_read_unlock(&nodes_srcu, idx);
++ return;
++ }
++
+ node->users--;
+ pr_debug("node %d users dec count %d\n", nodeid, node->users);
+
+@@ -1386,10 +1405,16 @@ void dlm_midcomms_shutdown(void)
+ midcomms_shutdown(node);
+ }
+ }
+- srcu_read_unlock(&nodes_srcu, idx);
+- mutex_unlock(&close_lock);
+
+ dlm_lowcomms_shutdown();
++
++ for (i = 0; i < CONN_HASH_SIZE; i++) {
++ hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
++ midcomms_node_reset(node);
++ }
++ }
++ srcu_read_unlock(&nodes_srcu, idx);
++ mutex_unlock(&close_lock);
+ }
+
+ int dlm_midcomms_close(int nodeid)
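
The first midcomms.c hunk makes dlm_midcomms_addr() idempotent: it looks the node up under the SRCU read lock and returns early rather than inserting a duplicate. A plain-C sketch of the lookup-before-allocate shape, with a mutex standing in for SRCU (all names illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { int id; struct node *next; };

    static struct node *nodes;
    static pthread_mutex_t nodes_lock = PTHREAD_MUTEX_INITIALIZER;

    static int node_add(int id)
    {
        struct node *n;

        pthread_mutex_lock(&nodes_lock);
        for (n = nodes; n; n = n->next) {
            if (n->id == id) {              /* already registered: succeed */
                pthread_mutex_unlock(&nodes_lock);
                return 0;
            }
        }
        n = malloc(sizeof(*n));
        if (!n) {
            pthread_mutex_unlock(&nodes_lock);
            return -1;
        }
        n->id = id;
        n->next = nodes;
        nodes = n;
        pthread_mutex_unlock(&nodes_lock);
        return 0;
    }
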
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 992d9c7e64ae6..5ab4b87888a79 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -998,6 +998,14 @@ static int ecryptfs_getattr_link(struct mnt_idmap *idmap,
+ return rc;
+ }
+
++static int ecryptfs_do_getattr(const struct path *path, struct kstat *stat,
++ u32 request_mask, unsigned int flags)
++{
++ if (flags & AT_GETATTR_NOSEC)
++ return vfs_getattr_nosec(path, stat, request_mask, flags);
++ return vfs_getattr(path, stat, request_mask, flags);
++}
++
+ static int ecryptfs_getattr(struct mnt_idmap *idmap,
+ const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int flags)
+@@ -1006,8 +1014,8 @@ static int ecryptfs_getattr(struct mnt_idmap *idmap,
+ struct kstat lower_stat;
+ int rc;
+
+- rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat,
+- request_mask, flags);
++ rc = ecryptfs_do_getattr(ecryptfs_dentry_to_lower_path(dentry),
++ &lower_stat, request_mask, flags);
+ if (!rc) {
+ fsstack_copy_attr_all(d_inode(dentry),
+ ecryptfs_inode_to_lower(d_inode(dentry)));
+diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
+index cc6fb9e988991..4256a85719a1d 100644
+--- a/fs/erofs/utils.c
++++ b/fs/erofs/utils.c
+@@ -77,12 +77,7 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ struct erofs_workgroup *pre;
+
+- /*
+- * Bump up before making this visible to others for the XArray in order
+- * to avoid potential UAF without serialized by xa_lock.
+- */
+- lockref_get(&grp->lockref);
+-
++ DBG_BUGON(grp->lockref.count < 1);
+ repeat:
+ xa_lock(&sbi->managed_pslots);
+ pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
+@@ -96,7 +91,6 @@ repeat:
+ cond_resched();
+ goto repeat;
+ }
+- lockref_put_return(&grp->lockref);
+ grp = pre;
+ }
+ xa_unlock(&sbi->managed_pslots);
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 036f610e044b6..a7e6847f6f8f1 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -796,6 +796,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+ return PTR_ERR(pcl);
+
+ spin_lock_init(&pcl->obj.lockref.lock);
++ pcl->obj.lockref.count = 1; /* one ref for this request */
+ pcl->algorithmformat = map->m_algorithmformat;
+ pcl->length = 0;
+ pcl->partial = true;
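
The erofs changes move the initial reference into object creation: a pcluster now starts life with lockref.count = 1 rather than being bumped just before insertion, which closes the window in which it was published with a zero count. Roughly the same lifetime rule in portable C11 (a sketch, not the erofs code):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct pcluster {
        atomic_int refcount;
    };

    static struct pcluster *pcluster_create(void)
    {
        struct pcluster *p = malloc(sizeof(*p));

        if (p)
            atomic_init(&p->refcount, 1);   /* one ref for this request */
        return p;
    }

    static void pcluster_put(struct pcluster *p)
    {
        if (atomic_fetch_sub(&p->refcount, 1) == 1)
            free(p);                        /* last reference dropped */
    }
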
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index 1b9f587f6cca5..95c51b025b917 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -351,14 +351,20 @@ static int exfat_find_empty_entry(struct inode *inode,
+ if (exfat_check_max_dentries(inode))
+ return -ENOSPC;
+
+- /* we trust p_dir->size regardless of FAT type */
+- if (exfat_find_last_cluster(sb, p_dir, &last_clu))
+- return -EIO;
+-
+ /*
+ * Allocate new cluster to this directory
+ */
+- exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
++ if (ei->start_clu != EXFAT_EOF_CLUSTER) {
++ /* we trust p_dir->size regardless of FAT type */
++ if (exfat_find_last_cluster(sb, p_dir, &last_clu))
++ return -EIO;
++
++ exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
++ } else {
++ /* This directory is empty */
++ exfat_chain_set(&clu, EXFAT_EOF_CLUSTER, 0,
++ ALLOC_NO_FAT_CHAIN);
++ }
+
+ /* allocate a cluster */
+ ret = exfat_alloc_cluster(inode, 1, &clu, IS_DIRSYNC(inode));
+@@ -368,6 +374,11 @@ static int exfat_find_empty_entry(struct inode *inode,
+ if (exfat_zeroed_cluster(inode, clu.dir))
+ return -EIO;
+
++ if (ei->start_clu == EXFAT_EOF_CLUSTER) {
++ ei->start_clu = clu.dir;
++ p_dir->dir = clu.dir;
++ }
++
+ /* append to the FAT chain */
+ if (clu.flags != p_dir->flags) {
+ /* no-fat-chain bit is disabled,
+@@ -645,7 +656,7 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
+ info->type = exfat_get_entry_type(ep);
+ info->attr = le16_to_cpu(ep->dentry.file.attr);
+ info->size = le64_to_cpu(ep2->dentry.stream.valid_size);
+- if ((info->type == TYPE_FILE) && (info->size == 0)) {
++ if (info->size == 0) {
+ info->flags = ALLOC_NO_FAT_CHAIN;
+ info->start_clu = EXFAT_EOF_CLUSTER;
+ } else {
+@@ -888,6 +899,9 @@ static int exfat_check_dir_empty(struct super_block *sb,
+
+ dentries_per_clu = sbi->dentries_per_clu;
+
++ if (p_dir->dir == EXFAT_EOF_CLUSTER)
++ return 0;
++
+ exfat_chain_dup(&clu, p_dir);
+
+ while (clu.dir != EXFAT_EOF_CLUSTER) {
+@@ -1255,7 +1269,8 @@ static int __exfat_rename(struct inode *old_parent_inode,
+ }
+
+ /* Free the clusters if new_inode is a dir(as if exfat_rmdir) */
+- if (new_entry_type == TYPE_DIR) {
++ if (new_entry_type == TYPE_DIR &&
++ new_ei->start_clu != EXFAT_EOF_CLUSTER) {
+ /* new_ei, new_clu_to_free */
+ struct exfat_chain new_clu_to_free;
+
+diff --git a/fs/ext2/file.c b/fs/ext2/file.c
+index 1039e5bf90afd..4ddc36f4dbd40 100644
+--- a/fs/ext2/file.c
++++ b/fs/ext2/file.c
+@@ -258,7 +258,6 @@ static ssize_t ext2_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ goto out_unlock;
+ }
+
+- iocb->ki_pos += status;
+ ret += status;
+ endbyte = pos + status - 1;
+ ret2 = filemap_write_and_wait_range(inode->i_mapping, pos,
+diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
+index 0c5a79c3b5d48..ef4c19e5f5706 100644
+--- a/fs/ext4/acl.h
++++ b/fs/ext4/acl.h
+@@ -68,6 +68,11 @@ extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
+ static inline int
+ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
+ {
++	/* Usually the umask is applied by posix_acl_create(), but if ext4
++	 * ACL support is disabled at compile time, we must apply it here
++	 * because posix_acl_create() will never be called. */
++ inode->i_mode &= ~current_umask();
++
+ return 0;
+ }
+ #endif /* CONFIG_EXT4_FS_POSIX_ACL */
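
The acl.h stub shows why the umask must be applied manually when CONFIG_EXT4_FS_POSIX_ACL is off: posix_acl_create(), which normally applies it, is never called. The same arithmetic in a tiny user-space program (the mode value is an arbitrary example):

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        mode_t requested = 0666;
        mode_t um = umask(0);       /* read the current umask ... */

        umask(um);                  /* ... and immediately restore it */
        printf("effective mode: %04o\n", requested & ~um);
        return 0;
    }
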
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 9418359b1d9d3..cd4ccae1e28a1 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1676,7 +1676,8 @@ struct ext4_sb_info {
+
+ /*
+ * Barrier between writepages ops and changing any inode's JOURNAL_DATA
+- * or EXTENTS flag.
++ * or EXTENTS flag or between writepages ops and changing DELALLOC or
++ * DIOREAD_NOLOCK mount options on remount.
+ */
+ struct percpu_rw_semaphore s_writepages_rwsem;
+ struct dax_device *s_daxdev;
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 202c76996b621..4d8496d1a8ac4 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1010,6 +1010,11 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
+ ix = curp->p_idx;
+ }
+
++ if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
++ EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
++ return -EFSCORRUPTED;
++ }
++
+ len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
+ BUG_ON(len < 0);
+ if (len > 0) {
+@@ -1019,11 +1024,6 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
+ memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
+ }
+
+- if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
+- EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
+- return -EFSCORRUPTED;
+- }
+-
+ ix->ei_block = cpu_to_le32(logical);
+ ext4_idx_store_pblock(ix, ptr);
+ le16_add_cpu(&curp->p_hdr->eh_entries, 1);
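
The extents.c hunk reorders the EXT_MAX_INDEX bounds check so it runs before the memmove() that shifts the index array, not after the damage is already done. A generic stand-alone version of check-before-shift (types and limits are illustrative):

    #include <errno.h>
    #include <string.h>

    static int insert_at(int *arr, size_t *nr, size_t cap, size_t idx, int val)
    {
        if (idx > *nr || *nr >= cap)
            return -EINVAL;              /* reject before moving anything */
        memmove(&arr[idx + 1], &arr[idx], (*nr - idx) * sizeof(arr[0]));
        arr[idx] = val;
        (*nr)++;
        return 0;
    }
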
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 6f7de14c0fa86..f4b50652f0cce 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -152,8 +152,9 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
+ static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+ struct ext4_inode_info *locked_ei);
+-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+- ext4_lblk_t len);
++static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
++ ext4_lblk_t len,
++ struct pending_reservation **prealloc);
+
+ int __init ext4_init_es(void)
+ {
+@@ -448,6 +449,19 @@ static void ext4_es_list_del(struct inode *inode)
+ spin_unlock(&sbi->s_es_lock);
+ }
+
++static inline struct pending_reservation *__alloc_pending(bool nofail)
++{
++ if (!nofail)
++ return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
++
++ return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL);
++}
++
++static inline void __free_pending(struct pending_reservation *pr)
++{
++ kmem_cache_free(ext4_pending_cachep, pr);
++}
++
+ /*
+ * Returns true if we cannot fail to allocate memory for this extent_status
+ * entry and cannot reclaim it until its status changes.
+@@ -836,11 +850,12 @@ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ {
+ struct extent_status newes;
+ ext4_lblk_t end = lblk + len - 1;
+- int err1 = 0;
+- int err2 = 0;
++ int err1 = 0, err2 = 0, err3 = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct extent_status *es1 = NULL;
+ struct extent_status *es2 = NULL;
++ struct pending_reservation *pr = NULL;
++ bool revise_pending = false;
+
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return;
+@@ -868,11 +883,17 @@ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+
+ ext4_es_insert_extent_check(inode, &newes);
+
++ revise_pending = sbi->s_cluster_ratio > 1 &&
++ test_opt(inode->i_sb, DELALLOC) &&
++ (status & (EXTENT_STATUS_WRITTEN |
++ EXTENT_STATUS_UNWRITTEN));
+ retry:
+ if (err1 && !es1)
+ es1 = __es_alloc_extent(true);
+ if ((err1 || err2) && !es2)
+ es2 = __es_alloc_extent(true);
++ if ((err1 || err2 || err3) && revise_pending && !pr)
++ pr = __alloc_pending(true);
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+ err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
+@@ -897,13 +918,18 @@ retry:
+ es2 = NULL;
+ }
+
+- if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
+- (status & EXTENT_STATUS_WRITTEN ||
+- status & EXTENT_STATUS_UNWRITTEN))
+- __revise_pending(inode, lblk, len);
++ if (revise_pending) {
++ err3 = __revise_pending(inode, lblk, len, &pr);
++ if (err3 != 0)
++ goto error;
++ if (pr) {
++ __free_pending(pr);
++ pr = NULL;
++ }
++ }
+ error:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+- if (err1 || err2)
++ if (err1 || err2 || err3)
+ goto retry;
+
+ ext4_es_print_tree(inode);
+@@ -1311,7 +1337,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
+ rc->ndelonly--;
+ node = rb_next(&pr->rb_node);
+ rb_erase(&pr->rb_node, &tree->root);
+- kmem_cache_free(ext4_pending_cachep, pr);
++ __free_pending(pr);
+ if (!node)
+ break;
+ pr = rb_entry(node, struct pending_reservation,
+@@ -1405,8 +1431,8 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ }
+ }
+ if (count_reserved)
+- count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
+- &orig_es, &rc);
++ count_rsvd(inode, orig_es.es_lblk + len1,
++ orig_es.es_len - len1 - len2, &orig_es, &rc);
+ goto out_get_reserved;
+ }
+
+@@ -1907,11 +1933,13 @@ static struct pending_reservation *__get_pending(struct inode *inode,
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the cluster to be added
++ * @prealloc - preallocated pending entry
+ *
+ * Returns 0 on successful insertion and -ENOMEM on failure. If the
+ * pending reservation is already in the set, returns successfully.
+ */
+-static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
++static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
++ struct pending_reservation **prealloc)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
+@@ -1937,10 +1965,15 @@ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
+ }
+ }
+
+- pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
+- if (pr == NULL) {
+- ret = -ENOMEM;
+- goto out;
++ if (likely(*prealloc == NULL)) {
++ pr = __alloc_pending(false);
++ if (!pr) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ } else {
++ pr = *prealloc;
++ *prealloc = NULL;
+ }
+ pr->lclu = lclu;
+
+@@ -1970,7 +2003,7 @@ static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
+ if (pr != NULL) {
+ tree = &EXT4_I(inode)->i_pending_tree;
+ rb_erase(&pr->rb_node, &tree->root);
+- kmem_cache_free(ext4_pending_cachep, pr);
++ __free_pending(pr);
+ }
+ }
+
+@@ -2029,10 +2062,10 @@ void ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ bool allocated)
+ {
+ struct extent_status newes;
+- int err1 = 0;
+- int err2 = 0;
++ int err1 = 0, err2 = 0, err3 = 0;
+ struct extent_status *es1 = NULL;
+ struct extent_status *es2 = NULL;
++ struct pending_reservation *pr = NULL;
+
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return;
+@@ -2052,6 +2085,8 @@ retry:
+ es1 = __es_alloc_extent(true);
+ if ((err1 || err2) && !es2)
+ es2 = __es_alloc_extent(true);
++ if ((err1 || err2 || err3) && allocated && !pr)
++ pr = __alloc_pending(true);
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+ err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
+@@ -2074,11 +2109,18 @@ retry:
+ es2 = NULL;
+ }
+
+- if (allocated)
+- __insert_pending(inode, lblk);
++ if (allocated) {
++ err3 = __insert_pending(inode, lblk, &pr);
++ if (err3 != 0)
++ goto error;
++ if (pr) {
++ __free_pending(pr);
++ pr = NULL;
++ }
++ }
+ error:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+- if (err1 || err2)
++ if (err1 || err2 || err3)
+ goto retry;
+
+ ext4_es_print_tree(inode);
+@@ -2184,21 +2226,24 @@ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
+ * @inode - file containing the range
+ * @lblk - logical block defining the start of range
+ * @len - length of range in blocks
++ * @prealloc - preallocated pending entry
+ *
+ * Used after a newly allocated extent is added to the extents status tree.
+ * Requires that the extents in the range have either written or unwritten
+ * status. Must be called while holding i_es_lock.
+ */
+-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+- ext4_lblk_t len)
++static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
++ ext4_lblk_t len,
++ struct pending_reservation **prealloc)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_lblk_t end = lblk + len - 1;
+ ext4_lblk_t first, last;
+ bool f_del = false, l_del = false;
++ int ret = 0;
+
+ if (len == 0)
+- return;
++ return 0;
+
+ /*
+ * Two cases - block range within single cluster and block range
+@@ -2219,7 +2264,9 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ first, lblk - 1);
+ if (f_del) {
+- __insert_pending(inode, first);
++ ret = __insert_pending(inode, first, prealloc);
++ if (ret < 0)
++ goto out;
+ } else {
+ last = EXT4_LBLK_CMASK(sbi, end) +
+ sbi->s_cluster_ratio - 1;
+@@ -2227,9 +2274,11 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ l_del = __es_scan_range(inode,
+ &ext4_es_is_delonly,
+ end + 1, last);
+- if (l_del)
+- __insert_pending(inode, last);
+- else
++ if (l_del) {
++ ret = __insert_pending(inode, last, prealloc);
++ if (ret < 0)
++ goto out;
++ } else
+ __remove_pending(inode, last);
+ }
+ } else {
+@@ -2237,18 +2286,24 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ if (first != lblk)
+ f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ first, lblk - 1);
+- if (f_del)
+- __insert_pending(inode, first);
+- else
++ if (f_del) {
++ ret = __insert_pending(inode, first, prealloc);
++ if (ret < 0)
++ goto out;
++ } else
+ __remove_pending(inode, first);
+
+ last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
+ if (last != end)
+ l_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ end + 1, last);
+- if (l_del)
+- __insert_pending(inode, last);
+- else
++ if (l_del) {
++ ret = __insert_pending(inode, last, prealloc);
++ if (ret < 0)
++ goto out;
++ } else
+ __remove_pending(inode, last);
+ }
++out:
++ return ret;
+ }
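
The extents_status.c rework follows one pattern throughout: any memory that might be needed while i_es_lock is held is preallocated with a no-fail allocator before retrying, and an unused preallocation is freed afterwards. A compressed sketch of that control flow; alloc_nofail(), try_insert() and free_pending() are illustrative stand-ins, not ext4 functions:

    struct pending { int payload; };

    struct pending *alloc_nofail(void);        /* assumed: cannot fail, may sleep */
    int try_insert(struct pending **prealloc); /* consumes *prealloc, or fails */
    void free_pending(struct pending *p);

    static void insert_with_retry(void)
    {
        struct pending *pr = NULL;
        int err = 0;

    retry:
        if (err && !pr)
            pr = alloc_nofail();    /* allocate outside the lock on retry */
        /* lock();   -- i_es_lock in the real code */
        err = try_insert(&pr);      /* may fail only without a preallocation */
        /* unlock(); */
        if (err)
            goto retry;
        if (pr)
            free_pending(pr);       /* preallocation went unused */
    }

The first attempt uses an atomic allocation under the lock; only after a failure is the guaranteed (sleeping) allocation done outside it.
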
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 6830ea3a6c59c..0166bb9ca160b 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -306,80 +306,38 @@ out:
+ }
+
+ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
+- ssize_t written, size_t count)
++ ssize_t count)
+ {
+ handle_t *handle;
+- bool truncate = false;
+- u8 blkbits = inode->i_blkbits;
+- ext4_lblk_t written_blk, end_blk;
+- int ret;
+-
+- /*
+- * Note that EXT4_I(inode)->i_disksize can get extended up to
+- * inode->i_size while the I/O was running due to writeback of delalloc
+- * blocks. But, the code in ext4_iomap_alloc() is careful to use
+- * zeroed/unwritten extents if this is possible; thus we won't leave
+- * uninitialized blocks in a file even if we didn't succeed in writing
+- * as much as we intended.
+- */
+- WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
+- if (offset + count <= EXT4_I(inode)->i_disksize) {
+- /*
+- * We need to ensure that the inode is removed from the orphan
+- * list if it has been added prematurely, due to writeback of
+- * delalloc blocks.
+- */
+- if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
+- handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+-
+- if (IS_ERR(handle)) {
+- ext4_orphan_del(NULL, inode);
+- return PTR_ERR(handle);
+- }
+-
+- ext4_orphan_del(handle, inode);
+- ext4_journal_stop(handle);
+- }
+-
+- return written;
+- }
+-
+- if (written < 0)
+- goto truncate;
+
++ lockdep_assert_held_write(&inode->i_rwsem);
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+- if (IS_ERR(handle)) {
+- written = PTR_ERR(handle);
+- goto truncate;
+- }
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
+
+- if (ext4_update_inode_size(inode, offset + written)) {
+- ret = ext4_mark_inode_dirty(handle, inode);
++ if (ext4_update_inode_size(inode, offset + count)) {
++ int ret = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret)) {
+- written = ret;
+ ext4_journal_stop(handle);
+- goto truncate;
++ return ret;
+ }
+ }
+
+- /*
+- * We may need to truncate allocated but not written blocks beyond EOF.
+- */
+- written_blk = ALIGN(offset + written, 1 << blkbits);
+- end_blk = ALIGN(offset + count, 1 << blkbits);
+- if (written_blk < end_blk && ext4_can_truncate(inode))
+- truncate = true;
+-
+- /*
+- * Remove the inode from the orphan list if it has been extended and
+- * everything went OK.
+- */
+- if (!truncate && inode->i_nlink)
++ if (inode->i_nlink)
+ ext4_orphan_del(handle, inode);
+ ext4_journal_stop(handle);
+
+- if (truncate) {
+-truncate:
++ return count;
++}
++
++/*
++ * Clean up the inode after a DIO or DAX extending write has completed and
++ * the inode size has been updated using ext4_handle_inode_extension().
++ */
++static void ext4_inode_extension_cleanup(struct inode *inode, ssize_t count)
++{
++ lockdep_assert_held_write(&inode->i_rwsem);
++ if (count < 0) {
+ ext4_truncate_failed_write(inode);
+ /*
+ * If the truncate operation failed early, then the inode may
+@@ -388,9 +346,28 @@ truncate:
+ */
+ if (inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
++ return;
+ }
++ /*
++ * If i_disksize got extended due to writeback of delalloc blocks while
++ * the DIO was running we could fail to cleanup the orphan list in
++	 * the DIO was running, we could fail to clean up the orphan list in
++ */
++ if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
++ handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+
+- return written;
++ if (IS_ERR(handle)) {
++ /*
++ * The write has successfully completed. Not much to
++			 * do with the error here so just clean up the orphan
++ * list and hope for the best.
++ */
++ ext4_orphan_del(NULL, inode);
++ return;
++ }
++ ext4_orphan_del(handle, inode);
++ ext4_journal_stop(handle);
++ }
+ }
+
+ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+@@ -399,31 +376,22 @@ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+ loff_t pos = iocb->ki_pos;
+ struct inode *inode = file_inode(iocb->ki_filp);
+
++ if (!error && size && flags & IOMAP_DIO_UNWRITTEN)
++ error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
+ if (error)
+ return error;
+-
+- if (size && flags & IOMAP_DIO_UNWRITTEN) {
+- error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
+- if (error < 0)
+- return error;
+- }
+ /*
+- * If we are extending the file, we have to update i_size here before
+- * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
+- * buffered reads could zero out too much from page cache pages. Update
+- * of on-disk size will happen later in ext4_dio_write_iter() where
+- * we have enough information to also perform orphan list handling etc.
+- * Note that we perform all extending writes synchronously under
+- * i_rwsem held exclusively so i_size update is safe here in that case.
+- * If the write was not extending, we cannot see pos > i_size here
+- * because operations reducing i_size like truncate wait for all
+- * outstanding DIO before updating i_size.
++ * Note that EXT4_I(inode)->i_disksize can get extended up to
++ * inode->i_size while the I/O was running due to writeback of delalloc
++ * blocks. But the code in ext4_iomap_alloc() is careful to use
++ * zeroed/unwritten extents if this is possible; thus we won't leave
++ * uninitialized blocks in a file even if we didn't succeed in writing
++ * as much as we intended.
+ */
+- pos += size;
+- if (pos > i_size_read(inode))
+- i_size_write(inode, pos);
+-
+- return 0;
++ WARN_ON_ONCE(i_size_read(inode) < READ_ONCE(EXT4_I(inode)->i_disksize));
++ if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize))
++ return size;
++ return ext4_handle_inode_extension(inode, pos, size);
+ }
+
+ static const struct iomap_dio_ops ext4_dio_write_ops = {
+@@ -569,18 +537,20 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ return ext4_buffered_write_iter(iocb, from);
+ }
+
++ /*
++ * Prevent inline data from being created since we are going to allocate
++ * blocks for DIO. We know the inode does not currently have inline data
++ * because ext4_should_use_dio() checked for it, but we have to clear
++ * the state flag before the write checks because a lock cycle could
++ * introduce races with other writers.
++ */
++ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
++
+ ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend,
+ &unwritten, &dio_flags);
+ if (ret <= 0)
+ return ret;
+
+- /*
+- * Make sure inline data cannot be created anymore since we are going
+- * to allocate blocks for DIO. We know the inode does not have any
+- * inline data now because ext4_dio_supported() checked for that.
+- */
+- ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+-
+ offset = iocb->ki_pos;
+ count = ret;
+
+@@ -606,9 +576,16 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ dio_flags, NULL, 0);
+ if (ret == -ENOTBLK)
+ ret = 0;
+-
+- if (extend)
+- ret = ext4_handle_inode_extension(inode, offset, ret, count);
++ if (extend) {
++ /*
++		 * We always perform an extending DIO write synchronously, so by
++		 * now the IO is completed and ext4_handle_inode_extension()
++		 * was called. Clean up the inode in case of error or a race with
++ * writeback of delalloc blocks.
++ */
++ WARN_ON_ONCE(ret == -EIOCBQUEUED);
++ ext4_inode_extension_cleanup(inode, ret);
++ }
+
+ out:
+ if (ilock_shared)
+@@ -689,8 +666,10 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
+
+ ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
+
+- if (extend)
+- ret = ext4_handle_inode_extension(inode, offset, ret, count);
++ if (extend) {
++ ret = ext4_handle_inode_extension(inode, offset, ret);
++ ext4_inode_extension_cleanup(inode, ret);
++ }
+ out:
+ inode_unlock(inode);
+ if (ret > 0)
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 4ce35f1c8b0a8..d7732320431ac 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -789,10 +789,22 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
+ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+ {
++ int ret = 0;
++
+ ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
+ inode->i_ino, create);
+- return _ext4_get_block(inode, iblock, bh_result,
++ ret = _ext4_get_block(inode, iblock, bh_result,
+ EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
++
++ /*
++ * If the buffer is marked unwritten, mark it as new to make sure it is
++ * zeroed out correctly in case of partial writes. Otherwise, there is
++ * a chance of stale data getting exposed.
++ */
++ if (ret == 0 && buffer_unwritten(bh_result))
++ set_buffer_new(bh_result);
++
++ return ret;
+ }
+
+ /* Maximum number of blocks we map for direct IO at once. */
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 1e599305d85fa..a7b8558c0d093 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -417,8 +417,6 @@ static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
+
+ static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+ ext4_group_t group);
+-static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
+- ext4_group_t group);
+ static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
+
+ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
+@@ -1361,17 +1359,17 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
+ * We place the buddy block and bitmap block
+ * close together
+ */
++ grinfo = ext4_get_group_info(sb, group);
++ if (!grinfo) {
++ err = -EFSCORRUPTED;
++ goto out;
++ }
+ if ((first_block + i) & 1) {
+ /* this is block of buddy */
+ BUG_ON(incore == NULL);
+ mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
+ group, page->index, i * blocksize);
+ trace_ext4_mb_buddy_bitmap_load(sb, group);
+- grinfo = ext4_get_group_info(sb, group);
+- if (!grinfo) {
+- err = -EFSCORRUPTED;
+- goto out;
+- }
+ grinfo->bb_fragments = 0;
+ memset(grinfo->bb_counters, 0,
+ sizeof(*grinfo->bb_counters) *
+@@ -1398,7 +1396,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
+
+ /* mark all preallocated blks used in in-core bitmap */
+ ext4_mb_generate_from_pa(sb, data, group);
+- ext4_mb_generate_from_freelist(sb, data, group);
++ WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root));
+ ext4_unlock_group(sb, group);
+
+ /* set incore so that the buddy information can be
+@@ -4958,31 +4956,6 @@ try_group_pa:
+ return false;
+ }
+
+-/*
+- * the function goes through all block freed in the group
+- * but not yet committed and marks them used in in-core bitmap.
+- * buddy must be generated from this bitmap
+- * Need to be called with the ext4 group lock held
+- */
+-static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
+- ext4_group_t group)
+-{
+- struct rb_node *n;
+- struct ext4_group_info *grp;
+- struct ext4_free_data *entry;
+-
+- grp = ext4_get_group_info(sb, group);
+- if (!grp)
+- return;
+- n = rb_first(&(grp->bb_free_root));
+-
+- while (n) {
+- entry = rb_entry(n, struct ext4_free_data, efd_node);
+- mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
+- n = rb_next(n);
+- }
+-}
+-
+ /*
+ * the function goes through all preallocation in this group and marks them
+ * used in in-core bitmap. buddy must be generated from this bitmap
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 0361c20910def..667381180b261 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -560,13 +560,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
+ if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
+ goto handle_itb;
+
+- if (meta_bg == 1) {
+- ext4_group_t first_group;
+- first_group = ext4_meta_bg_first_group(sb, group);
+- if (first_group != group + 1 &&
+- first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
+- goto handle_itb;
+- }
++ if (meta_bg == 1)
++ goto handle_itb;
+
+ block = start + ext4_bg_has_super(sb, group);
+ /* Copy all of the GDT blocks into the backup in this group */
+@@ -1191,8 +1186,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
+ ext4_group_first_block_no(sb, group));
+ BUFFER_TRACE(bh, "get_write_access");
+ if ((err = ext4_journal_get_write_access(handle, sb, bh,
+- EXT4_JTR_NONE)))
++ EXT4_JTR_NONE))) {
++ brelse(bh);
+ break;
++ }
+ lock_buffer(bh);
+ memcpy(bh->b_data, data, size);
+ if (rest)
+@@ -1601,6 +1598,8 @@ exit_journal:
+ int gdb_num_end = ((group + flex_gd->count - 1) /
+ EXT4_DESC_PER_BLOCK(sb));
+ int meta_bg = ext4_has_feature_meta_bg(sb);
++ sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
++ ext4_group_first_block_no(sb, 0);
+ sector_t old_gdb = 0;
+
+ update_backups(sb, ext4_group_first_block_no(sb, 0),
+@@ -1612,8 +1611,8 @@ exit_journal:
+ gdb_num);
+ if (old_gdb == gdb_bh->b_blocknr)
+ continue;
+- update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
+- gdb_bh->b_size, meta_bg);
++ update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
++ gdb_bh->b_data, gdb_bh->b_size, meta_bg);
+ old_gdb = gdb_bh->b_blocknr;
+ }
+ }
+@@ -1980,9 +1979,7 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
+
+ errout:
+ ret = ext4_journal_stop(handle);
+- if (!err)
+- err = ret;
+- return ret;
++ return err ? err : ret;
+
+ invalid_resize_inode:
+ ext4_error(sb, "corrupted/inconsistent resize inode");
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index dbebd8b3127e5..d062383ea50ef 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -768,7 +768,8 @@ static void update_super_work(struct work_struct *work)
+ */
+ if (!sb_rdonly(sbi->s_sb) && journal) {
+ struct buffer_head *sbh = sbi->s_sbh;
+- bool call_notify_err;
++ bool call_notify_err = false;
++
+ handle = jbd2_journal_start(journal, 1);
+ if (IS_ERR(handle))
+ goto write_directly;
+@@ -6442,6 +6443,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ struct ext4_mount_options old_opts;
+ ext4_group_t g;
+ int err = 0;
++ int alloc_ctx;
+ #ifdef CONFIG_QUOTA
+ int enable_quota = 0;
+ int i, j;
+@@ -6482,7 +6484,16 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+
+ }
+
++ /*
++ * Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause
++ * two calls to ext4_should_dioread_nolock() to return inconsistent
++	 * values, triggering WARN_ON in ext4_add_complete_io(). We grab
++	 * s_writepages_rwsem here to avoid a race between writepages ops
++	 * and remount.
++ */
++ alloc_ctx = ext4_writepages_down_write(sb);
+ ext4_apply_options(fc, sb);
++ ext4_writepages_up_write(sb, alloc_ctx);
+
+ if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
+ test_opt(sb, JOURNAL_CHECKSUM)) {
+@@ -6700,6 +6711,8 @@ restore_opts:
+ if (sb_rdonly(sb) && !(old_sb_flags & SB_RDONLY) &&
+ sb_any_quota_suspended(sb))
+ dquot_resume(sb, -1);
++
++ alloc_ctx = ext4_writepages_down_write(sb);
+ sb->s_flags = old_sb_flags;
+ sbi->s_mount_opt = old_opts.s_mount_opt;
+ sbi->s_mount_opt2 = old_opts.s_mount_opt2;
+@@ -6708,6 +6721,8 @@ restore_opts:
+ sbi->s_commit_interval = old_opts.s_commit_interval;
+ sbi->s_min_batch_time = old_opts.s_min_batch_time;
+ sbi->s_max_batch_time = old_opts.s_max_batch_time;
++ ext4_writepages_up_write(sb, alloc_ctx);
++
+ if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
+ ext4_release_system_zone(sb);
+ #ifdef CONFIG_QUOTA
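
The remount fix wraps both the option application and the error-path restore in s_writepages_rwsem, so writeback never observes DELALLOC and DIOREAD_NOLOCK in a half-changed state. The same discipline in portable pthreads form (a sketch; the flag names are placeholders):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_rwlock_t opts_lock = PTHREAD_RWLOCK_INITIALIZER;
    static bool opt_delalloc, opt_dioread_nolock;

    static void remount_apply(bool delalloc, bool dioread_nolock)
    {
        pthread_rwlock_wrlock(&opts_lock);
        opt_delalloc = delalloc;             /* both flags flip atomically */
        opt_dioread_nolock = dioread_nolock; /* with respect to readers */
        pthread_rwlock_unlock(&opts_lock);
    }

    static bool writeback_sees_consistent_opts(void)
    {
        bool nolock;

        pthread_rwlock_rdlock(&opts_lock);
        nolock = opt_delalloc && opt_dioread_nolock;
        pthread_rwlock_unlock(&opts_lock);
        return nolock;
    }
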
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index d820801f473e5..7514661bbfbb1 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -1976,7 +1976,7 @@ void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
+ int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
+ {
+ dev_t dev = sbi->sb->s_bdev->bd_dev;
+- char slab_name[32];
++ char slab_name[35];
+
+ if (!f2fs_sb_has_compression(sbi))
+ return 0;
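
The compress.c fix grows slab_name from 32 to 35 bytes so the formatted name cannot overflow. Assuming the `f2fs_page_array_entry-%u:%u` format with 12-bit major and 20-bit minor device numbers, the worst case is 22 + 4 + 1 + 7 characters plus the NUL, exactly 35. A quick stand-alone check of that arithmetic using snprintf(NULL, 0, ...), which returns the formatted length:

    #include <stdio.h>
    #include <assert.h>

    int main(void)
    {
        unsigned int major = 4095, minor = 1048575;  /* 12-bit / 20-bit maxima */
        int need = snprintf(NULL, 0, "f2fs_page_array_entry-%u:%u",
                            major, minor);

        assert(need >= 0 && need + 1 <= 35);         /* fits the new buffer */
        printf("worst case: %d chars + NUL\n", need);
        return 0;
    }
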
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 916e317ac925f..1ac34eb49a0e8 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2344,8 +2344,10 @@ skip_reading_dnode:
+ f2fs_wait_on_block_writeback(inode, blkaddr);
+
+ if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
+- if (atomic_dec_and_test(&dic->remaining_pages))
++ if (atomic_dec_and_test(&dic->remaining_pages)) {
+ f2fs_decompress_cluster(dic, true);
++ break;
++ }
+ continue;
+ }
+
+@@ -3023,7 +3025,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ {
+ int ret = 0;
+ int done = 0, retry = 0;
+- struct page *pages[F2FS_ONSTACK_PAGES];
++ struct page *pages_local[F2FS_ONSTACK_PAGES];
++ struct page **pages = pages_local;
+ struct folio_batch fbatch;
+ struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ struct bio *bio = NULL;
+@@ -3047,6 +3050,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ #endif
+ int nr_folios, p, idx;
+ int nr_pages;
++ unsigned int max_pages = F2FS_ONSTACK_PAGES;
+ pgoff_t index;
+ pgoff_t end; /* Inclusive */
+ pgoff_t done_index;
+@@ -3056,6 +3060,15 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ int submitted = 0;
+ int i;
+
++#ifdef CONFIG_F2FS_FS_COMPRESSION
++ if (f2fs_compressed_file(inode) &&
++ 1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
++ pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
++ cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
++ max_pages = 1 << cc.log_cluster_size;
++ }
++#endif
++
+ folio_batch_init(&fbatch);
+
+ if (get_dirty_pages(mapping->host) <=
+@@ -3101,7 +3114,7 @@ again:
+ add_more:
+ pages[nr_pages] = folio_page(folio, idx);
+ folio_get(folio);
+- if (++nr_pages == F2FS_ONSTACK_PAGES) {
++ if (++nr_pages == max_pages) {
+ index = folio->index + idx + 1;
+ folio_batch_release(&fbatch);
+ goto write;
+@@ -3283,6 +3296,11 @@ next:
+ if (bio)
+ f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
+
++#ifdef CONFIG_F2FS_FS_COMPRESSION
++ if (pages != pages_local)
++ kfree(pages);
++#endif
++
+ return ret;
+ }
+
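
The f2fs_write_cache_pages() change keeps the on-stack pages[] array for the common case but falls back to a heap allocation when a compressed cluster can hold more pages than F2FS_ONSTACK_PAGES. The shape of that small-buffer optimization in generic C (the capacity is an illustrative constant):

    #include <stdlib.h>

    #define ONSTACK_SLOTS 64                /* illustrative capacity */

    static int process_batch(size_t need)
    {
        void *local[ONSTACK_SLOTS];
        void **slots = local;
        int ret = 0;

        if (need > ONSTACK_SLOTS) {
            slots = calloc(need, sizeof(*slots));
            if (!slots)
                return -1;                  /* no heap fallback available */
        }

        /* ... fill and drain slots[0..need-1] ... */

        if (slots != local)
            free(slots);                    /* only free the heap fallback */
        return ret;
    }
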
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index 0e2d49140c07f..ad8dfac73bd44 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -74,40 +74,14 @@ static void __set_extent_info(struct extent_info *ei,
+ }
+ }
+
+-static bool __may_read_extent_tree(struct inode *inode)
+-{
+- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-
+- if (!test_opt(sbi, READ_EXTENT_CACHE))
+- return false;
+- if (is_inode_flag_set(inode, FI_NO_EXTENT))
+- return false;
+- if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
+- !f2fs_sb_has_readonly(sbi))
+- return false;
+- return S_ISREG(inode->i_mode);
+-}
+-
+-static bool __may_age_extent_tree(struct inode *inode)
+-{
+- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-
+- if (!test_opt(sbi, AGE_EXTENT_CACHE))
+- return false;
+- if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
+- return false;
+- if (file_is_cold(inode))
+- return false;
+-
+- return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
+-}
+-
+ static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
+ {
+ if (type == EX_READ)
+- return __may_read_extent_tree(inode);
+- else if (type == EX_BLOCK_AGE)
+- return __may_age_extent_tree(inode);
++ return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
++ S_ISREG(inode->i_mode);
++ if (type == EX_BLOCK_AGE)
++ return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
++ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
+ return false;
+ }
+
+@@ -120,7 +94,22 @@ static bool __may_extent_tree(struct inode *inode, enum extent_type type)
+ if (list_empty(&F2FS_I_SB(inode)->s_list))
+ return false;
+
+- return __init_may_extent_tree(inode, type);
++ if (!__init_may_extent_tree(inode, type))
++ return false;
++
++ if (type == EX_READ) {
++ if (is_inode_flag_set(inode, FI_NO_EXTENT))
++ return false;
++ if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
++ !f2fs_sb_has_readonly(F2FS_I_SB(inode)))
++ return false;
++ } else if (type == EX_BLOCK_AGE) {
++ if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
++ return false;
++ if (file_is_cold(inode))
++ return false;
++ }
++ return true;
+ }
+
+ static void __try_update_largest_extent(struct extent_tree *et,
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index ca5904129b162..a06f03d23762f 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -3258,6 +3258,7 @@ int f2fs_precache_extents(struct inode *inode)
+ return -EOPNOTSUPP;
+
+ map.m_lblk = 0;
++ map.m_pblk = 0;
+ map.m_next_pgofs = NULL;
+ map.m_next_extent = &m_next_extent;
+ map.m_seg_type = NO_CHECK_TYPE;
+@@ -4005,6 +4006,15 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+ F2FS_I(inode)->i_compress_algorithm = option.algorithm;
+ F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
+ F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
++ /* Set default level */
++ if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
++ F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
++ else
++ F2FS_I(inode)->i_compress_level = 0;
++ /* Adjust mount option level */
++ if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
++ F2FS_OPTION(sbi).compress_level)
++ F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
+ f2fs_mark_inode_dirty_sync(inode, true);
+
+ if (!f2fs_is_compress_backend_ready(inode))
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index ee2e1dd64f256..8b30f11f37b46 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -1467,7 +1467,8 @@ page_hit:
+ ofs_of_node(page), cpver_of_node(page),
+ next_blkaddr_of_node(page));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+- err = -EINVAL;
++ f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
++ err = -EFSCORRUPTED;
+ out_err:
+ ClearPageUptodate(page);
+ out_put_err:
+@@ -2389,7 +2390,7 @@ static int scan_nat_page(struct f2fs_sb_info *sbi,
+ blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
+
+ if (blk_addr == NEW_ADDR)
+- return -EINVAL;
++ return -EFSCORRUPTED;
+
+ if (blk_addr == NULL_ADDR) {
+ add_free_nid(sbi, start_nid, true, true);
+@@ -2504,7 +2505,14 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+
+ if (ret) {
+ f2fs_up_read(&nm_i->nat_tree_lock);
+- f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
++
++ if (ret == -EFSCORRUPTED) {
++ f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
++ set_sbi_flag(sbi, SBI_NEED_FSCK);
++ f2fs_handle_error(sbi,
++ ERROR_INCONSISTENT_NAT);
++ }
++
+ return ret;
+ }
+ }
+@@ -2743,7 +2751,9 @@ recover_xnid:
+ f2fs_update_inode_page(inode);
+
+ /* 3: update and set xattr node page dirty */
+- memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
++ if (page)
++ memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
++ VALID_XATTR_BLOCK_SIZE);
+
+ set_page_dirty(xpage);
+ f2fs_put_page(xpage, 1);
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index a8c8232852bb1..bc303a0522155 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -547,6 +547,29 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
+ }
+
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
++static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
++ const char *new_ext, bool is_ext)
++{
++ unsigned char (*ext)[F2FS_EXTENSION_LEN];
++ int ext_cnt;
++ int i;
++
++ if (is_ext) {
++ ext = F2FS_OPTION(sbi).extensions;
++ ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
++ } else {
++ ext = F2FS_OPTION(sbi).noextensions;
++ ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
++ }
++
++ for (i = 0; i < ext_cnt; i++) {
++ if (!strcasecmp(new_ext, ext[i]))
++ return true;
++ }
++
++ return false;
++}
++
+ /*
+ * 1. The same extension name cannot not appear in both compress and non-compress extension
+ * at the same time.
+@@ -1149,6 +1172,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ return -EINVAL;
+ }
+
++ if (is_compress_extension_exist(sbi, name, true)) {
++ kfree(name);
++ break;
++ }
++
+ strcpy(ext[ext_cnt], name);
+ F2FS_OPTION(sbi).compress_ext_cnt++;
+ kfree(name);
+@@ -1173,6 +1201,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ return -EINVAL;
+ }
+
++ if (is_compress_extension_exist(sbi, name, false)) {
++ kfree(name);
++ break;
++ }
++
+ strcpy(noext[noext_cnt], name);
+ F2FS_OPTION(sbi).nocompress_ext_cnt++;
+ kfree(name);
+@@ -1629,7 +1662,7 @@ static void f2fs_put_super(struct super_block *sb)
+
+ f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
+
+- if (err) {
++ if (err || f2fs_cp_error(sbi)) {
+ truncate_inode_pages_final(NODE_MAPPING(sbi));
+ truncate_inode_pages_final(META_MAPPING(sbi));
+ }
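
parse_options() now skips a compress/nocompress extension that is already present instead of storing it twice. A stand-alone version of the case-insensitive duplicate scan; the 8-byte slot mirrors F2FS_EXTENSION_LEN:

    #include <strings.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define EXT_LEN 8                       /* mirrors F2FS_EXTENSION_LEN */

    static bool ext_exists(const char exts[][EXT_LEN], size_t cnt,
                           const char *new_ext)
    {
        for (size_t i = 0; i < cnt; i++)
            if (!strcasecmp(new_ext, exts[i]))
                return true;
        return false;
    }

    int main(void)
    {
        const char exts[][EXT_LEN] = { "mp4", "SO" };

        printf("%d %d\n", ext_exists(exts, 2, "so"),
                          ext_exists(exts, 2, "txt"));
        return 0;
    }
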
+diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
+index a657284faee30..465d145360de3 100644
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -364,10 +364,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
+
+ *xe = __find_xattr(cur_addr, last_txattr_addr, NULL, index, len, name);
+ if (!*xe) {
+- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++ f2fs_err(F2FS_I_SB(inode), "lookup inode (%lu) has corrupted xattr",
+ inode->i_ino);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+- err = -EFSCORRUPTED;
++ err = -ENODATA;
+ f2fs_handle_error(F2FS_I_SB(inode),
+ ERROR_CORRUPTED_XATTR);
+ goto out;
+@@ -584,13 +584,12 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+
+ if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+ (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
+- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++ f2fs_err(F2FS_I_SB(inode), "list inode (%lu) has corrupted xattr",
+ inode->i_ino);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+- error = -EFSCORRUPTED;
+ f2fs_handle_error(F2FS_I_SB(inode),
+ ERROR_CORRUPTED_XATTR);
+- goto cleanup;
++ break;
+ }
+
+ if (!prefix)
+@@ -650,7 +649,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+
+ if (size > MAX_VALUE_LEN(inode))
+ return -E2BIG;
+-
++retry:
+ error = read_all_xattrs(inode, ipage, &base_addr);
+ if (error)
+ return error;
+@@ -660,7 +659,14 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+ /* find entry with wanted name. */
+ here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name);
+ if (!here) {
+- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++ if (!F2FS_I(inode)->i_xattr_nid) {
++ f2fs_notice(F2FS_I_SB(inode),
++ "recover xattr in inode (%lu)", inode->i_ino);
++ f2fs_recover_xattr_data(inode, NULL);
++ kfree(base_addr);
++ goto retry;
++ }
++ f2fs_err(F2FS_I_SB(inode), "set inode (%lu) has corrupted xattr",
+ inode->i_ino);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ error = -EFSCORRUPTED;
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index c1af01b2c42d7..1767493dffda7 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -613,6 +613,24 @@ out_free:
+ kfree(isw);
+ }
+
++static bool isw_prepare_wbs_switch(struct inode_switch_wbs_context *isw,
++ struct list_head *list, int *nr)
++{
++ struct inode *inode;
++
++ list_for_each_entry(inode, list, i_io_list) {
++ if (!inode_prepare_wbs_switch(inode, isw->new_wb))
++ continue;
++
++ isw->inodes[*nr] = inode;
++ (*nr)++;
++
++ if (*nr >= WB_MAX_INODES_PER_ISW - 1)
++ return true;
++ }
++ return false;
++}
++
+ /**
+ * cleanup_offline_cgwb - detach associated inodes
+ * @wb: target wb
+@@ -625,7 +643,6 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
+ {
+ struct cgroup_subsys_state *memcg_css;
+ struct inode_switch_wbs_context *isw;
+- struct inode *inode;
+ int nr;
+ bool restart = false;
+
+@@ -647,17 +664,17 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
+
+ nr = 0;
+ spin_lock(&wb->list_lock);
+- list_for_each_entry(inode, &wb->b_attached, i_io_list) {
+- if (!inode_prepare_wbs_switch(inode, isw->new_wb))
+- continue;
+-
+- isw->inodes[nr++] = inode;
+-
+- if (nr >= WB_MAX_INODES_PER_ISW - 1) {
+- restart = true;
+- break;
+- }
+- }
++ /*
++ * In addition to the inodes that have completed writeback, also switch
++ * cgwbs for inodes with only dirty timestamps. Otherwise, those
++ * inodes won't be written back for a long time when lazytime is
++ * enabled, thus pinning the dying cgwbs. It won't break the
++ * bandwidth restrictions, as writeback of inode metadata is not
++ * accounted for.
++ */
++ restart = isw_prepare_wbs_switch(isw, &wb->b_attached, &nr);
++ if (!restart)
++ restart = isw_prepare_wbs_switch(isw, &wb->b_dirty_time, &nr);
+ spin_unlock(&wb->list_lock);
+
+ /* no attached inodes? bail out */
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 0eac045079047..4e63fbb63151c 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -1866,16 +1866,24 @@ out:
+ int gfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
+ int mask)
+ {
++ int may_not_block = mask & MAY_NOT_BLOCK;
+ struct gfs2_inode *ip;
+ struct gfs2_holder i_gh;
++ struct gfs2_glock *gl;
+ int error;
+
+ gfs2_holder_mark_uninitialized(&i_gh);
+ ip = GFS2_I(inode);
+- if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
+- if (mask & MAY_NOT_BLOCK)
++ gl = rcu_dereference_check(ip->i_gl, !may_not_block);
++ if (unlikely(!gl)) {
++ /* inode is getting torn down, must be RCU mode */
++ WARN_ON_ONCE(!may_not_block);
++ return -ECHILD;
++ }
++ if (gfs2_glock_is_locked_by_me(gl) == NULL) {
++ if (may_not_block)
+ return -ECHILD;
+- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
++ error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+ if (error)
+ return error;
+ }
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index 33ca04733e933..dd64140ae6d7b 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -1281,10 +1281,8 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+
+ if (!sb_rdonly(sb)) {
+ error = init_threads(sdp);
+- if (error) {
+- gfs2_withdraw_delayed(sdp);
++ if (error)
+ goto fail_per_node;
+- }
+ }
+
+ error = gfs2_freeze_lock_shared(sdp);
+diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
+index 171b2713d2e5e..41d0232532a03 100644
+--- a/fs/gfs2/quota.c
++++ b/fs/gfs2/quota.c
+@@ -457,6 +457,17 @@ static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
+ (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
+ return 0;
+
++ /*
++ * If qd_change is 0, it means a pending quota change was negated.
++ * We should not sync it, but we still have a qd reference and slot
++ * reference taken by gfs2_quota_change -> do_qc that need to be put.
++ */
++ if (!qd->qd_change && test_and_clear_bit(QDF_CHANGE, &qd->qd_flags)) {
++ slot_put(qd);
++ qd_put(qd);
++ return 0;
++ }
++
+ if (!lockref_get_not_dead(&qd->qd_lockref))
+ return 0;
+
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 02d93da21b2b0..5f4ebe279aaae 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -602,13 +602,15 @@ restart:
+ }
+ spin_unlock(&sdp->sd_jindex_spin);
+
+- if (!sb_rdonly(sb)) {
++ if (!sb_rdonly(sb))
+ gfs2_make_fs_ro(sdp);
+- }
+- if (gfs2_withdrawn(sdp)) {
+- gfs2_destroy_threads(sdp);
++ else {
++ if (gfs2_withdrawn(sdp))
++ gfs2_destroy_threads(sdp);
++
+ gfs2_quota_cleanup(sdp);
+ }
++
+ WARN_ON(gfs2_withdrawing(sdp));
+
+ /* At this point, we're through modifying the disk */
+@@ -1550,7 +1552,7 @@ out:
+ wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
+ gfs2_glock_add_to_lru(ip->i_gl);
+ gfs2_glock_put_eventually(ip->i_gl);
+- ip->i_gl = NULL;
++ rcu_assign_pointer(ip->i_gl, NULL);
+ }
+ }
+
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 316c4cebd3f3d..60fce26ff9378 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -295,7 +295,7 @@ static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t byt
+ size_t res = 0;
+
+ /* First subpage to start the loop. */
+- page += offset / PAGE_SIZE;
++ page = nth_page(page, offset / PAGE_SIZE);
+ offset %= PAGE_SIZE;
+ while (1) {
+ if (is_raw_hwpoison_page_in_hugepage(page))
+@@ -309,7 +309,7 @@ static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t byt
+ break;
+ offset += n;
+ if (offset == PAGE_SIZE) {
+- page++;
++ page = nth_page(page, 1);
+ offset = 0;
+ }
+ }
+diff --git a/fs/inode.c b/fs/inode.c
+index 84bc3c76e5ccb..ae1a6410b53d7 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -215,6 +215,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
+ lockdep_set_class_and_name(&mapping->invalidate_lock,
+ &sb->s_type->invalidate_lock_key,
+ "mapping.invalidate_lock");
++ if (sb->s_iflags & SB_I_STABLE_WRITES)
++ mapping_set_stable_writes(mapping);
+ inode->i_private = NULL;
+ inode->i_mapping = mapping;
+ INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index c269a7d29a465..5b771a3d8d9ae 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -289,6 +289,8 @@ int jbd2_journal_recover(journal_t *journal)
+ journal_superblock_t * sb;
+
+ struct recovery_info info;
++ errseq_t wb_err;
++ struct address_space *mapping;
+
+ memset(&info, 0, sizeof(info));
+ sb = journal->j_superblock;
+@@ -306,6 +308,9 @@ int jbd2_journal_recover(journal_t *journal)
+ return 0;
+ }
+
++ wb_err = 0;
++ mapping = journal->j_fs_dev->bd_inode->i_mapping;
++ errseq_check_and_advance(&mapping->wb_err, &wb_err);
+ err = do_one_pass(journal, &info, PASS_SCAN);
+ if (!err)
+ err = do_one_pass(journal, &info, PASS_REVOKE);
+@@ -327,6 +332,9 @@ int jbd2_journal_recover(journal_t *journal)
+
+ jbd2_journal_clear_revoke(journal);
+ err2 = sync_blockdev(journal->j_fs_dev);
++ if (!err)
++ err = err2;
++ err2 = errseq_check_and_advance(&mapping->wb_err, &wb_err);
+ if (!err)
+ err = err2;
+ /* Make sure all replayed data is on permanent storage */
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 88afd108c2dd2..11c77757ead9e 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -87,7 +87,7 @@ static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
+ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
+ static int dbFindBits(u32 word, int l2nb);
+ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
+-static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
++static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl);
+ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ int nblocks);
+ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
+@@ -180,7 +180,8 @@ int dbMount(struct inode *ipbmap)
+ bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
+
+ bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
+- if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) {
++ if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE ||
++ bmp->db_l2nbperpage < 0) {
+ err = -EINVAL;
+ goto err_release_metapage;
+ }
+@@ -194,6 +195,12 @@ int dbMount(struct inode *ipbmap)
+ bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
+ bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
+ bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
++ if (bmp->db_maxag >= MAXAG || bmp->db_maxag < 0 ||
++ bmp->db_agpref >= MAXAG || bmp->db_agpref < 0) {
++ err = -EINVAL;
++ goto err_release_metapage;
++ }
++
+ bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
+ bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
+ bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+@@ -1710,7 +1717,7 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
+ * dbFindLeaf() returns the index of the leaf at which
+ * free space was found.
+ */
+- rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx);
++ rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx, true);
+
+ /* release the buffer.
+ */
+@@ -1957,7 +1964,7 @@ dbAllocDmapLev(struct bmap * bmp,
+ * free space. if sufficient free space is found, dbFindLeaf()
+ * returns the index of the leaf at which free space was found.
+ */
+- if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
++ if (dbFindLeaf((dmtree_t *) &dp->tree, l2nb, &leafidx, false))
+ return -ENOSPC;
+
+ if (leafidx < 0)
+@@ -2921,14 +2928,18 @@ static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
+ * leafidx - return pointer to be set to the index of the leaf
+ * describing at least l2nb free blocks if sufficient
+ * free blocks are found.
++ * is_ctl - determines if the tree is of type ctl
+ *
+ * RETURN VALUES:
+ * 0 - success
+ * -ENOSPC - insufficient free blocks.
+ */
+-static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
++static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl)
+ {
+ int ti, n = 0, k, x = 0;
++ int max_size;
++
++ max_size = is_ctl ? CTLTREESIZE : TREESIZE;
+
+ /* first check the root of the tree to see if there is
+ * sufficient free space.
+@@ -2949,6 +2960,8 @@ static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
+ /* sufficient free space found. move to the next
+ * level (or quit if this is the last level).
+ */
++ if (x + n > max_size)
++ return -ENOSPC;
+ if (l2nb <= tp->dmt_stree[x + n])
+ break;
+ }
+diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
+index 923a58422c461..1b267eec3f367 100644
+--- a/fs/jfs/jfs_imap.c
++++ b/fs/jfs/jfs_imap.c
+@@ -1320,7 +1320,7 @@ diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
+ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
+ {
+ int rc, ino, iagno, addext, extno, bitno, sword;
+- int nwords, rem, i, agno;
++ int nwords, rem, i, agno, dn_numag;
+ u32 mask, inosmap, extsmap;
+ struct inode *ipimap;
+ struct metapage *mp;
+@@ -1356,6 +1356,9 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
+
+ /* get the ag number of this iag */
+ agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
++ dn_numag = JFS_SBI(pip->i_sb)->bmap->db_numag;
++ if (agno < 0 || agno > dn_numag)
++ return -EIO;
+
+ if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
+ /*
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 37f2d34ee090b..189447cf4acf5 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -396,6 +396,8 @@ static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
+ return -EINVAL;
+ }
+
++ /* In this case, ->private_data is protected by f_pos_lock */
++ file->private_data = NULL;
+ return vfs_setpos(file, offset, U32_MAX);
+ }
+
+@@ -425,7 +427,7 @@ static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
+ inode->i_ino, fs_umode_to_dtype(inode->i_mode));
+ }
+
+-static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
++static void *offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
+ {
+ struct offset_ctx *so_ctx = inode->i_op->get_offset_ctx(inode);
+ XA_STATE(xas, &so_ctx->xa, ctx->pos);
+@@ -434,7 +436,7 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
+ while (true) {
+ dentry = offset_find_next(&xas);
+ if (!dentry)
+- break;
++ return ERR_PTR(-ENOENT);
+
+ if (!offset_dir_emit(ctx, dentry)) {
+ dput(dentry);
+@@ -444,6 +446,7 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
+ dput(dentry);
+ ctx->pos = xas.xa_index + 1;
+ }
++ return NULL;
+ }
+
+ /**
+@@ -476,7 +479,12 @@ static int offset_readdir(struct file *file, struct dir_context *ctx)
+ if (!dir_emit_dots(file, ctx))
+ return 0;
+
+- offset_iterate_dir(d_inode(dir), ctx);
++ /* In this case, ->private_data is protected by f_pos_lock */
++ if (ctx->pos == 2)
++ file->private_data = NULL;
++ else if (file->private_data == ERR_PTR(-ENOENT))
++ return 0;
++ file->private_data = offset_iterate_dir(d_inode(dir), ctx);
+ return 0;
+ }
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 5ee283eb9660b..0ff913b4e9e0b 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5622,7 +5622,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
+
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
+ nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
+- nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
++ nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
+ }
+
+ static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+@@ -5663,7 +5663,8 @@ static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_mess
+ data->res.server = server;
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
+ nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
+- nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
++ nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
++ NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
+ }
+
+ static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
+@@ -8934,6 +8935,7 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+
+ sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
+
++try_again:
+ /* Test connection for session trunking. Async exchange_id call */
+ task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
+ if (IS_ERR(task))
+@@ -8946,11 +8948,15 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+
+ if (status == 0)
+ rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
+- else if (rpc_clnt_xprt_switch_has_addr(clnt,
++ else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
+ (struct sockaddr *)&xprt->addr))
+ rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
+
+ rpc_put_task(task);
++ if (status == -NFS4ERR_DELAY) {
++ ssleep(1);
++ goto try_again;
++ }
+ }
+ EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
+
+diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
+index 929248c6ca84c..4cbe0434cbb8c 100644
+--- a/fs/nfsd/cache.h
++++ b/fs/nfsd/cache.h
+@@ -84,8 +84,8 @@ int nfsd_net_reply_cache_init(struct nfsd_net *nn);
+ void nfsd_net_reply_cache_destroy(struct nfsd_net *nn);
+ int nfsd_reply_cache_init(struct nfsd_net *);
+ void nfsd_reply_cache_shutdown(struct nfsd_net *);
+-int nfsd_cache_lookup(struct svc_rqst *rqstp,
+- struct nfsd_cacherep **cacherep);
++int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
++ unsigned int len, struct nfsd_cacherep **cacherep);
+ void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
+ int cachetype, __be32 *statp);
+ int nfsd_reply_cache_stats_show(struct seq_file *m, void *v);
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index ee9c923192e08..07bf219f9ae48 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -989,22 +989,21 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
+ struct net *net = SVC_NET(rqstp);
+ struct nfsd_file *new, *nf;
+- const struct cred *cred;
++ bool stale_retry = true;
+ bool open_retry = true;
+ struct inode *inode;
+ __be32 status;
+ int ret;
+
++retry:
+ status = fh_verify(rqstp, fhp, S_IFREG,
+ may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ if (status != nfs_ok)
+ return status;
+ inode = d_inode(fhp->fh_dentry);
+- cred = get_current_cred();
+
+-retry:
+ rcu_read_lock();
+- nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
++ nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
+ rcu_read_unlock();
+
+ if (nf) {
+@@ -1026,7 +1025,7 @@ retry:
+
+ rcu_read_lock();
+ spin_lock(&inode->i_lock);
+- nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
++ nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
+ if (unlikely(nf)) {
+ spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
+@@ -1058,6 +1057,7 @@ wait_for_construction:
+ goto construction_err;
+ }
+ open_retry = false;
++ fh_put(fhp);
+ goto retry;
+ }
+ this_cpu_inc(nfsd_file_cache_hits);
+@@ -1074,7 +1074,6 @@ out:
+ nfsd_file_check_write_error(nf);
+ *pnf = nf;
+ }
+- put_cred(cred);
+ trace_nfsd_file_acquire(rqstp, inode, may_flags, nf, status);
+ return status;
+
+@@ -1088,8 +1087,20 @@ open_file:
+ status = nfs_ok;
+ trace_nfsd_file_opened(nf, status);
+ } else {
+- status = nfsd_open_verified(rqstp, fhp, may_flags,
+- &nf->nf_file);
++ ret = nfsd_open_verified(rqstp, fhp, may_flags,
++ &nf->nf_file);
++ if (ret == -EOPENSTALE && stale_retry) {
++ stale_retry = false;
++ nfsd_file_unhash(nf);
++ clear_and_wake_up_bit(NFSD_FILE_PENDING,
++ &nf->nf_flags);
++ if (refcount_dec_and_test(&nf->nf_ref))
++ nfsd_file_free(nf);
++ nf = NULL;
++ fh_put(fhp);
++ goto retry;
++ }
++ status = nfserrno(ret);
+ trace_nfsd_file_open(nf, status);
+ }
+ } else
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 8534693eb6a49..529b3ed3b3177 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -2797,7 +2797,7 @@ static int client_opens_release(struct inode *inode, struct file *file)
+
+ /* XXX: alternatively, we could get/drop in seq start/stop */
+ drop_client(clp);
+- return 0;
++ return seq_release(inode, file);
+ }
+
+ static const struct file_operations client_states_fops = {
+diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
+index 80621a7095107..6cd36af2f97e1 100644
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -368,33 +368,52 @@ nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+ return freed;
+ }
+
+-/*
+- * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
++/**
++ * nfsd_cache_csum - Checksum incoming NFS Call arguments
++ * @buf: buffer containing a whole RPC Call message
++ * @start: starting byte of the NFS Call header
++ * @remaining: size of the NFS Call header, in bytes
++ *
++ * Compute a weak checksum of the leading bytes of an NFS procedure
++ * call header to help verify that a retransmitted Call matches an
++ * entry in the duplicate reply cache.
++ *
++ * To avoid assumptions about how the RPC message is laid out in
++ * @buf and what else it might contain (e.g., a GSS MIC suffix), the
++ * caller passes us the exact location and length of the NFS Call
++ * header.
++ *
++ * Returns a 32-bit checksum value, as defined in RFC 793.
+ */
+-static __wsum
+-nfsd_cache_csum(struct svc_rqst *rqstp)
++static __wsum nfsd_cache_csum(struct xdr_buf *buf, unsigned int start,
++ unsigned int remaining)
+ {
++ unsigned int base, len;
++ struct xdr_buf subbuf;
++ __wsum csum = 0;
++ void *p;
+ int idx;
+- unsigned int base;
+- __wsum csum;
+- struct xdr_buf *buf = &rqstp->rq_arg;
+- const unsigned char *p = buf->head[0].iov_base;
+- size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
+- RC_CSUMLEN);
+- size_t len = min(buf->head[0].iov_len, csum_len);
++
++ if (remaining > RC_CSUMLEN)
++ remaining = RC_CSUMLEN;
++ if (xdr_buf_subsegment(buf, &subbuf, start, remaining))
++ return csum;
+
+ /* rq_arg.head first */
+- csum = csum_partial(p, len, 0);
+- csum_len -= len;
++ if (subbuf.head[0].iov_len) {
++ len = min_t(unsigned int, subbuf.head[0].iov_len, remaining);
++ csum = csum_partial(subbuf.head[0].iov_base, len, csum);
++ remaining -= len;
++ }
+
+ /* Continue into page array */
+- idx = buf->page_base / PAGE_SIZE;
+- base = buf->page_base & ~PAGE_MASK;
+- while (csum_len) {
+- p = page_address(buf->pages[idx]) + base;
+- len = min_t(size_t, PAGE_SIZE - base, csum_len);
++ idx = subbuf.page_base / PAGE_SIZE;
++ base = subbuf.page_base & ~PAGE_MASK;
++ while (remaining) {
++ p = page_address(subbuf.pages[idx]) + base;
++ len = min_t(unsigned int, PAGE_SIZE - base, remaining);
+ csum = csum_partial(p, len, csum);
+- csum_len -= len;
++ remaining -= len;
+ base = 0;
+ ++idx;
+ }
+@@ -465,6 +484,8 @@ out:
+ /**
+ * nfsd_cache_lookup - Find an entry in the duplicate reply cache
+ * @rqstp: Incoming Call to find
++ * @start: starting byte in @rqstp->rq_arg of the NFS Call header
++ * @len: size of the NFS Call header, in bytes
+ * @cacherep: OUT: DRC entry for this request
+ *
+ * Try to find an entry matching the current call in the cache. When none
+@@ -478,7 +499,8 @@ out:
+ * %RC_REPLY: Reply from cache
+ * %RC_DROPIT: Do not process the request further
+ */
+-int nfsd_cache_lookup(struct svc_rqst *rqstp, struct nfsd_cacherep **cacherep)
++int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
++ unsigned int len, struct nfsd_cacherep **cacherep)
+ {
+ struct nfsd_net *nn;
+ struct nfsd_cacherep *rp, *found;
+@@ -494,7 +516,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, struct nfsd_cacherep **cacherep)
+ goto out;
+ }
+
+- csum = nfsd_cache_csum(rqstp);
++ csum = nfsd_cache_csum(&rqstp->rq_arg, start, len);
+
+ /*
+ * Since the common case is a cache miss followed by an insert,
+@@ -640,24 +662,17 @@ void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
+ return;
+ }
+
+-/*
+- * Copy cached reply to current reply buffer. Should always fit.
+- * FIXME as reply is in a page, we should just attach the page, and
+- * keep a refcount....
+- */
+ static int
+ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
+ {
+- struct kvec *vec = &rqstp->rq_res.head[0];
+-
+- if (vec->iov_len + data->iov_len > PAGE_SIZE) {
+- printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
+- data->iov_len);
+- return 0;
+- }
+- memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
+- vec->iov_len += data->iov_len;
+- return 1;
++ __be32 *p;
++
++ p = xdr_reserve_space(&rqstp->rq_res_stream, data->iov_len);
++ if (unlikely(!p))
++ return false;
++ memcpy(p, data->iov_base, data->iov_len);
++ xdr_commit_encode(&rqstp->rq_res_stream);
++ return true;
+ }
+
+ /*
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index c7af1095f6b54..a87e9ef613868 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -988,6 +988,8 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
+ const struct svc_procedure *proc = rqstp->rq_procinfo;
+ __be32 *statp = rqstp->rq_accept_statp;
+ struct nfsd_cacherep *rp;
++ unsigned int start, len;
++ __be32 *nfs_reply;
+
+ /*
+ * Give the xdr decoder a chance to change this if it wants
+@@ -995,11 +997,18 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
+ */
+ rqstp->rq_cachetype = proc->pc_cachetype;
+
++ /*
++ * ->pc_decode advances the argument stream past the NFS
++ * Call header, so grab the header's starting location and
++ * size now for the call to nfsd_cache_lookup().
++ */
++ start = xdr_stream_pos(&rqstp->rq_arg_stream);
++ len = xdr_stream_remaining(&rqstp->rq_arg_stream);
+ if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
+ goto out_decode_err;
+
+ rp = NULL;
+- switch (nfsd_cache_lookup(rqstp, &rp)) {
++ switch (nfsd_cache_lookup(rqstp, start, len, &rp)) {
+ case RC_DOIT:
+ break;
+ case RC_REPLY:
+@@ -1008,6 +1017,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
+ goto out_dropit;
+ }
+
++ nfs_reply = xdr_inline_decode(&rqstp->rq_res_stream, 0);
+ *statp = proc->pc_func(rqstp);
+ if (test_bit(RQ_DROPME, &rqstp->rq_flags))
+ goto out_update_drop;
+@@ -1015,7 +1025,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
+ if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
+ goto out_encode_err;
+
+- nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp + 1);
++ nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, nfs_reply);
+ out_cached_reply:
+ return 1;
+
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 02f5fcaad03f3..b24462efa1781 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -823,7 +823,7 @@ int nfsd_open_break_lease(struct inode *inode, int access)
+ * and additional flags.
+ * N.B. After this call fhp needs an fh_put
+ */
+-static __be32
++static int
+ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ int may_flags, struct file **filp)
+ {
+@@ -831,14 +831,12 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ struct inode *inode;
+ struct file *file;
+ int flags = O_RDONLY|O_LARGEFILE;
+- __be32 err;
+- int host_err = 0;
++ int host_err = -EPERM;
+
+ path.mnt = fhp->fh_export->ex_path.mnt;
+ path.dentry = fhp->fh_dentry;
+ inode = d_inode(path.dentry);
+
+- err = nfserr_perm;
+ if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
+ goto out;
+
+@@ -847,7 +845,7 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+
+ host_err = nfsd_open_break_lease(inode, may_flags);
+ if (host_err) /* NOMEM or WOULDBLOCK */
+- goto out_nfserr;
++ goto out;
+
+ if (may_flags & NFSD_MAY_WRITE) {
+ if (may_flags & NFSD_MAY_READ)
+@@ -859,13 +857,13 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ file = dentry_open(&path, flags, current_cred());
+ if (IS_ERR(file)) {
+ host_err = PTR_ERR(file);
+- goto out_nfserr;
++ goto out;
+ }
+
+ host_err = ima_file_check(file, may_flags);
+ if (host_err) {
+ fput(file);
+- goto out_nfserr;
++ goto out;
+ }
+
+ if (may_flags & NFSD_MAY_64BIT_COOKIE)
+@@ -874,10 +872,8 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ file->f_mode |= FMODE_32BITHASH;
+
+ *filp = file;
+-out_nfserr:
+- err = nfserrno(host_err);
+ out:
+- return err;
++ return host_err;
+ }
+
+ __be32
+@@ -885,6 +881,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ int may_flags, struct file **filp)
+ {
+ __be32 err;
++ int host_err;
+ bool retried = false;
+
+ validate_process_creds();
+@@ -904,12 +901,13 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ retry:
+ err = fh_verify(rqstp, fhp, type, may_flags);
+ if (!err) {
+- err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
+- if (err == nfserr_stale && !retried) {
++ host_err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
++ if (host_err == -EOPENSTALE && !retried) {
+ retried = true;
+ fh_put(fhp);
+ goto retry;
+ }
++ err = nfserrno(host_err);
+ }
+ validate_process_creds();
+ return err;
+@@ -922,13 +920,13 @@ retry:
+ * @may_flags: internal permission flags
+ * @filp: OUT: open "struct file *"
+ *
+- * Returns an nfsstat value in network byte order.
++ * Returns zero on success, or a negative errno value.
+ */
+-__be32
++int
+ nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags,
+ struct file **filp)
+ {
+- __be32 err;
++ int err;
+
+ validate_process_creds();
+ err = __nfsd_open(rqstp, fhp, S_IFREG, may_flags, filp);
+diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
+index a6890ea7b765b..e3c29596f4df1 100644
+--- a/fs/nfsd/vfs.h
++++ b/fs/nfsd/vfs.h
+@@ -104,8 +104,8 @@ __be32 nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ int nfsd_open_break_lease(struct inode *, int);
+ __be32 nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t,
+ int, struct file **);
+-__be32 nfsd_open_verified(struct svc_rqst *, struct svc_fh *,
+- int, struct file **);
++int nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp,
++ int may_flags, struct file **filp);
+ __be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct file *file, loff_t offset,
+ unsigned long *count,
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index 83ef66644c213..fca29dba7b146 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -171,7 +171,7 @@ int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
+
+ type = ovl_path_real(dentry, &realpath);
+ old_cred = ovl_override_creds(dentry->d_sb);
+- err = vfs_getattr(&realpath, stat, request_mask, flags);
++ err = ovl_do_getattr(&realpath, stat, request_mask, flags);
+ if (err)
+ goto out;
+
+@@ -196,8 +196,8 @@ int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
+ (!is_dir ? STATX_NLINK : 0);
+
+ ovl_path_lower(dentry, &realpath);
+- err = vfs_getattr(&realpath, &lowerstat,
+- lowermask, flags);
++ err = ovl_do_getattr(&realpath, &lowerstat, lowermask,
++ flags);
+ if (err)
+ goto out;
+
+@@ -249,8 +249,8 @@ int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
+
+ ovl_path_lowerdata(dentry, &realpath);
+ if (realpath.dentry) {
+- err = vfs_getattr(&realpath, &lowerdatastat,
+- lowermask, flags);
++ err = ovl_do_getattr(&realpath, &lowerdatastat,
++ lowermask, flags);
+ if (err)
+ goto out;
+ } else {
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index 9817b2dcb132c..09ca82ed0f8ce 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -397,6 +397,14 @@ static inline bool ovl_open_flags_need_copy_up(int flags)
+ return ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC));
+ }
+
++static inline int ovl_do_getattr(const struct path *path, struct kstat *stat,
++ u32 request_mask, unsigned int flags)
++{
++ if (flags & AT_GETATTR_NOSEC)
++ return vfs_getattr_nosec(path, stat, request_mask, flags);
++ return vfs_getattr(path, stat, request_mask, flags);
++}
++
+ /* util.c */
+ int ovl_want_write(struct dentry *dentry);
+ void ovl_drop_write(struct dentry *dentry);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 3fa2416264a4e..c71d185980c08 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -1489,7 +1489,7 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
+ ovl_trusted_xattr_handlers;
+ sb->s_fs_info = ofs;
+ sb->s_flags |= SB_POSIXACL;
+- sb->s_iflags |= SB_I_SKIP_SYNC | SB_I_IMA_UNVERIFIABLE_SIGNATURE;
++ sb->s_iflags |= SB_I_SKIP_SYNC;
+
+ err = -ENOMEM;
+ root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe);
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index c88854df0b624..de484195f49fe 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -1576,7 +1576,6 @@ static const struct sysctl_alias sysctl_aliases[] = {
+ {"hung_task_panic", "kernel.hung_task_panic" },
+ {"numa_zonelist_order", "vm.numa_zonelist_order" },
+ {"softlockup_all_cpu_backtrace", "kernel.softlockup_all_cpu_backtrace" },
+- {"softlockup_panic", "kernel.softlockup_panic" },
+ { }
+ };
+
+@@ -1592,6 +1591,13 @@ static const char *sysctl_find_alias(char *param)
+ return NULL;
+ }
+
++bool sysctl_is_alias(char *param)
++{
++ const char *alias = sysctl_find_alias(param);
++
++ return alias != NULL;
++}
++
+ /* Set sysctl value passed on kernel command line. */
+ static int process_sysctl_arg(char *param, char *val,
+ const char *unused, void *arg)
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index e5bca9a004ccc..03425928d2fb3 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -464,6 +464,8 @@ out:
+ */
+ int pstore_register(struct pstore_info *psi)
+ {
++ char *new_backend;
++
+ if (backend && strcmp(backend, psi->name)) {
+ pr_warn("backend '%s' already in use: ignoring '%s'\n",
+ backend, psi->name);
+@@ -484,11 +486,16 @@ int pstore_register(struct pstore_info *psi)
+ return -EINVAL;
+ }
+
++ new_backend = kstrdup(psi->name, GFP_KERNEL);
++ if (!new_backend)
++ return -ENOMEM;
++
+ mutex_lock(&psinfo_lock);
+ if (psinfo) {
+ pr_warn("backend '%s' already loaded: ignoring '%s'\n",
+ psinfo->name, psi->name);
+ mutex_unlock(&psinfo_lock);
++ kfree(new_backend);
+ return -EBUSY;
+ }
+
+@@ -521,7 +528,7 @@ int pstore_register(struct pstore_info *psi)
+ * Update the module parameter backend, so it is visible
+ * through /sys/module/pstore/parameters/backend
+ */
+- backend = kstrdup(psi->name, GFP_KERNEL);
++ backend = new_backend;
+
+ pr_info("Registered %s as persistent store backend\n", psi->name);
+
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 31e897ad5e6a7..023b91b4e1f0a 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -2351,6 +2351,20 @@ static int vfs_setup_quota_inode(struct inode *inode, int type)
+ if (sb_has_quota_loaded(sb, type))
+ return -EBUSY;
+
++ /*
++ * Quota files should never be encrypted. They should be thought of as
++ * filesystem metadata, not user data. New-style internal quota files
++ * cannot be encrypted by users anyway, but old-style external quota
++ * files could potentially be incorrectly created in an encrypted
++ * directory, hence this explicit check. Some reasons why encrypted
++ * quota files don't work include: (1) some filesystems that support
++ * encryption don't handle it in their quota_read and quota_write, and
++ * (2) cleaning up encrypted quota files at unmount would need special
++ * consideration, as quota files are cleaned up later than user files.
++ */
++ if (IS_ENCRYPTED(inode))
++ return -EINVAL;
++
+ dqopt->files[type] = igrab(inode);
+ if (!dqopt->files[type])
+ return -EIO;
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index fe1bf5b6e0cb3..59f6b8e32cc97 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -32,7 +32,7 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ * fully cached or it may be in the process of
+ * being deleted due to a lease break.
+ */
+- if (!cfid->has_lease) {
++ if (!cfid->time || !cfid->has_lease) {
+ spin_unlock(&cfids->cfid_list_lock);
+ return NULL;
+ }
+@@ -193,10 +193,20 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ npath = path_no_prefix(cifs_sb, path);
+ if (IS_ERR(npath)) {
+ rc = PTR_ERR(npath);
+- kfree(utf16_path);
+- return rc;
++ goto out;
+ }
+
++ if (!npath[0]) {
++ dentry = dget(cifs_sb->root);
++ } else {
++ dentry = path_to_dentry(cifs_sb, npath);
++ if (IS_ERR(dentry)) {
++ rc = -ENOENT;
++ goto out;
++ }
++ }
++ cfid->dentry = dentry;
++
+ /*
+ * We do not hold the lock for the open because in case
+ * SMB2_open needs to reconnect.
+@@ -249,6 +259,15 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+
+ smb2_set_related(&rqst[1]);
+
++ /*
++ * Set @cfid->has_lease to true before sending out the compounded request so
++ * its lease reference can be put in cached_dir_lease_break() due to a
++ * potential lease break right after the request is sent or while @cfid
++ * is still being cached. Concurrent processes won't be able to use it yet
++ * due to @cfid->time being zero.
++ */
++ cfid->has_lease = true;
++
+ rc = compound_send_recv(xid, ses, server,
+ flags, 2, rqst,
+ resp_buftype, rsp_iov);
+@@ -263,6 +282,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ cfid->tcon = tcon;
+ cfid->is_open = true;
+
++ spin_lock(&cfids->cfid_list_lock);
++
+ o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
+ oparms.fid->persistent_fid = o_rsp->PersistentFileId;
+ oparms.fid->volatile_fid = o_rsp->VolatileFileId;
+@@ -270,18 +291,25 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
+ #endif /* CIFS_DEBUG2 */
+
+- if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
++ rc = -EINVAL;
++ if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
++ spin_unlock(&cfids->cfid_list_lock);
+ goto oshr_free;
++ }
+
+ smb2_parse_contexts(server, o_rsp,
+ &oparms.fid->epoch,
+ oparms.fid->lease_key, &oplock,
+ NULL, NULL);
+- if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
++ if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
++ spin_unlock(&cfids->cfid_list_lock);
+ goto oshr_free;
++ }
+ qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+- if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
++ if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
++ spin_unlock(&cfids->cfid_list_lock);
+ goto oshr_free;
++ }
+ if (!smb2_validate_and_copy_iov(
+ le16_to_cpu(qi_rsp->OutputBufferOffset),
+ sizeof(struct smb2_file_all_info),
+@@ -289,37 +317,24 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ (char *)&cfid->file_all_info))
+ cfid->file_all_info_is_valid = true;
+
+- if (!npath[0])
+- dentry = dget(cifs_sb->root);
+- else {
+- dentry = path_to_dentry(cifs_sb, npath);
+- if (IS_ERR(dentry)) {
+- rc = -ENOENT;
+- goto oshr_free;
+- }
+- }
+- spin_lock(&cfids->cfid_list_lock);
+- cfid->dentry = dentry;
+ cfid->time = jiffies;
+- cfid->has_lease = true;
+ spin_unlock(&cfids->cfid_list_lock);
++ /* At this point the directory handle is fully cached */
++ rc = 0;
+
+ oshr_free:
+- kfree(utf16_path);
+ SMB2_open_free(&rqst[0]);
+ SMB2_query_info_free(&rqst[1]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+- spin_lock(&cfids->cfid_list_lock);
+- if (!cfid->has_lease) {
+- if (rc) {
+- if (cfid->on_list) {
+- list_del(&cfid->entry);
+- cfid->on_list = false;
+- cfids->num_entries--;
+- }
+- rc = -ENOENT;
+- } else {
++ if (rc) {
++ spin_lock(&cfids->cfid_list_lock);
++ if (cfid->on_list) {
++ list_del(&cfid->entry);
++ cfid->on_list = false;
++ cfids->num_entries--;
++ }
++ if (cfid->has_lease) {
+ /*
+ * We are guaranteed to have two references at this
+ * point. One for the caller and one for a potential
+@@ -327,25 +342,24 @@ oshr_free:
+ * will be closed when the caller closes the cached
+ * handle.
+ */
++ cfid->has_lease = false;
+ spin_unlock(&cfids->cfid_list_lock);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ goto out;
+ }
++ spin_unlock(&cfids->cfid_list_lock);
+ }
+- spin_unlock(&cfids->cfid_list_lock);
++out:
+ if (rc) {
+ if (cfid->is_open)
+ SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ cfid->fid.volatile_fid);
+ free_cached_dir(cfid);
+- cfid = NULL;
+- }
+-out:
+- if (rc == 0) {
++ } else {
+ *ret_cfid = cfid;
+ atomic_inc(&tcon->num_remote_opens);
+ }
+-
++ kfree(utf16_path);
+ return rc;
+ }
+
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index 76922fcc4bc6e..16282ecfe17a7 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -279,6 +279,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+ struct cifs_server_iface *iface;
++ size_t iface_weight = 0, iface_min_speed = 0;
++ struct cifs_server_iface *last_iface = NULL;
+ int c, i, j;
+
+ seq_puts(m,
+@@ -452,6 +454,11 @@ skip_rdma:
+ seq_printf(m, "\n\n\tSessions: ");
+ i = 0;
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++ spin_lock(&ses->ses_lock);
++ if (ses->ses_status == SES_EXITING) {
++ spin_unlock(&ses->ses_lock);
++ continue;
++ }
+ i++;
+ if ((ses->serverDomain == NULL) ||
+ (ses->serverOS == NULL) ||
+@@ -472,6 +479,7 @@ skip_rdma:
+ ses->ses_count, ses->serverOS, ses->serverNOS,
+ ses->capabilities, ses->ses_status);
+ }
++ spin_unlock(&ses->ses_lock);
+
+ seq_printf(m, "\n\tSecurity type: %s ",
+ get_security_type_str(server->ops->select_sectype(server, ses->sectype)));
+@@ -536,11 +544,25 @@ skip_rdma:
+ "\tLast updated: %lu seconds ago",
+ ses->iface_count,
+ (jiffies - ses->iface_last_update) / HZ);
++
++ last_iface = list_last_entry(&ses->iface_list,
++ struct cifs_server_iface,
++ iface_head);
++ iface_min_speed = last_iface->speed;
++
+ j = 0;
+ list_for_each_entry(iface, &ses->iface_list,
+ iface_head) {
+ seq_printf(m, "\n\t%d)", ++j);
+ cifs_dump_iface(m, iface);
++
++ iface_weight = iface->speed / iface_min_speed;
++ seq_printf(m, "\t\tWeight (cur,total): (%zu,%zu)"
++ "\n\t\tAllocated channels: %u\n",
++ iface->weight_fulfilled,
++ iface_weight,
++ iface->num_channels);
++
+ if (is_ses_using_iface(ses, iface))
+ seq_puts(m, "\t\t[CONNECTED]\n");
+ }
+diff --git a/fs/smb/client/cifs_ioctl.h b/fs/smb/client/cifs_ioctl.h
+index 332588e77c311..26327442e383b 100644
+--- a/fs/smb/client/cifs_ioctl.h
++++ b/fs/smb/client/cifs_ioctl.h
+@@ -26,6 +26,11 @@ struct smb_mnt_fs_info {
+ __u64 cifs_posix_caps;
+ } __packed;
+
++struct smb_mnt_tcon_info {
++ __u32 tid;
++ __u64 session_id;
++} __packed;
++
+ struct smb_snapshot_array {
+ __u32 number_of_snapshots;
+ __u32 number_of_snapshots_returned;
+@@ -108,6 +113,7 @@ struct smb3_notify_info {
+ #define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)
+ #define CIFS_DUMP_FULL_KEY _IOWR(CIFS_IOCTL_MAGIC, 10, struct smb3_full_key_debug_info)
+ #define CIFS_IOC_NOTIFY_INFO _IOWR(CIFS_IOCTL_MAGIC, 11, struct smb3_notify_info)
++#define CIFS_IOC_GET_TCON_INFO _IOR(CIFS_IOCTL_MAGIC, 12, struct smb_mnt_tcon_info)
+ #define CIFS_IOC_SHUTDOWN _IOR('X', 125, __u32)
+
+ /*
+diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c
+index 6f3285f1dfee5..af7849e5974ff 100644
+--- a/fs/smb/client/cifs_spnego.c
++++ b/fs/smb/client/cifs_spnego.c
+@@ -64,8 +64,8 @@ struct key_type cifs_spnego_key_type = {
+ * strlen(";sec=ntlmsspi") */
+ #define MAX_MECH_STR_LEN 13
+
+-/* strlen of "host=" */
+-#define HOST_KEY_LEN 5
++/* strlen of ";host=" */
++#define HOST_KEY_LEN 6
+
+ /* strlen of ";ip4=" or ";ip6=" */
+ #define IP_KEY_LEN 5
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 22869cda13565..ea3a7a668b45f 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -1191,6 +1191,7 @@ const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
+
+ const struct inode_operations cifs_symlink_inode_ops = {
+ .get_link = cifs_get_link,
++ .setattr = cifs_setattr,
+ .permission = cifs_permission,
+ .listxattr = cifs_listxattr,
+ };
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 02082621d8e07..b8d1c19f67714 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -969,6 +969,8 @@ struct cifs_server_iface {
+ struct list_head iface_head;
+ struct kref refcount;
+ size_t speed;
++ size_t weight_fulfilled;
++ unsigned int num_channels;
+ unsigned int rdma_capable : 1;
+ unsigned int rss_capable : 1;
+ unsigned int is_active : 1; /* unset if non existent */
+@@ -2143,6 +2145,7 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
+ unsigned int len, skip;
+ unsigned int nents = 0;
+ unsigned long addr;
++ size_t data_size;
+ int i, j;
+
+ /*
+@@ -2158,17 +2161,21 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
+ * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
+ */
+ for (i = 0; i < num_rqst; i++) {
++ data_size = iov_iter_count(&rqst[i].rq_iter);
++
+ /* We really don't want a mixture of pinned and unpinned pages
+ * in the sglist. It's hard to keep track of which is what.
+ * Instead, we convert to a BVEC-type iterator higher up.
+ */
+- if (WARN_ON_ONCE(user_backed_iter(&rqst[i].rq_iter)))
++ if (data_size &&
++ WARN_ON_ONCE(user_backed_iter(&rqst[i].rq_iter)))
+ return -EIO;
+
+ /* We also don't want to have any extra refs or pins to clean
+ * up in the sglist.
+ */
+- if (WARN_ON_ONCE(iov_iter_extract_will_pin(&rqst[i].rq_iter)))
++ if (data_size &&
++ WARN_ON_ONCE(iov_iter_extract_will_pin(&rqst[i].rq_iter)))
+ return -EIO;
+
+ for (j = 0; j < rqst[i].rq_nvec; j++) {
+@@ -2184,7 +2191,8 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
+ }
+ skip = 0;
+ }
+- nents += iov_iter_npages(&rqst[i].rq_iter, INT_MAX);
++ if (data_size)
++ nents += iov_iter_npages(&rqst[i].rq_iter, INT_MAX);
+ }
+ nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE);
+ return nents;
+diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
+index e17222fec9d29..a75220db5c1e1 100644
+--- a/fs/smb/client/cifspdu.h
++++ b/fs/smb/client/cifspdu.h
+@@ -2570,7 +2570,7 @@ typedef struct {
+
+
+ struct win_dev {
+- unsigned char type[8]; /* IntxCHR or IntxBLK */
++ unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO */
+ __le64 major;
+ __le64 minor;
+ } __attribute__((packed));
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index 0c37eefa18a57..8e53abcfc5ec4 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -81,7 +81,7 @@ extern char *cifs_build_path_to_root(struct smb3_fs_context *ctx,
+ extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
+ char *cifs_build_devname(char *nodename, const char *prepath);
+ extern void delete_mid(struct mid_q_entry *mid);
+-extern void release_mid(struct mid_q_entry *mid);
++void __release_mid(struct kref *refcount);
+ extern void cifs_wake_up_task(struct mid_q_entry *mid);
+ extern int cifs_handle_standard(struct TCP_Server_Info *server,
+ struct mid_q_entry *mid);
+@@ -610,7 +610,7 @@ void cifs_free_hash(struct shash_desc **sdesc);
+
+ struct cifs_chan *
+ cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server);
+-int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses);
++int cifs_try_adding_channels(struct cifs_ses *ses);
+ bool is_server_using_iface(struct TCP_Server_Info *server,
+ struct cifs_server_iface *iface);
+ bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
+@@ -740,4 +740,9 @@ static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
+ return true;
+ }
+
++static inline void release_mid(struct mid_q_entry *mid)
++{
++ kref_put(&mid->refcount, __release_mid);
++}
++
+ #endif /* _CIFSPROTO_H */
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 7b923e36501b0..d517651d7bcea 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -156,13 +156,14 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
+ /* If server is a channel, select the primary channel */
+ pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
+
+- spin_lock(&pserver->srv_lock);
++ /* if we need to signal just this channel */
+ if (!all_channels) {
+- pserver->tcpStatus = CifsNeedReconnect;
+- spin_unlock(&pserver->srv_lock);
++ spin_lock(&server->srv_lock);
++ if (server->tcpStatus != CifsExiting)
++ server->tcpStatus = CifsNeedReconnect;
++ spin_unlock(&server->srv_lock);
+ return;
+ }
+- spin_unlock(&pserver->srv_lock);
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+@@ -2033,6 +2034,12 @@ void __cifs_put_smb_ses(struct cifs_ses *ses)
+ }
+ }
+
++ /* we now account for primary channel in iface->refcount */
++ if (ses->chans[0].iface) {
++ kref_put(&ses->chans[0].iface->refcount, release_iface);
++ ses->chans[0].server = NULL;
++ }
++
+ sesInfoFree(ses);
+ cifs_put_tcp_session(server, 0);
+ }
+@@ -3560,7 +3567,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ ctx->prepath = NULL;
+
+ out:
+- cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
++ cifs_try_adding_channels(mnt_ctx.ses);
+ rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
+ if (rc)
+ goto error;
+@@ -3849,8 +3856,12 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+ is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ spin_unlock(&ses->chan_lock);
+
+- if (!is_binding)
++ if (!is_binding) {
+ ses->ses_status = SES_IN_SETUP;
++
++ /* force iface_list refresh */
++ ses->iface_last_update = 0;
++ }
+ spin_unlock(&ses->ses_lock);
+
+ /* update ses ip_addr only for primary chan */
+diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c
+index 81b84151450d2..a8a1d386da656 100644
+--- a/fs/smb/client/dfs.c
++++ b/fs/smb/client/dfs.c
+@@ -263,15 +263,23 @@ out:
+ return rc;
+ }
+
+-/* Resolve UNC hostname in @ctx->source and set ip addr in @ctx->dstaddr */
++/*
++ * If @ctx->dfs_automount, then update @ctx->dstaddr earlier with the DFS root
++ * server from which we'll start following any referrals. Otherwise rely on the
++ * value provided by mount(2), as the user might not have the dns_resolver key
++ * set up and would therefore fail to upcall to resolve the UNC hostname under
++ * @ctx->source.
++ */
+ static int update_fs_context_dstaddr(struct smb3_fs_context *ctx)
+ {
+ struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
+- int rc;
++ int rc = 0;
+
+- rc = dns_resolve_server_name_to_ip(ctx->source, addr, NULL);
+- if (!rc)
+- cifs_set_port(addr, ctx->port);
++ if (!ctx->nodfs && ctx->dfs_automount) {
++ rc = dns_resolve_server_name_to_ip(ctx->source, addr, NULL);
++ if (!rc)
++ cifs_set_port(addr, ctx->port);
++ ctx->dfs_automount = false;
++ }
+ return rc;
+ }
+
+diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
+index 9d8d34af02114..cf46916286d02 100644
+--- a/fs/smb/client/fs_context.h
++++ b/fs/smb/client/fs_context.h
+@@ -268,6 +268,7 @@ struct smb3_fs_context {
+ bool witness:1; /* use witness protocol */
+ char *leaf_fullpath;
+ struct cifs_ses *dfs_root_ses;
++ bool dfs_automount:1; /* set for dfs automount only */
+ };
+
+ extern const struct fs_parameter_spec smb3_fs_parameters[];
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index d7c302442c1ec..d6aa5e474d5e7 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -592,6 +592,10 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
+ cifs_dbg(FYI, "Symlink\n");
+ fattr->cf_mode |= S_IFLNK;
+ fattr->cf_dtype = DT_LNK;
++ } else if (memcmp("LnxFIFO", pbuf, 8) == 0) {
++ cifs_dbg(FYI, "FIFO\n");
++ fattr->cf_mode |= S_IFIFO;
++ fattr->cf_dtype = DT_FIFO;
+ } else {
+ fattr->cf_mode |= S_IFREG; /* file? */
+ fattr->cf_dtype = DT_REG;
+@@ -744,7 +748,7 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
+ case 0: /* SMB1 symlink */
+ case IO_REPARSE_TAG_SYMLINK:
+ case IO_REPARSE_TAG_NFS:
+- fattr->cf_mode = S_IFLNK;
++ fattr->cf_mode = S_IFLNK | cifs_sb->ctx->file_mode;
+ fattr->cf_dtype = DT_LNK;
+ break;
+ default:
+@@ -819,6 +823,8 @@ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr,
+
+ out_reparse:
+ if (S_ISLNK(fattr->cf_mode)) {
++ if (likely(data->symlink_target))
++ fattr->cf_eof = strnlen(data->symlink_target, PATH_MAX);
+ fattr->cf_symlink_target = data->symlink_target;
+ data->symlink_target = NULL;
+ }
+diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
+index f7160003e0ed9..73ededa8eba5c 100644
+--- a/fs/smb/client/ioctl.c
++++ b/fs/smb/client/ioctl.c
+@@ -117,6 +117,20 @@ out_drop_write:
+ return rc;
+ }
+
++static long smb_mnt_get_tcon_info(struct cifs_tcon *tcon, void __user *arg)
++{
++ int rc = 0;
++ struct smb_mnt_tcon_info tcon_inf;
++
++ tcon_inf.tid = tcon->tid;
++ tcon_inf.session_id = tcon->ses->Suid;
++
++ if (copy_to_user(arg, &tcon_inf, sizeof(struct smb_mnt_tcon_info)))
++ rc = -EFAULT;
++
++ return rc;
++}
++
+ static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
+ void __user *arg)
+ {
+@@ -414,6 +428,17 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
+ tcon = tlink_tcon(pSMBFile->tlink);
+ rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
+ break;
++ case CIFS_IOC_GET_TCON_INFO:
++ cifs_sb = CIFS_SB(inode->i_sb);
++ tlink = cifs_sb_tlink(cifs_sb);
++ if (IS_ERR(tlink)) {
++ rc = PTR_ERR(tlink);
++ break;
++ }
++ tcon = tlink_tcon(tlink);
++ rc = smb_mnt_get_tcon_info(tcon, (void __user *)arg);
++ cifs_put_tlink(tlink);
++ break;
+ case CIFS_ENUMERATE_SNAPSHOTS:
+ if (pSMBFile == NULL)
+ break;
+diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c
+index c8f5ed8a69f1c..a6968573b775e 100644
+--- a/fs/smb/client/namespace.c
++++ b/fs/smb/client/namespace.c
+@@ -117,6 +117,18 @@ cifs_build_devname(char *nodename, const char *prepath)
+ return dev;
+ }
+
++static bool is_dfs_mount(struct dentry *dentry)
++{
++ struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
++ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++ bool ret;
++
++ spin_lock(&tcon->tc_lock);
++ ret = !!tcon->origin_fullpath;
++ spin_unlock(&tcon->tc_lock);
++ return ret;
++}
++
+ /* Return full path out of a dentry set for automount */
+ static char *automount_fullpath(struct dentry *dentry, void *page)
+ {
+@@ -212,8 +224,9 @@ static struct vfsmount *cifs_do_automount(struct path *path)
+ ctx->source = NULL;
+ goto out;
+ }
+- cifs_dbg(FYI, "%s: ctx: source=%s UNC=%s prepath=%s\n",
+- __func__, ctx->source, ctx->UNC, ctx->prepath);
++ ctx->dfs_automount = is_dfs_mount(mntpt);
++ cifs_dbg(FYI, "%s: ctx: source=%s UNC=%s prepath=%s dfs_automount=%d\n",
++ __func__, ctx->source, ctx->UNC, ctx->prepath, ctx->dfs_automount);
+
+ mnt = fc_mount(fc);
+ out:
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index 79f26c560edf8..80050e36f0451 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -24,7 +24,7 @@
+ #include "fs_context.h"
+
+ static int
+-cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
++cifs_ses_add_channel(struct cifs_ses *ses,
+ struct cifs_server_iface *iface);
+
+ bool
+@@ -157,14 +157,16 @@ cifs_chan_is_iface_active(struct cifs_ses *ses,
+ }
+
+ /* returns number of channels added */
+-int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
++int cifs_try_adding_channels(struct cifs_ses *ses)
+ {
+ struct TCP_Server_Info *server = ses->server;
+ int old_chan_count, new_chan_count;
+ int left;
+ int rc = 0;
+ int tries = 0;
++ size_t iface_weight = 0, iface_min_speed = 0;
+ struct cifs_server_iface *iface = NULL, *niface = NULL;
++ struct cifs_server_iface *last_iface = NULL;
+
+ spin_lock(&ses->chan_lock);
+
+@@ -186,28 +188,17 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ }
+
+ if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+- ses->chan_max = 1;
+ spin_unlock(&ses->chan_lock);
+ cifs_server_dbg(VFS, "no multichannel support\n");
+ return 0;
+ }
+ spin_unlock(&ses->chan_lock);
+
+- /*
+- * Keep connecting to same, fastest, iface for all channels as
+- * long as its RSS. Try next fastest one if not RSS or channel
+- * creation fails.
+- */
+- spin_lock(&ses->iface_lock);
+- iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
+- iface_head);
+- spin_unlock(&ses->iface_lock);
+-
+ while (left > 0) {
+
+ tries++;
+ if (tries > 3*ses->chan_max) {
+- cifs_dbg(FYI, "too many channel open attempts (%d channels left to open)\n",
++ cifs_dbg(VFS, "too many channel open attempts (%d channels left to open)\n",
+ left);
+ break;
+ }
+@@ -215,23 +206,41 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ spin_lock(&ses->iface_lock);
+ if (!ses->iface_count) {
+ spin_unlock(&ses->iface_lock);
++ cifs_dbg(VFS, "server %s does not advertise interfaces\n",
++ ses->server->hostname);
+ break;
+ }
+
++ if (!iface)
++ iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
++ iface_head);
++ last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
++ iface_head);
++ iface_min_speed = last_iface->speed;
++
+ list_for_each_entry_safe_from(iface, niface, &ses->iface_list,
+ iface_head) {
++ /* do not mix rdma and non-rdma interfaces */
++ if (iface->rdma_capable != ses->server->rdma)
++ continue;
++
+ /* skip ifaces that are unusable */
+ if (!iface->is_active ||
+ (is_ses_using_iface(ses, iface) &&
+- !iface->rss_capable)) {
++ !iface->rss_capable))
++ continue;
++
++ /* check if we already allocated enough channels */
++ iface_weight = iface->speed / iface_min_speed;
++
++ if (iface->weight_fulfilled >= iface_weight)
+ continue;
+- }
+
+ /* take ref before unlock */
+ kref_get(&iface->refcount);
+
+ spin_unlock(&ses->iface_lock);
+- rc = cifs_ses_add_channel(cifs_sb, ses, iface);
++ rc = cifs_ses_add_channel(ses, iface);
+ spin_lock(&ses->iface_lock);
+
+ if (rc) {
+@@ -242,10 +251,21 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ continue;
+ }
+
+- cifs_dbg(FYI, "successfully opened new channel on iface:%pIS\n",
++ iface->num_channels++;
++ iface->weight_fulfilled++;
++ cifs_dbg(VFS, "successfully opened new channel on iface:%pIS\n",
+ &iface->sockaddr);
+ break;
+ }
++
++ /* Reached end of list; reset weight_fulfilled and start over. */
++ if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
++ list_for_each_entry(iface, &ses->iface_list, iface_head)
++ iface->weight_fulfilled = 0;
++ spin_unlock(&ses->iface_lock);
++ iface = NULL;
++ continue;
++ }
+ spin_unlock(&ses->iface_lock);
+
+ left--;
+@@ -264,8 +284,11 @@ int
+ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ {
+ unsigned int chan_index;
++ size_t iface_weight = 0, iface_min_speed = 0;
+ struct cifs_server_iface *iface = NULL;
+ struct cifs_server_iface *old_iface = NULL;
++ struct cifs_server_iface *last_iface = NULL;
++ struct sockaddr_storage ss;
+ int rc = 0;
+
+ spin_lock(&ses->chan_lock);
+@@ -284,14 +307,49 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ }
+ spin_unlock(&ses->chan_lock);
+
++ spin_lock(&server->srv_lock);
++ ss = server->dstaddr;
++ spin_unlock(&server->srv_lock);
++
+ spin_lock(&ses->iface_lock);
++ if (!ses->iface_count) {
++ spin_unlock(&ses->iface_lock);
++ cifs_dbg(VFS, "server %s does not advertise interfaces\n", ses->server->hostname);
++ return 0;
++ }
++
++ last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
++ iface_head);
++ iface_min_speed = last_iface->speed;
++
+ /* then look for a new one */
+ list_for_each_entry(iface, &ses->iface_list, iface_head) {
++ if (!chan_index) {
++ /* if we're trying to get the updated iface for primary channel */
++ if (!cifs_match_ipaddr((struct sockaddr *) &ss,
++ (struct sockaddr *) &iface->sockaddr))
++ continue;
++
++ kref_get(&iface->refcount);
++ break;
++ }
++
++ /* do not mix rdma and non-rdma interfaces */
++ if (iface->rdma_capable != server->rdma)
++ continue;
++
+ if (!iface->is_active ||
+ (is_ses_using_iface(ses, iface) &&
+ !iface->rss_capable)) {
+ continue;
+ }
++
++ /* check if we already allocated enough channels */
++ iface_weight = iface->speed / iface_min_speed;
++
++ if (iface->weight_fulfilled >= iface_weight)
++ continue;
++
+ kref_get(&iface->refcount);
+ break;
+ }
+@@ -302,16 +360,41 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ cifs_dbg(FYI, "unable to find a suitable iface\n");
+ }
+
++ if (!chan_index && !iface) {
++ cifs_dbg(FYI, "unable to get the interface matching: %pIS\n",
++ &ss);
++ spin_unlock(&ses->iface_lock);
++ return 0;
++ }
++
+ /* now drop the ref to the current iface */
+ if (old_iface && iface) {
+ cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n",
+ &old_iface->sockaddr,
+ &iface->sockaddr);
++
++ old_iface->num_channels--;
++ if (old_iface->weight_fulfilled)
++ old_iface->weight_fulfilled--;
++ iface->num_channels++;
++ iface->weight_fulfilled++;
++
+ kref_put(&old_iface->refcount, release_iface);
+ } else if (old_iface) {
+ cifs_dbg(FYI, "releasing ref to iface: %pIS\n",
+ &old_iface->sockaddr);
++
++ old_iface->num_channels--;
++ if (old_iface->weight_fulfilled)
++ old_iface->weight_fulfilled--;
++
+ kref_put(&old_iface->refcount, release_iface);
++ } else if (!chan_index) {
++ /* special case: update interface for primary channel */
++ cifs_dbg(FYI, "referencing primary channel iface: %pIS\n",
++ &iface->sockaddr);
++ iface->num_channels++;
++ iface->weight_fulfilled++;
+ } else {
+ WARN_ON(!iface);
+ cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr);
+@@ -355,7 +438,7 @@ cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ }
+
+ static int
+-cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
++cifs_ses_add_channel(struct cifs_ses *ses,
+ struct cifs_server_iface *iface)
+ {
+ struct TCP_Server_Info *chan_server;
+@@ -434,7 +517,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
+ * This will be used for encoding/decoding user/domain/pw
+ * during sess setup auth.
+ */
+- ctx->local_nls = cifs_sb->local_nls;
++ ctx->local_nls = ses->local_nls;
+
+ /* Use RDMA if possible */
+ ctx->rdma = iface->rdma_capable;
+@@ -480,7 +563,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
+
+ rc = cifs_negotiate_protocol(xid, ses, chan->server);
+ if (!rc)
+- rc = cifs_setup_session(xid, ses, chan->server, cifs_sb->local_nls);
++ rc = cifs_setup_session(xid, ses, chan->server, ses->local_nls);
+
+ mutex_unlock(&ses->session_mutex);
+
+diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
+index 25f7cd6f23d64..32dfa0f7a78c3 100644
+--- a/fs/smb/client/smb2misc.c
++++ b/fs/smb/client/smb2misc.c
+@@ -787,7 +787,7 @@ __smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
+ {
+ struct close_cancelled_open *cancelled;
+
+- cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC);
++ cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+ if (!cancelled)
+ return -ENOMEM;
+
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 9aeecee6b91b3..b2a60aa6564fd 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -756,6 +756,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
+ unsigned int ret_data_len = 0;
+ struct network_interface_info_ioctl_rsp *out_buf = NULL;
+ struct cifs_ses *ses = tcon->ses;
++ struct TCP_Server_Info *pserver;
+
+ /* do not query too frequently */
+ if (ses->iface_last_update &&
+@@ -780,6 +781,11 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
+ if (rc)
+ goto out;
+
++ /* check if iface is still active */
++ pserver = ses->chans[0].server;
++ if (pserver && !cifs_chan_is_iface_active(ses, pserver))
++ cifs_chan_update_iface(ses, pserver);
++
+ out:
+ kfree(out_buf);
+ return rc;
+@@ -3299,6 +3305,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ struct inode *inode = file_inode(file);
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct cifsFileInfo *cfile = file->private_data;
++ unsigned long long new_size;
+ long rc;
+ unsigned int xid;
+ __le64 eof;
+@@ -3329,10 +3336,15 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ /*
+ * do we also need to change the size of the file?
+ */
+- if (keep_size == false && i_size_read(inode) < offset + len) {
+- eof = cpu_to_le64(offset + len);
++ new_size = offset + len;
++ if (keep_size == false && (unsigned long long)i_size_read(inode) < new_size) {
++ eof = cpu_to_le64(new_size);
+ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid, cfile->pid, &eof);
++ if (rc >= 0) {
++ truncate_setsize(inode, new_size);
++ fscache_resize_cookie(cifs_inode_cookie(inode), new_size);
++ }
+ }
+
+ zero_range_exit:
+@@ -3727,6 +3739,9 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
+ if (rc < 0)
+ goto out_2;
+
++ truncate_setsize(inode, old_eof + len);
++ fscache_resize_cookie(cifs_inode_cookie(inode), i_size_read(inode));
++
+ rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
+ if (rc < 0)
+ goto out_2;
+@@ -5087,7 +5102,7 @@ smb2_make_node(unsigned int xid, struct inode *inode,
+ * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
+ */
+
+- if (!S_ISCHR(mode) && !S_ISBLK(mode))
++ if (!S_ISCHR(mode) && !S_ISBLK(mode) && !S_ISFIFO(mode))
+ return rc;
+
+ cifs_dbg(FYI, "sfu compat create special file\n");
+@@ -5135,6 +5150,12 @@ smb2_make_node(unsigned int xid, struct inode *inode,
+ pdev->minor = cpu_to_le64(MINOR(dev));
+ rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+ &bytes_written, iov, 1);
++ } else if (S_ISFIFO(mode)) {
++ memcpy(pdev->type, "LnxFIFO", 8);
++ pdev->major = 0;
++ pdev->minor = 0;
++ rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
++ &bytes_written, iov, 1);
+ }
+ tcon->ses->server->ops->close(xid, tcon, &fid);
+ d_drop(dentry);
+diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
+index 23c50ed7d4b59..a136fc4cc2b5f 100644
+--- a/fs/smb/client/smb2transport.c
++++ b/fs/smb/client/smb2transport.c
+@@ -452,6 +452,8 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ ptriplet->encryption.context,
+ ses->smb3encryptionkey,
+ SMB3_ENC_DEC_KEY_SIZE);
++ if (rc)
++ return rc;
+ rc = generate_key(ses, ptriplet->decryption.label,
+ ptriplet->decryption.context,
+ ses->smb3decryptionkey,
+@@ -460,9 +462,6 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ return rc;
+ }
+
+- if (rc)
+- return rc;
+-
+ #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
+ cifs_dbg(VFS, "%s: dumping generated AES session keys\n", __func__);
+ /*
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+index 14710afdc2a36..d553b7a54621b 100644
+--- a/fs/smb/client/transport.c
++++ b/fs/smb/client/transport.c
+@@ -76,7 +76,7 @@ alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
+ return temp;
+ }
+
+-static void __release_mid(struct kref *refcount)
++void __release_mid(struct kref *refcount)
+ {
+ struct mid_q_entry *midEntry =
+ container_of(refcount, struct mid_q_entry, refcount);
+@@ -156,15 +156,6 @@ static void __release_mid(struct kref *refcount)
+ mempool_free(midEntry, cifs_mid_poolp);
+ }
+
+-void release_mid(struct mid_q_entry *mid)
+-{
+- struct TCP_Server_Info *server = mid->server;
+-
+- spin_lock(&server->mid_lock);
+- kref_put(&mid->refcount, __release_mid);
+- spin_unlock(&server->mid_lock);
+-}
+-
+ void
+ delete_mid(struct mid_q_entry *mid)
+ {
+diff --git a/fs/smb/client/xattr.c b/fs/smb/client/xattr.c
+index 4ad5531686d81..c2bf829310bee 100644
+--- a/fs/smb/client/xattr.c
++++ b/fs/smb/client/xattr.c
+@@ -150,10 +150,13 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ goto out;
+
+- if (pTcon->ses->server->ops->set_EA)
++ if (pTcon->ses->server->ops->set_EA) {
+ rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
+ full_path, name, value, (__u16)size,
+ cifs_sb->local_nls, cifs_sb);
++ if (rc == 0)
++ inode_set_ctime_current(inode);
++ }
+ break;
+
+ case XATTR_CIFS_ACL:
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 93262ca3f58a7..269fbfb3cd678 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2380,7 +2380,8 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+ rc = 0;
+ } else {
+ rc = ksmbd_vfs_setxattr(idmap, path, attr_name, value,
+- le16_to_cpu(eabuf->EaValueLength), 0);
++ le16_to_cpu(eabuf->EaValueLength),
++ 0, true);
+ if (rc < 0) {
+ ksmbd_debug(SMB,
+ "ksmbd_vfs_setxattr is failed(%d)\n",
+@@ -2443,7 +2444,7 @@ static noinline int smb2_set_stream_name_xattr(const struct path *path,
+ return -EBADF;
+ }
+
+- rc = ksmbd_vfs_setxattr(idmap, path, xattr_stream_name, NULL, 0, 0);
++ rc = ksmbd_vfs_setxattr(idmap, path, xattr_stream_name, NULL, 0, 0, false);
+ if (rc < 0)
+ pr_err("Failed to store XATTR stream name :%d\n", rc);
+ return 0;
+@@ -2518,7 +2519,7 @@ static void smb2_new_xattrs(struct ksmbd_tree_connect *tcon, const struct path *
+ da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
+ XATTR_DOSINFO_ITIME;
+
+- rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da);
++ rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da, false);
+ if (rc)
+ ksmbd_debug(SMB, "failed to store file attribute into xattr\n");
+ }
+@@ -2608,7 +2609,7 @@ static int smb2_create_sd_buffer(struct ksmbd_work *work,
+ sizeof(struct create_sd_buf_req))
+ return -EINVAL;
+ return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,
+- le32_to_cpu(sd_buf->ccontext.DataLength), true);
++ le32_to_cpu(sd_buf->ccontext.DataLength), true, false);
+ }
+
+ static void ksmbd_acls_fattr(struct smb_fattr *fattr,
+@@ -3152,7 +3153,8 @@ int smb2_open(struct ksmbd_work *work)
+ idmap,
+ &path,
+ pntsd,
+- pntsd_size);
++ pntsd_size,
++ false);
+ kfree(pntsd);
+ if (rc)
+ pr_err("failed to store ntacl in xattr : %d\n",
+@@ -3228,12 +3230,6 @@ int smb2_open(struct ksmbd_work *work)
+ if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)
+ ksmbd_fd_set_delete_on_close(fp, file_info);
+
+- if (need_truncate) {
+- rc = smb2_create_truncate(&path);
+- if (rc)
+- goto err_out;
+- }
+-
+ if (req->CreateContextsOffset) {
+ struct create_alloc_size_req *az_req;
+
+@@ -3398,11 +3394,12 @@ int smb2_open(struct ksmbd_work *work)
+ }
+
+ err_out:
+- if (file_present || created) {
+- inode_unlock(d_inode(parent_path.dentry));
+- path_put(&path);
+- path_put(&parent_path);
+- }
++ if (file_present || created)
++ ksmbd_vfs_kern_path_unlock(&parent_path, &path);
++
++ if (fp && need_truncate)
++ rc = smb2_create_truncate(&fp->filp->f_path);
++
+ ksmbd_revert_fsids(work);
+ err_out1:
+ if (!rc) {
+@@ -5537,7 +5534,7 @@ static int smb2_rename(struct ksmbd_work *work,
+ rc = ksmbd_vfs_setxattr(file_mnt_idmap(fp->filp),
+ &fp->filp->f_path,
+ xattr_stream_name,
+- NULL, 0, 0);
++ NULL, 0, 0, true);
+ if (rc < 0) {
+ pr_err("failed to store stream name in xattr: %d\n",
+ rc);
+@@ -5630,11 +5627,9 @@ static int smb2_create_link(struct ksmbd_work *work,
+ if (rc)
+ rc = -EINVAL;
+ out:
+- if (file_present) {
+- inode_unlock(d_inode(parent_path.dentry));
+- path_put(&path);
+- path_put(&parent_path);
+- }
++ if (file_present)
++ ksmbd_vfs_kern_path_unlock(&parent_path, &path);
++
+ if (!IS_ERR(link_name))
+ kfree(link_name);
+ kfree(pathname);
+@@ -5701,7 +5696,8 @@ static int set_file_basic_info(struct ksmbd_file *fp,
+ da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
+ XATTR_DOSINFO_ITIME;
+
+- rc = ksmbd_vfs_set_dos_attrib_xattr(idmap, &filp->f_path, &da);
++ rc = ksmbd_vfs_set_dos_attrib_xattr(idmap, &filp->f_path, &da,
++ true);
+ if (rc)
+ ksmbd_debug(SMB,
+ "failed to restore file attribute in EA\n");
+@@ -6013,7 +6009,7 @@ static int smb2_set_info_sec(struct ksmbd_file *fp, int addition_info,
+ fp->saccess |= FILE_SHARE_DELETE_LE;
+
+ return set_info_sec(fp->conn, fp->tcon, &fp->filp->f_path, pntsd,
+- buf_len, false);
++ buf_len, false, true);
+ }
+
+ /**
+@@ -7582,7 +7578,8 @@ static inline int fsctl_set_sparse(struct ksmbd_work *work, u64 id,
+
+ da.attr = le32_to_cpu(fp->f_ci->m_fattr);
+ ret = ksmbd_vfs_set_dos_attrib_xattr(idmap,
+- &fp->filp->f_path, &da);
++ &fp->filp->f_path,
++ &da, true);
+ if (ret)
+ fp->f_ci->m_fattr = old_fattr;
+ }
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index e6ba1e9b8589a..6691ae68af0c0 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -366,11 +366,22 @@ static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
+ return 0;
+ }
+
++/**
++ * set_smb1_rsp_status() - set error type in smb response header
++ * @work: smb work containing smb response header
++ * @err: error code to set in response
++ */
++static void set_smb1_rsp_status(struct ksmbd_work *work, __le32 err)
++{
++ work->send_no_response = 1;
++}
++
+ static struct smb_version_ops smb1_server_ops = {
+ .get_cmd_val = get_smb1_cmd_val,
+ .init_rsp_hdr = init_smb1_rsp_hdr,
+ .allocate_rsp_buf = smb1_allocate_rsp_buf,
+ .check_user_session = smb1_check_user_session,
++ .set_rsp_status = set_smb1_rsp_status,
+ };
+
+ static int smb1_negotiate(struct ksmbd_work *work)
+diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
+index 6c0305be895e5..1164365533f08 100644
+--- a/fs/smb/server/smbacl.c
++++ b/fs/smb/server/smbacl.c
+@@ -1107,6 +1107,7 @@ pass:
+ struct smb_acl *pdacl;
+ struct smb_sid *powner_sid = NULL, *pgroup_sid = NULL;
+ int powner_sid_size = 0, pgroup_sid_size = 0, pntsd_size;
++ int pntsd_alloc_size;
+
+ if (parent_pntsd->osidoffset) {
+ powner_sid = (struct smb_sid *)((char *)parent_pntsd +
+@@ -1119,9 +1120,10 @@ pass:
+ pgroup_sid_size = 1 + 1 + 6 + (pgroup_sid->num_subauth * 4);
+ }
+
+- pntsd = kzalloc(sizeof(struct smb_ntsd) + powner_sid_size +
+- pgroup_sid_size + sizeof(struct smb_acl) +
+- nt_size, GFP_KERNEL);
++ pntsd_alloc_size = sizeof(struct smb_ntsd) + powner_sid_size +
++ pgroup_sid_size + sizeof(struct smb_acl) + nt_size;
++
++ pntsd = kzalloc(pntsd_alloc_size, GFP_KERNEL);
+ if (!pntsd) {
+ rc = -ENOMEM;
+ goto free_aces_base;
+@@ -1136,6 +1138,27 @@ pass:
+ pntsd->gsidoffset = parent_pntsd->gsidoffset;
+ pntsd->dacloffset = parent_pntsd->dacloffset;
+
++ if ((u64)le32_to_cpu(pntsd->osidoffset) + powner_sid_size >
++ pntsd_alloc_size) {
++ rc = -EINVAL;
++ kfree(pntsd);
++ goto free_aces_base;
++ }
++
++ if ((u64)le32_to_cpu(pntsd->gsidoffset) + pgroup_sid_size >
++ pntsd_alloc_size) {
++ rc = -EINVAL;
++ kfree(pntsd);
++ goto free_aces_base;
++ }
++
++ if ((u64)le32_to_cpu(pntsd->dacloffset) + sizeof(struct smb_acl) + nt_size >
++ pntsd_alloc_size) {
++ rc = -EINVAL;
++ kfree(pntsd);
++ goto free_aces_base;
++ }
++
+ if (pntsd->osidoffset) {
+ struct smb_sid *owner_sid = (struct smb_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->osidoffset));
+@@ -1162,7 +1185,7 @@ pass:
+ pntsd_size += sizeof(struct smb_acl) + nt_size;
+ }
+
+- ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, pntsd_size);
++ ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, pntsd_size, false);
+ kfree(pntsd);
+ }
+
+@@ -1354,7 +1377,7 @@ err_out:
+
+ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
+ const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
+- bool type_check)
++ bool type_check, bool get_write)
+ {
+ int rc;
+ struct smb_fattr fattr = {{0}};
+@@ -1414,7 +1437,8 @@ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
+ if (test_share_config_flag(tcon->share_conf, KSMBD_SHARE_FLAG_ACL_XATTR)) {
+ /* Update WinACL in xattr */
+ ksmbd_vfs_remove_sd_xattrs(idmap, path);
+- ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, ntsd_len);
++ ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, ntsd_len,
++ get_write);
+ }
+
+ out:
+diff --git a/fs/smb/server/smbacl.h b/fs/smb/server/smbacl.h
+index 49a8c292bd2e8..2b52861707d8c 100644
+--- a/fs/smb/server/smbacl.h
++++ b/fs/smb/server/smbacl.h
+@@ -207,7 +207,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
+ __le32 *pdaccess, int uid);
+ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
+ const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
+- bool type_check);
++ bool type_check, bool get_write);
+ void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid);
+ void ksmbd_init_domain(u32 *sub_auth);
+
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+index b5a5e50fc9ca3..5a41c0b4e9335 100644
+--- a/fs/smb/server/vfs.c
++++ b/fs/smb/server/vfs.c
+@@ -97,6 +97,13 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
+ return -ENOENT;
+ }
+
++ err = mnt_want_write(parent_path->mnt);
++ if (err) {
++ path_put(parent_path);
++ putname(filename);
++ return -ENOENT;
++ }
++
+ inode_lock_nested(parent_path->dentry->d_inode, I_MUTEX_PARENT);
+ d = lookup_one_qstr_excl(&last, parent_path->dentry, 0);
+ if (IS_ERR(d))
+@@ -123,6 +130,7 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
+
+ err_out:
+ inode_unlock(d_inode(parent_path->dentry));
++ mnt_drop_write(parent_path->mnt);
+ path_put(parent_path);
+ putname(filename);
+ return -ENOENT;
+@@ -173,10 +181,6 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
+ return err;
+ }
+
+- err = mnt_want_write(path.mnt);
+- if (err)
+- goto out_err;
+-
+ mode |= S_IFREG;
+ err = vfs_create(mnt_idmap(path.mnt), d_inode(path.dentry),
+ dentry, mode, true);
+@@ -186,9 +190,7 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
+ } else {
+ pr_err("File(%s): creation failed (err:%d)\n", name, err);
+ }
+- mnt_drop_write(path.mnt);
+
+-out_err:
+ done_path_create(&path, dentry);
+ return err;
+ }
+@@ -219,10 +221,6 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
+ return err;
+ }
+
+- err = mnt_want_write(path.mnt);
+- if (err)
+- goto out_err2;
+-
+ idmap = mnt_idmap(path.mnt);
+ mode |= S_IFDIR;
+ err = vfs_mkdir(idmap, d_inode(path.dentry), dentry, mode);
+@@ -233,21 +231,19 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
+ dentry->d_name.len);
+ if (IS_ERR(d)) {
+ err = PTR_ERR(d);
+- goto out_err1;
++ goto out_err;
+ }
+ if (unlikely(d_is_negative(d))) {
+ dput(d);
+ err = -ENOENT;
+- goto out_err1;
++ goto out_err;
+ }
+
+ ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(d));
+ dput(d);
+ }
+
+-out_err1:
+- mnt_drop_write(path.mnt);
+-out_err2:
++out_err:
+ done_path_create(&path, dentry);
+ if (err)
+ pr_err("mkdir(%s): creation failed (err:%d)\n", name, err);
+@@ -463,7 +459,8 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
+ fp->stream.name,
+ (void *)stream_buf,
+ size,
+- 0);
++ 0,
++ true);
+ if (err < 0)
+ goto out;
+
+@@ -605,10 +602,6 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, const struct path *path)
+ goto out_err;
+ }
+
+- err = mnt_want_write(path->mnt);
+- if (err)
+- goto out_err;
+-
+ idmap = mnt_idmap(path->mnt);
+ if (S_ISDIR(d_inode(path->dentry)->i_mode)) {
+ err = vfs_rmdir(idmap, d_inode(parent), path->dentry);
+@@ -619,7 +612,6 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, const struct path *path)
+ if (err)
+ ksmbd_debug(VFS, "unlink failed, err %d\n", err);
+ }
+- mnt_drop_write(path->mnt);
+
+ out_err:
+ ksmbd_revert_fsids(work);
+@@ -665,16 +657,11 @@ int ksmbd_vfs_link(struct ksmbd_work *work, const char *oldname,
+ goto out3;
+ }
+
+- err = mnt_want_write(newpath.mnt);
+- if (err)
+- goto out3;
+-
+ err = vfs_link(oldpath.dentry, mnt_idmap(newpath.mnt),
+ d_inode(newpath.dentry),
+ dentry, NULL);
+ if (err)
+ ksmbd_debug(VFS, "vfs_link failed err %d\n", err);
+- mnt_drop_write(newpath.mnt);
+
+ out3:
+ done_path_create(&newpath, dentry);
+@@ -924,18 +911,22 @@ ssize_t ksmbd_vfs_getxattr(struct mnt_idmap *idmap,
+ * @attr_value: xattr value to set
+ * @attr_size: size of xattr value
+ * @flags: destination buffer length
++ * @get_write: get write access to a mount
+ *
+ * Return: 0 on success, otherwise error
+ */
+ int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
+ const struct path *path, const char *attr_name,
+- void *attr_value, size_t attr_size, int flags)
++ void *attr_value, size_t attr_size, int flags,
++ bool get_write)
+ {
+ int err;
+
+- err = mnt_want_write(path->mnt);
+- if (err)
+- return err;
++ if (get_write == true) {
++ err = mnt_want_write(path->mnt);
++ if (err)
++ return err;
++ }
+
+ err = vfs_setxattr(idmap,
+ path->dentry,
+@@ -945,7 +936,8 @@ int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
+ flags);
+ if (err)
+ ksmbd_debug(VFS, "setxattr failed, err %d\n", err);
+- mnt_drop_write(path->mnt);
++ if (get_write == true)
++ mnt_drop_write(path->mnt);
+ return err;
+ }
+
+@@ -1268,6 +1260,13 @@ out1:
+ }
+
+ if (!err) {
++ err = mnt_want_write(parent_path->mnt);
++ if (err) {
++ path_put(path);
++ path_put(parent_path);
++ return err;
++ }
++
+ err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry);
+ if (err) {
+ path_put(path);
+@@ -1277,6 +1276,14 @@ out1:
+ return err;
+ }
+
++void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path)
++{
++ inode_unlock(d_inode(parent_path->dentry));
++ mnt_drop_write(parent_path->mnt);
++ path_put(path);
++ path_put(parent_path);
++}
++
+ struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+ const char *name,
+ unsigned int flags,
+@@ -1431,7 +1438,8 @@ out:
+ int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
+ struct mnt_idmap *idmap,
+ const struct path *path,
+- struct smb_ntsd *pntsd, int len)
++ struct smb_ntsd *pntsd, int len,
++ bool get_write)
+ {
+ int rc;
+ struct ndr sd_ndr = {0}, acl_ndr = {0};
+@@ -1491,7 +1499,7 @@ int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
+
+ rc = ksmbd_vfs_setxattr(idmap, path,
+ XATTR_NAME_SD, sd_ndr.data,
+- sd_ndr.offset, 0);
++ sd_ndr.offset, 0, get_write);
+ if (rc < 0)
+ pr_err("Failed to store XATTR ntacl :%d\n", rc);
+
+@@ -1580,7 +1588,8 @@ free_n_data:
+
+ int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap,
+ const struct path *path,
+- struct xattr_dos_attrib *da)
++ struct xattr_dos_attrib *da,
++ bool get_write)
+ {
+ struct ndr n;
+ int err;
+@@ -1590,7 +1599,7 @@ int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap,
+ return err;
+
+ err = ksmbd_vfs_setxattr(idmap, path, XATTR_NAME_DOS_ATTRIBUTE,
+- (void *)n.data, n.offset, 0);
++ (void *)n.data, n.offset, 0, get_write);
+ if (err)
+ ksmbd_debug(SMB, "failed to store dos attribute in xattr\n");
+ kfree(n.data);
+@@ -1862,10 +1871,6 @@ int ksmbd_vfs_set_init_posix_acl(struct mnt_idmap *idmap,
+ }
+ posix_state_to_acl(&acl_state, acls->a_entries);
+
+- rc = mnt_want_write(path->mnt);
+- if (rc)
+- goto out_err;
+-
+ rc = set_posix_acl(idmap, dentry, ACL_TYPE_ACCESS, acls);
+ if (rc < 0)
+ ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
+@@ -1877,9 +1882,7 @@ int ksmbd_vfs_set_init_posix_acl(struct mnt_idmap *idmap,
+ ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
+ rc);
+ }
+- mnt_drop_write(path->mnt);
+
+-out_err:
+ free_acl_state(&acl_state);
+ posix_acl_release(acls);
+ return rc;
+@@ -1909,10 +1912,6 @@ int ksmbd_vfs_inherit_posix_acl(struct mnt_idmap *idmap,
+ }
+ }
+
+- rc = mnt_want_write(path->mnt);
+- if (rc)
+- goto out_err;
+-
+ rc = set_posix_acl(idmap, dentry, ACL_TYPE_ACCESS, acls);
+ if (rc < 0)
+ ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
+@@ -1924,9 +1923,7 @@ int ksmbd_vfs_inherit_posix_acl(struct mnt_idmap *idmap,
+ ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
+ rc);
+ }
+- mnt_drop_write(path->mnt);
+
+-out_err:
+ posix_acl_release(acls);
+ return rc;
+ }
+diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
+index 00968081856e3..cfe1c8092f230 100644
+--- a/fs/smb/server/vfs.h
++++ b/fs/smb/server/vfs.h
+@@ -109,7 +109,8 @@ ssize_t ksmbd_vfs_casexattr_len(struct mnt_idmap *idmap,
+ int attr_name_len);
+ int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
+ const struct path *path, const char *attr_name,
+- void *attr_value, size_t attr_size, int flags);
++ void *attr_value, size_t attr_size, int flags,
++ bool get_write);
+ int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
+ size_t *xattr_stream_name_size, int s_type);
+ int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
+@@ -117,6 +118,7 @@ int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
+ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
+ unsigned int flags, struct path *parent_path,
+ struct path *path, bool caseless);
++void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path);
+ struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+ const char *name,
+ unsigned int flags,
+@@ -144,14 +146,16 @@ int ksmbd_vfs_remove_sd_xattrs(struct mnt_idmap *idmap, const struct path *path)
+ int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
+ struct mnt_idmap *idmap,
+ const struct path *path,
+- struct smb_ntsd *pntsd, int len);
++ struct smb_ntsd *pntsd, int len,
++ bool get_write);
+ int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
+ struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ struct smb_ntsd **pntsd);
+ int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap,
+ const struct path *path,
+- struct xattr_dos_attrib *da);
++ struct xattr_dos_attrib *da,
++ bool get_write);
+ int ksmbd_vfs_get_dos_attrib_xattr(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ struct xattr_dos_attrib *da);
+diff --git a/fs/stat.c b/fs/stat.c
+index d43a5cc1bfa46..5375be5f97ccf 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -133,7 +133,8 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
+ idmap = mnt_idmap(path->mnt);
+ if (inode->i_op->getattr)
+ return inode->i_op->getattr(idmap, path, stat,
+- request_mask, query_flags);
++ request_mask,
++ query_flags | AT_GETATTR_NOSEC);
+
+ generic_fillattr(idmap, request_mask, inode, stat);
+ return 0;
+@@ -166,6 +167,9 @@ int vfs_getattr(const struct path *path, struct kstat *stat,
+ {
+ int retval;
+
++ if (WARN_ON_ONCE(query_flags & AT_GETATTR_NOSEC))
++ return -EPERM;
++
+ retval = security_inode_getattr(path);
+ if (retval)
+ return retval;
+diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
+index 8c8d64e76103e..efbdc47c74dcf 100644
+--- a/fs/tracefs/event_inode.c
++++ b/fs/tracefs/event_inode.c
+@@ -38,7 +38,10 @@ struct eventfs_inode {
+ * @fop: file_operations for file or directory
+ * @iop: inode_operations for file or directory
+ * @data: something that the caller will want to get to later on
++ * @is_freed: Flag set if the eventfs is on its way to be freed
+ * @mode: the permission that the file or directory should have
++ * @uid: saved uid if changed
++ * @gid: saved gid if changed
+ */
+ struct eventfs_file {
+ const char *name;
+@@ -50,22 +53,32 @@ struct eventfs_file {
+ const struct inode_operations *iop;
+ /*
+ * Union - used for deletion
+- * @del_list: list of eventfs_file to delete
++ * @llist: for calling dput() if needed after RCU
+ * @rcu: eventfs_file to delete in RCU
+- * @is_freed: node is freed if one of the above is set
+ */
+ union {
+- struct list_head del_list;
++ struct llist_node llist;
+ struct rcu_head rcu;
+- unsigned long is_freed;
+ };
+ void *data;
+- umode_t mode;
++ unsigned int is_freed:1;
++ unsigned int mode:31;
++ kuid_t uid;
++ kgid_t gid;
+ };
+
+ static DEFINE_MUTEX(eventfs_mutex);
+ DEFINE_STATIC_SRCU(eventfs_srcu);
+
++/* Mode is unsigned short, use the upper bits for flags */
++enum {
++ EVENTFS_SAVE_MODE = BIT(16),
++ EVENTFS_SAVE_UID = BIT(17),
++ EVENTFS_SAVE_GID = BIT(18),
++};
++
++#define EVENTFS_MODE_MASK (EVENTFS_SAVE_MODE - 1)
++
+ static struct dentry *eventfs_root_lookup(struct inode *dir,
+ struct dentry *dentry,
+ unsigned int flags);
+@@ -73,8 +86,53 @@ static int dcache_dir_open_wrapper(struct inode *inode, struct file *file);
+ static int dcache_readdir_wrapper(struct file *file, struct dir_context *ctx);
+ static int eventfs_release(struct inode *inode, struct file *file);
+
++static void update_attr(struct eventfs_file *ef, struct iattr *iattr)
++{
++ unsigned int ia_valid = iattr->ia_valid;
++
++ if (ia_valid & ATTR_MODE) {
++ ef->mode = (ef->mode & ~EVENTFS_MODE_MASK) |
++ (iattr->ia_mode & EVENTFS_MODE_MASK) |
++ EVENTFS_SAVE_MODE;
++ }
++ if (ia_valid & ATTR_UID) {
++ ef->mode |= EVENTFS_SAVE_UID;
++ ef->uid = iattr->ia_uid;
++ }
++ if (ia_valid & ATTR_GID) {
++ ef->mode |= EVENTFS_SAVE_GID;
++ ef->gid = iattr->ia_gid;
++ }
++}
++
++static int eventfs_set_attr(struct mnt_idmap *idmap, struct dentry *dentry,
++ struct iattr *iattr)
++{
++ struct eventfs_file *ef;
++ int ret;
++
++ mutex_lock(&eventfs_mutex);
++ ef = dentry->d_fsdata;
++ if (ef && ef->is_freed) {
++ /* Do not allow changes if the event is about to be removed. */
++ mutex_unlock(&eventfs_mutex);
++ return -ENODEV;
++ }
++
++ ret = simple_setattr(idmap, dentry, iattr);
++ if (!ret && ef)
++ update_attr(ef, iattr);
++ mutex_unlock(&eventfs_mutex);
++ return ret;
++}
++
+ static const struct inode_operations eventfs_root_dir_inode_operations = {
+ .lookup = eventfs_root_lookup,
++ .setattr = eventfs_set_attr,
++};
++
++static const struct inode_operations eventfs_file_inode_operations = {
++ .setattr = eventfs_set_attr,
+ };
+
+ static const struct file_operations eventfs_file_operations = {
+@@ -85,10 +143,20 @@ static const struct file_operations eventfs_file_operations = {
+ .release = eventfs_release,
+ };
+
++static void update_inode_attr(struct inode *inode, struct eventfs_file *ef)
++{
++ inode->i_mode = ef->mode & EVENTFS_MODE_MASK;
++
++ if (ef->mode & EVENTFS_SAVE_UID)
++ inode->i_uid = ef->uid;
++
++ if (ef->mode & EVENTFS_SAVE_GID)
++ inode->i_gid = ef->gid;
++}
++
+ /**
+ * create_file - create a file in the tracefs filesystem
+- * @name: the name of the file to create.
+- * @mode: the permission that the file should have.
++ * @ef: the eventfs_file
+ * @parent: parent dentry for this file.
+ * @data: something that the caller will want to get to later on.
+ * @fop: struct file_operations that should be used for this file.
+@@ -104,7 +172,7 @@ static const struct file_operations eventfs_file_operations = {
+ * If tracefs is not enabled in the kernel, the value -%ENODEV will be
+ * returned.
+ */
+-static struct dentry *create_file(const char *name, umode_t mode,
++static struct dentry *create_file(struct eventfs_file *ef,
+ struct dentry *parent, void *data,
+ const struct file_operations *fop)
+ {
+@@ -112,13 +180,13 @@ static struct dentry *create_file(const char *name, umode_t mode,
+ struct dentry *dentry;
+ struct inode *inode;
+
+- if (!(mode & S_IFMT))
+- mode |= S_IFREG;
++ if (!(ef->mode & S_IFMT))
++ ef->mode |= S_IFREG;
+
+- if (WARN_ON_ONCE(!S_ISREG(mode)))
++ if (WARN_ON_ONCE(!S_ISREG(ef->mode)))
+ return NULL;
+
+- dentry = eventfs_start_creating(name, parent);
++ dentry = eventfs_start_creating(ef->name, parent);
+
+ if (IS_ERR(dentry))
+ return dentry;
+@@ -127,7 +195,10 @@ static struct dentry *create_file(const char *name, umode_t mode,
+ if (unlikely(!inode))
+ return eventfs_failed_creating(dentry);
+
+- inode->i_mode = mode;
++ /* If the user updated the directory's attributes, use them */
++ update_inode_attr(inode, ef);
++
++ inode->i_op = &eventfs_file_inode_operations;
+ inode->i_fop = fop;
+ inode->i_private = data;
+
+@@ -140,7 +211,7 @@ static struct dentry *create_file(const char *name, umode_t mode,
+
+ /**
+ * create_dir - create a dir in the tracefs filesystem
+- * @name: the name of the file to create.
++ * @ei: the eventfs_inode that represents the directory to create
+ * @parent: parent dentry for this file.
+ * @data: something that the caller will want to get to later on.
+ *
+@@ -155,13 +226,14 @@ static struct dentry *create_file(const char *name, umode_t mode,
+ * If tracefs is not enabled in the kernel, the value -%ENODEV will be
+ * returned.
+ */
+-static struct dentry *create_dir(const char *name, struct dentry *parent, void *data)
++static struct dentry *create_dir(struct eventfs_file *ef,
++ struct dentry *parent, void *data)
+ {
+ struct tracefs_inode *ti;
+ struct dentry *dentry;
+ struct inode *inode;
+
+- dentry = eventfs_start_creating(name, parent);
++ dentry = eventfs_start_creating(ef->name, parent);
+ if (IS_ERR(dentry))
+ return dentry;
+
+@@ -169,7 +241,8 @@ static struct dentry *create_dir(const char *name, struct dentry *parent, void *
+ if (unlikely(!inode))
+ return eventfs_failed_creating(dentry);
+
+- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
++ update_inode_attr(inode, ef);
++
+ inode->i_op = &eventfs_root_dir_inode_operations;
+ inode->i_fop = &eventfs_file_operations;
+ inode->i_private = data;
+@@ -184,6 +257,13 @@ static struct dentry *create_dir(const char *name, struct dentry *parent, void *
+ return eventfs_end_creating(dentry);
+ }
+
++static void free_ef(struct eventfs_file *ef)
++{
++ kfree(ef->name);
++ kfree(ef->ei);
++ kfree(ef);
++}
++
+ /**
+ * eventfs_set_ef_status_free - set the ef->status to free
+ * @ti: the tracefs_inode of the dentry
+@@ -194,59 +274,37 @@ static struct dentry *create_dir(const char *name, struct dentry *parent, void *
+ */
+ void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry)
+ {
+- struct tracefs_inode *ti_parent;
+ struct eventfs_inode *ei;
+- struct eventfs_file *ef, *tmp;
++ struct eventfs_file *ef;
+
+ /* The top level events directory may be freed by this */
+ if (unlikely(ti->flags & TRACEFS_EVENT_TOP_INODE)) {
+- LIST_HEAD(ef_del_list);
+-
+ mutex_lock(&eventfs_mutex);
+-
+ ei = ti->private;
+
+- /* Record all the top level files */
+- list_for_each_entry_srcu(ef, &ei->e_top_files, list,
+- lockdep_is_held(&eventfs_mutex)) {
+- list_add_tail(&ef->del_list, &ef_del_list);
+- }
+-
+ /* Nothing should access this, but just in case! */
+ ti->private = NULL;
+-
+ mutex_unlock(&eventfs_mutex);
+
+- /* Now safely free the top level files and their children */
+- list_for_each_entry_safe(ef, tmp, &ef_del_list, del_list) {
+- list_del(&ef->del_list);
+- eventfs_remove(ef);
+- }
+-
+- kfree(ei);
++ ef = dentry->d_fsdata;
++ if (ef)
++ free_ef(ef);
+ return;
+ }
+
+ mutex_lock(&eventfs_mutex);
+
+- ti_parent = get_tracefs(dentry->d_parent->d_inode);
+- if (!ti_parent || !(ti_parent->flags & TRACEFS_EVENT_INODE))
+- goto out;
+-
+ ef = dentry->d_fsdata;
+ if (!ef)
+ goto out;
+
+- /*
+- * If ef was freed, then the LSB bit is set for d_fsdata.
+- * But this should not happen, as it should still have a
+- * ref count that prevents it. Warn in case it does.
+- */
+- if (WARN_ON_ONCE((unsigned long)ef & 1))
+- goto out;
++ if (ef->is_freed) {
++ free_ef(ef);
++ } else {
++ ef->dentry = NULL;
++ }
+
+ dentry->d_fsdata = NULL;
+- ef->dentry = NULL;
+ out:
+ mutex_unlock(&eventfs_mutex);
+ }
+@@ -306,10 +364,9 @@ create_dentry(struct eventfs_file *ef, struct dentry *parent, bool lookup)
+ inode_lock(parent->d_inode);
+
+ if (ef->ei)
+- dentry = create_dir(ef->name, parent, ef->data);
++ dentry = create_dir(ef, parent, ef->data);
+ else
+- dentry = create_file(ef->name, ef->mode, parent,
+- ef->data, ef->fop);
++ dentry = create_file(ef, parent, ef->data, ef->fop);
+
+ if (!lookup)
+ inode_unlock(parent->d_inode);
+@@ -475,6 +532,7 @@ static int dcache_dir_open_wrapper(struct inode *inode, struct file *file)
+ if (d) {
+ struct dentry **tmp;
+
++
+ tmp = krealloc(dentries, sizeof(d) * (cnt + 2), GFP_KERNEL);
+ if (!tmp)
+ break;
+@@ -549,13 +607,14 @@ static struct eventfs_file *eventfs_prepare_ef(const char *name, umode_t mode,
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&ef->ei->e_top_files);
++ ef->mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+ } else {
+ ef->ei = NULL;
++ ef->mode = mode;
+ }
+
+ ef->iop = iop;
+ ef->fop = fop;
+- ef->mode = mode;
+ ef->data = data;
+ return ef;
+ }
+@@ -772,25 +831,64 @@ int eventfs_add_file(const char *name, umode_t mode,
+ return 0;
+ }
+
+-static void free_ef(struct rcu_head *head)
++static LLIST_HEAD(free_list);
++
++static void eventfs_workfn(struct work_struct *work)
++{
++ struct eventfs_file *ef, *tmp;
++ struct llist_node *llnode;
++
++ llnode = llist_del_all(&free_list);
++ llist_for_each_entry_safe(ef, tmp, llnode, llist) {
++ /* This should only get here if it had a dentry */
++ if (!WARN_ON_ONCE(!ef->dentry))
++ dput(ef->dentry);
++ }
++}
++
++static DECLARE_WORK(eventfs_work, eventfs_workfn);
++
++static void free_rcu_ef(struct rcu_head *head)
+ {
+ struct eventfs_file *ef = container_of(head, struct eventfs_file, rcu);
+
+- kfree(ef->name);
+- kfree(ef->ei);
+- kfree(ef);
++ if (ef->dentry) {
++ /* Do not free the ef until all references of dentry are gone */
++ if (llist_add(&ef->llist, &free_list))
++ queue_work(system_unbound_wq, &eventfs_work);
++ return;
++ }
++
++ free_ef(ef);
++}
++
++static void unhook_dentry(struct dentry *dentry)
++{
++ if (!dentry)
++ return;
++ /*
++ * Need to add a reference to the dentry that is expected by
++ * simple_recursive_removal(), which will include a dput().
++ */
++ dget(dentry);
++
++ /*
++ * Also add a reference for the dput() in eventfs_workfn().
++ * That is required as that dput() will free the ei after
++ * the SRCU grace period is over.
++ */
++ dget(dentry);
+ }
+
+ /**
+ * eventfs_remove_rec - remove eventfs dir or file from list
+ * @ef: eventfs_file to be removed.
+- * @head: to create list of eventfs_file to be deleted
+ * @level: to check recursion depth
+ *
+ * The helper function eventfs_remove_rec() is used to clean up and free the
+ * associated data from eventfs for both of the added functions.
+ */
+-static void eventfs_remove_rec(struct eventfs_file *ef, struct list_head *head, int level)
++static void eventfs_remove_rec(struct eventfs_file *ef, int level)
+ {
+ struct eventfs_file *ef_child;
+
+@@ -810,12 +908,16 @@ static void eventfs_remove_rec(struct eventfs_file *ef, struct list_head *head,
+ /* search for nested folders or files */
+ list_for_each_entry_srcu(ef_child, &ef->ei->e_top_files, list,
+ lockdep_is_held(&eventfs_mutex)) {
+- eventfs_remove_rec(ef_child, head, level + 1);
++ eventfs_remove_rec(ef_child, level + 1);
+ }
+ }
+
++ ef->is_freed = 1;
++
++ unhook_dentry(ef->dentry);
++
+ list_del_rcu(&ef->list);
+- list_add_tail(&ef->del_list, head);
++ call_srcu(&eventfs_srcu, &ef->rcu, free_rcu_ef);
+ }
+
+ /**
+@@ -826,61 +928,22 @@ static void eventfs_remove_rec(struct eventfs_file *ef, struct list_head *head,
+ */
+ void eventfs_remove(struct eventfs_file *ef)
+ {
+- struct eventfs_file *tmp;
+- LIST_HEAD(ef_del_list);
+- struct dentry *dentry_list = NULL;
+ struct dentry *dentry;
+
+ if (!ef)
+ return;
+
+ mutex_lock(&eventfs_mutex);
+- eventfs_remove_rec(ef, &ef_del_list, 0);
+- list_for_each_entry_safe(ef, tmp, &ef_del_list, del_list) {
+- if (ef->dentry) {
+- unsigned long ptr = (unsigned long)dentry_list;
+-
+- /* Keep the dentry from being freed yet */
+- dget(ef->dentry);
+-
+- /*
+- * Paranoid: The dget() above should prevent the dentry
+- * from being freed and calling eventfs_set_ef_status_free().
+- * But just in case, set the link list LSB pointer to 1
+- * and have eventfs_set_ef_status_free() check that to
+- * make sure that if it does happen, it will not think
+- * the d_fsdata is an event_file.
+- *
+- * For this to work, no event_file should be allocated
+- * on a odd space, as the ef should always be allocated
+- * to be at least word aligned. Check for that too.
+- */
+- WARN_ON_ONCE(ptr & 1);
+-
+- ef->dentry->d_fsdata = (void *)(ptr | 1);
+- dentry_list = ef->dentry;
+- ef->dentry = NULL;
+- }
+- call_srcu(&eventfs_srcu, &ef->rcu, free_ef);
+- }
++ dentry = ef->dentry;
++ eventfs_remove_rec(ef, 0);
+ mutex_unlock(&eventfs_mutex);
+
+- while (dentry_list) {
+- unsigned long ptr;
+-
+- dentry = dentry_list;
+- ptr = (unsigned long)dentry->d_fsdata & ~1UL;
+- dentry_list = (struct dentry *)ptr;
+- dentry->d_fsdata = NULL;
+- d_invalidate(dentry);
+- mutex_lock(&eventfs_mutex);
+- /* dentry should now have at least a single reference */
+- WARN_ONCE((int)d_count(dentry) < 1,
+- "dentry %p less than one reference (%d) after invalidate\n",
+- dentry, d_count(dentry));
+- mutex_unlock(&eventfs_mutex);
+- dput(dentry);
+- }
++ /*
++ * If any of the ei children has a dentry, then the ei itself
++ * must have a dentry.
++ */
++ if (dentry)
++ simple_recursive_removal(dentry, NULL);
+ }
+
+ /**
+@@ -891,6 +954,8 @@ void eventfs_remove(struct eventfs_file *ef)
+ */
+ void eventfs_remove_events_dir(struct dentry *dentry)
+ {
++ struct eventfs_file *ef_child;
++ struct eventfs_inode *ei;
+ struct tracefs_inode *ti;
+
+ if (!dentry || !dentry->d_inode)
+@@ -900,6 +965,11 @@ void eventfs_remove_events_dir(struct dentry *dentry)
+ if (!ti || !(ti->flags & TRACEFS_EVENT_INODE))
+ return;
+
+- d_invalidate(dentry);
+- dput(dentry);
++ mutex_lock(&eventfs_mutex);
++ ei = ti->private;
++ list_for_each_entry_srcu(ef_child, &ei->e_top_files, list,
++ lockdep_is_held(&eventfs_mutex)) {
++ eventfs_remove_rec(ef_child, 0);
++ }
++ mutex_unlock(&eventfs_mutex);
+ }
+diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c
+index 0e5dba2343ea1..e6609067ef261 100644
+--- a/fs/xfs/xfs_inode_item_recover.c
++++ b/fs/xfs/xfs_inode_item_recover.c
+@@ -369,24 +369,26 @@ xlog_recover_inode_commit_pass2(
+ * superblock flag to determine whether we need to look at di_flushiter
+ * to skip replay when the on disk inode is newer than the log one
+ */
+- if (!xfs_has_v3inodes(mp) &&
+- ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
+- /*
+- * Deal with the wrap case, DI_MAX_FLUSH is less
+- * than smaller numbers
+- */
+- if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
+- ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
+- /* do nothing */
+- } else {
+- trace_xfs_log_recover_inode_skip(log, in_f);
+- error = 0;
+- goto out_release;
++ if (!xfs_has_v3inodes(mp)) {
++ if (ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
++ /*
++ * Deal with the wrap case, DI_MAX_FLUSH is less
++ * than smaller numbers
++ */
++ if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
++ ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
++ /* do nothing */
++ } else {
++ trace_xfs_log_recover_inode_skip(log, in_f);
++ error = 0;
++ goto out_release;
++ }
+ }
++
++ /* Take the opportunity to reset the flush iteration count */
++ ldip->di_flushiter = 0;
+ }
+
+- /* Take the opportunity to reset the flush iteration count */
+- ldip->di_flushiter = 0;
+
+ if (unlikely(S_ISREG(ldip->di_mode))) {
+ if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
+diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
+index 254685085c825..0b7eab0ef7d7f 100644
+--- a/include/acpi/acpi_bus.h
++++ b/include/acpi/acpi_bus.h
+@@ -539,6 +539,7 @@ int acpi_device_set_power(struct acpi_device *device, int state);
+ int acpi_bus_init_power(struct acpi_device *device);
+ int acpi_device_fix_up_power(struct acpi_device *device);
+ void acpi_device_fix_up_power_extended(struct acpi_device *adev);
++void acpi_device_fix_up_power_children(struct acpi_device *adev);
+ int acpi_bus_update_power(acpi_handle handle, int *state_p);
+ int acpi_device_update_power(struct acpi_device *device, int *state_p);
+ bool acpi_bus_power_manageable(acpi_handle handle);
+diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
+index 3c8bba9f1114a..be1dd4c1a9174 100644
+--- a/include/acpi/ghes.h
++++ b/include/acpi/ghes.h
+@@ -73,8 +73,12 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb);
+ void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
+
+ struct list_head *ghes_get_devices(void);
++
++void ghes_estatus_pool_region_free(unsigned long addr, u32 size);
+ #else
+ static inline struct list_head *ghes_get_devices(void) { return NULL; }
++
++static inline void ghes_estatus_pool_region_free(unsigned long addr, u32 size) { return; }
+ #endif
+
+ int ghes_estatus_pool_init(unsigned int num_ghes);
+diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h
+index 05100e91ecb96..6fc9bb2979e45 100644
+--- a/include/drm/bridge/samsung-dsim.h
++++ b/include/drm/bridge/samsung-dsim.h
+@@ -53,6 +53,7 @@ struct samsung_dsim_driver_data {
+ unsigned int plltmr_reg;
+ unsigned int has_freqband:1;
+ unsigned int has_clklane_stop:1;
++ unsigned int has_broken_fifoctrl_emptyhdr:1;
+ unsigned int num_clks;
+ unsigned int min_freq;
+ unsigned int max_freq;
+diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
+index 446394f846064..6ad02ad9c7b42 100644
+--- a/include/linux/amd-pstate.h
++++ b/include/linux/amd-pstate.h
+@@ -70,6 +70,10 @@ struct amd_cpudata {
+ u32 nominal_perf;
+ u32 lowest_nonlinear_perf;
+ u32 lowest_perf;
++ u32 min_limit_perf;
++ u32 max_limit_perf;
++ u32 min_limit_freq;
++ u32 max_limit_freq;
+
+ u32 max_freq;
+ u32 min_freq;
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 49f8b691496c4..392f581af2cee 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -903,10 +903,14 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
+ aux->ctx_field_size = size;
+ }
+
++static bool bpf_is_ldimm64(const struct bpf_insn *insn)
++{
++ return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
++}
++
+ static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
+ {
+- return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
+- insn->src_reg == BPF_PSEUDO_FUNC;
++ return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
+ }
+
+ struct bpf_prog_ops {
+@@ -1029,6 +1033,11 @@ struct btf_func_model {
+ */
+ #define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6)
+
++/* Indicate that current trampoline is in a tail call context. Then, it has to
++ * cache and restore tail_call_cnt to avoid infinite tail call loop.
++ */
++#define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7)
++
+ /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
+ * bytes on x86.
+ */
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index ec32ec58c59f7..ace3a4ce2fc98 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -74,7 +74,7 @@ void clk_hw_forward_rate_request(const struct clk_hw *core,
+ unsigned long parent_rate);
+
+ /**
+- * struct clk_duty - Struture encoding the duty cycle ratio of a clock
++ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
+ *
+ * @num: Numerator of the duty cycle ratio
+ * @den: Denominator of the duty cycle ratio
+@@ -129,7 +129,7 @@ struct clk_duty {
+ * @restore_context: Restore the context of the clock after a restoration
+ * of power.
+ *
+- * @recalc_rate Recalculate the rate of this clock, by querying hardware. The
++ * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
+ * parent rate is an input parameter. It is up to the caller to
+ * ensure that the prepare_mutex is held across this call. If the
+ * driver cannot figure out a rate for this clock, it must return
+@@ -456,7 +456,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ * clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+- * @parent_name: name of clock's parent
++ * @parent_data: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+@@ -471,7 +471,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+- * @parent_name: name of clock's parent
++ * @parent_data: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+@@ -649,7 +649,7 @@ struct clk_div_table {
+ * Clock with an adjustable divider affecting its output frequency. Implements
+ * .recalc_rate, .set_rate and .round_rate
+ *
+- * Flags:
++ * @flags:
+ * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
+ * register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is
+ * the raw value read from the register, with the value of zero considered
+@@ -1130,11 +1130,12 @@ struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ * @mwidth: width of the numerator bit field
+ * @nshift: shift to the denominator bit field
+ * @nwidth: width of the denominator bit field
++ * @approximation: clk driver's callback for calculating the divider clock
+ * @lock: register lock
+ *
+ * Clock with adjustable fractional divider affecting its output frequency.
+ *
+- * Flags:
++ * @flags:
+ * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
+ * is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
+ * is set then the numerator and denominator are both the value read
+@@ -1191,7 +1192,7 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
+ * Clock with an adjustable multiplier affecting its output frequency.
+ * Implements .recalc_rate, .set_rate and .round_rate
+ *
+- * Flags:
++ * @flags:
+ * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
+ * from the register, with 0 being a valid value effectively
+ * zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 068f7738be22a..28c1d3d77b70f 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -189,6 +189,7 @@ enum cpuhp_state {
+ /* Must be the last timer callback */
+ CPUHP_AP_DUMMY_TIMER_STARTING,
+ CPUHP_AP_ARM_XEN_STARTING,
++ CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
+ CPUHP_AP_ARM_CORESIGHT_STARTING,
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
+ CPUHP_AP_ARM64_ISNDEP_STARTING,
+diff --git a/include/linux/damon.h b/include/linux/damon.h
+index ae2664d1d5f1d..c70cca8a839f7 100644
+--- a/include/linux/damon.h
++++ b/include/linux/damon.h
+@@ -642,6 +642,13 @@ static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
+ return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
+ }
+
++static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
++{
++ /* {aggr,sample}_interval are unsigned long, hence could overflow */
++ return min(attrs->aggr_interval / attrs->sample_interval,
++ (unsigned long)UINT_MAX);
++}
++
+
+ int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
+ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
+diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
+index ebe78bd3d121d..b3772edca2e6e 100644
+--- a/include/linux/dma-fence.h
++++ b/include/linux/dma-fence.h
+@@ -498,6 +498,21 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
+ return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
+ }
+
++/**
++ * dma_fence_is_later_or_same - return true if f1 is later or same as f2
++ * @f1: the first fence from the same context
++ * @f2: the second fence from the same context
++ *
++ * Returns true if f1 is chronologically later than f2 or the same fence. Both
++ * fences must be from the same context, since a seqno is not re-used across
++ * contexts.
++ */
++static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
++ struct dma_fence *f2)
++{
++ return f1 == f2 || dma_fence_is_later(f1, f2);
++}
++
+ /**
+ * dma_fence_later - return the chronologically later fence
+ * @f1: the first fence from the same context
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index 62b61527bcc4f..1b523fd48586f 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -1045,10 +1045,10 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
+
+ /**
+ * ethtool_sprintf - Write formatted string to ethtool string data
+- * @data: Pointer to start of string to update
++ * @data: Pointer to a pointer to the start of string to update
+ * @fmt: Format of string to write
+ *
+- * Write formatted string to data. Update data to point at start of
++ * Write formatted string to *data. Update *data to point at start of
+ * next string.
+ */
+ extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
+diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
+index a82a4bb6ce68b..cf1adceb02697 100644
+--- a/include/linux/f2fs_fs.h
++++ b/include/linux/f2fs_fs.h
+@@ -104,6 +104,7 @@ enum f2fs_error {
+ ERROR_CORRUPTED_VERITY_XATTR,
+ ERROR_CORRUPTED_XATTR,
+ ERROR_INVALID_NODE_REFERENCE,
++ ERROR_INCONSISTENT_NAT,
+ ERROR_MAX,
+ };
+
+diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
+index 107613f7d7920..f6cd0f909d9fb 100644
+--- a/include/linux/generic-radix-tree.h
++++ b/include/linux/generic-radix-tree.h
+@@ -38,6 +38,7 @@
+
+ #include <asm/page.h>
+ #include <linux/bug.h>
++#include <linux/limits.h>
+ #include <linux/log2.h>
+ #include <linux/math.h>
+ #include <linux/types.h>
+@@ -184,6 +185,12 @@ void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);
+ static inline void __genradix_iter_advance(struct genradix_iter *iter,
+ size_t obj_size)
+ {
++ if (iter->offset + obj_size < iter->offset) {
++ iter->offset = SIZE_MAX;
++ iter->pos = SIZE_MAX;
++ return;
++ }
++
+ iter->offset += obj_size;
+
+ if (!is_power_of_2(obj_size) &&
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 964ca1f15e3f6..3b08a29572298 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -679,6 +679,7 @@ struct hid_device { /* device report descriptor */
+ struct list_head debug_list;
+ spinlock_t debug_list_lock;
+ wait_queue_head_t debug_wait;
++ struct kref ref;
+
+ unsigned int id; /* system unique id */
+
+@@ -687,6 +688,8 @@ struct hid_device { /* device report descriptor */
+ #endif /* CONFIG_BPF */
+ };
+
++void hiddev_free(struct kref *ref);
++
+ #define to_hid_device(pdev) \
+ container_of(pdev, struct hid_device, dev)
+
+diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
+index 39fbfb4be944b..9da4f3f1e6d61 100644
+--- a/include/linux/hisi_acc_qm.h
++++ b/include/linux/hisi_acc_qm.h
+@@ -144,6 +144,13 @@ enum qm_vf_state {
+ QM_NOT_READY,
+ };
+
++enum qm_misc_ctl_bits {
++ QM_DRIVER_REMOVING = 0x0,
++ QM_RST_SCHED,
++ QM_RESETTING,
++ QM_MODULE_PARAM,
++};
++
+ enum qm_cap_bits {
+ QM_SUPPORT_DB_ISOLATION = 0x0,
+ QM_SUPPORT_FUNC_QOS,
+diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
+index 8a3115516a1ba..136e9842120e8 100644
+--- a/include/linux/hw_random.h
++++ b/include/linux/hw_random.h
+@@ -63,5 +63,6 @@ extern void hwrng_unregister(struct hwrng *rng);
+ extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
+
+ extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
++extern long hwrng_yield(struct hwrng *rng);
+
+ #endif /* LINUX_HWRANDOM_H_ */
+diff --git a/include/linux/idr.h b/include/linux/idr.h
+index a0dce14090a9e..da5f5fa4a3a6a 100644
+--- a/include/linux/idr.h
++++ b/include/linux/idr.h
+@@ -200,7 +200,7 @@ static inline void idr_preload_end(void)
+ */
+ #define idr_for_each_entry_ul(idr, entry, tmp, id) \
+ for (tmp = 0, id = 0; \
+- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
++ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
+ tmp = id, ++id)
+
+ /**
+@@ -224,10 +224,12 @@ static inline void idr_preload_end(void)
+ * @id: Entry ID.
+ *
+ * Continue to iterate over entries, continuing after the current position.
++ * After normal termination @entry is left with the value NULL. This
++ * is convenient for a "not found" value.
+ */
+ #define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
+ for (tmp = id; \
+- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
++ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
+ tmp = id, ++id)
+
+ /*
+diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
+index 13d19b9be9f4a..5fd664fb71c86 100644
+--- a/include/linux/io_uring_types.h
++++ b/include/linux/io_uring_types.h
+@@ -327,6 +327,9 @@ struct io_ring_ctx {
+
+ struct list_head io_buffers_cache;
+
++ /* deferred free list, protected by ->uring_lock */
++ struct hlist_head io_buf_list;
++
+ /* Keep this last, we don't need it for the fast path */
+ struct wait_queue_head poll_wq;
+ struct io_restriction restrictions;
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index c50a769d569a6..0225cf7445de2 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -703,6 +703,7 @@ static inline void dev_iommu_priv_set(struct device *dev, void *priv)
+ dev->iommu->priv = priv;
+ }
+
++extern struct mutex iommu_probe_device_lock;
+ int iommu_probe_device(struct device *dev);
+
+ int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index d8a6fdce93738..90081afa10ce5 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -215,8 +215,6 @@ struct irq_data {
+ * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
+ * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
+ * IRQD_CAN_RESERVE - Can use reservation mode
+- * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
+- * required
+ * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
+ * from actual interrupt context.
+ * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
+@@ -247,11 +245,10 @@ enum {
+ IRQD_SINGLE_TARGET = BIT(24),
+ IRQD_DEFAULT_TRIGGER_SET = BIT(25),
+ IRQD_CAN_RESERVE = BIT(26),
+- IRQD_MSI_NOMASK_QUIRK = BIT(27),
+- IRQD_HANDLE_ENFORCE_IRQCTX = BIT(28),
+- IRQD_AFFINITY_ON_ACTIVATE = BIT(29),
+- IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(30),
+- IRQD_RESEND_WHEN_IN_PROGRESS = BIT(31),
++ IRQD_HANDLE_ENFORCE_IRQCTX = BIT(27),
++ IRQD_AFFINITY_ON_ACTIVATE = BIT(28),
++ IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(29),
++ IRQD_RESEND_WHEN_IN_PROGRESS = BIT(30),
+ };
+
+ #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+@@ -426,21 +423,6 @@ static inline bool irqd_can_reserve(struct irq_data *d)
+ return __irqd_to_state(d) & IRQD_CAN_RESERVE;
+ }
+
+-static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
+-{
+- __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+-static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
+-{
+- __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+-static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
+-{
+- return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+ static inline void irqd_set_affinity_on_activate(struct irq_data *d)
+ {
+ __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
+diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
+index ac962c4cb44b1..2b8d85aae0832 100644
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -48,7 +48,7 @@ LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
+ LSM_HOOK(int, 0, syslog, int type)
+ LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
+ const struct timezone *tz)
+-LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
++LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages)
+ LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
+ LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file)
+ LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
+@@ -273,7 +273,7 @@ LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
+ LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
+ LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
+ LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
+-LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
++LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, void **ctx,
+ u32 *ctxlen)
+
+ #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
+diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
+index 47e7a3a61ce69..e8bcad641d8c2 100644
+--- a/include/linux/mfd/core.h
++++ b/include/linux/mfd/core.h
+@@ -92,7 +92,7 @@ struct mfd_cell {
+ * (above) when matching OF nodes with devices that have identical
+ * compatible strings
+ */
+- const u64 of_reg;
++ u64 of_reg;
+
+ /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
+ bool use_of_reg;
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index daa2f40d9ce65..7b12eebc5586d 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -295,7 +295,9 @@ struct mmc_card {
+ #define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
+ #define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
+ #define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */
++#define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */
+
++ bool written_flag; /* Indicates eMMC has been written since power on */
+ bool reenable_cmdq; /* Re-enable Command Queue */
+
+ unsigned int erase_size; /* erase size in sectors */
+diff --git a/include/linux/msi.h b/include/linux/msi.h
+index a50ea79522f85..ddace8c34dcf9 100644
+--- a/include/linux/msi.h
++++ b/include/linux/msi.h
+@@ -547,12 +547,6 @@ enum {
+ MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = (1 << 5),
+ /* Free MSI descriptors */
+ MSI_FLAG_FREE_MSI_DESCS = (1 << 6),
+- /*
+- * Quirk to handle MSI implementations which do not provide
+- * masking. Currently known to affect x86, but has to be partially
+- * handled in the core MSI code.
+- */
+- MSI_FLAG_NOMASK_QUIRK = (1 << 7),
+
+ /* Mask for the generic functionality */
+ MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 0896aaa91dd7b..b8e60a20416ba 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1774,6 +1774,13 @@ enum netdev_ml_priv_type {
+ ML_PRIV_CAN,
+ };
+
++enum netdev_stat_type {
++ NETDEV_PCPU_STAT_NONE,
++ NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */
++ NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */
++ NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
++};
++
+ /**
+ * struct net_device - The DEVICE structure.
+ *
+@@ -1968,10 +1975,14 @@ enum netdev_ml_priv_type {
+ *
+ * @ml_priv: Mid-layer private
+ * @ml_priv_type: Mid-layer private type
+- * @lstats: Loopback statistics
+- * @tstats: Tunnel statistics
+- * @dstats: Dummy statistics
+- * @vstats: Virtual ethernet statistics
++ *
++ * @pcpu_stat_type: Type of device statistics which the core should
++ * allocate/free: none, lstats, tstats, dstats. "none"
++ * means the driver handles statistics allocation and
++ * freeing internally.
++ * @lstats: Loopback statistics: packets, bytes
++ * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes
++ * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes
+ *
+ * @garp_port: GARP
+ * @mrp_port: MRP
+@@ -2328,6 +2339,7 @@ struct net_device {
+ void *ml_priv;
+ enum netdev_ml_priv_type ml_priv_type;
+
++ enum netdev_stat_type pcpu_stat_type:8;
+ union {
+ struct pcpu_lstats __percpu *lstats;
+ struct pcpu_sw_netstats __percpu *tstats;
+@@ -2725,6 +2737,16 @@ struct pcpu_sw_netstats {
+ struct u64_stats_sync syncp;
+ } __aligned(4 * sizeof(u64));
+
++struct pcpu_dstats {
++ u64 rx_packets;
++ u64 rx_bytes;
++ u64 rx_drops;
++ u64 tx_packets;
++ u64 tx_bytes;
++ u64 tx_drops;
++ struct u64_stats_sync syncp;
++} __aligned(8 * sizeof(u64));
++
+ struct pcpu_lstats {
+ u64_stats_t packets;
+ u64_stats_t bytes;
+@@ -5214,5 +5236,6 @@ extern struct net_device *blackhole_netdev;
+ #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
+ #define DEV_STATS_ADD(DEV, FIELD, VAL) \
+ atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
++#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
+
+ #endif /* _LINUX_NETDEVICE_H */
+diff --git a/include/linux/numa.h b/include/linux/numa.h
+index 59df211d051fa..a904861de8000 100644
+--- a/include/linux/numa.h
++++ b/include/linux/numa.h
+@@ -12,6 +12,7 @@
+ #define MAX_NUMNODES (1 << NODES_SHIFT)
+
+ #define NUMA_NO_NODE (-1)
++#define NUMA_NO_MEMBLK (-1)
+
+ /* optionally keep NUMA memory info available post init */
+ #ifdef CONFIG_NUMA_KEEP_MEMINFO
+@@ -25,7 +26,7 @@
+ #include <asm/sparsemem.h>
+
+ /* Generic implementation available */
+-int numa_map_to_online_node(int node);
++int numa_nearest_node(int node, unsigned int state);
+
+ #ifndef memory_add_physaddr_to_nid
+ static inline int memory_add_physaddr_to_nid(u64 start)
+@@ -43,11 +44,18 @@ static inline int phys_to_target_node(u64 start)
+ return 0;
+ }
+ #endif
++#ifndef numa_fill_memblks
++static inline int __init numa_fill_memblks(u64 start, u64 end)
++{
++ return NUMA_NO_MEMBLK;
++}
++#endif
+ #else /* !CONFIG_NUMA */
+-static inline int numa_map_to_online_node(int node)
++static inline int numa_nearest_node(int node, unsigned int state)
+ {
+ return NUMA_NO_NODE;
+ }
++
+ static inline int memory_add_physaddr_to_nid(u64 start)
+ {
+ return 0;
+@@ -58,6 +66,8 @@ static inline int phys_to_target_node(u64 start)
+ }
+ #endif
+
++#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE)
++
+ #ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
+ extern const struct attribute_group arch_node_dev_group;
+ #endif
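Since numa_map_to_online_node() is now a one-line wrapper, existing callers stay source-compatible while new code can ask for the nearest node in any node state. A small equivalence sketch (example_nearest() is illustrative):

	static bool example_nearest(int nid)
	{
		int a = numa_nearest_node(nid, N_ONLINE);	/* new interface */
		int b = numa_map_to_online_node(nid);		/* compat macro */

		return a == b;	/* always true: the macro expands to the call above */
	}
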
+diff --git a/include/linux/objtool.h b/include/linux/objtool.h
+index 03f82c2c2ebf6..b5440e7da55bf 100644
+--- a/include/linux/objtool.h
++++ b/include/linux/objtool.h
+@@ -130,7 +130,8 @@
+ * it will be ignored.
+ */
+ .macro VALIDATE_UNRET_BEGIN
+-#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY)
++#if defined(CONFIG_NOINSTR_VALIDATION) && \
++ (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
+ .Lhere_\@:
+ .pushsection .discard.validate_unret
+ .long .Lhere_\@ - .
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 351c3b7f93a14..8c9608b217b00 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -204,6 +204,8 @@ enum mapping_flags {
+ AS_NO_WRITEBACK_TAGS = 5,
+ AS_LARGE_FOLIO_SUPPORT = 6,
+ AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */
++ AS_STABLE_WRITES, /* must wait for writeback before modifying
++ folio contents */
+ };
+
+ /**
+@@ -289,6 +291,21 @@ static inline void mapping_clear_release_always(struct address_space *mapping)
+ clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+ }
+
++static inline bool mapping_stable_writes(const struct address_space *mapping)
++{
++ return test_bit(AS_STABLE_WRITES, &mapping->flags);
++}
++
++static inline void mapping_set_stable_writes(struct address_space *mapping)
++{
++ set_bit(AS_STABLE_WRITES, &mapping->flags);
++}
++
++static inline void mapping_clear_stable_writes(struct address_space *mapping)
++{
++ clear_bit(AS_STABLE_WRITES, &mapping->flags);
++}
++
+ static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
+ {
+ return mapping->gfp_mask;
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 8c7c2c3c6c652..b56417276042d 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1624,6 +1624,8 @@ struct msix_entry {
+ u16 entry; /* Driver uses to specify entry, OS writes */
+ };
+
++struct msi_domain_template;
++
+ #ifdef CONFIG_PCI_MSI
+ int pci_msi_vec_count(struct pci_dev *dev);
+ void pci_disable_msi(struct pci_dev *dev);
+@@ -1656,6 +1658,11 @@ void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
+ void pci_free_irq_vectors(struct pci_dev *dev);
+ int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
+ const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
++bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
++ unsigned int hwsize, void *data);
++struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
++ const struct irq_affinity_desc *affdesc);
++void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
+
+ #else
+ static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
+@@ -1719,6 +1726,25 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
+ {
+ return cpu_possible_mask;
+ }
++
++static inline bool pci_create_ims_domain(struct pci_dev *pdev,
++ const struct msi_domain_template *template,
++ unsigned int hwsize, void *data)
++{ return false; }
++
++static inline struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev,
++ union msi_instance_cookie *icookie,
++ const struct irq_affinity_desc *affdesc)
++{
++ struct msi_map map = { .index = -ENOSYS, };
++
++ return map;
++}
++
++static inline void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map)
++{
++}
++
+ #endif
+
+ /**
+@@ -2616,14 +2642,6 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
+ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
+ #endif
+
+-struct msi_domain_template;
+-
+-bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
+- unsigned int hwsize, void *data);
+-struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
+- const struct irq_affinity_desc *affdesc);
+-void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
+-
+ #include <linux/dma-mapping.h>
+
+ #define pci_printk(level, pdev, fmt, arg...) \
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 5fb3d4c393a9e..fe4a3589bb3fd 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -180,6 +180,8 @@
+ #define PCI_DEVICE_ID_BERKOM_A4T 0xffa4
+ #define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8
+
++#define PCI_VENDOR_ID_ITTIM 0x0b48
++
+ #define PCI_VENDOR_ID_COMPAQ 0x0e11
+ #define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508
+ #define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc
+@@ -579,6 +581,7 @@
+ #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
+ #define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
+ #define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3
++#define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a
+ #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
+ #define PCI_DEVICE_ID_AMD_LANCE 0x2000
+ #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 7b5406e3288d9..e846f87e2d099 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -843,11 +843,11 @@ struct perf_event {
+ };
+
+ /*
+- * ,-----------------------[1:n]----------------------.
+- * V V
+- * perf_event_context <-[1:n]-> perf_event_pmu_context <--- perf_event
+- * ^ ^ | |
+- * `--------[1:n]---------' `-[n:1]-> pmu <-[1:n]-'
++ * ,-----------------------[1:n]------------------------.
++ * V V
++ * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event
++ * | |
++ * `--[n:1]-> pmu <-[1:n]--'
+ *
+ *
+ * struct perf_event_pmu_context lifetime is refcount based and RCU freed
+@@ -865,6 +865,9 @@ struct perf_event {
+ * ctx->mutex pinning the configuration. Since we hold a reference on
+ * group_leader (through the filedesc) it can't go away, therefore it's
+ * associated pmu_ctx must exist and cannot change due to ctx->mutex.
++ *
++ * perf_event holds a refcount on perf_event_context
++ * perf_event holds a refcount on perf_event_pmu_context
+ */
+ struct perf_event_pmu_context {
+ struct pmu *pmu;
+@@ -879,6 +882,7 @@ struct perf_event_pmu_context {
+ unsigned int embedded : 1;
+
+ unsigned int nr_events;
++ unsigned int nr_cgroups;
+
+ atomic_t refcount; /* event <-> epc */
+ struct rcu_head rcu_head;
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 1400c37b29c75..629c1633bbd00 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -374,24 +374,39 @@ const struct dev_pm_ops name = { \
+ RUNTIME_PM_OPS(runtime_suspend_fn, runtime_resume_fn, idle_fn) \
+ }
+
+-#ifdef CONFIG_PM
+-#define _EXPORT_DEV_PM_OPS(name, license, ns) \
++#define _EXPORT_PM_OPS(name, license, ns) \
+ const struct dev_pm_ops name; \
+ __EXPORT_SYMBOL(name, license, ns); \
+ const struct dev_pm_ops name
+-#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
+-#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
+-#else
+-#define _EXPORT_DEV_PM_OPS(name, license, ns) \
++
++#define _DISCARD_PM_OPS(name, license, ns) \
+ static __maybe_unused const struct dev_pm_ops __static_##name
++
++#ifdef CONFIG_PM
++#define _EXPORT_DEV_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
++#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
++#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
++#else
++#define _EXPORT_DEV_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
+ #define EXPORT_PM_FN_GPL(name)
+ #define EXPORT_PM_FN_NS_GPL(name, ns)
+ #endif
+
+-#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
+-#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "")
+-#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
+-#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns)
++#ifdef CONFIG_PM_SLEEP
++#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
++#else
++#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
++#endif
++
++#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
++#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "")
++#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
++#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns)
++
++#define EXPORT_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "", "")
++#define EXPORT_GPL_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", "")
++#define EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "", #ns)
++#define EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", #ns)
+
+ /*
+ * Use this if you want to use the same suspend and resume callbacks for suspend
+@@ -404,19 +419,19 @@ const struct dev_pm_ops name = { \
+ _DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL)
+
+ #define EXPORT_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+- EXPORT_DEV_PM_OPS(name) = { \
++ EXPORT_DEV_SLEEP_PM_OPS(name) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+ #define EXPORT_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+- EXPORT_GPL_DEV_PM_OPS(name) = { \
++ EXPORT_GPL_DEV_SLEEP_PM_OPS(name) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+ #define EXPORT_NS_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
+- EXPORT_NS_DEV_PM_OPS(name, ns) = { \
++ EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+ #define EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
+- EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
++ EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+
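The practical effect is that ops containing only system-sleep callbacks are now discarded when CONFIG_PM_SLEEP is off, not merely when CONFIG_PM is off. A hedged driver-side sketch (my_suspend/my_resume are illustrative stubs):

	static int my_suspend(struct device *dev) { return 0; }
	static int my_resume(struct device *dev) { return 0; }

	/* Exported only under CONFIG_PM_SLEEP; otherwise compiled out. */
	EXPORT_SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);
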
+diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
+index a427f13c757f4..85b86768c0b91 100644
+--- a/include/linux/power_supply.h
++++ b/include/linux/power_supply.h
+@@ -767,7 +767,7 @@ struct power_supply_battery_info {
+ int bti_resistance_tolerance;
+ };
+
+-extern struct atomic_notifier_head power_supply_notifier;
++extern struct blocking_notifier_head power_supply_notifier;
+ extern int power_supply_reg_notifier(struct notifier_block *nb);
+ extern void power_supply_unreg_notifier(struct notifier_block *nb);
+ #if IS_ENABLED(CONFIG_POWER_SUPPLY)
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 1424670df161d..9aa6358a1a16b 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -99,14 +99,21 @@ static __always_inline unsigned char interrupt_context_level(void)
+ return level;
+ }
+
++/*
++ * These macro definitions avoid redundant invocations of preempt_count(),
++ * each of which would cost a separate load given that preempt_count() is
++ * commonly implemented with READ_ONCE().
++ */
++
+ #define nmi_count() (preempt_count() & NMI_MASK)
+ #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+ #ifdef CONFIG_PREEMPT_RT
+ # define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK)
++# define irq_count() ((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count())
+ #else
+ # define softirq_count() (preempt_count() & SOFTIRQ_MASK)
++# define irq_count() (preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))
+ #endif
+-#define irq_count() (nmi_count() | hardirq_count() | softirq_count())
+
+ /*
+ * Macros to retrieve the current execution context:
+@@ -119,7 +126,11 @@ static __always_inline unsigned char interrupt_context_level(void)
+ #define in_nmi() (nmi_count())
+ #define in_hardirq() (hardirq_count())
+ #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+-#define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq()))
++#ifdef CONFIG_PREEMPT_RT
++# define in_task() (!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq()))
++#else
++# define in_task() (!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
++#endif
+
+ /*
+ * The following macros are deprecated and should not be used in new code:
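For the !CONFIG_PREEMPT_RT case the rewrite is a pure strength reduction: one preempt_count() load where the old expression performed three. A rough equivalent of the new irq_count():

	static inline unsigned int example_irq_count(void)
	{
		/* one load replaces nmi_count() | hardirq_count() | softirq_count() */
		return preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK);
	}
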
+diff --git a/include/linux/pwm.h b/include/linux/pwm.h
+index d2f9f690a9c14..fe0f38ce1bdee 100644
+--- a/include/linux/pwm.h
++++ b/include/linux/pwm.h
+@@ -41,8 +41,8 @@ struct pwm_args {
+ };
+
+ enum {
+- PWMF_REQUESTED = 1 << 0,
+- PWMF_EXPORTED = 1 << 1,
++ PWMF_REQUESTED = 0,
++ PWMF_EXPORTED = 1,
+ };
+
+ /*
+diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
+index 0ee96ea7a0e90..1b37fa8fc723d 100644
+--- a/include/linux/sched/coredump.h
++++ b/include/linux/sched/coredump.h
+@@ -91,4 +91,14 @@ static inline int get_dumpable(struct mm_struct *mm)
+ MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK)
+
+ #define MMF_VM_MERGE_ANY 29
++#define MMF_HAS_MDWE_NO_INHERIT 30
++
++static inline unsigned long mmf_init_flags(unsigned long flags)
++{
++ if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT))
++ flags &= ~((1UL << MMF_HAS_MDWE) |
++ (1UL << MMF_HAS_MDWE_NO_INHERIT));
++ return flags & MMF_INIT_MASK;
++}
++
+ #endif /* _LINUX_SCHED_COREDUMP_H */
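At fork time the helper strips both MDWE bits when no-inherit was requested, so the child starts unrestricted. A hedged sketch of the intended call site (example_child_flags() is illustrative):

	static unsigned long example_child_flags(const struct mm_struct *parent_mm)
	{
		/* With MMF_HAS_MDWE | MMF_HAS_MDWE_NO_INHERIT set in the parent,
		 * the result carries neither bit, masked to MMF_INIT_MASK. */
		return mmf_init_flags(parent_mm->flags);
	}
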
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index c1637515a8a41..c953b8c0d2f43 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -106,6 +106,7 @@ struct sk_psock {
+ struct mutex work_mutex;
+ struct sk_psock_work_state work_state;
+ struct delayed_work work;
++ struct sock *sk_pair;
+ struct rcu_work rwork;
+ };
+
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index 39b74d83c7c4a..cfcb7e2c3813f 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -383,6 +383,7 @@ struct ucred {
+ #define SOL_MPTCP 284
+ #define SOL_MCTP 285
+ #define SOL_SMC 286
++#define SOL_VSOCK 287
+
+ /* IPX options */
+ #define IPX_TYPE 1
+diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
+index 7f8b478fdeb3d..8cc7a99927f95 100644
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -566,6 +566,7 @@ struct spi_controller {
+ #define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */
+ #define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */
+ #define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select slave */
++#define SPI_CONTROLLER_SUSPENDED BIT(6) /* Currently suspended */
+
+ /* Flag indicating if the allocation of this struct is devres-managed */
+ bool devm_allocated;
+diff --git a/include/linux/string.h b/include/linux/string.h
+index dbfc66400050f..5077776e995e0 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -5,7 +5,9 @@
+ #include <linux/compiler.h> /* for inline */
+ #include <linux/types.h> /* for size_t */
+ #include <linux/stddef.h> /* for NULL */
++#include <linux/err.h> /* for ERR_PTR() */
+ #include <linux/errno.h> /* for E2BIG */
++#include <linux/overflow.h> /* for check_mul_overflow() */
+ #include <linux/stdarg.h>
+ #include <uapi/linux/string.h>
+
+@@ -14,6 +16,44 @@ extern void *memdup_user(const void __user *, size_t);
+ extern void *vmemdup_user(const void __user *, size_t);
+ extern void *memdup_user_nul(const void __user *, size_t);
+
++/**
++ * memdup_array_user - duplicate array from user space
++ * @src: source address in user space
++ * @n: number of array members to copy
++ * @size: size of one array member
++ *
++ * Return: an ERR_PTR() on failure. Result is physically
++ * contiguous, to be freed by kfree().
++ */
++static inline void *memdup_array_user(const void __user *src, size_t n, size_t size)
++{
++ size_t nbytes;
++
++ if (check_mul_overflow(n, size, &nbytes))
++ return ERR_PTR(-EOVERFLOW);
++
++ return memdup_user(src, nbytes);
++}
++
++/**
++ * vmemdup_array_user - duplicate array from user space
++ * @src: source address in user space
++ * @n: number of array members to copy
++ * @size: size of one array member
++ *
++ * Return: an ERR_PTR() on failure. Result may not be
++ * physically contiguous. Use kvfree() to free.
++ */
++static inline void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
++{
++ size_t nbytes;
++
++ if (check_mul_overflow(n, size, &nbytes))
++ return ERR_PTR(-EOVERFLOW);
++
++ return vmemdup_user(src, nbytes);
++}
++
+ /*
+ * Include machine specific inline routines
+ */
+@@ -277,10 +317,12 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+ */
+ #define strtomem_pad(dest, src, pad) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
++ const size_t _src_len = __builtin_object_size(src, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+- memcpy_and_pad(dest, _dest_len, src, strnlen(src, _dest_len), pad); \
++ memcpy_and_pad(dest, _dest_len, src, \
++ strnlen(src, min(_src_len, _dest_len)), pad); \
+ } while (0)
+
+ /**
+@@ -298,10 +340,11 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+ */
+ #define strtomem(dest, src) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
++ const size_t _src_len = __builtin_object_size(src, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+- memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len))); \
++ memcpy(dest, src, strnlen(src, min(_src_len, _dest_len))); \
+ } while (0)
+
+ /**
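Both array helpers route the n * size multiplication through check_mul_overflow(), so a user-controlled count cannot wrap the allocation size. A minimal sketch of the intended call pattern (uptr and n are assumed to come from a uapi request; the function name is illustrative):

	static long example_copy_from_user(const void __user *uptr, size_t n)
	{
		u32 *vals;

		vals = memdup_array_user(uptr, n, sizeof(*vals));
		if (IS_ERR(vals))
			return PTR_ERR(vals);	/* -EOVERFLOW on a wrapped size */

		/* ... use vals[0..n-1] ... */
		kfree(vals);
		return 0;
	}
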
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index af7358277f1c3..e9d4377d03c6e 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -92,6 +92,7 @@ struct rpc_clnt {
+ };
+ const struct cred *cl_cred;
+ unsigned int cl_max_connect; /* max number of transports not to the same IP */
++ struct super_block *pipefs_sb;
+ };
+
+ /*
+diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
+index 09d7429d67c0e..61b40ea81f4d3 100644
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -242,6 +242,7 @@ extern void __register_sysctl_init(const char *path, struct ctl_table *table,
+ extern struct ctl_table_header *register_sysctl_mount_point(const char *path);
+
+ void do_sysctl_args(void);
++bool sysctl_is_alias(char *param);
+ int do_proc_douintvec(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos,
+ int (*conv)(unsigned long *lvalp,
+@@ -287,6 +288,11 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
+ static inline void do_sysctl_args(void)
+ {
+ }
++
++static inline bool sysctl_is_alias(char *param)
++{
++ return false;
++}
+ #endif /* CONFIG_SYSCTL */
+
+ int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
+diff --git a/include/linux/topology.h b/include/linux/topology.h
+index fea32377f7c77..52f5850730b3e 100644
+--- a/include/linux/topology.h
++++ b/include/linux/topology.h
+@@ -251,7 +251,7 @@ extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int
+ #else
+ static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
+ {
+- return cpumask_nth(cpu, cpus);
++ return cpumask_nth_and(cpu, cpus, cpu_online_mask);
+ }
+
+ static inline const struct cpumask *
+diff --git a/include/linux/torture.h b/include/linux/torture.h
+index bb466eec01e42..017f0f710815a 100644
+--- a/include/linux/torture.h
++++ b/include/linux/torture.h
+@@ -81,7 +81,8 @@ static inline void torture_random_init(struct torture_random_state *trsp)
+ }
+
+ /* Definitions for high-resolution-timer sleeps. */
+-int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp);
++int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode,
++ struct torture_random_state *trsp);
+ int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp);
+ int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp);
+ int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp);
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 21ae37e49319a..cf9f0c61796e1 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -492,6 +492,7 @@ enum {
+ EVENT_FILE_FL_TRIGGER_COND_BIT,
+ EVENT_FILE_FL_PID_FILTER_BIT,
+ EVENT_FILE_FL_WAS_ENABLED_BIT,
++ EVENT_FILE_FL_FREED_BIT,
+ };
+
+ extern struct trace_event_file *trace_get_event_file(const char *instance,
+@@ -630,6 +631,7 @@ extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
+ * TRIGGER_COND - When set, one or more triggers has an associated filter
+ * PID_FILTER - When set, the event is filtered based on pid
+ * WAS_ENABLED - Set when enabled to know to clear trace on module removal
++ * FREED - File descriptor is freed; all fields should be considered invalid
+ */
+ enum {
+ EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
+@@ -643,6 +645,7 @@ enum {
+ EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
+ EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
+ EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
++ EVENT_FILE_FL_FREED = (1 << EVENT_FILE_FL_FREED_BIT),
+ };
+
+ struct trace_event_file {
+@@ -671,6 +674,7 @@ struct trace_event_file {
+ * caching and such. Which is mostly OK ;-)
+ */
+ unsigned long flags;
++ atomic_t ref; /* ref count for opened files */
+ atomic_t sm_ref; /* soft-mode reference counter */
+ atomic_t tm_ref; /* trigger-mode reference counter */
+ };
+diff --git a/include/linux/udp.h b/include/linux/udp.h
+index 43c1fb2d2c21a..d04188714dca1 100644
+--- a/include/linux/udp.h
++++ b/include/linux/udp.h
+@@ -32,25 +32,30 @@ static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
+ return (num + net_hash_mix(net)) & mask;
+ }
+
++enum {
++ UDP_FLAGS_CORK, /* Cork is required */
++ UDP_FLAGS_NO_CHECK6_TX, /* Send zero UDP6 checksums on TX? */
++ UDP_FLAGS_NO_CHECK6_RX, /* Allow zero UDP6 checksums on RX? */
++ UDP_FLAGS_GRO_ENABLED, /* Request GRO aggregation */
++ UDP_FLAGS_ACCEPT_FRAGLIST,
++ UDP_FLAGS_ACCEPT_L4,
++ UDP_FLAGS_ENCAP_ENABLED, /* This socket enabled encap */
++ UDP_FLAGS_UDPLITE_SEND_CC, /* set via udplite setsockopt */
++ UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */
++};
++
+ struct udp_sock {
+ /* inet_sock has to be the first member */
+ struct inet_sock inet;
+ #define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0]
+ #define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1]
+ #define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node
++
++ unsigned long udp_flags;
++
+ int pending; /* Any pending frames ? */
+- unsigned int corkflag; /* Cork is required */
+ __u8 encap_type; /* Is this an Encapsulation socket? */
+- unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
+- no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
+- encap_enabled:1, /* This socket enabled encap
+- * processing; UDP tunnels and
+- * different encapsulation layer set
+- * this
+- */
+- gro_enabled:1, /* Request GRO aggregation */
+- accept_udp_l4:1,
+- accept_udp_fraglist:1;
++
+ /*
+ * Following member retains the information to create a UDP header
+ * when the socket is uncorked.
+@@ -62,12 +67,6 @@ struct udp_sock {
+ */
+ __u16 pcslen;
+ __u16 pcrlen;
+-/* indicator bits used by pcflag: */
+-#define UDPLITE_BIT 0x1 /* set by udplite proto init function */
+-#define UDPLITE_SEND_CC 0x2 /* set via udplite setsockopt */
+-#define UDPLITE_RECV_CC 0x4 /* set via udplite setsocktopt */
+- __u8 pcflag; /* marks socket as UDP-Lite if > 0 */
+- __u8 unused[3];
+ /*
+ * For encapsulation sockets.
+ */
+@@ -95,28 +94,39 @@ struct udp_sock {
+ int forward_threshold;
+ };
+
++#define udp_test_bit(nr, sk) \
++ test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_set_bit(nr, sk) \
++ set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_test_and_set_bit(nr, sk) \
++ test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_clear_bit(nr, sk) \
++ clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_assign_bit(nr, sk, val) \
++ assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
++
+ #define UDP_MAX_SEGMENTS (1 << 6UL)
+
+ #define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
+
+ static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
+ {
+- udp_sk(sk)->no_check6_tx = val;
++ udp_assign_bit(NO_CHECK6_TX, sk, val);
+ }
+
+ static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
+ {
+- udp_sk(sk)->no_check6_rx = val;
++ udp_assign_bit(NO_CHECK6_RX, sk, val);
+ }
+
+-static inline bool udp_get_no_check6_tx(struct sock *sk)
++static inline bool udp_get_no_check6_tx(const struct sock *sk)
+ {
+- return udp_sk(sk)->no_check6_tx;
++ return udp_test_bit(NO_CHECK6_TX, sk);
+ }
+
+-static inline bool udp_get_no_check6_rx(struct sock *sk)
++static inline bool udp_get_no_check6_rx(const struct sock *sk)
+ {
+- return udp_sk(sk)->no_check6_rx;
++ return udp_test_bit(NO_CHECK6_RX, sk);
+ }
+
+ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
+@@ -135,10 +145,12 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+ if (!skb_is_gso(skb))
+ return false;
+
+- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
++ !udp_test_bit(ACCEPT_L4, sk))
+ return true;
+
+- if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST &&
++ !udp_test_bit(ACCEPT_FRAGLIST, sk))
+ return true;
+
+ return false;
+@@ -146,8 +158,8 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+
+ static inline void udp_allow_gso(struct sock *sk)
+ {
+- udp_sk(sk)->accept_udp_l4 = 1;
+- udp_sk(sk)->accept_udp_fraglist = 1;
++ udp_set_bit(ACCEPT_L4, sk);
++ udp_set_bit(ACCEPT_FRAGLIST, sk);
+ }
+
+ #define udp_portaddr_for_each_entry(__sk, list) \
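The accessors paste the short flag name onto the UDP_FLAGS_ prefix, so every flag lives in one udp_flags word that can be updated atomically without the socket lock. A hedged expansion sketch (the example_* wrappers are illustrative):

	static inline bool example_no_check6_tx(const struct sock *sk)
	{
		/* udp_test_bit(NO_CHECK6_TX, sk) becomes: */
		return test_bit(UDP_FLAGS_NO_CHECK6_TX, &udp_sk(sk)->udp_flags);
	}

	static inline void example_enable_gro(struct sock *sk)
	{
		/* udp_set_bit(GRO_ENABLED, sk) becomes: */
		set_bit(UDP_FLAGS_GRO_ENABLED, &udp_sk(sk)->udp_flags);
	}
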
+diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
+index b513749582d77..e4de6bc1f69b6 100644
+--- a/include/linux/usb/phy.h
++++ b/include/linux/usb/phy.h
+@@ -144,10 +144,6 @@ struct usb_phy {
+ */
+ int (*set_wakeup)(struct usb_phy *x, bool enabled);
+
+- /* notify phy port status change */
+- int (*notify_port_status)(struct usb_phy *x, int port,
+- u16 portstatus, u16 portchange);
+-
+ /* notify phy connect status change */
+ int (*notify_connect)(struct usb_phy *x,
+ enum usb_device_speed speed);
+@@ -320,15 +316,6 @@ usb_phy_set_wakeup(struct usb_phy *x, bool enabled)
+ return 0;
+ }
+
+-static inline int
+-usb_phy_notify_port_status(struct usb_phy *x, int port, u16 portstatus, u16 portchange)
+-{
+- if (x && x->notify_port_status)
+- return x->notify_port_status(x, port, portstatus, portchange);
+- else
+- return 0;
+-}
+-
+ static inline int
+ usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed)
+ {
+diff --git a/include/linux/verification.h b/include/linux/verification.h
+index f34e50ebcf60a..cb2d47f280910 100644
+--- a/include/linux/verification.h
++++ b/include/linux/verification.h
+@@ -8,6 +8,7 @@
+ #ifndef _LINUX_VERIFICATION_H
+ #define _LINUX_VERIFICATION_H
+
++#include <linux/errno.h>
+ #include <linux/types.h>
+
+ /*
+diff --git a/include/linux/vfio.h b/include/linux/vfio.h
+index 454e9295970c4..a65b2513f8cdc 100644
+--- a/include/linux/vfio.h
++++ b/include/linux/vfio.h
+@@ -289,16 +289,12 @@ void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
+ /*
+ * External user API
+ */
+-#if IS_ENABLED(CONFIG_VFIO_GROUP)
+ struct iommu_group *vfio_file_iommu_group(struct file *file);
++
++#if IS_ENABLED(CONFIG_VFIO_GROUP)
+ bool vfio_file_is_group(struct file *file);
+ bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
+ #else
+-static inline struct iommu_group *vfio_file_iommu_group(struct file *file)
+-{
+- return NULL;
+-}
+-
+ static inline bool vfio_file_is_group(struct file *file)
+ {
+ return false;
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index 1c1d06804d450..24b1e5070f4d4 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -274,18 +274,16 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
+ * to generate better code.
+ */
+ #ifdef CONFIG_LOCKDEP
+-#define __INIT_WORK(_work, _func, _onstack) \
++#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
+ do { \
+- static struct lock_class_key __key; \
+- \
+ __init_work((_work), _onstack); \
+ (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
+- lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
++ lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
+ INIT_LIST_HEAD(&(_work)->entry); \
+ (_work)->func = (_func); \
+ } while (0)
+ #else
+-#define __INIT_WORK(_work, _func, _onstack) \
++#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
+ do { \
+ __init_work((_work), _onstack); \
+ (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
+@@ -294,12 +292,22 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
+ } while (0)
+ #endif
+
++#define __INIT_WORK(_work, _func, _onstack) \
++ do { \
++ static __maybe_unused struct lock_class_key __key; \
++ \
++ __INIT_WORK_KEY(_work, _func, _onstack, &__key); \
++ } while (0)
++
+ #define INIT_WORK(_work, _func) \
+ __INIT_WORK((_work), (_func), 0)
+
+ #define INIT_WORK_ONSTACK(_work, _func) \
+ __INIT_WORK((_work), (_func), 1)
+
++#define INIT_WORK_ONSTACK_KEY(_work, _func, _key) \
++ __INIT_WORK_KEY((_work), (_func), 1, _key)
++
+ #define __INIT_DELAYED_WORK(_work, _func, _tflags) \
+ do { \
+ INIT_WORK(&(_work)->work, (_func)); \
+@@ -693,8 +701,32 @@ static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
+ return fn(arg);
+ }
+ #else
+-long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
+-long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
++long work_on_cpu_key(int cpu, long (*fn)(void *),
++ void *arg, struct lock_class_key *key);
++/*
++ * A new key is defined for each caller to make sure the work
++ * associated with the function doesn't share its locking class.
++ */
++#define work_on_cpu(_cpu, _fn, _arg) \
++({ \
++ static struct lock_class_key __key; \
++ \
++ work_on_cpu_key(_cpu, _fn, _arg, &__key); \
++})
++
++long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
++ void *arg, struct lock_class_key *key);
++
++/*
++ * A new key is defined for each caller to make sure the work
++ * associated with the function doesn't share its locking class.
++ */
++#define work_on_cpu_safe(_cpu, _fn, _arg) \
++({ \
++ static struct lock_class_key __key; \
++ \
++ work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \
++})
+ #endif /* CONFIG_SMP */
+
+ #ifdef CONFIG_FREEZER
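Because the key is declared static inside the wrapper macro, each textual call site gets its own lock class and lockdep no longer conflates unrelated work_on_cpu() users. Roughly, the expansion behaves like this sketch (example_run_on() is illustrative):

	static long example_run_on(int cpu, long (*my_fn)(void *), void *arg)
	{
		static struct lock_class_key __key;	/* unique to this call site */

		return work_on_cpu_key(cpu, my_fn, arg, &__key);
	}
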
+diff --git a/include/media/ipu-bridge.h b/include/media/ipu-bridge.h
+index bdc654a455216..783bda6d5cc3f 100644
+--- a/include/media/ipu-bridge.h
++++ b/include/media/ipu-bridge.h
+@@ -108,7 +108,7 @@ struct ipu_node_names {
+ char ivsc_sensor_port[7];
+ char ivsc_ipu_port[7];
+ char endpoint[11];
+- char remote_port[7];
++ char remote_port[9];
+ char vcm[16];
+ };
+
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index 824c258143a3a..49c4640027d8a 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -75,6 +75,7 @@ struct unix_sock {
+ };
+
+ #define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk)
++#define unix_peer(sk) (unix_sk(sk)->peer)
+
+ #define peer_wait peer_wq.wait
+
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index 87d92accc26ea..bdee5d649cc61 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -1,6 +1,7 @@
+ /*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
++ Copyright 2023 NXP
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+@@ -673,6 +674,8 @@ enum {
+ #define HCI_TX_POWER_INVALID 127
+ #define HCI_RSSI_INVALID 127
+
++#define HCI_SYNC_HANDLE_INVALID 0xffff
++
+ #define HCI_ROLE_MASTER 0x00
+ #define HCI_ROLE_SLAVE 0x01
+
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index c33348ba1657e..7fa95b72e5c85 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -350,6 +350,8 @@ struct hci_dev {
+ struct list_head list;
+ struct mutex lock;
+
++ struct ida unset_handle_ida;
++
+ const char *name;
+ unsigned long flags;
+ __u16 id;
+@@ -1314,7 +1316,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev *
+ }
+
+ static inline struct hci_conn *
+-hci_conn_hash_lookup_pa_sync(struct hci_dev *hdev, __u8 big)
++hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big)
+ {
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+@@ -1336,6 +1338,29 @@ hci_conn_hash_lookup_pa_sync(struct hci_dev *hdev, __u8 big)
+ return NULL;
+ }
+
++static inline struct hci_conn *
++hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
++{
++ struct hci_conn_hash *h = &hdev->conn_hash;
++ struct hci_conn *c;
++
++ rcu_read_lock();
++
++ list_for_each_entry_rcu(c, &h->list, list) {
++ if (c->type != ISO_LINK ||
++ !test_bit(HCI_CONN_PA_SYNC, &c->flags))
++ continue;
++
++ if (c->sync_handle == sync_handle) {
++ rcu_read_unlock();
++ return c;
++ }
++ }
++ rcu_read_unlock();
++
++ return NULL;
++}
++
+ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
+ __u8 type, __u16 state)
+ {
+@@ -1426,7 +1451,9 @@ int hci_le_create_cis_pending(struct hci_dev *hdev);
+ int hci_conn_check_create_cis(struct hci_conn *conn);
+
+ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+- u8 role);
++ u8 role, u16 handle);
++struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
++ bdaddr_t *dst, u8 role);
+ void hci_conn_del(struct hci_conn *conn);
+ void hci_conn_hash_flush(struct hci_dev *hdev);
+ void hci_conn_check_pending(struct hci_dev *hdev);
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 7192346e4a22d..153a8c3e7213d 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -5826,6 +5826,16 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work);
+ */
+ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work);
+
++/**
++ * wiphy_work_flush - flush previously queued work
++ * @wiphy: the wiphy, for debug purposes
++ * @work: the work to flush, this can be %NULL to flush all work
++ *
++ * Flush the work (i.e. run it if pending). This must be called
++ * under the wiphy mutex acquired by wiphy_lock().
++ */
++void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work);
++
+ struct wiphy_delayed_work {
+ struct wiphy_work work;
+ struct wiphy *wiphy;
+@@ -5869,6 +5879,17 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy,
+ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork);
+
++/**
++ * wiphy_delayed_work_flush - flush previously queued delayed work
++ * @wiphy: the wiphy, for debug purposes
++ * @work: the work to flush
++ *
++ * Flush the work (i.e. run it if pending). This must be called
++ * under the wiphy mutex acquired by wiphy_lock().
++ */
++void wiphy_delayed_work_flush(struct wiphy *wiphy,
++ struct wiphy_delayed_work *dwork);
++
+ /**
+ * struct wireless_dev - wireless device state
+ *
+diff --git a/include/net/flow.h b/include/net/flow.h
+index 7f0adda3bf2fe..335bbc52171c1 100644
+--- a/include/net/flow.h
++++ b/include/net/flow.h
+@@ -40,8 +40,8 @@ struct flowi_common {
+ #define FLOWI_FLAG_KNOWN_NH 0x02
+ __u32 flowic_secid;
+ kuid_t flowic_uid;
+- struct flowi_tunnel flowic_tun_key;
+ __u32 flowic_multipath_hash;
++ struct flowi_tunnel flowic_tun_key;
+ };
+
+ union flowi_uli {
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 07022bb0d44d4..0d28172193fa6 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -162,7 +162,7 @@ struct neighbour {
+ struct rcu_head rcu;
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+- u8 primary_key[0];
++ u8 primary_key[];
+ } __randomize_layout;
+
+ struct neigh_ops {
+diff --git a/include/net/netfilter/nf_conntrack_act_ct.h b/include/net/netfilter/nf_conntrack_act_ct.h
+index 078d3c52c03f9..e5f2f0b73a9a0 100644
+--- a/include/net/netfilter/nf_conntrack_act_ct.h
++++ b/include/net/netfilter/nf_conntrack_act_ct.h
+@@ -20,7 +20,22 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf
+ #endif
+ }
+
+-static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
++static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
++ enum ip_conntrack_info ctinfo)
++{
++#if IS_ENABLED(CONFIG_NET_ACT_CT)
++ struct nf_conn_act_ct_ext *act_ct_ext;
++
++ act_ct_ext = nf_conn_act_ct_ext_find(ct);
++ if (dev_net(skb->dev) == &init_net && act_ct_ext)
++ act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
++#endif
++}
++
++static inline struct
++nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb,
++ struct nf_conn *ct,
++ enum ip_conntrack_info ctinfo)
+ {
+ #if IS_ENABLED(CONFIG_NET_ACT_CT)
+ struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
+@@ -29,22 +44,11 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *
+ return act_ct;
+
+ act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
++ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
+ return act_ct;
+ #else
+ return NULL;
+ #endif
+ }
+
+-static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
+- enum ip_conntrack_info ctinfo)
+-{
+-#if IS_ENABLED(CONFIG_NET_ACT_CT)
+- struct nf_conn_act_ct_ext *act_ct_ext;
+-
+- act_ct_ext = nf_conn_act_ct_ext_find(ct);
+- if (dev_net(skb->dev) == &init_net && act_ct_ext)
+- act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
+-#endif
+-}
+-
+ #endif /* _NF_CONNTRACK_ACT_CT_H */
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 7c816359d5a98..75972e211ba12 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -178,9 +178,9 @@ static inline __be32 nft_reg_load_be32(const u32 *sreg)
+ return *(__force __be32 *)sreg;
+ }
+
+-static inline void nft_reg_store64(u32 *dreg, u64 val)
++static inline void nft_reg_store64(u64 *dreg, u64 val)
+ {
+- put_unaligned(val, (u64 *)dreg);
++ put_unaligned(val, dreg);
+ }
+
+ static inline u64 nft_reg_load64(const u32 *sreg)
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 92f7ea62a9159..7753354d59c0b 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2006,21 +2006,33 @@ static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
+ /* sk_tx_queue_mapping accept only upto a 16-bit value */
+ if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
+ return;
+- sk->sk_tx_queue_mapping = tx_queue;
++ /* Paired with READ_ONCE() in sk_tx_queue_get() and
++ * other WRITE_ONCE() because socket lock might be not held.
++ */
++ WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
+ }
+
+ #define NO_QUEUE_MAPPING USHRT_MAX
+
+ static inline void sk_tx_queue_clear(struct sock *sk)
+ {
+- sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
++ /* Paired with READ_ONCE() in sk_tx_queue_get() and
++ * other WRITE_ONCE() because socket lock might be not held.
++ */
++ WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
+ }
+
+ static inline int sk_tx_queue_get(const struct sock *sk)
+ {
+- if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
+- return sk->sk_tx_queue_mapping;
++ if (sk) {
++ /* Paired with WRITE_ONCE() in sk_tx_queue_clear()
++ * and sk_tx_queue_set().
++ */
++ int val = READ_ONCE(sk->sk_tx_queue_mapping);
+
++ if (val != NO_QUEUE_MAPPING)
++ return val;
++ }
+ return -1;
+ }
+
+@@ -2169,7 +2181,7 @@ static inline void __dst_negative_advice(struct sock *sk)
+ if (ndst != dst) {
+ rcu_assign_pointer(sk->sk_dst_cache, ndst);
+ sk_tx_queue_clear(sk);
+- sk->sk_dst_pending_confirm = 0;
++ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ }
+ }
+ }
+@@ -2186,7 +2198,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ struct dst_entry *old_dst;
+
+ sk_tx_queue_clear(sk);
+- sk->sk_dst_pending_confirm = 0;
++ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ old_dst = rcu_dereference_protected(sk->sk_dst_cache,
+ lockdep_sock_is_held(sk));
+ rcu_assign_pointer(sk->sk_dst_cache, dst);
+@@ -2199,7 +2211,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ struct dst_entry *old_dst;
+
+ sk_tx_queue_clear(sk);
+- sk->sk_dst_pending_confirm = 0;
++ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+ dst_release(old_dst);
+ }
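Each lockless store is now paired with a marked load, which documents the data race for KCSAN and prevents load/store tearing. The pattern in miniature, mirroring the hunks above (example_get_tx_queue() is illustrative):

	static inline int example_get_tx_queue(const struct sock *sk)
	{
		/* paired with WRITE_ONCE() in sk_tx_queue_set()/_clear() */
		int val = READ_ONCE(sk->sk_tx_queue_mapping);

		return val != NO_QUEUE_MAPPING ? val : -1;
	}
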
+diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
+index b24ea2d9400ba..1dc2f827d0bcf 100644
+--- a/include/net/tc_act/tc_ct.h
++++ b/include/net/tc_act/tc_ct.h
+@@ -57,6 +57,11 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+ return to_ct_params(a)->nf_ft;
+ }
+
++static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
++{
++ return to_ct_params(a)->helper;
++}
++
+ #else
+ static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; }
+ static inline int tcf_ct_action(const struct tc_action *a) { return 0; }
+@@ -64,6 +69,10 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+ {
+ return NULL;
+ }
++static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
++{
++ return NULL;
++}
+ #endif /* CONFIG_NF_CONNTRACK */
+
+ #if IS_ENABLED(CONFIG_NET_ACT_CT)
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 4b03ca7cb8a5e..0239e815edf71 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -801,7 +801,7 @@ static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+ }
+
+ /* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
+-static inline u32 tcp_ns_to_ts(u64 ns)
++static inline u64 tcp_ns_to_ts(u64 ns)
+ {
+ return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+ }
+diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
+index 0ca9b7a11baf5..29251c3519cf0 100644
+--- a/include/net/udp_tunnel.h
++++ b/include/net/udp_tunnel.h
+@@ -174,16 +174,13 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
+ }
+ #endif
+
+-static inline void udp_tunnel_encap_enable(struct socket *sock)
++static inline void udp_tunnel_encap_enable(struct sock *sk)
+ {
+- struct udp_sock *up = udp_sk(sock->sk);
+-
+- if (up->encap_enabled)
++ if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
+ return;
+
+- up->encap_enabled = 1;
+ #if IS_ENABLED(CONFIG_IPV6)
+- if (sock->sk->sk_family == PF_INET6)
++ if (READ_ONCE(sk->sk_family) == PF_INET6)
+ ipv6_stub->udpv6_encap_enable();
+ #endif
+ udp_encap_enable();
+diff --git a/include/net/udplite.h b/include/net/udplite.h
+index bd33ff2b8f426..786919d29f8de 100644
+--- a/include/net/udplite.h
++++ b/include/net/udplite.h
+@@ -66,14 +66,18 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
+ /* Fast-path computation of checksum. Socket may not be locked. */
+ static inline __wsum udplite_csum(struct sk_buff *skb)
+ {
+- const struct udp_sock *up = udp_sk(skb->sk);
+ const int off = skb_transport_offset(skb);
++ const struct sock *sk = skb->sk;
+ int len = skb->len - off;
+
+- if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
+- if (0 < up->pcslen)
+- len = up->pcslen;
+- udp_hdr(skb)->len = htons(up->pcslen);
++ if (udp_test_bit(UDPLITE_SEND_CC, sk)) {
++ u16 pcslen = READ_ONCE(udp_sk(sk)->pcslen);
++
++ if (pcslen < len) {
++ if (pcslen > 0)
++ len = pcslen;
++ udp_hdr(skb)->len = htons(pcslen);
++ }
+ }
+ skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
+
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index 65e49fae8da7a..8fa1153f37cbf 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -167,19 +167,25 @@ struct scsi_device {
+ * power state for system suspend/resume (suspend to RAM and
+ * hibernation) operations.
+ */
+- bool manage_system_start_stop;
++ unsigned manage_system_start_stop:1;
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for runtime device suspand and resume operations.
+ */
+- bool manage_runtime_start_stop;
++ unsigned manage_runtime_start_stop:1;
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for system shutdown (power off) operations.
+ */
+- bool manage_shutdown;
++ unsigned manage_shutdown:1;
++
++ /*
++ * If set and if the device is runtime suspended, ask the high-level
++ * device driver (sd) to force a runtime resume of the device.
++ */
++ unsigned force_runtime_start_on_system_start:1;
+
+ unsigned removable:1;
+ unsigned changed:1; /* Data invalid due to media change */
+diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
+index 5842e38bb2880..f5e4ac5b8cce8 100644
+--- a/include/soc/tegra/bpmp.h
++++ b/include/soc/tegra/bpmp.h
+@@ -102,8 +102,12 @@ struct tegra_bpmp {
+ #ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_mirror;
+ #endif
++
++ bool suspended;
+ };
+
++#define TEGRA_BPMP_MESSAGE_RESET BIT(0)
++
+ struct tegra_bpmp_message {
+ unsigned int mrq;
+
+@@ -117,6 +121,8 @@ struct tegra_bpmp_message {
+ size_t size;
+ int ret;
+ } rx;
++
++ unsigned long flags;
+ };
+
+ #if IS_ENABLED(CONFIG_TEGRA_BPMP)
+diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h
+index 1bf757901d024..2fe8c6b0d4cf3 100644
+--- a/include/sound/cs35l41.h
++++ b/include/sound/cs35l41.h
+@@ -11,7 +11,6 @@
+ #define __CS35L41_H
+
+ #include <linux/regmap.h>
+-#include <linux/completion.h>
+ #include <linux/firmware/cirrus/cs_dsp.h>
+
+ #define CS35L41_FIRSTREG 0x00000000
+@@ -902,7 +901,8 @@ int cs35l41_exit_hibernate(struct device *dev, struct regmap *regmap);
+ int cs35l41_init_boost(struct device *dev, struct regmap *regmap,
+ struct cs35l41_hw_cfg *hw_cfg);
+ bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type);
++int cs35l41_mdsync_up(struct regmap *regmap);
+ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type,
+- int enable, struct completion *pll_lock, bool firmware_running);
++ int enable, bool firmware_running);
+
+ #endif /* __CS35L41_H */
+diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
+index 6d31d535e8f6d..23d6d6bfb0736 100644
+--- a/include/sound/soc-acpi.h
++++ b/include/sound/soc-acpi.h
+@@ -68,6 +68,10 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
+ * @i2s_link_mask: I2S/TDM links enabled on the board
+ * @num_dai_drivers: number of elements in @dai_drivers
+ * @dai_drivers: pointer to dai_drivers, used e.g. in nocodec mode
++ * @subsystem_vendor: optional PCI SSID vendor value
++ * @subsystem_device: optional PCI SSID device value
++ * @subsystem_id_set: true if a value has been written to
++ * subsystem_vendor and subsystem_device.
+ */
+ struct snd_soc_acpi_mach_params {
+ u32 acpi_ipc_irq_index;
+@@ -80,6 +84,9 @@ struct snd_soc_acpi_mach_params {
+ u32 i2s_link_mask;
+ u32 num_dai_drivers;
+ struct snd_soc_dai_driver *dai_drivers;
++ unsigned short subsystem_vendor;
++ unsigned short subsystem_device;
++ bool subsystem_id_set;
+ };
+
+ /**
+diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
+index fc94dfb0021fd..e8ff2e089cd00 100644
+--- a/include/sound/soc-card.h
++++ b/include/sound/soc-card.h
+@@ -59,6 +59,43 @@ int snd_soc_card_add_dai_link(struct snd_soc_card *card,
+ void snd_soc_card_remove_dai_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link);
+
++#ifdef CONFIG_PCI
++static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
++ unsigned short vendor,
++ unsigned short device)
++{
++ card->pci_subsystem_vendor = vendor;
++ card->pci_subsystem_device = device;
++ card->pci_subsystem_set = true;
++}
++
++static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
++ unsigned short *vendor,
++ unsigned short *device)
++{
++ if (!card->pci_subsystem_set)
++ return -ENOENT;
++
++ *vendor = card->pci_subsystem_vendor;
++ *device = card->pci_subsystem_device;
++
++ return 0;
++}
++#else /* !CONFIG_PCI */
++static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
++ unsigned short vendor,
++ unsigned short device)
++{
++}
++
++static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
++ unsigned short *vendor,
++ unsigned short *device)
++{
++ return -ENOENT;
++}
++#endif /* CONFIG_PCI */
++
+ /* device driver data */
+ static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
+ void *data)
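
As a sketch of how these helpers are meant to be consumed -- the card, probe function, and device topology below are invented for illustration and assume a driver that already includes <sound/soc-card.h> and <linux/pci.h> -- a machine driver sitting under a PCI parent could do:

static struct snd_soc_card example_card = {
	.name = "example-card",
};

static int example_audio_probe(struct platform_device *pdev)
{
	/* assumes the platform device was spawned by a PCI parent */
	struct pci_dev *pci = to_pci_dev(pdev->dev.parent);

	snd_soc_card_set_pci_ssid(&example_card, pci->subsystem_vendor,
				  pci->subsystem_device);
	return devm_snd_soc_register_card(&pdev->dev, &example_card);
}

Readers of the card then call snd_soc_card_get_pci_ssid() and treat -ENOENT as "no SSID recorded", which is why the explicit pci_subsystem_set flag exists instead of testing the IDs against zero.
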
+diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
+index 5fcfba47d98cc..adcd8719d3435 100644
+--- a/include/sound/soc-dai.h
++++ b/include/sound/soc-dai.h
+@@ -370,6 +370,7 @@ struct snd_soc_dai_ops {
+
+ /* bit field */
+ unsigned int no_capture_mute:1;
++ unsigned int mute_unmute_on_trigger:1;
+ };
+
+ struct snd_soc_cdai_ops {
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index 37f9d3fe302a6..49ec688eed606 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -932,6 +932,17 @@ struct snd_soc_card {
+ #ifdef CONFIG_DMI
+ char dmi_longname[80];
+ #endif /* CONFIG_DMI */
++
++#ifdef CONFIG_PCI
++ /*
++ * PCI does not define 0 as invalid, so pci_subsystem_set indicates
++ * whether a value has been written to these fields.
++ */
++ unsigned short pci_subsystem_vendor;
++ unsigned short pci_subsystem_device;
++ bool pci_subsystem_set;
++#endif /* CONFIG_PCI */
++
+ char topology_shortname[32];
+
+ struct device *dev;
+diff --git a/include/sound/sof.h b/include/sound/sof.h
+index d3c41f87ac319..51294f2ba302c 100644
+--- a/include/sound/sof.h
++++ b/include/sound/sof.h
+@@ -64,6 +64,14 @@ struct snd_sof_pdata {
+ const char *name;
+ const char *platform;
+
++ /*
++ * PCI SSID. As PCI does not define 0 as invalid, the subsystem_id_set
++ * flag indicates that a value has been written to these members.
++ */
++ unsigned short subsystem_vendor;
++ unsigned short subsystem_device;
++ bool subsystem_id_set;
++
+ struct device *dev;
+
+ /*
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index 4c53a5ef6257b..f7e537f64db45 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -328,7 +328,7 @@
+ E_(rxrpc_rtt_tx_ping, "PING")
+
+ #define rxrpc_rtt_rx_traces \
+- EM(rxrpc_rtt_rx_cancel, "CNCL") \
++ EM(rxrpc_rtt_rx_other_ack, "OACK") \
+ EM(rxrpc_rtt_rx_obsolete, "OBSL") \
+ EM(rxrpc_rtt_rx_lost, "LOST") \
+ EM(rxrpc_rtt_rx_ping_response, "PONG") \
+diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
+index 6c80f96049bd0..282e90aeb163c 100644
+--- a/include/uapi/linux/fcntl.h
++++ b/include/uapi/linux/fcntl.h
+@@ -116,5 +116,8 @@
+ #define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to
+ compare object identity and may not
+ be usable to open_by_handle_at(2) */
++#if defined(__KERNEL__)
++#define AT_GETATTR_NOSEC 0x80000000
++#endif
+
+ #endif /* _UAPI_LINUX_FCNTL_H */
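
Since AT_GETATTR_NOSEC lives in the high bit and is fenced off by __KERNEL__, it cannot collide with the userspace AT_* namespace; it lets a stacked filesystem forward the security-hook-free flavour of getattr to its lower layer through the normal flags argument. A hedged sketch of the intended call site -- example_getattr() and lower_path() are invented stand-ins, not names from this patch:

static int example_getattr(struct mnt_idmap *idmap, const struct path *path,
			   struct kstat *stat, u32 mask, unsigned int flags)
{
	const struct path *lower = lower_path(path);	/* fs-specific lookup */

	if (flags & AT_GETATTR_NOSEC)
		return vfs_getattr_nosec(lower, stat, mask, flags);
	return vfs_getattr(lower, stat, mask, flags);
}
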
+diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
+index 3c36aeade991e..370ed14b1ae09 100644
+--- a/include/uapi/linux/prctl.h
++++ b/include/uapi/linux/prctl.h
+@@ -283,7 +283,8 @@ struct prctl_mm_map {
+
+ /* Memory deny write / execute */
+ #define PR_SET_MDWE 65
+-# define PR_MDWE_REFUSE_EXEC_GAIN 1
++# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
++# define PR_MDWE_NO_INHERIT (1UL << 1)
+
+ #define PR_GET_MDWE 66
+
+diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
+index 5c6c4269f7efe..2ec6f35cda32e 100644
+--- a/include/uapi/linux/stddef.h
++++ b/include/uapi/linux/stddef.h
+@@ -27,7 +27,7 @@
+ union { \
+ struct { MEMBERS } ATTRS; \
+ struct TAG { MEMBERS } ATTRS NAME; \
+- }
++ } ATTRS
+
+ #ifdef __cplusplus
+ /* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */
+diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
+index 4a195b68f28f6..b383c2fe0cf35 100644
+--- a/include/uapi/linux/v4l2-subdev.h
++++ b/include/uapi/linux/v4l2-subdev.h
+@@ -239,7 +239,7 @@ struct v4l2_subdev_routing {
+ * set (which is the default), the 'stream' fields will be forced to 0 by the
+ * kernel.
+ */
+- #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1U << 0)
++ #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0)
+
+ /**
+ * struct v4l2_subdev_client_capability - Capabilities of the client accessing
+diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
+index c60ca33eac594..ed07181d4eff9 100644
+--- a/include/uapi/linux/vm_sockets.h
++++ b/include/uapi/linux/vm_sockets.h
+@@ -191,4 +191,21 @@ struct sockaddr_vm {
+
+ #define IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
+
++/* MSG_ZEROCOPY notifications are encoded in the standard error format,
++ * sock_extended_err. See Documentation/networking/msg_zerocopy.rst in
++ * kernel source tree for more details.
++ */
++
++/* 'cmsg_level' field value of 'struct cmsghdr' for notification parsing
++ * when MSG_ZEROCOPY flag is used on transmissions.
++ */
++
++#define SOL_VSOCK 287
++
++/* 'cmsg_type' field value of 'struct cmsghdr' for notification parsing
++ * when MSG_ZEROCOPY flag is used on transmissions.
++ */
++
++#define VSOCK_RECVERR 1
++
+ #endif /* _UAPI_VM_SOCKETS_H */
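
These two constants slot into the generic MSG_ZEROCOPY completion flow described in msg_zerocopy.rst: after sending with MSG_ZEROCOPY, the application drains notifications from the error queue and matches on the vsock-specific cmsg level/type. A sketch under that assumption (fd is taken to be a connected vsock socket with SO_ZEROCOPY enabled):

#include <sys/socket.h>
#include <linux/errqueue.h>

static void drain_vsock_zerocopy(int fd)
{
	char ctl[CMSG_SPACE(sizeof(struct sock_extended_err))];
	struct msghdr msg = {
		.msg_control = ctl,
		.msg_controllen = sizeof(ctl),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;

	cm = CMSG_FIRSTHDR(&msg);
	if (cm && cm->cmsg_level == SOL_VSOCK && cm->cmsg_type == VSOCK_RECVERR) {
		struct sock_extended_err *serr = (void *)CMSG_DATA(cm);

		/* serr->ee_info .. serr->ee_data: completed send range */
	}
}
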
+diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
+index 375718ba4ab62..e145bca5105c5 100644
+--- a/include/uapi/xen/privcmd.h
++++ b/include/uapi/xen/privcmd.h
+@@ -102,7 +102,7 @@ struct privcmd_mmap_resource {
+ #define PRIVCMD_IRQFD_FLAG_DEASSIGN (1 << 0)
+
+ struct privcmd_irqfd {
+- void __user *dm_op;
++ __u64 dm_op;
+ __u32 size; /* Size of structure pointed by dm_op */
+ __u32 fd;
+ __u32 flags;
+@@ -138,6 +138,6 @@ struct privcmd_irqfd {
+ #define IOCTL_PRIVCMD_MMAP_RESOURCE \
+ _IOC(_IOC_NONE, 'P', 7, sizeof(struct privcmd_mmap_resource))
+ #define IOCTL_PRIVCMD_IRQFD \
+- _IOC(_IOC_NONE, 'P', 8, sizeof(struct privcmd_irqfd))
++ _IOW('P', 8, struct privcmd_irqfd)
+
+ #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
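
Storing dm_op as __u64 instead of a user pointer keeps struct privcmd_irqfd the same size for 32-bit and 64-bit userspace and avoids a compat path; the cost is one explicit cast at the call site. Roughly, with invented descriptor and buffer names:

#include <stdint.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>

static int example_assign_irqfd(int privcmd_fd, int event_fd,
				void *dm_op_buf, __u32 dm_op_size)
{
	struct privcmd_irqfd irqfd = {
		.dm_op = (__u64)(uintptr_t)dm_op_buf, /* was: void __user *dm_op */
		.size  = dm_op_size,
		.fd    = event_fd,
	};

	return ioctl(privcmd_fd, IOCTL_PRIVCMD_IRQFD, &irqfd);
}

The switch to _IOW('P', 8, struct privcmd_irqfd) likewise bakes the now arch-independent structure size into the ioctl number itself.
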
+diff --git a/include/video/sticore.h b/include/video/sticore.h
+index 945ad60463a18..012b5b46ad7d0 100644
+--- a/include/video/sticore.h
++++ b/include/video/sticore.h
+@@ -232,7 +232,7 @@ struct sti_rom_font {
+ u8 height;
+ u8 font_type; /* language type */
+ u8 bytes_per_char;
+- u32 next_font;
++ s32 next_font; /* note: signed int */
+ u8 underline_height;
+ u8 underline_pos;
+ u8 res008[2];
+diff --git a/init/Makefile b/init/Makefile
+index ec557ada3c12e..cbac576c57d63 100644
+--- a/init/Makefile
++++ b/init/Makefile
+@@ -60,4 +60,5 @@ include/generated/utsversion.h: FORCE
+ $(obj)/version-timestamp.o: include/generated/utsversion.h
+ CFLAGS_version-timestamp.o := -include include/generated/utsversion.h
+ KASAN_SANITIZE_version-timestamp.o := n
++KCSAN_SANITIZE_version-timestamp.o := n
+ GCOV_PROFILE_version-timestamp.o := n
+diff --git a/init/main.c b/init/main.c
+index 436d73261810b..e24b0780fdff7 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -530,6 +530,10 @@ static int __init unknown_bootoption(char *param, char *val,
+ {
+ size_t len = strlen(param);
+
++ /* Handle params aliased to sysctls */
++ if (sysctl_is_alias(param))
++ return 0;
++
+ repair_env_string(param, val);
+
+ /* Handle obsolete-style parameters */
+diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
+index f04a43044d917..976e9500f6518 100644
+--- a/io_uring/fdinfo.c
++++ b/io_uring/fdinfo.c
+@@ -145,13 +145,8 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
+ if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
+ struct io_sq_data *sq = ctx->sq_data;
+
+- if (mutex_trylock(&sq->lock)) {
+- if (sq->thread) {
+- sq_pid = task_pid_nr(sq->thread);
+- sq_cpu = task_cpu(sq->thread);
+- }
+- mutex_unlock(&sq->lock);
+- }
++ sq_pid = sq->task_pid;
++ sq_cpu = sq->sq_cpu;
+ }
+
+ seq_printf(m, "SqThread:\t%d\n", sq_pid);
+diff --git a/io_uring/fs.c b/io_uring/fs.c
+index 08e3b175469c6..eccea851dd5a2 100644
+--- a/io_uring/fs.c
++++ b/io_uring/fs.c
+@@ -254,7 +254,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+ lnk->flags = READ_ONCE(sqe->hardlink_flags);
+
+- lnk->oldpath = getname(oldf);
++ lnk->oldpath = getname_uflags(oldf, lnk->flags);
+ if (IS_ERR(lnk->oldpath))
+ return PTR_ERR(lnk->oldpath);
+
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 8d1bc6cdfe712..f09e3ee11229c 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -323,6 +323,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
+ INIT_LIST_HEAD(&ctx->sqd_list);
+ INIT_LIST_HEAD(&ctx->cq_overflow_list);
+ INIT_LIST_HEAD(&ctx->io_buffers_cache);
++ INIT_HLIST_HEAD(&ctx->io_buf_list);
+ io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
+ sizeof(struct io_rsrc_node));
+ io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX,
+@@ -2659,7 +2660,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+ return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
+ }
+
+-static void io_mem_free(void *ptr)
++void io_mem_free(void *ptr)
+ {
+ if (!ptr)
+ return;
+@@ -2690,6 +2691,7 @@ static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
+ {
+ struct page **page_array;
+ unsigned int nr_pages;
++ void *page_addr;
+ int ret, i;
+
+ *npages = 0;
+@@ -2711,27 +2713,29 @@ err:
+ io_pages_free(&page_array, ret > 0 ? ret : 0);
+ return ret < 0 ? ERR_PTR(ret) : ERR_PTR(-EFAULT);
+ }
+- /*
+- * Should be a single page. If the ring is small enough that we can
+- * use a normal page, that is fine. If we need multiple pages, then
+- * userspace should use a huge page. That's the only way to guarantee
+- * that we get contigious memory, outside of just being lucky or
+- * (currently) having low memory fragmentation.
+- */
+- if (page_array[0] != page_array[ret - 1])
+- goto err;
+
+- /*
+- * Can't support mapping user allocated ring memory on 32-bit archs
+- * where it could potentially reside in highmem. Just fail those with
+- * -EINVAL, just like we did on kernels that didn't support this
+- * feature.
+- */
++ page_addr = page_address(page_array[0]);
+ for (i = 0; i < nr_pages; i++) {
+- if (PageHighMem(page_array[i])) {
+- ret = -EINVAL;
++ ret = -EINVAL;
++
++ /*
++ * Can't support mapping user allocated ring memory on 32-bit
++ * archs where it could potentially reside in highmem. Just
++ * fail those with -EINVAL, just like we did on kernels that
++ * didn't support this feature.
++ */
++ if (PageHighMem(page_array[i]))
+ goto err;
+- }
++
++ /*
++ * No support for discontig pages for now, should either be a
++ * single normal page, or a huge page. Later on we can add
++ * support for remapping discontig pages, for now we will
++ * just fail them with EINVAL.
++ */
++ if (page_address(page_array[i]) != page_addr)
++ goto err;
++ page_addr += PAGE_SIZE;
+ }
+
+ *pages = page_array;
+@@ -2768,7 +2772,7 @@ static void io_rings_free(struct io_ring_ctx *ctx)
+ }
+ }
+
+-static void *io_mem_alloc(size_t size)
++void *io_mem_alloc(size_t size)
+ {
+ gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
+ void *ret;
+@@ -2939,6 +2943,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
+ ctx->mm_account = NULL;
+ }
+ io_rings_free(ctx);
++ io_kbuf_mmap_list_free(ctx);
+
+ percpu_ref_exit(&ctx->refs);
+ free_uid(ctx->user);
+@@ -3433,25 +3438,27 @@ static void *io_uring_validate_mmap_request(struct file *file,
+ struct page *page;
+ void *ptr;
+
+- /* Don't allow mmap if the ring was setup without it */
+- if (ctx->flags & IORING_SETUP_NO_MMAP)
+- return ERR_PTR(-EINVAL);
+-
+ switch (offset & IORING_OFF_MMAP_MASK) {
+ case IORING_OFF_SQ_RING:
+ case IORING_OFF_CQ_RING:
++ /* Don't allow mmap if the ring was setup without it */
++ if (ctx->flags & IORING_SETUP_NO_MMAP)
++ return ERR_PTR(-EINVAL);
+ ptr = ctx->rings;
+ break;
+ case IORING_OFF_SQES:
++ /* Don't allow mmap if the ring was setup without it */
++ if (ctx->flags & IORING_SETUP_NO_MMAP)
++ return ERR_PTR(-EINVAL);
+ ptr = ctx->sq_sqes;
+ break;
+ case IORING_OFF_PBUF_RING: {
+ unsigned int bgid;
+
+ bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
+- mutex_lock(&ctx->uring_lock);
++ rcu_read_lock();
+ ptr = io_pbuf_get_address(ctx, bgid);
+- mutex_unlock(&ctx->uring_lock);
++ rcu_read_unlock();
+ if (!ptr)
+ return ERR_PTR(-EINVAL);
+ break;
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 0bc145614a6e6..d2bad1df347da 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -86,6 +86,9 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
+ bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+ bool cancel_all);
+
++void *io_mem_alloc(size_t size);
++void io_mem_free(void *ptr);
++
+ #if defined(CONFIG_PROVE_LOCKING)
+ static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
+ {
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index 9123138aa9f48..012f622036049 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -19,28 +19,54 @@
+
+ #define BGID_ARRAY 64
+
++/* BIDs are addressed by a 16-bit field in a CQE */
++#define MAX_BIDS_PER_BGID (1 << 16)
++
+ struct io_provide_buf {
+ struct file *file;
+ __u64 addr;
+ __u32 len;
+ __u32 bgid;
+- __u16 nbufs;
++ __u32 nbufs;
+ __u16 bid;
+ };
+
++static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
++ struct io_buffer_list *bl,
++ unsigned int bgid)
++{
++ if (bl && bgid < BGID_ARRAY)
++ return &bl[bgid];
++
++ return xa_load(&ctx->io_bl_xa, bgid);
++}
++
++struct io_buf_free {
++ struct hlist_node list;
++ void *mem;
++ size_t size;
++ int inuse;
++};
++
+ static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
+ unsigned int bgid)
+ {
+- if (ctx->io_bl && bgid < BGID_ARRAY)
+- return &ctx->io_bl[bgid];
++ lockdep_assert_held(&ctx->uring_lock);
+
+- return xa_load(&ctx->io_bl_xa, bgid);
++ return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
+ }
+
+ static int io_buffer_add_list(struct io_ring_ctx *ctx,
+ struct io_buffer_list *bl, unsigned int bgid)
+ {
++ /*
++ * Store buffer group ID and finally mark the list as visible.
++ * The normal lookup doesn't care about the visibility as we're
++ * always under the ->uring_lock, but the RCU lookup from mmap does.
++ */
+ bl->bgid = bgid;
++ smp_store_release(&bl->is_ready, 1);
++
+ if (bgid < BGID_ARRAY)
+ return 0;
+
+@@ -191,21 +217,40 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
+
+ static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
+ {
++ struct io_buffer_list *bl;
+ int i;
+
+- ctx->io_bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list),
+- GFP_KERNEL);
+- if (!ctx->io_bl)
++ bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
++ if (!bl)
+ return -ENOMEM;
+
+ for (i = 0; i < BGID_ARRAY; i++) {
+- INIT_LIST_HEAD(&ctx->io_bl[i].buf_list);
+- ctx->io_bl[i].bgid = i;
++ INIT_LIST_HEAD(&bl[i].buf_list);
++ bl[i].bgid = i;
+ }
+
++ smp_store_release(&ctx->io_bl, bl);
+ return 0;
+ }
+
++/*
++ * Mark the given mapped range as free for reuse
++ */
++static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
++{
++ struct io_buf_free *ibf;
++
++ hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
++ if (bl->buf_ring == ibf->mem) {
++ ibf->inuse = 0;
++ return;
++ }
++ }
++
++ /* can't happen... */
++ WARN_ON_ONCE(1);
++}
++
+ static int __io_remove_buffers(struct io_ring_ctx *ctx,
+ struct io_buffer_list *bl, unsigned nbufs)
+ {
+@@ -218,7 +263,11 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
+ if (bl->is_mapped) {
+ i = bl->buf_ring->tail - bl->head;
+ if (bl->is_mmap) {
+- folio_put(virt_to_folio(bl->buf_ring));
++ /*
++ * io_kbuf_list_free() will free the page(s) at
++ * ->release() time.
++ */
++ io_kbuf_mark_free(ctx, bl);
+ bl->buf_ring = NULL;
+ bl->is_mmap = 0;
+ } else if (bl->buf_nr_pages) {
+@@ -267,7 +316,7 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
+ xa_for_each(&ctx->io_bl_xa, index, bl) {
+ xa_erase(&ctx->io_bl_xa, bl->bgid);
+ __io_remove_buffers(ctx, bl, -1U);
+- kfree(bl);
++ kfree_rcu(bl, rcu);
+ }
+
+ while (!list_empty(&ctx->io_buffers_pages)) {
+@@ -289,7 +338,7 @@ int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ return -EINVAL;
+
+ tmp = READ_ONCE(sqe->fd);
+- if (!tmp || tmp > USHRT_MAX)
++ if (!tmp || tmp > MAX_BIDS_PER_BGID)
+ return -EINVAL;
+
+ memset(p, 0, sizeof(*p));
+@@ -332,7 +381,7 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
+ return -EINVAL;
+
+ tmp = READ_ONCE(sqe->fd);
+- if (!tmp || tmp > USHRT_MAX)
++ if (!tmp || tmp > MAX_BIDS_PER_BGID)
+ return -E2BIG;
+ p->nbufs = tmp;
+ p->addr = READ_ONCE(sqe->addr);
+@@ -352,7 +401,7 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
+ tmp = READ_ONCE(sqe->off);
+ if (tmp > USHRT_MAX)
+ return -E2BIG;
+- if (tmp + p->nbufs >= USHRT_MAX)
++ if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
+ return -EINVAL;
+ p->bid = tmp;
+ return 0;
+@@ -452,7 +501,16 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
+ INIT_LIST_HEAD(&bl->buf_list);
+ ret = io_buffer_add_list(ctx, bl, p->bgid);
+ if (ret) {
+- kfree(bl);
++ /*
++ * Doesn't need rcu free as it was never visible, but
++			 * let's keep it consistent throughout. It also can't
++			 * be a lower indexed array group: adding one of those
++			 * never fails, so we could not have gotten here.
++ */
++ if (p->bgid >= BGID_ARRAY)
++ kfree_rcu(bl, rcu);
++ else
++ WARN_ON_ONCE(1);
+ goto err;
+ }
+ }
+@@ -523,19 +581,63 @@ error_unpin:
+ return -EINVAL;
+ }
+
+-static int io_alloc_pbuf_ring(struct io_uring_buf_reg *reg,
++/*
++ * See if we have a suitable region that we can reuse, rather than allocate
++ * both a new io_buf_free and mem region again. We leave it on the list as
++ * even a reused entry will need freeing at ring release.
++ */
++static struct io_buf_free *io_lookup_buf_free_entry(struct io_ring_ctx *ctx,
++ size_t ring_size)
++{
++ struct io_buf_free *ibf, *best = NULL;
++ size_t best_dist;
++
++ hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
++ size_t dist;
++
++ if (ibf->inuse || ibf->size < ring_size)
++ continue;
++ dist = ibf->size - ring_size;
++ if (!best || dist < best_dist) {
++ best = ibf;
++ if (!dist)
++ break;
++ best_dist = dist;
++ }
++ }
++
++ return best;
++}
++
++static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
++ struct io_uring_buf_reg *reg,
+ struct io_buffer_list *bl)
+ {
+- gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
++ struct io_buf_free *ibf;
+ size_t ring_size;
+ void *ptr;
+
+ ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);
+- ptr = (void *) __get_free_pages(gfp, get_order(ring_size));
+- if (!ptr)
+- return -ENOMEM;
+
+- bl->buf_ring = ptr;
++ /* Reuse existing entry, if we can */
++ ibf = io_lookup_buf_free_entry(ctx, ring_size);
++ if (!ibf) {
++ ptr = io_mem_alloc(ring_size);
++ if (!ptr)
++ return -ENOMEM;
++
++ /* Allocate and store deferred free entry */
++ ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
++ if (!ibf) {
++ io_mem_free(ptr);
++ return -ENOMEM;
++ }
++ ibf->mem = ptr;
++ ibf->size = ring_size;
++ hlist_add_head(&ibf->list, &ctx->io_buf_list);
++ }
++ ibf->inuse = 1;
++ bl->buf_ring = ibf->mem;
+ bl->is_mapped = 1;
+ bl->is_mmap = 1;
+ return 0;
+@@ -547,6 +649,8 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ struct io_buffer_list *bl, *free_bl = NULL;
+ int ret;
+
++ lockdep_assert_held(&ctx->uring_lock);
++
+ if (copy_from_user(&reg, arg, sizeof(reg)))
+ return -EFAULT;
+
+@@ -591,7 +695,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ if (!(reg.flags & IOU_PBUF_RING_MMAP))
+ ret = io_pin_pbuf_ring(&reg, bl);
+ else
+- ret = io_alloc_pbuf_ring(&reg, bl);
++ ret = io_alloc_pbuf_ring(ctx, &reg, bl);
+
+ if (!ret) {
+ bl->nr_entries = reg.ring_entries;
+@@ -601,7 +705,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ return 0;
+ }
+
+- kfree(free_bl);
++ kfree_rcu(free_bl, rcu);
+ return ret;
+ }
+
+@@ -610,6 +714,8 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ struct io_uring_buf_reg reg;
+ struct io_buffer_list *bl;
+
++ lockdep_assert_held(&ctx->uring_lock);
++
+ if (copy_from_user(&reg, arg, sizeof(reg)))
+ return -EFAULT;
+ if (reg.resv[0] || reg.resv[1] || reg.resv[2])
+@@ -626,7 +732,7 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ __io_remove_buffers(ctx, bl, -1U);
+ if (bl->bgid >= BGID_ARRAY) {
+ xa_erase(&ctx->io_bl_xa, bl->bgid);
+- kfree(bl);
++ kfree_rcu(bl, rcu);
+ }
+ return 0;
+ }
+@@ -635,9 +741,33 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
+ {
+ struct io_buffer_list *bl;
+
+- bl = io_buffer_get_list(ctx, bgid);
++ bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
++
+	if (!bl || !bl->is_mmap)
+		return NULL;
++	/*
++	 * Ensure the list is fully setup. Only strictly needed for RCU lookup
++	 * via mmap, and in that case only for the array indexed groups. For
++	 * the xarray lookups, it's either visible and ready, or not at all.
++	 */
++	if (!smp_load_acquire(&bl->is_ready))
++		return NULL;
+
+ return bl->buf_ring;
+ }
++
++/*
++ * Called at or after ->release(), free the mmap'ed buffers that we used
++ * for memory mapped provided buffer rings.
++ */
++void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx)
++{
++ struct io_buf_free *ibf;
++ struct hlist_node *tmp;
++
++ hlist_for_each_entry_safe(ibf, tmp, &ctx->io_buf_list, list) {
++ hlist_del(&ibf->list);
++ io_mem_free(ibf->mem);
++ kfree(ibf);
++ }
++}
+diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
+index d14345ef61fc8..3d0cb6b8c1ed2 100644
+--- a/io_uring/kbuf.h
++++ b/io_uring/kbuf.h
+@@ -15,6 +15,7 @@ struct io_buffer_list {
+ struct page **buf_pages;
+ struct io_uring_buf_ring *buf_ring;
+ };
++ struct rcu_head rcu;
+ };
+ __u16 bgid;
+
+@@ -28,6 +29,8 @@ struct io_buffer_list {
+ __u8 is_mapped;
+ /* ring mapped provided buffers, but mmap'ed by application */
+ __u8 is_mmap;
++ /* bl is visible from an RCU point of view for lookup */
++ __u8 is_ready;
+ };
+
+ struct io_buffer {
+@@ -51,6 +54,8 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
+ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
+ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
+
++void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);
++
+ unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
+
+ void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 7a8e298af81b3..75d494dad7e2c 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -1461,16 +1461,6 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ int ret;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+
+- if (connect->in_progress) {
+- struct socket *socket;
+-
+- ret = -ENOTSOCK;
+- socket = sock_from_file(req->file);
+- if (socket)
+- ret = sock_error(socket->sk);
+- goto out;
+- }
+-
+ if (req_has_async_data(req)) {
+ io = req->async_data;
+ } else {
+@@ -1490,9 +1480,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ && force_nonblock) {
+ if (ret == -EINPROGRESS) {
+ connect->in_progress = true;
+- return -EAGAIN;
+- }
+- if (ret == -ECONNABORTED) {
++ } else if (ret == -ECONNABORTED) {
+ if (connect->seen_econnaborted)
+ goto out;
+ connect->seen_econnaborted = true;
+@@ -1506,6 +1494,16 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ memcpy(req->async_data, &__io, sizeof(__io));
+ return -EAGAIN;
+ }
++ if (connect->in_progress) {
++ /*
++ * At least bluetooth will return -EBADFD on a re-connect
++ * attempt, and it's (supposedly) also valid to get -EISCONN
++ * which means the previous result is good. For both of these,
++ * grab the sock_error() and use that for the completion.
++ */
++ if (ret == -EBADFD || ret == -EISCONN)
++ ret = sock_error(sock_from_file(req->file)->sk);
++ }
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ out:
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index d9c853d105878..dde501abd7196 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -1261,7 +1261,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
+ */
+ const struct bio_vec *bvec = imu->bvec;
+
+- if (offset <= bvec->bv_len) {
++ if (offset < bvec->bv_len) {
+ /*
+ * Note, huge pages buffers consists of one large
+ * bvec entry and should always go this way. The other
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index bd6c2c7959a5b..65b5dbe3c850e 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -214,6 +214,7 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd)
+ did_sig = get_signal(&ksig);
+ cond_resched();
+ mutex_lock(&sqd->lock);
++ sqd->sq_cpu = raw_smp_processor_id();
+ }
+ return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
+ }
+@@ -229,10 +230,15 @@ static int io_sq_thread(void *data)
+ snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
+ set_task_comm(current, buf);
+
+- if (sqd->sq_cpu != -1)
++ /* reset to our pid after we've set task_comm, for fdinfo */
++ sqd->task_pid = current->pid;
++
++ if (sqd->sq_cpu != -1) {
+ set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
+- else
++ } else {
+ set_cpus_allowed_ptr(current, cpu_online_mask);
++ sqd->sq_cpu = raw_smp_processor_id();
++ }
+
+ mutex_lock(&sqd->lock);
+ while (1) {
+@@ -261,6 +267,7 @@ static int io_sq_thread(void *data)
+ mutex_unlock(&sqd->lock);
+ cond_resched();
+ mutex_lock(&sqd->lock);
++ sqd->sq_cpu = raw_smp_processor_id();
+ }
+ continue;
+ }
+@@ -294,6 +301,7 @@ static int io_sq_thread(void *data)
+ mutex_unlock(&sqd->lock);
+ schedule();
+ mutex_lock(&sqd->lock);
++ sqd->sq_cpu = raw_smp_processor_id();
+ }
+ list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+ atomic_andnot(IORING_SQ_NEED_WAKEUP,
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
+index 65075f1e4ac8c..7a98cd176a127 100644
+--- a/kernel/audit_watch.c
++++ b/kernel/audit_watch.c
+@@ -527,11 +527,18 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
+ unsigned long ino;
+ dev_t dev;
+
+- exe_file = get_task_exe_file(tsk);
++ /* only do exe filtering if we are recording @current events/records */
++ if (tsk != current)
++ return 0;
++
++ if (!current->mm)
++ return 0;
++ exe_file = get_mm_exe_file(current->mm);
+ if (!exe_file)
+ return 0;
+ ino = file_inode(exe_file)->i_ino;
+ dev = file_inode(exe_file)->i_sb->s_dev;
+ fput(exe_file);
++
+ return audit_mark_compare(mark, ino, dev);
+ }
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 4e3ce0542e31f..64fcd81ad3da4 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -623,7 +623,11 @@ static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
+
+ if (val < ksym->start)
+ return -1;
+- if (val >= ksym->end)
++ /* Ensure that we detect return addresses as part of the program, when
++ * the final instruction is a call for a program part of the stack
++ * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
++ */
++ if (val > ksym->end)
+ return 1;
+
+ return 0;
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index a8c7e1c5abfac..fd8d4b0addfca 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -155,13 +155,15 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
+ hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+
+ preempt_disable();
++ local_irq_save(flags);
+ if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
+ __this_cpu_dec(*(htab->map_locked[hash]));
++ local_irq_restore(flags);
+ preempt_enable();
+ return -EBUSY;
+ }
+
+- raw_spin_lock_irqsave(&b->raw_lock, flags);
++ raw_spin_lock(&b->raw_lock);
+ *pflags = flags;
+
+ return 0;
+@@ -172,8 +174,9 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
+ unsigned long flags)
+ {
+ hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+- raw_spin_unlock_irqrestore(&b->raw_lock, flags);
++ raw_spin_unlock(&b->raw_lock);
+ __this_cpu_dec(*(htab->map_locked[hash]));
++ local_irq_restore(flags);
+ preempt_enable();
+ }
+
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 8bd3812fb8df4..607be04db75b9 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -1176,13 +1176,6 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
+ ret = -EBUSY;
+ goto out;
+ }
+- if (!atomic64_read(&map->usercnt)) {
+- /* maps with timers must be either held by user space
+- * or pinned in bpffs.
+- */
+- ret = -EPERM;
+- goto out;
+- }
+ /* allocate hrtimer via map_kmalloc to use memcg accounting */
+ t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
+ if (!t) {
+@@ -1195,7 +1188,21 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
+ rcu_assign_pointer(t->callback_fn, NULL);
+ hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
+ t->timer.function = bpf_timer_cb;
+- timer->timer = t;
++ WRITE_ONCE(timer->timer, t);
++ /* Guarantee the order between timer->timer and map->usercnt. So
++ * when there are concurrent uref release and bpf timer init, either
++	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
++ * timer or atomic64_read() below returns a zero usercnt.
++ */
++ smp_mb();
++ if (!atomic64_read(&map->usercnt)) {
++ /* maps with timers must be either held by user space
++ * or pinned in bpffs.
++ */
++ WRITE_ONCE(timer->timer, NULL);
++ kfree(t);
++ ret = -EPERM;
++ }
+ out:
+ __bpf_spin_unlock_irqrestore(&timer->lock);
+ return ret;
+@@ -1370,7 +1377,7 @@ void bpf_timer_cancel_and_free(void *val)
+ /* The subsequent bpf_timer_start/cancel() helpers won't be able to use
+ * this timer, since it won't be initialized.
+ */
+- timer->timer = NULL;
++ WRITE_ONCE(timer->timer, NULL);
+ out:
+ __bpf_spin_unlock_irqrestore(&timer->lock);
+ if (!t)
+@@ -2197,7 +2204,12 @@ __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
+ __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
+ struct cgroup *ancestor)
+ {
+- return task_under_cgroup_hierarchy(task, ancestor);
++ long ret;
++
++ rcu_read_lock();
++ ret = task_under_cgroup_hierarchy(task, ancestor);
++ rcu_read_unlock();
++ return ret;
+ }
+ #endif /* CONFIG_CGROUPS */
+
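
The comment above the smp_mb() is the whole fix; laid out as the usual two-column store/load pairing it reads as below. This is schematic only -- the release side is bpf_timer_cancel_and_free(), reached once bpf_map_put_uref() drops the count to zero via a fully ordered atomic64_dec_and_test():

/*
 *   bpf_timer_init()                     uref release
 *   ----------------                     ------------
 *   WRITE_ONCE(timer->timer, t);         atomic64_dec_and_test(&map->usercnt)
 *   smp_mb();                            ... fully ordered RMW ...
 *   atomic64_read(&map->usercnt) == 0?   t = READ_ONCE(timer->timer);
 *     -> undo, return -EPERM               -> if (t) cancel and free it
 *
 * Whichever way the two sides interleave, at least one of them observes
 * the other's store: the timer is either torn down by its creator or
 * seen non-NULL and freed by the release path, never silently leaked.
 */
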
+diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
+index d93ddac283d40..956f80ee6f5c5 100644
+--- a/kernel/bpf/memalloc.c
++++ b/kernel/bpf/memalloc.c
+@@ -958,6 +958,8 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
+ memcg = get_memcg(c);
+ old_memcg = set_active_memcg(memcg);
+ ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
++ if (ret)
++ *(struct bpf_mem_cache **)ret = c;
+ set_active_memcg(old_memcg);
+ mem_cgroup_put(memcg);
+ }
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index 53ff50cac61ea..e97aeda3a86b5 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -415,8 +415,8 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
+ goto out;
+ }
+
+- /* clear all bits except SHARE_IPMODIFY */
+- tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;
++ /* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
++ tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
+
+ if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
+ tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 873ade146f3de..824531d4c262a 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1515,7 +1515,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
+ if (state->in_async_callback_fn)
+ verbose(env, " async_cb");
+ verbose(env, "\n");
+- mark_verifier_state_clean(env);
++ if (!print_all)
++ mark_verifier_state_clean(env);
+ }
+
+ static inline u32 vlog_alignment(u32 pos)
+@@ -3200,12 +3201,29 @@ static int push_jmp_history(struct bpf_verifier_env *env,
+
+ /* Backtrack one insn at a time. If idx is not at the top of recorded
+ * history then previous instruction came from straight line execution.
++ * Return -ENOENT if we exhausted all instructions within given state.
++ *
++ * It's legal to have a bit of a looping with the same starting and ending
++ * insn index within the same state, e.g.: 3->4->5->3, so just because current
++ * instruction index is the same as state's first_idx doesn't mean we are
++ * done. If there is still some jump history left, we should keep going. We
++ * need to take into account that we might have a jump history between given
++ * state's parent and itself, due to checkpointing. In this case, we'll have
++ * history entry recording a jump from last instruction of parent state and
++ * first instruction of given state.
+ */
+ static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
+ u32 *history)
+ {
+ u32 cnt = *history;
+
++ if (i == st->first_insn_idx) {
++ if (cnt == 0)
++ return -ENOENT;
++ if (cnt == 1 && st->jmp_history[0].idx == i)
++ return -ENOENT;
++ }
++
+ if (cnt && st->jmp_history[cnt - 1].idx == i) {
+ i = st->jmp_history[cnt - 1].prev_idx;
+ (*history)--;
+@@ -3426,7 +3444,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ if (class == BPF_ALU || class == BPF_ALU64) {
+ if (!bt_is_reg_set(bt, dreg))
+ return 0;
+- if (opcode == BPF_MOV) {
++ if (opcode == BPF_END || opcode == BPF_NEG) {
++ /* sreg is reserved and unused
++ * dreg still need precision before this insn
++			 * dreg still needs precision before this insn
++ return 0;
++ } else if (opcode == BPF_MOV) {
+ if (BPF_SRC(insn->code) == BPF_X) {
+ /* dreg = sreg or dreg = (s8, s16, s32)sreg
+ * dreg needs precision after this insn
+@@ -4080,10 +4103,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ * Nothing to be tracked further in the parent state.
+ */
+ return 0;
+- if (i == first_idx)
+- break;
+ subseq_idx = i;
+ i = get_prev_insn_idx(st, i, &history);
++ if (i == -ENOENT)
++ break;
+ if (i >= env->prog->len) {
+ /* This can happen if backtracking reached insn 0
+ * and there are still reg_mask or stack_mask
+@@ -4358,7 +4381,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ insn->imm != 0 && env->bpf_capable) {
+ struct bpf_reg_state fake_reg = {};
+
+- __mark_reg_known(&fake_reg, (u32)insn->imm);
++ __mark_reg_known(&fake_reg, insn->imm);
+ fake_reg.type = SCALAR_VALUE;
+ save_register_state(state, spi, &fake_reg, size);
+ } else if (reg && is_spillable_regtype(reg->type)) {
+@@ -11202,6 +11225,10 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ break;
+ }
+ case KF_ARG_PTR_TO_CALLBACK:
++ if (reg->type != PTR_TO_FUNC) {
++ verbose(env, "arg%d expected pointer to func\n", i);
++ return -EINVAL;
++ }
+ meta->subprogno = reg->subprogno;
+ break;
+ case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
+@@ -14135,6 +14162,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ !sanitize_speculative_path(env, insn, *insn_idx + 1,
+ *insn_idx))
+ return -EFAULT;
++ if (env->log.level & BPF_LOG_LEVEL)
++ print_insn_state(env, this_branch->frame[this_branch->curframe]);
+ *insn_idx += insn->off;
+ return 0;
+ } else if (pred == 0) {
+@@ -14147,6 +14176,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ *insn_idx + insn->off + 1,
+ *insn_idx))
+ return -EFAULT;
++ if (env->log.level & BPF_LOG_LEVEL)
++ print_insn_state(env, this_branch->frame[this_branch->curframe]);
+ return 0;
+ }
+
+@@ -14725,8 +14756,7 @@ enum {
+ * w - next instruction
+ * e - edge
+ */
+-static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
+- bool loop_ok)
++static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
+ {
+ int *insn_stack = env->cfg.insn_stack;
+ int *insn_state = env->cfg.insn_state;
+@@ -14758,7 +14788,7 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
+ insn_stack[env->cfg.cur_stack++] = w;
+ return KEEP_EXPLORING;
+ } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
+- if (loop_ok && env->bpf_capable)
++ if (env->bpf_capable)
+ return DONE_EXPLORING;
+ verbose_linfo(env, t, "%d: ", t);
+ verbose_linfo(env, w, "%d: ", w);
+@@ -14778,24 +14808,20 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
+ struct bpf_verifier_env *env,
+ bool visit_callee)
+ {
+- int ret;
++ int ret, insn_sz;
+
+- ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
++ insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
++ ret = push_insn(t, t + insn_sz, FALLTHROUGH, env);
+ if (ret)
+ return ret;
+
+- mark_prune_point(env, t + 1);
++ mark_prune_point(env, t + insn_sz);
+ /* when we exit from subprog, we need to record non-linear history */
+- mark_jmp_point(env, t + 1);
++ mark_jmp_point(env, t + insn_sz);
+
+ if (visit_callee) {
+ mark_prune_point(env, t);
+- ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
+- /* It's ok to allow recursion from CFG point of
+- * view. __check_func_call() will do the actual
+- * check.
+- */
+- bpf_pseudo_func(insns + t));
++ ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
+ }
+ return ret;
+ }
+@@ -14808,15 +14834,17 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
+ static int visit_insn(int t, struct bpf_verifier_env *env)
+ {
+ struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
+- int ret, off;
++ int ret, off, insn_sz;
+
+ if (bpf_pseudo_func(insn))
+ return visit_func_call_insn(t, insns, env, true);
+
+ /* All non-branch instructions have a single fall-through edge. */
+ if (BPF_CLASS(insn->code) != BPF_JMP &&
+- BPF_CLASS(insn->code) != BPF_JMP32)
+- return push_insn(t, t + 1, FALLTHROUGH, env, false);
++ BPF_CLASS(insn->code) != BPF_JMP32) {
++ insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
++ return push_insn(t, t + insn_sz, FALLTHROUGH, env);
++ }
+
+ switch (BPF_OP(insn->code)) {
+ case BPF_EXIT:
+@@ -14862,8 +14890,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
+ off = insn->imm;
+
+ /* unconditional jump with single edge */
+- ret = push_insn(t, t + off + 1, FALLTHROUGH, env,
+- true);
++ ret = push_insn(t, t + off + 1, FALLTHROUGH, env);
+ if (ret)
+ return ret;
+
+@@ -14876,11 +14903,11 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
+ /* conditional jump with two edges */
+ mark_prune_point(env, t);
+
+- ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
++ ret = push_insn(t, t + 1, FALLTHROUGH, env);
+ if (ret)
+ return ret;
+
+- return push_insn(t, t + insn->off + 1, BRANCH, env, true);
++ return push_insn(t, t + insn->off + 1, BRANCH, env);
+ }
+ }
+
+@@ -14935,11 +14962,21 @@ static int check_cfg(struct bpf_verifier_env *env)
+ }
+
+ for (i = 0; i < insn_cnt; i++) {
++ struct bpf_insn *insn = &env->prog->insnsi[i];
++
+ if (insn_state[i] != EXPLORED) {
+ verbose(env, "unreachable insn %d\n", i);
+ ret = -EINVAL;
+ goto err_free;
+ }
++ if (bpf_is_ldimm64(insn)) {
++ if (insn_state[i + 1] != 0) {
++ verbose(env, "jump into the middle of ldimm64 insn %d\n", i);
++ ret = -EINVAL;
++ goto err_free;
++ }
++ i++; /* skip second half of ldimm64 */
++ }
+ }
+ ret = 0; /* cfg looks good */
+
+@@ -19641,6 +19678,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
+ if (!tr)
+ return -ENOMEM;
+
++ if (tgt_prog && tgt_prog->aux->tail_call_reachable)
++ tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;
++
+ prog->aux->dst_trampoline = tr;
+ return 0;
+ }
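
The insn_sz bookkeeping exists because BPF_LD | BPF_DW | BPF_IMM is the one double-wide BPF instruction, so a branch target inside its second slot was previously explorable. A small sketch using the kernel's instruction macros from <linux/filter.h> (a hypothetical program, not something in this patch):

struct bpf_insn prog[] = {
	BPF_LD_IMM64(BPF_REG_0, 0x1122334455667788ULL),	/* slots 0 and 1 */
	BPF_EXIT_INSN(),				/* slot 2 */
};

check_cfg() now steps from slot 0 straight to slot 2; any jump that lands on slot 1 leaves insn_state[1] non-zero, and the program is rejected with "jump into the middle of ldimm64 insn 0" rather than being verified against half an instruction.
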
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 1fb7f562289d5..518725b57200c 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -3867,14 +3867,6 @@ static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
+ return psi_trigger_poll(&ctx->psi.trigger, of->file, pt);
+ }
+
+-static int cgroup_pressure_open(struct kernfs_open_file *of)
+-{
+- if (of->file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
+- return -EPERM;
+-
+- return 0;
+-}
+-
+ static void cgroup_pressure_release(struct kernfs_open_file *of)
+ {
+ struct cgroup_file_ctx *ctx = of->priv;
+@@ -5275,7 +5267,6 @@ static struct cftype cgroup_psi_files[] = {
+ {
+ .name = "io.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_IO]),
+- .open = cgroup_pressure_open,
+ .seq_show = cgroup_io_pressure_show,
+ .write = cgroup_io_pressure_write,
+ .poll = cgroup_pressure_poll,
+@@ -5284,7 +5275,6 @@ static struct cftype cgroup_psi_files[] = {
+ {
+ .name = "memory.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_MEM]),
+- .open = cgroup_pressure_open,
+ .seq_show = cgroup_memory_pressure_show,
+ .write = cgroup_memory_pressure_write,
+ .poll = cgroup_pressure_poll,
+@@ -5293,7 +5283,6 @@ static struct cftype cgroup_psi_files[] = {
+ {
+ .name = "cpu.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_CPU]),
+- .open = cgroup_pressure_open,
+ .seq_show = cgroup_cpu_pressure_show,
+ .write = cgroup_cpu_pressure_write,
+ .poll = cgroup_pressure_poll,
+@@ -5303,7 +5292,6 @@ static struct cftype cgroup_psi_files[] = {
+ {
+ .name = "irq.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_IRQ]),
+- .open = cgroup_pressure_open,
+ .seq_show = cgroup_irq_pressure_show,
+ .write = cgroup_irq_pressure_write,
+ .poll = cgroup_pressure_poll,
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 58ec88efa4f82..4749e0c86c62c 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1304,13 +1304,23 @@ static int update_partition_exclusive(struct cpuset *cs, int new_prs)
+ *
+ * Changing load balance flag will automatically call
+ * rebuild_sched_domains_locked().
++ * This function is for cgroup v2 only.
+ */
+ static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
+ {
+ int new_prs = cs->partition_root_state;
+- bool new_lb = (new_prs != PRS_ISOLATED);
+ bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
++ bool new_lb;
+
++ /*
++ * If cs is not a valid partition root, the load balance state
++ * will follow its parent.
++ */
++ if (new_prs > 0) {
++ new_lb = (new_prs != PRS_ISOLATED);
++ } else {
++ new_lb = is_sched_load_balance(parent_cs(cs));
++ }
+ if (new_lb != !!is_sched_load_balance(cs)) {
+ rebuild_domains = true;
+ if (new_lb)
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 6de7c6bb74eee..303cb0591b4b1 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -659,11 +659,19 @@ static inline bool cpu_smt_thread_allowed(unsigned int cpu)
+ #endif
+ }
+
+-static inline bool cpu_smt_allowed(unsigned int cpu)
++static inline bool cpu_bootable(unsigned int cpu)
+ {
+ if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
+ return true;
+
++ /* All CPUs are bootable if controls are not configured */
++ if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
++ return true;
++
++ /* All CPUs are bootable if CPU is not SMT capable */
++ if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
++ return true;
++
+ if (topology_is_primary_thread(cpu))
+ return true;
+
+@@ -685,7 +693,7 @@ bool cpu_smt_possible(void)
+ EXPORT_SYMBOL_GPL(cpu_smt_possible);
+
+ #else
+-static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
++static inline bool cpu_bootable(unsigned int cpu) { return true; }
+ #endif
+
+ static inline enum cpuhp_state
+@@ -788,10 +796,10 @@ static int bringup_wait_for_ap_online(unsigned int cpu)
+ * SMT soft disabling on X86 requires to bring the CPU out of the
+ * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
+ * CPU marked itself as booted_once in notify_cpu_starting() so the
+- * cpu_smt_allowed() check will now return false if this is not the
++ * cpu_bootable() check will now return false if this is not the
+ * primary sibling.
+ */
+- if (!cpu_smt_allowed(cpu))
++ if (!cpu_bootable(cpu))
+ return -ECANCELED;
+ return 0;
+ }
+@@ -1515,11 +1523,14 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
+ /*
+ * Ensure that the control task does not run on the to be offlined
+ * CPU to prevent a deadlock against cfs_b->period_timer.
++ * Also keep at least one housekeeping cpu onlined to avoid generating
++ * an empty sched_domain span.
+ */
+- cpu = cpumask_any_but(cpu_online_mask, cpu);
+- if (cpu >= nr_cpu_ids)
+- return -EBUSY;
+- return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
++ for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
++ if (cpu != work.cpu)
++ return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
++ }
++ return -EBUSY;
+ }
+
+ static int cpu_down(unsigned int cpu, enum cpuhp_state target)
+@@ -1741,7 +1752,7 @@ static int cpu_up(unsigned int cpu, enum cpuhp_state target)
+ err = -EBUSY;
+ goto out;
+ }
+- if (!cpu_smt_allowed(cpu)) {
++ if (!cpu_bootable(cpu)) {
+ err = -EPERM;
+ goto out;
+ }
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index 621037a0aa870..ce1bb2301c061 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -1006,6 +1006,9 @@ void kgdb_panic(const char *msg)
+ if (panic_timeout)
+ return;
+
++ debug_locks_off();
++ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
++
+ if (dbg_kdb_mode)
+ kdb_printf("PANIC: %s\n", msg);
+
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index dff067bd56b1e..2048194a03bed 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -283,7 +283,8 @@ static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
+ }
+
+ for (i = 0; i < mem->nslabs; i++) {
+- mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
++ mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
++ mem->nslabs - i);
+ mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
+ mem->slots[i].alloc_size = 0;
+ }
+@@ -558,29 +559,40 @@ void __init swiotlb_exit(void)
+ * alloc_dma_pages() - allocate pages to be used for DMA
+ * @gfp: GFP flags for the allocation.
+ * @bytes: Size of the buffer.
++ * @phys_limit: Maximum allowed physical address of the buffer.
+ *
+ * Allocate pages from the buddy allocator. If successful, make the allocated
+ * pages decrypted that they can be used for DMA.
+ *
+- * Return: Decrypted pages, or %NULL on failure.
++ * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
++ * if the allocated physical address was above @phys_limit.
+ */
+-static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes)
++static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)
+ {
+ unsigned int order = get_order(bytes);
+ struct page *page;
++ phys_addr_t paddr;
+ void *vaddr;
+
+ page = alloc_pages(gfp, order);
+ if (!page)
+ return NULL;
+
+- vaddr = page_address(page);
++ paddr = page_to_phys(page);
++ if (paddr + bytes - 1 > phys_limit) {
++ __free_pages(page, order);
++ return ERR_PTR(-EAGAIN);
++ }
++
++ vaddr = phys_to_virt(paddr);
+ if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
+ goto error;
+ return page;
+
+ error:
+- __free_pages(page, order);
++ /* Intentional leak if pages cannot be encrypted again. */
++ if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
++ __free_pages(page, order);
+ return NULL;
+ }
+
+@@ -618,11 +630,7 @@ static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
+ else if (phys_limit <= DMA_BIT_MASK(32))
+ gfp |= __GFP_DMA32;
+
+- while ((page = alloc_dma_pages(gfp, bytes)) &&
+- page_to_phys(page) + bytes - 1 > phys_limit) {
+- /* allocated, but too high */
+- __free_pages(page, get_order(bytes));
+-
++ while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
+ if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
+ phys_limit < DMA_BIT_MASK(64) &&
+ !(gfp & (__GFP_DMA32 | __GFP_DMA)))
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index a2f2a9525d72e..6dbb03c532375 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -375,6 +375,7 @@ enum event_type_t {
+ EVENT_TIME = 0x4,
+ /* see ctx_resched() for details */
+ EVENT_CPU = 0x8,
++ EVENT_CGROUP = 0x10,
+ EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+ };
+
+@@ -684,20 +685,26 @@ do { \
+ ___p; \
+ })
+
+-static void perf_ctx_disable(struct perf_event_context *ctx)
++static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
+ {
+ struct perf_event_pmu_context *pmu_ctx;
+
+- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
++ list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
++ if (cgroup && !pmu_ctx->nr_cgroups)
++ continue;
+ perf_pmu_disable(pmu_ctx->pmu);
++ }
+ }
+
+-static void perf_ctx_enable(struct perf_event_context *ctx)
++static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
+ {
+ struct perf_event_pmu_context *pmu_ctx;
+
+- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
++ list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
++ if (cgroup && !pmu_ctx->nr_cgroups)
++ continue;
+ perf_pmu_enable(pmu_ctx->pmu);
++ }
+ }
+
+ static void ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type);
+@@ -856,9 +863,9 @@ static void perf_cgroup_switch(struct task_struct *task)
+ return;
+
+ perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+- perf_ctx_disable(&cpuctx->ctx);
++ perf_ctx_disable(&cpuctx->ctx, true);
+
+- ctx_sched_out(&cpuctx->ctx, EVENT_ALL);
++ ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
+ /*
+ * must not be done before ctxswout due
+ * to update_cgrp_time_from_cpuctx() in
+@@ -870,9 +877,9 @@ static void perf_cgroup_switch(struct task_struct *task)
+ * perf_cgroup_set_timestamp() in ctx_sched_in()
+ * to not have to pass task around
+ */
+- ctx_sched_in(&cpuctx->ctx, EVENT_ALL);
++ ctx_sched_in(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
+
+- perf_ctx_enable(&cpuctx->ctx);
++ perf_ctx_enable(&cpuctx->ctx, true);
+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
+ }
+
+@@ -965,6 +972,8 @@ perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ct
+ if (!is_cgroup_event(event))
+ return;
+
++ event->pmu_ctx->nr_cgroups++;
++
+ /*
+ * Because cgroup events are always per-cpu events,
+ * @ctx == &cpuctx->ctx.
+@@ -985,6 +994,8 @@ perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *c
+ if (!is_cgroup_event(event))
+ return;
+
++ event->pmu_ctx->nr_cgroups--;
++
+ /*
+ * Because cgroup events are always per-cpu events,
+ * @ctx == &cpuctx->ctx.
+@@ -2679,9 +2690,9 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
+
+ event_type &= EVENT_ALL;
+
+- perf_ctx_disable(&cpuctx->ctx);
++ perf_ctx_disable(&cpuctx->ctx, false);
+ if (task_ctx) {
+- perf_ctx_disable(task_ctx);
++ perf_ctx_disable(task_ctx, false);
+ task_ctx_sched_out(task_ctx, event_type);
+ }
+
+@@ -2699,9 +2710,9 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
+
+ perf_event_sched_in(cpuctx, task_ctx);
+
+- perf_ctx_enable(&cpuctx->ctx);
++ perf_ctx_enable(&cpuctx->ctx, false);
+ if (task_ctx)
+- perf_ctx_enable(task_ctx);
++ perf_ctx_enable(task_ctx, false);
+ }
+
+ void perf_pmu_resched(struct pmu *pmu)
+@@ -3246,6 +3257,9 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
+ struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
+ struct perf_event_pmu_context *pmu_ctx;
+ int is_active = ctx->is_active;
++ bool cgroup = event_type & EVENT_CGROUP;
++
++ event_type &= ~EVENT_CGROUP;
+
+ lockdep_assert_held(&ctx->lock);
+
+@@ -3292,8 +3306,11 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
+
+ is_active ^= ctx->is_active; /* changed bits */
+
+- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
++ list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
++ if (cgroup && !pmu_ctx->nr_cgroups)
++ continue;
+ __pmu_ctx_sched_out(pmu_ctx, is_active);
++ }
+ }
+
+ /*
+@@ -3484,7 +3501,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
+ raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+ if (context_equiv(ctx, next_ctx)) {
+
+- perf_ctx_disable(ctx);
++ perf_ctx_disable(ctx, false);
+
+ /* PMIs are disabled; ctx->nr_pending is stable. */
+ if (local_read(&ctx->nr_pending) ||
+@@ -3504,7 +3521,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
+ perf_ctx_sched_task_cb(ctx, false);
+ perf_event_swap_task_ctx_data(ctx, next_ctx);
+
+- perf_ctx_enable(ctx);
++ perf_ctx_enable(ctx, false);
+
+ /*
+ * RCU_INIT_POINTER here is safe because we've not
+@@ -3528,13 +3545,13 @@ unlock:
+
+ if (do_switch) {
+ raw_spin_lock(&ctx->lock);
+- perf_ctx_disable(ctx);
++ perf_ctx_disable(ctx, false);
+
+ inside_switch:
+ perf_ctx_sched_task_cb(ctx, false);
+ task_ctx_sched_out(ctx, EVENT_ALL);
+
+- perf_ctx_enable(ctx);
++ perf_ctx_enable(ctx, false);
+ raw_spin_unlock(&ctx->lock);
+ }
+ }
+@@ -3820,47 +3837,32 @@ static int merge_sched_in(struct perf_event *event, void *data)
+ return 0;
+ }
+
+-static void ctx_pinned_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
++static void pmu_groups_sched_in(struct perf_event_context *ctx,
++ struct perf_event_groups *groups,
++ struct pmu *pmu)
+ {
+- struct perf_event_pmu_context *pmu_ctx;
+ int can_add_hw = 1;
+-
+- if (pmu) {
+- visit_groups_merge(ctx, &ctx->pinned_groups,
+- smp_processor_id(), pmu,
+- merge_sched_in, &can_add_hw);
+- } else {
+- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
+- can_add_hw = 1;
+- visit_groups_merge(ctx, &ctx->pinned_groups,
+- smp_processor_id(), pmu_ctx->pmu,
+- merge_sched_in, &can_add_hw);
+- }
+- }
++ visit_groups_merge(ctx, groups, smp_processor_id(), pmu,
++ merge_sched_in, &can_add_hw);
+ }
+
+-static void ctx_flexible_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
++static void ctx_groups_sched_in(struct perf_event_context *ctx,
++ struct perf_event_groups *groups,
++ bool cgroup)
+ {
+ struct perf_event_pmu_context *pmu_ctx;
+- int can_add_hw = 1;
+
+- if (pmu) {
+- visit_groups_merge(ctx, &ctx->flexible_groups,
+- smp_processor_id(), pmu,
+- merge_sched_in, &can_add_hw);
+- } else {
+- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
+- can_add_hw = 1;
+- visit_groups_merge(ctx, &ctx->flexible_groups,
+- smp_processor_id(), pmu_ctx->pmu,
+- merge_sched_in, &can_add_hw);
+- }
++ list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
++ if (cgroup && !pmu_ctx->nr_cgroups)
++ continue;
++ pmu_groups_sched_in(ctx, groups, pmu_ctx->pmu);
+ }
+ }
+
+-static void __pmu_ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
++static void __pmu_ctx_sched_in(struct perf_event_context *ctx,
++ struct pmu *pmu)
+ {
+- ctx_flexible_sched_in(ctx, pmu);
++ pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu);
+ }
+
+ static void
+@@ -3868,6 +3870,9 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
+ {
+ struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
+ int is_active = ctx->is_active;
++ bool cgroup = event_type & EVENT_CGROUP;
++
++ event_type &= ~EVENT_CGROUP;
+
+ lockdep_assert_held(&ctx->lock);
+
+@@ -3900,11 +3905,11 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
+ * in order to give them the best chance of going on.
+ */
+ if (is_active & EVENT_PINNED)
+- ctx_pinned_sched_in(ctx, NULL);
++ ctx_groups_sched_in(ctx, &ctx->pinned_groups, cgroup);
+
+ /* Then walk through the lower prio flexible groups */
+ if (is_active & EVENT_FLEXIBLE)
+- ctx_flexible_sched_in(ctx, NULL);
++ ctx_groups_sched_in(ctx, &ctx->flexible_groups, cgroup);
+ }
+
+ static void perf_event_context_sched_in(struct task_struct *task)
+@@ -3919,11 +3924,11 @@ static void perf_event_context_sched_in(struct task_struct *task)
+
+ if (cpuctx->task_ctx == ctx) {
+ perf_ctx_lock(cpuctx, ctx);
+- perf_ctx_disable(ctx);
++ perf_ctx_disable(ctx, false);
+
+ perf_ctx_sched_task_cb(ctx, true);
+
+- perf_ctx_enable(ctx);
++ perf_ctx_enable(ctx, false);
+ perf_ctx_unlock(cpuctx, ctx);
+ goto rcu_unlock;
+ }
+@@ -3936,7 +3941,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
+ if (!ctx->nr_events)
+ goto unlock;
+
+- perf_ctx_disable(ctx);
++ perf_ctx_disable(ctx, false);
+ /*
+ * We want to keep the following priority order:
+ * cpu pinned (that don't need to move), task pinned,
+@@ -3946,7 +3951,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
+ * events, no need to flip the cpuctx's events around.
+ */
+ if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
+- perf_ctx_disable(&cpuctx->ctx);
++ perf_ctx_disable(&cpuctx->ctx, false);
+ ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
+ }
+
+@@ -3955,9 +3960,9 @@ static void perf_event_context_sched_in(struct task_struct *task)
+ perf_ctx_sched_task_cb(cpuctx->task_ctx, true);
+
+ if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
+- perf_ctx_enable(&cpuctx->ctx);
++ perf_ctx_enable(&cpuctx->ctx, false);
+
+- perf_ctx_enable(ctx);
++ perf_ctx_enable(ctx, false);
+
+ unlock:
+ perf_ctx_unlock(cpuctx, ctx);
+@@ -4811,6 +4816,11 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
+ void *task_ctx_data = NULL;
+
+ if (!ctx->task) {
++ /*
++ * perf_pmu_migrate_context() / __perf_pmu_install_event()
++ * relies on the fact that find_get_pmu_context() cannot fail
++ * for CPU contexts.
++ */
+ struct perf_cpu_pmu_context *cpc;
+
+ cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
+@@ -12872,6 +12882,9 @@ static void __perf_pmu_install_event(struct pmu *pmu,
+ int cpu, struct perf_event *event)
+ {
+ struct perf_event_pmu_context *epc;
++ struct perf_event_context *old_ctx = event->ctx;
++
++ get_ctx(ctx); /* normally find_get_context() */
+
+ event->cpu = cpu;
+ epc = find_get_pmu_context(pmu, ctx, event);
+@@ -12880,6 +12893,11 @@ static void __perf_pmu_install_event(struct pmu *pmu,
+ if (event->state >= PERF_EVENT_STATE_OFF)
+ event->state = PERF_EVENT_STATE_INACTIVE;
+ perf_install_in_context(ctx, event, cpu);
++
++ /*
++ * Now that event->ctx is updated and visible, put the old ctx.
++ */
++ put_ctx(old_ctx);
+ }
+
+ static void __perf_pmu_install(struct perf_event_context *ctx,
+@@ -12918,6 +12936,10 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
+ struct perf_event_context *src_ctx, *dst_ctx;
+ LIST_HEAD(events);
+
++ /*
++ * Since per-cpu context is persistent, no need to grab an extra
++ * reference.
++ */
+ src_ctx = &per_cpu_ptr(&perf_cpu_context, src_cpu)->ctx;
+ dst_ctx = &per_cpu_ptr(&perf_cpu_context, dst_cpu)->ctx;
+
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index fb1e180b5f0af..e8d82c2f07d0e 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -700,6 +700,12 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
+ watermark = 0;
+ }
+
++ /*
++ * kcalloc_node() is unable to allocate a buffer if the size is larger
++ * than PAGE_SIZE << MAX_ORDER; bail out directly in this case.
++ */
++ if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER)
++ return -ENOMEM;
+ rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
+ node);
+ if (!rb->aux_pages)
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 3b6d20dfb9a85..177ce7438db6b 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1288,7 +1288,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
+ hugetlb_count_init(mm);
+
+ if (current->mm) {
+- mm->flags = current->mm->flags & MMF_INIT_MASK;
++ mm->flags = mmf_init_flags(current->mm->flags);
+ mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
+ } else {
+ mm->flags = default_dump_filter;
+diff --git a/kernel/futex/core.c b/kernel/futex/core.c
+index f10587d1d4817..f30a93e50f65e 100644
+--- a/kernel/futex/core.c
++++ b/kernel/futex/core.c
+@@ -248,7 +248,17 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
+ * but access_ok() should be faster than find_vma()
+ */
+ if (!fshared) {
+- key->private.mm = mm;
++ /*
++ * On no-MMU, shared futexes are treated as private, therefore
++ * we must not include the current process in the key. Since
++ * there is only one address space, the address is a unique key
++ * on its own.
++ */
++ if (IS_ENABLED(CONFIG_MMU))
++ key->private.mm = mm;
++ else
++ key->private.mm = NULL;
++
+ key->private.address = address;
+ return 0;
+ }
+diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
+index 5971a66be0347..aae0402507ed7 100644
+--- a/kernel/irq/debugfs.c
++++ b/kernel/irq/debugfs.c
+@@ -121,7 +121,6 @@ static const struct irq_bit_descr irqdata_states[] = {
+ BIT_MASK_DESCR(IRQD_AFFINITY_ON_ACTIVATE),
+ BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
+ BIT_MASK_DESCR(IRQD_CAN_RESERVE),
+- BIT_MASK_DESCR(IRQD_MSI_NOMASK_QUIRK),
+
+ BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
+
+diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
+index c653cd31548d0..5a452b94b6434 100644
+--- a/kernel/irq/generic-chip.c
++++ b/kernel/irq/generic-chip.c
+@@ -544,21 +544,34 @@ EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
+ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
+ unsigned int clr, unsigned int set)
+ {
+- unsigned int i = gc->irq_base;
++ unsigned int i, virq;
+
+ raw_spin_lock(&gc_lock);
+ list_del(&gc->list);
+ raw_spin_unlock(&gc_lock);
+
+- for (; msk; msk >>= 1, i++) {
++ for (i = 0; msk; msk >>= 1, i++) {
+ if (!(msk & 0x01))
+ continue;
+
++ /*
++ * Interrupt domain based chips store the base hardware
++ * interrupt number in gc::irq_base. Otherwise gc::irq_base
++ * contains the base Linux interrupt number.
++ */
++ if (gc->domain) {
++ virq = irq_find_mapping(gc->domain, gc->irq_base + i);
++ if (!virq)
++ continue;
++ } else {
++ virq = gc->irq_base + i;
++ }
++
+ /* Remove handler first. That will mask the irq line */
+- irq_set_handler(i, NULL);
+- irq_set_chip(i, &no_irq_chip);
+- irq_set_chip_data(i, NULL);
+- irq_modify_status(i, clr, set);
++ irq_set_handler(virq, NULL);
++ irq_set_chip(virq, &no_irq_chip);
++ irq_set_chip_data(virq, NULL);
++ irq_modify_status(virq, clr, set);
+ }
+ }
+ EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
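To make the hwirq/virq distinction above concrete, a worked instance with illustrative numbers: a chip with gc->irq_base = 16 and msk = 0x5 walks bits 0 and 2, i.e. hardware interrupts 16 and 18. Without a domain these are already Linux interrupt numbers; with a domain each one is translated first:

    virq = irq_find_mapping(gc->domain, 16);  /* hwirq 16 -> e.g. virq 45 */
    virq = irq_find_mapping(gc->domain, 18);  /* hwirq 18 -> e.g. virq 47 */

A hwirq without a mapping yields 0 and is now skipped, instead of tearing down an unrelated Linux interrupt whose number happened to match the hardware one.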
+diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
+index 1698e77645acf..75d0ae490e29c 100644
+--- a/kernel/irq/matrix.c
++++ b/kernel/irq/matrix.c
+@@ -466,16 +466,16 @@ unsigned int irq_matrix_reserved(struct irq_matrix *m)
+ }
+
+ /**
+- * irq_matrix_allocated - Get the number of allocated irqs on the local cpu
++ * irq_matrix_allocated - Get the number of allocated non-managed irqs on the local CPU
+ * @m: Pointer to the matrix to search
+ *
+- * This returns number of allocated irqs
++ * This returns number of allocated non-managed interrupts.
+ */
+ unsigned int irq_matrix_allocated(struct irq_matrix *m)
+ {
+ struct cpumap *cm = this_cpu_ptr(m->maps);
+
+- return cm->allocated;
++ return cm->allocated - cm->managed_allocated;
+ }
+
+ #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
+index b4c31a5c11473..79b4a58ba9c3f 100644
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -1204,7 +1204,6 @@ static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
+
+ #define VIRQ_CAN_RESERVE 0x01
+ #define VIRQ_ACTIVATE 0x02
+-#define VIRQ_NOMASK_QUIRK 0x04
+
+ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
+ {
+@@ -1213,8 +1212,6 @@ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflag
+
+ if (!(vflags & VIRQ_CAN_RESERVE)) {
+ irqd_clr_can_reserve(irqd);
+- if (vflags & VIRQ_NOMASK_QUIRK)
+- irqd_set_msi_nomask_quirk(irqd);
+
+ /*
+ * If the interrupt is managed but no CPU is available to
+@@ -1275,15 +1272,8 @@ static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain
+ * Interrupt can use a reserved vector and will not occupy
+ * a real device vector until the interrupt is requested.
+ */
+- if (msi_check_reservation_mode(domain, info, dev)) {
++ if (msi_check_reservation_mode(domain, info, dev))
+ vflags |= VIRQ_CAN_RESERVE;
+- /*
+- * MSI affinity setting requires a special quirk (X86) when
+- * reservation mode is active.
+- */
+- if (info->flags & MSI_FLAG_NOMASK_QUIRK)
+- vflags |= VIRQ_NOMASK_QUIRK;
+- }
+
+ xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
+ if (!msi_desc_match(desc, MSI_DESC_NOTASSOCIATED))
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index 107f355eac101..8f35a5a42af85 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -247,7 +247,7 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
+ ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
+ return -EINVAL;
+
+- ksegments = memdup_user(segments, nr_segments * sizeof(ksegments[0]));
++ ksegments = memdup_array_user(segments, nr_segments, sizeof(ksegments[0]));
+ if (IS_ERR(ksegments))
+ return PTR_ERR(ksegments);
+
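The point of memdup_array_user() is that the element count and size are multiplied with an overflow check rather than a raw nr_segments * sizeof(...) expression; roughly, it behaves like this open-coded sketch (check_mul_overflow() is from linux/overflow.h):

    size_t bytes;

    if (check_mul_overflow(nr_segments, sizeof(ksegments[0]), &bytes))
        ksegments = ERR_PTR(-EOVERFLOW);           /* count * size wrapped */
    else
        ksegments = memdup_user(segments, bytes);  /* bytes cannot wrap */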
+diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
+index 61328328c474c..ecbc9b6aba3a1 100644
+--- a/kernel/livepatch/core.c
++++ b/kernel/livepatch/core.c
+@@ -243,7 +243,7 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
+ * symbols are exported and normal relas can be used instead.
+ */
+ if (!sec_vmlinux && sym_vmlinux) {
+- pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section",
++ pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
+ sym_name);
+ return -EINVAL;
+ }
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index e85b5ad3e2069..151bd3de59363 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3497,7 +3497,8 @@ static int alloc_chain_hlocks(int req)
+ size = chain_block_size(curr);
+ if (likely(size >= req)) {
+ del_chain_block(0, size, chain_block_next(curr));
+- add_chain_block(curr + req, size - req);
++ if (size > req)
++ add_chain_block(curr + req, size - req);
+ return curr;
+ }
+ }
+diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
+index 93cca6e698600..7c5a8f05497f2 100644
+--- a/kernel/locking/test-ww_mutex.c
++++ b/kernel/locking/test-ww_mutex.c
+@@ -466,7 +466,6 @@ retry:
+ } while (!time_after(jiffies, stress->timeout));
+
+ kfree(order);
+- kfree(stress);
+ }
+
+ struct reorder_lock {
+@@ -531,7 +530,6 @@ out:
+ list_for_each_entry_safe(ll, ln, &locks, link)
+ kfree(ll);
+ kfree(order);
+- kfree(stress);
+ }
+
+ static void stress_one_work(struct work_struct *work)
+@@ -552,8 +550,6 @@ static void stress_one_work(struct work_struct *work)
+ break;
+ }
+ } while (!time_after(jiffies, stress->timeout));
+-
+- kfree(stress);
+ }
+
+ #define STRESS_INORDER BIT(0)
+@@ -564,15 +560,24 @@ static void stress_one_work(struct work_struct *work)
+ static int stress(int nlocks, int nthreads, unsigned int flags)
+ {
+ struct ww_mutex *locks;
+- int n;
++ struct stress *stress_array;
++ int n, count;
+
+ locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
+ if (!locks)
+ return -ENOMEM;
+
++ stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
++ GFP_KERNEL);
++ if (!stress_array) {
++ kfree(locks);
++ return -ENOMEM;
++ }
++
+ for (n = 0; n < nlocks; n++)
+ ww_mutex_init(&locks[n], &ww_class);
+
++ count = 0;
+ for (n = 0; nthreads; n++) {
+ struct stress *stress;
+ void (*fn)(struct work_struct *work);
+@@ -596,9 +601,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
+ if (!fn)
+ continue;
+
+- stress = kmalloc(sizeof(*stress), GFP_KERNEL);
+- if (!stress)
+- break;
++ stress = &stress_array[count++];
+
+ INIT_WORK(&stress->work, fn);
+ stress->locks = locks;
+@@ -613,6 +616,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
+
+ for (n = 0; n < nlocks; n++)
+ ww_mutex_destroy(&locks[n]);
++ kfree(stress_array);
+ kfree(locks);
+
+ return 0;
+diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c
+index 87440f714c0ca..474e68f0f0634 100644
+--- a/kernel/module/decompress.c
++++ b/kernel/module/decompress.c
+@@ -100,7 +100,7 @@ static ssize_t module_gzip_decompress(struct load_info *info,
+ s.next_in = buf + gzip_hdr_len;
+ s.avail_in = size - gzip_hdr_len;
+
+- s.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
++ s.workspace = kvmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+ if (!s.workspace)
+ return -ENOMEM;
+
+@@ -138,7 +138,7 @@ static ssize_t module_gzip_decompress(struct load_info *info,
+ out_inflate_end:
+ zlib_inflateEnd(&s);
+ out:
+- kfree(s.workspace);
++ kvfree(s.workspace);
+ return retval;
+ }
+ #elif defined(CONFIG_MODULE_COMPRESS_XZ)
+@@ -241,7 +241,7 @@ static ssize_t module_zstd_decompress(struct load_info *info,
+ }
+
+ wksp_size = zstd_dstream_workspace_bound(header.windowSize);
+- wksp = vmalloc(wksp_size);
++ wksp = kvmalloc(wksp_size, GFP_KERNEL);
+ if (!wksp) {
+ retval = -ENOMEM;
+ goto out;
+@@ -284,7 +284,7 @@ static ssize_t module_zstd_decompress(struct load_info *info,
+ retval = new_size;
+
+ out:
+- vfree(wksp);
++ kvfree(wksp);
+ return retval;
+ }
+ #else
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 222d60195de66..179fb1518070c 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -202,7 +202,7 @@ int padata_do_parallel(struct padata_shell *ps,
+ *cb_cpu = cpu;
+ }
+
+- err = -EBUSY;
++ err = -EBUSY;
+ if ((pinst->flags & PADATA_RESET))
+ goto out;
+
+@@ -1102,12 +1102,16 @@ EXPORT_SYMBOL(padata_alloc_shell);
+ */
+ void padata_free_shell(struct padata_shell *ps)
+ {
++ struct parallel_data *pd;
++
+ if (!ps)
+ return;
+
+ mutex_lock(&ps->pinst->lock);
+ list_del(&ps->list);
+- padata_free_pd(rcu_dereference_protected(ps->pd, 1));
++ pd = rcu_dereference_protected(ps->pd, 1);
++ if (refcount_dec_and_test(&pd->refcnt))
++ padata_free_pd(pd);
+ mutex_unlock(&ps->pinst->lock);
+
+ kfree(ps);
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index 0f12e0a97e432..50a15408c3fca 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -2545,8 +2545,9 @@ static void *get_highmem_page_buffer(struct page *page,
+ pbe->copy_page = tmp;
+ } else {
+ /* Copy of the page will be stored in normal memory */
+- kaddr = safe_pages_list;
+- safe_pages_list = safe_pages_list->next;
++ kaddr = __get_safe_page(ca->gfp_mask);
++ if (!kaddr)
++ return ERR_PTR(-ENOMEM);
+ pbe->copy_page = virt_to_page(kaddr);
+ }
+ pbe->next = highmem_pblist;
+@@ -2750,8 +2751,9 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
+ return ERR_PTR(-ENOMEM);
+ }
+ pbe->orig_address = page_address(page);
+- pbe->address = safe_pages_list;
+- safe_pages_list = safe_pages_list->next;
++ pbe->address = __get_safe_page(ca->gfp_mask);
++ if (!pbe->address)
++ return ERR_PTR(-ENOMEM);
+ pbe->next = restore_pblist;
+ restore_pblist = pbe;
+ return pbe->address;
+@@ -2783,8 +2785,6 @@ next:
+ if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages)
+ return 0;
+
+- handle->sync_read = 1;
+-
+ if (!handle->cur) {
+ if (!buffer)
+ /* This makes the buffer be freed by swsusp_free() */
+@@ -2827,7 +2827,6 @@ next:
+ memory_bm_position_reset(&zero_bm);
+ restore_pblist = NULL;
+ handle->buffer = get_buffer(&orig_bm, &ca);
+- handle->sync_read = 0;
+ if (IS_ERR(handle->buffer))
+ return PTR_ERR(handle->buffer);
+ }
+@@ -2837,9 +2836,8 @@ next:
+ handle->buffer = get_buffer(&orig_bm, &ca);
+ if (IS_ERR(handle->buffer))
+ return PTR_ERR(handle->buffer);
+- if (handle->buffer != buffer)
+- handle->sync_read = 0;
+ }
++ handle->sync_read = (handle->buffer == buffer);
+ handle->cur++;
+
+ /* Zero pages were not included in the image, memset it and move on. */
+diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
+index 20d7a238d675a..25285893e44e7 100644
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -223,7 +223,7 @@ static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
+ snp->grplo = cpu;
+ snp->grphi = cpu;
+ }
+- sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
++ sdp->grpmask = 1UL << (cpu - sdp->mynode->grplo);
+ }
+ smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
+ return true;
+@@ -782,8 +782,7 @@ static void srcu_gp_start(struct srcu_struct *ssp)
+ spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
+ rcu_segcblist_advance(&sdp->srcu_cblist,
+ rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
+- rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
++ WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
+ spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
+ WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
+ WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
+@@ -833,7 +832,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp
+ int cpu;
+
+ for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
+- if (!(mask & (1 << (cpu - snp->grplo))))
++ if (!(mask & (1UL << (cpu - snp->grplo))))
+ continue;
+ srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
+ }
+@@ -1242,10 +1241,37 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
+ spin_lock_irqsave_sdp_contention(sdp, &flags);
+ if (rhp)
+ rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
++ /*
++ * The snapshot for acceleration must be taken _before_ the read of the
++ * current gp sequence used for advancing, otherwise advancing may fail
++ * and acceleration may then fail too.
++ *
++ * This could happen if:
++ *
++ * 1) The RCU_WAIT_TAIL segment has callbacks (gp_num = X + 4) and the
++ * RCU_NEXT_READY_TAIL also has callbacks (gp_num = X + 8).
++ *
++ * 2) The grace period for RCU_WAIT_TAIL is seen as started but not
++ * completed so rcu_seq_current() returns X + SRCU_STATE_SCAN1.
++ *
++ * 3) This value is passed to rcu_segcblist_advance() which can't move
++ * any segment forward and fails.
++ *
++ * 4) srcu_gp_start_if_needed() still proceeds with callback acceleration.
++ * But then the call to rcu_seq_snap() observes the grace period for the
++ * RCU_WAIT_TAIL segment as completed and the subsequent one for the
++ * RCU_NEXT_READY_TAIL segment as started (ie: X + 4 + SRCU_STATE_SCAN1)
++ * so it returns a snapshot of the next grace period, which is X + 12.
++ *
++ * 5) The value of X + 12 is passed to rcu_segcblist_accelerate() but the
++ * freshly enqueued callback in RCU_NEXT_TAIL can't move to
++ * RCU_NEXT_READY_TAIL which already has callbacks for a previous grace
++ * period (gp_num = X + 8). So acceleration fails.
++ */
++ s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
+ rcu_segcblist_advance(&sdp->srcu_cblist,
+ rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+- s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
+- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
++ WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s) && rhp);
+ if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
+ sdp->srcu_gp_seq_needed = s;
+ needgp = true;
+@@ -1692,6 +1718,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
+ ssp = sdp->ssp;
+ rcu_cblist_init(&ready_cbs);
+ spin_lock_irq_rcu_node(sdp);
++ WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
+ rcu_segcblist_advance(&sdp->srcu_cblist,
+ rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+ if (sdp->srcu_cblist_invoking ||
+@@ -1720,8 +1747,6 @@ static void srcu_invoke_callbacks(struct work_struct *work)
+ */
+ spin_lock_irq_rcu_node(sdp);
+ rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
+- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
+- rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
+ sdp->srcu_cblist_invoking = false;
+ more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
+ spin_unlock_irq_rcu_node(sdp);
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index cb1caefa8bd07..7b4517dc46579 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -31,6 +31,7 @@
+ #include <linux/bitops.h>
+ #include <linux/export.h>
+ #include <linux/completion.h>
++#include <linux/kmemleak.h>
+ #include <linux/moduleparam.h>
+ #include <linux/panic.h>
+ #include <linux/panic_notifier.h>
+@@ -1556,10 +1557,22 @@ static bool rcu_gp_fqs_check_wake(int *gfp)
+ */
+ static void rcu_gp_fqs(bool first_time)
+ {
++ int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
+ struct rcu_node *rnp = rcu_get_root();
+
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
+ WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
++
++ WARN_ON_ONCE(nr_fqs > 3);
++ /* Only count down nr_fqs for stall purposes if jiffies moves. */
++ if (nr_fqs) {
++ if (nr_fqs == 1) {
++ WRITE_ONCE(rcu_state.jiffies_stall,
++ jiffies + rcu_jiffies_till_stall_check());
++ }
++ WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
++ }
++
+ if (first_time) {
+ /* Collect dyntick-idle snapshots. */
+ force_qs_rnp(dyntick_save_progress_counter);
+@@ -3388,6 +3401,14 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
+ success = true;
+ }
+
++ /*
++ * The kvfree_rcu() caller considers the pointer freed at this point
++ * and likely removes any references to it. Since the actual slab
++ * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
++ * this object (no scanning or false positives reporting).
++ */
++ kmemleak_ignore(ptr);
++
+ // Set timer to drain after KFREE_DRAIN_JIFFIES.
+ if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
+ schedule_delayed_monitor_work(krcp);
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index 192536916f9a6..e9821a8422dbe 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -386,6 +386,10 @@ struct rcu_state {
+ /* in jiffies. */
+ unsigned long jiffies_stall; /* Time at which to check */
+ /* for CPU stalls. */
++ int nr_fqs_jiffies_stall; /* Number of fqs loops after
++ * which jiffies is read and
++ * jiffies_stall is set. Stall
++ * warnings disabled if !0. */
+ unsigned long jiffies_resched; /* Time at which to resched */
+ /* a reluctant CPU. */
+ unsigned long n_force_qs_gpstart; /* Snapshot of n_force_qs at */
+diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
+index 6f06dc12904ad..e09f4f624261e 100644
+--- a/kernel/rcu/tree_stall.h
++++ b/kernel/rcu/tree_stall.h
+@@ -149,12 +149,17 @@ static void panic_on_rcu_stall(void)
+ /**
+ * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
+ *
++ * To honor the reset request from the caller, disable stall detection
++ * until 3 fqs loops have passed. This is required to ensure that a fresh
++ * jiffies value is loaded. It should be safe to do this from the fqs
++ * loop, as enough timer interrupts and context switches should have passed.
++ *
+ * The caller must disable hard irqs.
+ */
+ void rcu_cpu_stall_reset(void)
+ {
+- WRITE_ONCE(rcu_state.jiffies_stall,
+- jiffies + rcu_jiffies_till_stall_check());
++ WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 3);
++ WRITE_ONCE(rcu_state.jiffies_stall, ULONG_MAX);
+ }
+
+ //////////////////////////////////////////////////////////////////////////////
+@@ -170,6 +175,7 @@ static void record_gp_stall_check_time(void)
+ WRITE_ONCE(rcu_state.gp_start, j);
+ j1 = rcu_jiffies_till_stall_check();
+ smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
++ WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 0);
+ WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
+ rcu_state.jiffies_resched = j + j1 / 2;
+ rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
+@@ -725,6 +731,16 @@ static void check_cpu_stall(struct rcu_data *rdp)
+ !rcu_gp_in_progress())
+ return;
+ rcu_stall_kick_kthreads();
++
++ /*
++ * Check if it was requested (via rcu_cpu_stall_reset()) that the FQS
++ * loop has to set jiffies to ensure a non-stale jiffies value. This
++ * is required to have a good jiffies value after coming out of long
++ * gaps in jiffies updates. Not doing so can cause false positives.
++ */
++ if (READ_ONCE(rcu_state.nr_fqs_jiffies_stall) > 0)
++ return;
++
+ j = jiffies;
+
+ /*
+diff --git a/kernel/reboot.c b/kernel/reboot.c
+index 3bba88c7ffc6b..6ebef11c88760 100644
+--- a/kernel/reboot.c
++++ b/kernel/reboot.c
+@@ -74,6 +74,7 @@ void __weak (*pm_power_off)(void);
+ void emergency_restart(void)
+ {
+ kmsg_dump(KMSG_DUMP_EMERG);
++ system_state = SYSTEM_RESTART;
+ machine_emergency_restart();
+ }
+ EXPORT_SYMBOL_GPL(emergency_restart);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 802551e0009bf..a854b71836dd5 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2664,9 +2664,11 @@ static int migration_cpu_stop(void *data)
+ * it.
+ */
+ WARN_ON_ONCE(!pending->stop_pending);
++ preempt_disable();
+ task_rq_unlock(rq, p, &rf);
+ stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
+ &pending->arg, &pending->stop_work);
++ preempt_enable();
+ return 0;
+ }
+ out:
+@@ -2986,12 +2988,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ complete = true;
+ }
+
++ preempt_disable();
+ task_rq_unlock(rq, p, rf);
+-
+ if (push_task) {
+ stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
+ p, &rq->push_work);
+ }
++ preempt_enable();
+
+ if (complete)
+ complete_all(&pending->done);
+@@ -3057,12 +3060,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ if (flags & SCA_MIGRATE_ENABLE)
+ p->migration_flags &= ~MDF_PUSH;
+
++ preempt_disable();
+ task_rq_unlock(rq, p, rf);
+-
+ if (!stop_pending) {
+ stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
+ &pending->arg, &pending->stop_work);
+ }
++ preempt_enable();
+
+ if (flags & SCA_MIGRATE_ENABLE)
+ return 0;
+@@ -5374,8 +5378,6 @@ context_switch(struct rq *rq, struct task_struct *prev,
+ /* switch_mm_cid() requires the memory barriers above. */
+ switch_mm_cid(rq, prev, next);
+
+- rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
+-
+ prepare_lock_switch(rq, next, rf);
+
+ /* Here we just switch the register state and the stack. */
+@@ -6615,6 +6617,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+ /* Promote REQ to ACT */
+ rq->clock_update_flags <<= 1;
+ update_rq_clock(rq);
++ rq->clock_update_flags = RQCF_UPDATED;
+
+ switch_count = &prev->nivcsw;
+
+@@ -6694,8 +6697,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+ /* Also unlocks the rq: */
+ rq = context_switch(rq, prev, next, &rf);
+ } else {
+- rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
+-
+ rq_unpin_lock(rq, &rf);
+ __balance_callbacks(rq);
+ raw_spin_rq_unlock_irq(rq);
+@@ -9505,9 +9506,11 @@ static void balance_push(struct rq *rq)
+ * Temporarily drop rq->lock such that we can wake-up the stop task.
+ * Both preemption and IRQs are still disabled.
+ */
++ preempt_disable();
+ raw_spin_rq_unlock(rq);
+ stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
+ this_cpu_ptr(&push_work));
++ preempt_enable();
+ /*
+ * At this point need_resched() is true and we'll take the loop in
+ * schedule(). The next pick is obviously going to be the stop task
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 58b542bf28934..d78f2e8769fb4 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2449,9 +2449,11 @@ skip:
+ double_unlock_balance(this_rq, src_rq);
+
+ if (push_task) {
++ preempt_disable();
+ raw_spin_rq_unlock(this_rq);
+ stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
+ push_task, &src_rq->push_work);
++ preempt_enable();
+ raw_spin_rq_lock(this_rq);
+ }
+ }
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index df348aa55d3c7..fa9fff0f9620d 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3626,41 +3626,140 @@ static inline void
+ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+ #endif
+
++static void reweight_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *se,
++ unsigned long weight)
++{
++ unsigned long old_weight = se->load.weight;
++ u64 avruntime = avg_vruntime(cfs_rq);
++ s64 vlag, vslice;
++
++ /*
++ * VRUNTIME
++ * ========
++ *
++ * COROLLARY #1: The virtual runtime of the entity needs to be
++ * adjusted when re-weighting at a !0-lag point.
++ *
++ * Proof: For contradiction, assume this is not true, so we can
++ * re-weight without changing vruntime at a !0-lag point.
++ *
++ * Weight VRuntime Avg-VRuntime
++ * before w v V
++ * after w' v' V'
++ *
++ * Since lag needs to be preserved through re-weight:
++ *
++ * lag = (V - v)*w = (V'- v')*w', where v = v'
++ * ==> V' = (V - v)*w/w' + v (1)
++ *
++ * Let W be the total weight of the entities before reweight,
++ * since V' is the new weighted average of entities:
++ *
++ * V' = (WV + w'v - wv) / (W + w' - w) (2)
++ *
++ * by using (1) & (2) we obtain:
++ *
++ * (WV + w'v - wv) / (W + w' - w) = (V - v)*w/w' + v
++ * ==> (WV-Wv+Wv+w'v-wv)/(W+w'-w) = (V - v)*w/w' + v
++ * ==> (WV - Wv)/(W + w' - w) + v = (V - v)*w/w' + v
++ * ==> (V - v)*W/(W + w' - w) = (V - v)*w/w' (3)
++ *
++ * Since we are doing this at a !0-lag point, which means V != v,
++ * we can simplify (3):
++ *
++ * ==> W / (W + w' - w) = w / w'
++ * ==> Ww' = Ww + ww' - ww
++ * ==> W * (w' - w) = w * (w' - w)
++ * ==> W = w (re-weight indicates w' != w)
++ *
++ * So the cfs_rq contains only one entity, hence the vruntime
++ * of the entity @v should always equal the cfs_rq's weighted
++ * average vruntime @V, which means we would always re-weight
++ * at the 0-lag point, breaching the assumption. Proof completed.
++ *
++ *
++ * COROLLARY #2: Re-weight does NOT affect weighted average
++ * vruntime of all the entities.
++ *
++ * Proof: According to corollary #1, Eq. (1) should be:
++ *
++ * (V - v)*w = (V' - v')*w'
++ * ==> v' = V' - (V - v)*w/w' (4)
++ *
++ * According to the weighted average formula, we have:
++ *
++ * V' = (WV - wv + w'v') / (W - w + w')
++ * = (WV - wv + w'(V' - (V - v)w/w')) / (W - w + w')
++ * = (WV - wv + w'V' - Vw + wv) / (W - w + w')
++ * = (WV + w'V' - Vw) / (W - w + w')
++ *
++ * ==> V'*(W - w + w') = WV + w'V' - Vw
++ * ==> V' * (W - w) = (W - w) * V (5)
++ *
++ * If the entity is the only one in the cfs_rq, then reweight
++ * always occurs at the 0-lag point, so V won't change. Otherwise
++ * there are other entities, hence W != w, and Eq. (5) turns
++ * into V' = V. So V won't change in either case; proof done.
++ *
++ *
++ * So according to corollary #1 & #2, the effect of re-weight
++ * on vruntime should be:
++ *
++ * v' = V' - (V - v) * w / w' (4)
++ * = V - (V - v) * w / w'
++ * = V - vl * w / w'
++ * = V - vl'
++ */
++ if (avruntime != se->vruntime) {
++ vlag = (s64)(avruntime - se->vruntime);
++ vlag = div_s64(vlag * old_weight, weight);
++ se->vruntime = avruntime - vlag;
++ }
++
++ /*
++ * DEADLINE
++ * ========
++ *
++ * When the weight changes, the virtual time slope changes and
++ * we should adjust the relative virtual deadline accordingly.
++ *
++ * d' = v' + (d - v)*w/w'
++ * = V' - (V - v)*w/w' + (d - v)*w/w'
++ * = V - (V - v)*w/w' + (d - v)*w/w'
++ * = V + (d - V)*w/w'
++ */
++ vslice = (s64)(se->deadline - avruntime);
++ vslice = div_s64(vslice * old_weight, weight);
++ se->deadline = avruntime + vslice;
++}
++
+ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ unsigned long weight)
+ {
+- unsigned long old_weight = se->load.weight;
++ bool curr = cfs_rq->curr == se;
+
+ if (se->on_rq) {
+ /* commit outstanding execution time */
+- if (cfs_rq->curr == se)
++ if (curr)
+ update_curr(cfs_rq);
+ else
+- avg_vruntime_sub(cfs_rq, se);
++ __dequeue_entity(cfs_rq, se);
+ update_load_sub(&cfs_rq->load, se->load.weight);
+ }
+ dequeue_load_avg(cfs_rq, se);
+
+- update_load_set(&se->load, weight);
+-
+ if (!se->on_rq) {
+ /*
+ * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
+ * we need to scale se->vlag when w_i changes.
+ */
+- se->vlag = div_s64(se->vlag * old_weight, weight);
++ se->vlag = div_s64(se->vlag * se->load.weight, weight);
+ } else {
+- s64 deadline = se->deadline - se->vruntime;
+- /*
+- * When the weight changes, the virtual time slope changes and
+- * we should adjust the relative virtual deadline accordingly.
+- */
+- deadline = div_s64(deadline * old_weight, weight);
+- se->deadline = se->vruntime + deadline;
+- if (se != cfs_rq->curr)
+- min_deadline_cb_propagate(&se->run_node, NULL);
++ reweight_eevdf(cfs_rq, se, weight);
+ }
+
++ update_load_set(&se->load, weight);
++
+ #ifdef CONFIG_SMP
+ do {
+ u32 divider = get_pelt_divider(&se->avg);
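As a quick numeric check of the vruntime and deadline formulas derived above (values are illustrative only): let V = 100, v = 94, d = 106, and re-weight from w = 2 to w' = 4.

    vlag = V - v = 6;  scaled: 6 * w / w' = 6 * 2 / 4 = 3;  v' = 100 - 3 = 97
    lag preserved: (V - v) * w = 6 * 2 = 12 = (V' - v') * w' = (100 - 97) * 4
    d' = V + (d - V) * w / w' = 100 + 6 * 2 / 4 = 103

The relative deadline d' - v' = 6 is half of the old d - v = 12, matching the steeper virtual-time slope of the doubled weight.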
+@@ -3672,8 +3771,17 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ enqueue_load_avg(cfs_rq, se);
+ if (se->on_rq) {
+ update_load_add(&cfs_rq->load, se->load.weight);
+- if (cfs_rq->curr != se)
+- avg_vruntime_add(cfs_rq, se);
++ if (!curr) {
++ /*
++ * The entity's vruntime has been adjusted, so let's check
++ * whether the rq-wide min_vruntime needs to be updated too.
++ * Since the calculations above require a stable min_vruntime
++ * rather than an up-to-date one, we do the update at the
++ * end of the reweight process.
++ */
++ __enqueue_entity(cfs_rq, se);
++ update_min_vruntime(cfs_rq);
++ }
+ }
+ }
+
+@@ -3817,14 +3925,11 @@ static void update_cfs_group(struct sched_entity *se)
+
+ #ifndef CONFIG_SMP
+ shares = READ_ONCE(gcfs_rq->tg->shares);
+-
+- if (likely(se->load.weight == shares))
+- return;
+ #else
+- shares = calc_group_shares(gcfs_rq);
++ shares = calc_group_shares(gcfs_rq);
+ #endif
+-
+- reweight_entity(cfs_rq_of(se), se, shares);
++ if (unlikely(se->load.weight != shares))
++ reweight_entity(cfs_rq_of(se), se, shares);
+ }
+
+ #else /* CONFIG_FAIR_GROUP_SCHED */
+@@ -4626,22 +4731,6 @@ static inline unsigned long task_util_est(struct task_struct *p)
+ return max(task_util(p), _task_util_est(p));
+ }
+
+-#ifdef CONFIG_UCLAMP_TASK
+-static inline unsigned long uclamp_task_util(struct task_struct *p,
+- unsigned long uclamp_min,
+- unsigned long uclamp_max)
+-{
+- return clamp(task_util_est(p), uclamp_min, uclamp_max);
+-}
+-#else
+-static inline unsigned long uclamp_task_util(struct task_struct *p,
+- unsigned long uclamp_min,
+- unsigned long uclamp_max)
+-{
+- return task_util_est(p);
+-}
+-#endif
+-
+ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
+ struct task_struct *p)
+ {
+@@ -4932,7 +5021,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+
+ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+ {
+- return true;
++ return !cfs_rq->nr_running;
+ }
+
+ #define UPDATE_TG 0x0
+@@ -7756,7 +7845,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ target = prev_cpu;
+
+ sync_entity_load_avg(&p->se);
+- if (!uclamp_task_util(p, p_util_min, p_util_max))
++ if (!task_util_est(p) && p_util_min == 0)
+ goto unlock;
+
+ eenv_task_busy_time(&eenv, p, prev_cpu);
+@@ -7764,11 +7853,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ for (; pd; pd = pd->next) {
+ unsigned long util_min = p_util_min, util_max = p_util_max;
+ unsigned long cpu_cap, cpu_thermal_cap, util;
+- unsigned long cur_delta, max_spare_cap = 0;
++ long prev_spare_cap = -1, max_spare_cap = -1;
+ unsigned long rq_util_min, rq_util_max;
+- unsigned long prev_spare_cap = 0;
++ unsigned long cur_delta, base_energy;
+ int max_spare_cap_cpu = -1;
+- unsigned long base_energy;
+ int fits, max_fits = -1;
+
+ cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
+@@ -7831,7 +7919,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ prev_spare_cap = cpu_cap;
+ prev_fits = fits;
+ } else if ((fits > max_fits) ||
+- ((fits == max_fits) && (cpu_cap > max_spare_cap))) {
++ ((fits == max_fits) && ((long)cpu_cap > max_spare_cap))) {
+ /*
+ * Find the CPU with the maximum spare capacity
+ * among the remaining CPUs in the performance
+@@ -7843,7 +7931,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ }
+ }
+
+- if (max_spare_cap_cpu < 0 && prev_spare_cap == 0)
++ if (max_spare_cap_cpu < 0 && prev_spare_cap < 0)
+ continue;
+
+ eenv_pd_busy_time(&eenv, cpus, p);
+@@ -7851,7 +7939,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ base_energy = compute_energy(&eenv, pd, cpus, p, -1);
+
+ /* Evaluate the energy impact of using prev_cpu. */
+- if (prev_spare_cap > 0) {
++ if (prev_spare_cap > -1) {
+ prev_delta = compute_energy(&eenv, pd, cpus, p,
+ prev_cpu);
+ /* CPU utilization has changed */
+@@ -11033,12 +11121,16 @@ static int should_we_balance(struct lb_env *env)
+ continue;
+ }
+
+- /* Are we the first idle CPU? */
++ /*
++ * Are we the first idle core in a non-SMT domain or higher,
++ * or the first idle CPU in an SMT domain?
++ */
+ return cpu == env->dst_cpu;
+ }
+
+- if (idle_smt == env->dst_cpu)
+- return true;
++ /* Are we the first idle CPU with busy siblings? */
++ if (idle_smt != -1)
++ return idle_smt == env->dst_cpu;
+
+ /* Are we the first CPU of this group ? */
+ return group_balance_cpu(sg) == env->dst_cpu;
+@@ -11251,13 +11343,15 @@ more_balance:
+ busiest->push_cpu = this_cpu;
+ active_balance = 1;
+ }
+- raw_spin_rq_unlock_irqrestore(busiest, flags);
+
++ preempt_disable();
++ raw_spin_rq_unlock_irqrestore(busiest, flags);
+ if (active_balance) {
+ stop_one_cpu_nowait(cpu_of(busiest),
+ active_load_balance_cpu_stop, busiest,
+ &busiest->active_balance_work);
+ }
++ preempt_enable();
+ }
+ } else {
+ sd->nr_balance_failed = 0;
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 0597ba0f85ff3..904dd85345973 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -2109,9 +2109,11 @@ retry:
+ */
+ push_task = get_push_task(rq);
+ if (push_task) {
++ preempt_disable();
+ raw_spin_rq_unlock(rq);
+ stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
+ push_task, &rq->push_work);
++ preempt_enable();
+ raw_spin_rq_lock(rq);
+ }
+
+@@ -2448,9 +2450,11 @@ skip:
+ double_unlock_balance(this_rq, src_rq);
+
+ if (push_task) {
++ preempt_disable();
+ raw_spin_rq_unlock(this_rq);
+ stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
+ push_task, &src_rq->push_work);
++ preempt_enable();
+ raw_spin_rq_lock(this_rq);
+ }
+ }
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 05a5bc678c089..423d08947962c 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -2122,12 +2122,16 @@ static int hop_cmp(const void *a, const void *b)
+ */
+ int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
+ {
+- struct __cmp_key k = { .cpus = cpus, .node = node, .cpu = cpu };
++ struct __cmp_key k = { .cpus = cpus, .cpu = cpu };
+ struct cpumask ***hop_masks;
+ int hop, ret = nr_cpu_ids;
+
+ rcu_read_lock();
+
++ /* CPU-less node entries are uninitialized in sched_domains_numa_masks */
++ node = numa_nearest_node(node, N_CPU);
++ k.node = node;
++
+ k.masks = rcu_dereference(sched_domains_numa_masks);
+ if (!k.masks)
+ goto unlock;
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 8455a53465af8..695eb13a276d2 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -170,6 +170,8 @@ static DEFINE_PER_CPU(void *, cur_csd_info);
+
+ static ulong csd_lock_timeout = 5000; /* CSD lock timeout in milliseconds. */
+ module_param(csd_lock_timeout, ulong, 0444);
++static int panic_on_ipistall; /* CSD panic timeout in milliseconds, 300000 for five minutes. */
++module_param(panic_on_ipistall, int, 0444);
+
+ static atomic_t csd_bug_count = ATOMIC_INIT(0);
+
+@@ -230,6 +232,7 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
+ }
+
+ ts2 = sched_clock();
++ /* How long since we last checked for a stuck CSD lock. */
+ ts_delta = ts2 - *ts1;
+ if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
+ return false;
+@@ -243,9 +246,17 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
+ else
+ cpux = cpu;
+ cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
++ /* How long since this CSD lock was stuck. */
++ ts_delta = ts2 - ts0;
+ pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
+- firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
++ firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts_delta,
+ cpu, csd->func, csd->info);
++ /*
++ * If the CSD lock is still stuck after 5 minutes, it is unlikely
++ * to become unstuck. Use a signed comparison to avoid triggering
++ * on underflows when the TSC is out of sync between sockets.
++ */
++ BUG_ON(panic_on_ipistall > 0 && (s64)ts_delta > ((s64)panic_on_ipistall * NSEC_PER_MSEC));
+ if (cpu_cur_csd && csd != cpu_cur_csd) {
+ pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
+ *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
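Since kernel/smp.c is built in, the new knob is set on the kernel command line with the usual module-parameter prefix; for example, for the five-minute value suggested by the comment above:

    smp.panic_on_ipistall=300000

The 0444 permissions make it boot-time only: the value is visible under /sys/module/smp/parameters/ but cannot be changed at runtime.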
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 2410e3999ebe5..7a4ae6d5aecd5 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -2368,19 +2368,45 @@ static int prctl_set_vma(unsigned long opt, unsigned long start,
+ }
+ #endif /* CONFIG_ANON_VMA_NAME */
+
++static inline unsigned long get_current_mdwe(void)
++{
++ unsigned long ret = 0;
++
++ if (test_bit(MMF_HAS_MDWE, &current->mm->flags))
++ ret |= PR_MDWE_REFUSE_EXEC_GAIN;
++ if (test_bit(MMF_HAS_MDWE_NO_INHERIT, &current->mm->flags))
++ ret |= PR_MDWE_NO_INHERIT;
++
++ return ret;
++}
++
+ static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5)
+ {
++ unsigned long current_bits;
++
+ if (arg3 || arg4 || arg5)
+ return -EINVAL;
+
+- if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN))
++ if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT))
+ return -EINVAL;
+
++ /* NO_INHERIT only makes sense with REFUSE_EXEC_GAIN */
++ if (bits & PR_MDWE_NO_INHERIT && !(bits & PR_MDWE_REFUSE_EXEC_GAIN))
++ return -EINVAL;
++
++ /* PARISC cannot allow mdwe as it needs writable stacks */
++ if (IS_ENABLED(CONFIG_PARISC))
++ return -EINVAL;
++
++ current_bits = get_current_mdwe();
++ if (current_bits && current_bits != bits)
++ return -EPERM; /* Cannot unset the flags */
++
++ if (bits & PR_MDWE_NO_INHERIT)
++ set_bit(MMF_HAS_MDWE_NO_INHERIT, &current->mm->flags);
+ if (bits & PR_MDWE_REFUSE_EXEC_GAIN)
+ set_bit(MMF_HAS_MDWE, &current->mm->flags);
+- else if (test_bit(MMF_HAS_MDWE, &current->mm->flags))
+- return -EPERM; /* Cannot unset the flag */
+
+ return 0;
+ }
+@@ -2390,9 +2416,7 @@ static inline int prctl_get_mdwe(unsigned long arg2, unsigned long arg3,
+ {
+ if (arg2 || arg3 || arg4 || arg5)
+ return -EINVAL;
+-
+- return test_bit(MMF_HAS_MDWE, &current->mm->flags) ?
+- PR_MDWE_REFUSE_EXEC_GAIN : 0;
++ return get_current_mdwe();
+ }
+
+ static int prctl_get_auxv(void __user *addr, unsigned long len)
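From userspace the new bit is combined with the existing one; a minimal sketch (the PR_MDWE_NO_INHERIT definition is new in this series, so its value is guarded below as an assumption for older headers):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_MDWE                       /* from linux/prctl.h */
    #define PR_SET_MDWE              65
    #define PR_GET_MDWE              66
    #define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
    #endif
    #ifndef PR_MDWE_NO_INHERIT                /* new bit; value assumed */
    #define PR_MDWE_NO_INHERIT       (1UL << 1)
    #endif

    int main(void)
    {
        /* Deny W^X violations here, but give children a clean slate: */
        if (prctl(PR_SET_MDWE,
                  PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT, 0L, 0L, 0L))
            perror("PR_SET_MDWE"); /* EINVAL on PARISC, EPERM if bits differ */

        /* Read the current bits back: */
        printf("mdwe bits: %d\n", prctl(PR_GET_MDWE, 0L, 0L, 0L, 0L));
        return 0;
    }

Note that PR_MDWE_NO_INHERIT alone is rejected with -EINVAL; it only makes sense together with PR_MDWE_REFUSE_EXEC_GAIN, as the hunk above enforces.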
+diff --git a/kernel/torture.c b/kernel/torture.c
+index b28b05bbef027..c7b475883b9a8 100644
+--- a/kernel/torture.c
++++ b/kernel/torture.c
+@@ -87,14 +87,15 @@ EXPORT_SYMBOL_GPL(verbose_torout_sleep);
+ * nanosecond random fuzz. This function and its friends desynchronize
+ * testing from the timer wheel.
+ */
+-int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp)
++int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode,
++ struct torture_random_state *trsp)
+ {
+ ktime_t hto = baset_ns;
+
+ if (trsp)
+ hto += torture_random(trsp) % fuzzt_ns;
+ set_current_state(TASK_IDLE);
+- return schedule_hrtimeout(&hto, HRTIMER_MODE_REL);
++ return schedule_hrtimeout(&hto, mode);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_ns);
+
+@@ -106,7 +107,7 @@ int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state
+ {
+ ktime_t baset_ns = baset_us * NSEC_PER_USEC;
+
+- return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
++ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_us);
+
+@@ -123,7 +124,7 @@ int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state
+ fuzzt_ns = (u32)~0U;
+ else
+ fuzzt_ns = fuzzt_us * NSEC_PER_USEC;
+- return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
++ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_ms);
+
+@@ -136,7 +137,7 @@ int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp)
+ {
+ ktime_t baset_ns = jiffies_to_nsecs(baset_j);
+
+- return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), trsp);
++ return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_jiffies);
+
+@@ -153,7 +154,7 @@ int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *
+ fuzzt_ns = (u32)~0U;
+ else
+ fuzzt_ns = fuzzt_ms * NSEC_PER_MSEC;
+- return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
++ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_s);
+
+@@ -720,7 +721,7 @@ static void torture_shutdown_cleanup(void)
+ * suddenly applied to or removed from the system.
+ */
+ static struct task_struct *stutter_task;
+-static int stutter_pause_test;
++static ktime_t stutter_till_abs_time;
+ static int stutter;
+ static int stutter_gap;
+
+@@ -730,30 +731,16 @@ static int stutter_gap;
+ */
+ bool stutter_wait(const char *title)
+ {
+- unsigned int i = 0;
+ bool ret = false;
+- int spt;
++ ktime_t till_ns;
+
+ cond_resched_tasks_rcu_qs();
+- spt = READ_ONCE(stutter_pause_test);
+- for (; spt; spt = READ_ONCE(stutter_pause_test)) {
+- if (!ret && !rt_task(current)) {
+- sched_set_normal(current, MAX_NICE);
+- ret = true;
+- }
+- if (spt == 1) {
+- torture_hrtimeout_jiffies(1, NULL);
+- } else if (spt == 2) {
+- while (READ_ONCE(stutter_pause_test)) {
+- if (!(i++ & 0xffff))
+- torture_hrtimeout_us(10, 0, NULL);
+- cond_resched();
+- }
+- } else {
+- torture_hrtimeout_jiffies(round_jiffies_relative(HZ), NULL);
+- }
+- torture_shutdown_absorb(title);
++ till_ns = READ_ONCE(stutter_till_abs_time);
++ if (till_ns && ktime_before(ktime_get(), till_ns)) {
++ torture_hrtimeout_ns(till_ns, 0, HRTIMER_MODE_ABS, NULL);
++ ret = true;
+ }
++ torture_shutdown_absorb(title);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(stutter_wait);
+@@ -764,23 +751,16 @@ EXPORT_SYMBOL_GPL(stutter_wait);
+ */
+ static int torture_stutter(void *arg)
+ {
+- DEFINE_TORTURE_RANDOM(rand);
+- int wtime;
++ ktime_t till_ns;
+
+ VERBOSE_TOROUT_STRING("torture_stutter task started");
+ do {
+ if (!torture_must_stop() && stutter > 1) {
+- wtime = stutter;
+- if (stutter > 2) {
+- WRITE_ONCE(stutter_pause_test, 1);
+- wtime = stutter - 3;
+- torture_hrtimeout_jiffies(wtime, &rand);
+- wtime = 2;
+- }
+- WRITE_ONCE(stutter_pause_test, 2);
+- torture_hrtimeout_jiffies(wtime, NULL);
++ till_ns = ktime_add_ns(ktime_get(),
++ jiffies_to_nsecs(stutter));
++ WRITE_ONCE(stutter_till_abs_time, till_ns);
++ torture_hrtimeout_jiffies(stutter - 1, NULL);
+ }
+- WRITE_ONCE(stutter_pause_test, 0);
+ if (!torture_must_stop())
+ torture_hrtimeout_jiffies(stutter_gap, NULL);
+ torture_shutdown_absorb("torture_stutter");
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index abaaf516fcae9..a40d6baf101f0 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4986,6 +4986,20 @@ int tracing_open_file_tr(struct inode *inode, struct file *filp)
+ if (ret)
+ return ret;
+
++ mutex_lock(&event_mutex);
++
++ /* Fail if the file is marked for removal */
++ if (file->flags & EVENT_FILE_FL_FREED) {
++ trace_array_put(file->tr);
++ ret = -ENODEV;
++ } else {
++ event_file_get(file);
++ }
++
++ mutex_unlock(&event_mutex);
++ if (ret)
++ return ret;
++
+ filp->private_data = inode->i_private;
+
+ return 0;
+@@ -4996,6 +5010,7 @@ int tracing_release_file_tr(struct inode *inode, struct file *filp)
+ struct trace_event_file *file = inode->i_private;
+
+ trace_array_put(file->tr);
++ event_file_put(file);
+
+ return 0;
+ }
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 77debe53f07cf..d608f61287043 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1664,6 +1664,9 @@ extern void event_trigger_unregister(struct event_command *cmd_ops,
+ char *glob,
+ struct event_trigger_data *trigger_data);
+
++extern void event_file_get(struct trace_event_file *file);
++extern void event_file_put(struct trace_event_file *file);
++
+ /**
+ * struct event_trigger_ops - callbacks for trace event triggers
+ *
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index f49d6ddb63425..82cb22ad6d617 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -990,13 +990,35 @@ static void remove_subsystem(struct trace_subsystem_dir *dir)
+ }
+ }
+
++void event_file_get(struct trace_event_file *file)
++{
++ atomic_inc(&file->ref);
++}
++
++void event_file_put(struct trace_event_file *file)
++{
++ if (WARN_ON_ONCE(!atomic_read(&file->ref))) {
++ if (file->flags & EVENT_FILE_FL_FREED)
++ kmem_cache_free(file_cachep, file);
++ return;
++ }
++
++ if (atomic_dec_and_test(&file->ref)) {
++ /* Count should only go to zero when it is freed */
++ if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
++ return;
++ kmem_cache_free(file_cachep, file);
++ }
++}
++
+ static void remove_event_file_dir(struct trace_event_file *file)
+ {
+ eventfs_remove(file->ef);
+ list_del(&file->list);
+ remove_subsystem(file->system);
+ free_event_filter(file->filter);
+- kmem_cache_free(file_cachep, file);
++ file->flags |= EVENT_FILE_FL_FREED;
++ event_file_put(file);
+ }
+
+ /*
+@@ -1369,7 +1391,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
+ flags = file->flags;
+ mutex_unlock(&event_mutex);
+
+- if (!file)
++ if (!file || flags & EVENT_FILE_FL_FREED)
+ return -ENODEV;
+
+ if (flags & EVENT_FILE_FL_ENABLED &&
+@@ -1407,7 +1429,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ ret = -ENODEV;
+ mutex_lock(&event_mutex);
+ file = event_file_data(filp);
+- if (likely(file))
++ if (likely(file && !(file->flags & EVENT_FILE_FL_FREED)))
+ ret = ftrace_event_enable_disable(file, val);
+ mutex_unlock(&event_mutex);
+ break;
+@@ -1681,7 +1703,7 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+
+ mutex_lock(&event_mutex);
+ file = event_file_data(filp);
+- if (file)
++ if (file && !(file->flags & EVENT_FILE_FL_FREED))
+ print_event_filter(file, s);
+ mutex_unlock(&event_mutex);
+
+@@ -2803,6 +2825,7 @@ trace_create_new_event(struct trace_event_call *call,
+ atomic_set(&file->tm_ref, 0);
+ INIT_LIST_HEAD(&file->triggers);
+ list_add(&file->list, &tr->events);
++ event_file_get(file);
+
+ return file;
+ }
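Taken together, the hunks above give struct trace_event_file a conventional reference-counted lifetime; summarizing the intended flow (names as in this patch):

    /*
     * create:  trace_create_new_event()   -> event_file_get()    ref = 1
     * open:    tracing_open_file_tr()     -> event_file_get()    ref++
     * close:   tracing_release_file_tr()  -> event_file_put()    ref--
     * remove:  remove_event_file_dir()    -> flags |= EVENT_FILE_FL_FREED,
     *                                        then event_file_put()
     */

kmem_cache_free() only runs once the count reaches zero and EVENT_FILE_FL_FREED is set, so an already-open file keeps the object alive after removal while any new operation on it fails with -ENODEV.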
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 33264e510d161..0c611b281a5b5 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -2349,6 +2349,9 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string)
+ struct event_filter *filter = NULL;
+ int err;
+
++ if (file->flags & EVENT_FILE_FL_FREED)
++ return -ENODEV;
++
+ if (!strcmp(strstrip(filter_string), "0")) {
+ filter_disable(file);
+ filter = event_filter(file);
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index 14cb275a0bab0..846e02c0fb59a 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -452,7 +452,7 @@ static unsigned int trace_string(struct synth_trace_event *entry,
+
+ #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ if ((unsigned long)str_val < TASK_SIZE)
+- ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX);
++ ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
+ else
+ #endif
+ ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
+diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
+index 8bfe23af9c739..7d2ddbcfa377c 100644
+--- a/kernel/trace/trace_fprobe.c
++++ b/kernel/trace/trace_fprobe.c
+@@ -927,11 +927,12 @@ static int parse_symbol_and_return(int argc, const char *argv[],
+ for (i = 2; i < argc; i++) {
+ tmp = strstr(argv[i], "$retval");
+ if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') {
++ if (is_tracepoint) {
++ trace_probe_log_set_index(i);
++ trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
++ return -EINVAL;
++ }
+ *is_return = true;
+- /*
+- * NOTE: Don't check is_tracepoint here, because it will
+- * be checked when the argument is parsed.
+- */
+ break;
+ }
+ }
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index e834f149695b7..47812aa16bb57 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -1020,9 +1020,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
+ /**
+ * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
++ * @kretprobe: Is this a return probe?
+ * @name: The name of the kprobe event
+ * @loc: The location of the kprobe event
+- * @kretprobe: Is this a return probe?
+ * @...: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
+index d0b6b390ee423..778b4056700ff 100644
+--- a/kernel/watch_queue.c
++++ b/kernel/watch_queue.c
+@@ -331,7 +331,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe,
+ filter.__reserved != 0)
+ return -EINVAL;
+
+- tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf));
++ tf = memdup_array_user(_filter->filters, filter.nr_filters, sizeof(*tf));
+ if (IS_ERR(tf))
+ return PTR_ERR(tf);
+
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index d145305d95fe8..5cd6d4e269157 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -283,6 +283,13 @@ static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
+ static DEFINE_PER_CPU(bool, softlockup_touch_sync);
+ static unsigned long soft_lockup_nmi_warn;
+
++static int __init softlockup_panic_setup(char *str)
++{
++ softlockup_panic = simple_strtoul(str, NULL, 0);
++ return 1;
++}
++__setup("softlockup_panic=", softlockup_panic_setup);
++
+ static int __init nowatchdog_setup(char *str)
+ {
+ watchdog_user_enabled = 0;
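The new __setup handler above makes the softlockup panic behaviour selectable at boot, e.g.:

    softlockup_panic=1

This is the boot-time counterpart of the kernel.softlockup_panic sysctl (and of the CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC default); any value accepted by simple_strtoul() works, with non-zero meaning panic on a detected soft lockup.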
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index a3522b70218d3..0f682da96e1c5 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -5622,50 +5622,54 @@ static void work_for_cpu_fn(struct work_struct *work)
+ }
+
+ /**
+- * work_on_cpu - run a function in thread context on a particular cpu
++ * work_on_cpu_key - run a function in thread context on a particular cpu
+ * @cpu: the cpu to run on
+ * @fn: the function to run
+ * @arg: the function arg
++ * @key: The lock class key for lock debugging purposes
+ *
+ * It is up to the caller to ensure that the cpu doesn't go offline.
+ * The caller must not hold any locks which would prevent @fn from completing.
+ *
+ * Return: The value @fn returns.
+ */
+-long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
++long work_on_cpu_key(int cpu, long (*fn)(void *),
++ void *arg, struct lock_class_key *key)
+ {
+ struct work_for_cpu wfc = { .fn = fn, .arg = arg };
+
+- INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
++ INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
+ schedule_work_on(cpu, &wfc.work);
+ flush_work(&wfc.work);
+ destroy_work_on_stack(&wfc.work);
+ return wfc.ret;
+ }
+-EXPORT_SYMBOL_GPL(work_on_cpu);
++EXPORT_SYMBOL_GPL(work_on_cpu_key);
+
+ /**
+- * work_on_cpu_safe - run a function in thread context on a particular cpu
++ * work_on_cpu_safe_key - run a function in thread context on a particular cpu
+ * @cpu: the cpu to run on
+ * @fn: the function to run
+ * @arg: the function argument
++ * @key: The lock class key for lock debugging purposes
+ *
+ * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
+ * any locks which would prevent @fn from completing.
+ *
+ * Return: The value @fn returns.
+ */
+-long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
++long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
++ void *arg, struct lock_class_key *key)
+ {
+ long ret = -ENODEV;
+
+ cpus_read_lock();
+ if (cpu_online(cpu))
+- ret = work_on_cpu(cpu, fn, arg);
++ ret = work_on_cpu_key(cpu, fn, arg, key);
+ cpus_read_unlock();
+ return ret;
+ }
+-EXPORT_SYMBOL_GPL(work_on_cpu_safe);
++EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
+ #endif /* CONFIG_SMP */
+
+ #ifdef CONFIG_FREEZER
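Callers that can reach work_on_cpu() recursively (the lockdep false positive this key parameter addresses) now pass a distinct lock class per call site; a minimal kernel-context usage sketch, with my_fn, my_arg and example() as placeholder names:

    static long my_fn(void *arg)              /* placeholder callback */
    {
        return 0;
    }

    static long example(void *my_arg)
    {
        static struct lock_class_key my_work_key;  /* one class per call site */

        return work_on_cpu_key(3, my_fn, my_arg, &my_work_key);
    }

Presumably the include-side half of this change (not shown here) keeps plain work_on_cpu() available as a wrapper that supplies such a static key automatically.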
+diff --git a/lib/errname.c b/lib/errname.c
+index 67739b174a8cc..0c336b0f12f60 100644
+--- a/lib/errname.c
++++ b/lib/errname.c
+@@ -111,9 +111,6 @@ static const char *names_0[] = {
+ E(ENOSPC),
+ E(ENOSR),
+ E(ENOSTR),
+-#ifdef ENOSYM
+- E(ENOSYM),
+-#endif
+ E(ENOSYS),
+ E(ENOTBLK),
+ E(ENOTCONN),
+@@ -144,9 +141,6 @@ static const char *names_0[] = {
+ #endif
+ E(EREMOTE),
+ E(EREMOTEIO),
+-#ifdef EREMOTERELEASE
+- E(EREMOTERELEASE),
+-#endif
+ E(ERESTART),
+ E(ERFKILL),
+ E(EROFS),
+diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
+index f25eb111c0516..7dfa88282b006 100644
+--- a/lib/generic-radix-tree.c
++++ b/lib/generic-radix-tree.c
+@@ -166,6 +166,10 @@ void *__genradix_iter_peek(struct genradix_iter *iter,
+ struct genradix_root *r;
+ struct genradix_node *n;
+ unsigned level, i;
++
++ if (iter->offset == SIZE_MAX)
++ return NULL;
++
+ restart:
+ r = READ_ONCE(radix->root);
+ if (!r)
+@@ -184,10 +188,17 @@ restart:
+ (GENRADIX_ARY - 1);
+
+ while (!n->children[i]) {
++ size_t objs_per_ptr = genradix_depth_size(level);
++
++ if (iter->offset + objs_per_ptr < iter->offset) {
++ iter->offset = SIZE_MAX;
++ iter->pos = SIZE_MAX;
++ return NULL;
++ }
++
+ i++;
+- iter->offset = round_down(iter->offset +
+- genradix_depth_size(level),
+- genradix_depth_size(level));
++ iter->offset = round_down(iter->offset + objs_per_ptr,
++ objs_per_ptr);
+ iter->pos = (iter->offset >> PAGE_SHIFT) *
+ objs_per_page;
+ if (i == GENRADIX_ARY)
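The added check is the standard unsigned wrap-around test; in isolation (a self-contained illustration, not kernel code):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        size_t offset = SIZE_MAX - 2;  /* iterator nearly at the end */
        size_t step = 8;               /* objs_per_ptr in the patch above */

        if (offset + step < offset)    /* unsigned addition wrapped */
            puts("iterator exhausted: park at SIZE_MAX instead of looping");
        return 0;
    }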
+diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
+index a6348489d45fe..1236b3cd2fbb2 100644
+--- a/lib/kunit/executor.c
++++ b/lib/kunit/executor.c
+@@ -137,8 +137,10 @@ void kunit_free_suite_set(struct kunit_suite_set suite_set)
+ {
+ struct kunit_suite * const *suites;
+
+- for (suites = suite_set.start; suites < suite_set.end; suites++)
++ for (suites = suite_set.start; suites < suite_set.end; suites++) {
++ kfree((*suites)->test_cases);
+ kfree(*suites);
++ }
+ kfree(suite_set.start);
+ }
+
+@@ -155,10 +157,11 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ struct kunit_suite_set filtered = {NULL, NULL};
+ struct kunit_glob_filter parsed_glob;
+ struct kunit_attr_filter *parsed_filters = NULL;
++ struct kunit_suite * const *suites;
+
+ const size_t max = suite_set->end - suite_set->start;
+
+- copy = kmalloc_array(max, sizeof(*filtered.start), GFP_KERNEL);
++ copy = kcalloc(max, sizeof(*filtered.start), GFP_KERNEL);
+ if (!copy) { /* won't be able to run anything, return an empty set */
+ return filtered;
+ }
+@@ -193,7 +196,7 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ parsed_glob.test_glob);
+ if (IS_ERR(filtered_suite)) {
+ *err = PTR_ERR(filtered_suite);
+- goto free_parsed_filters;
++ goto free_filtered_suite;
+ }
+ }
+ if (filter_count > 0 && parsed_filters != NULL) {
+@@ -210,11 +213,11 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ filtered_suite = new_filtered_suite;
+
+ if (*err)
+- goto free_parsed_filters;
++ goto free_filtered_suite;
+
+ if (IS_ERR(filtered_suite)) {
+ *err = PTR_ERR(filtered_suite);
+- goto free_parsed_filters;
++ goto free_filtered_suite;
+ }
+ if (!filtered_suite)
+ break;
+@@ -229,6 +232,14 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ filtered.start = copy_start;
+ filtered.end = copy;
+
++free_filtered_suite:
++ if (*err) {
++ for (suites = copy_start; suites < copy; suites++) {
++ kfree((*suites)->test_cases);
++ kfree(*suites);
++ }
++ }
++
+ free_parsed_filters:
+ if (filter_count)
+ kfree(parsed_filters);
+@@ -241,7 +252,7 @@ free_parsed_glob:
+
+ free_copy:
+ if (*err)
+- kfree(copy);
++ kfree(copy_start);
+
+ return filtered;
+ }
+diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
+index b4f6f96b28445..22d4ee86dbedd 100644
+--- a/lib/kunit/executor_test.c
++++ b/lib/kunit/executor_test.c
+@@ -9,7 +9,7 @@
+ #include <kunit/test.h>
+ #include <kunit/attributes.h>
+
+-static void kfree_at_end(struct kunit *test, const void *to_free);
++static void free_suite_set_at_end(struct kunit *test, const void *to_free);
+ static struct kunit_suite *alloc_fake_suite(struct kunit *test,
+ const char *suite_name,
+ struct kunit_case *test_cases);
+@@ -56,7 +56,7 @@ static void filter_suites_test(struct kunit *test)
+ got = kunit_filter_suites(&suite_set, "suite2", NULL, NULL, &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+- kfree_at_end(test, got.start);
++ free_suite_set_at_end(test, &got);
+
+ /* Validate we just have suite2 */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+@@ -82,7 +82,7 @@ static void filter_suites_test_glob_test(struct kunit *test)
+ got = kunit_filter_suites(&suite_set, "suite2.test2", NULL, NULL, &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+- kfree_at_end(test, got.start);
++ free_suite_set_at_end(test, &got);
+
+ /* Validate we just have suite2 */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+@@ -109,7 +109,7 @@ static void filter_suites_to_empty_test(struct kunit *test)
+
+ got = kunit_filter_suites(&suite_set, "not_found", NULL, NULL, &err);
+ KUNIT_ASSERT_EQ(test, err, 0);
+- kfree_at_end(test, got.start); /* just in case */
++ free_suite_set_at_end(test, &got); /* just in case */
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
+ "should be empty to indicate no match");
+@@ -172,7 +172,7 @@ static void filter_attr_test(struct kunit *test)
+ got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+- kfree_at_end(test, got.start);
++ free_suite_set_at_end(test, &got);
+
+ /* Validate we just have normal_suite */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+@@ -200,7 +200,7 @@ static void filter_attr_empty_test(struct kunit *test)
+
+ got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
+ KUNIT_ASSERT_EQ(test, err, 0);
+- kfree_at_end(test, got.start); /* just in case */
++ free_suite_set_at_end(test, &got); /* just in case */
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
+ "should be empty to indicate no match");
+@@ -222,7 +222,7 @@ static void filter_attr_skip_test(struct kunit *test)
+ got = kunit_filter_suites(&suite_set, NULL, filter, "skip", &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+- kfree_at_end(test, got.start);
++ free_suite_set_at_end(test, &got);
+
+ /* Validate we have both the slow and normal test */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
+@@ -256,18 +256,26 @@ kunit_test_suites(&executor_test_suite);
+
+ /* Test helpers */
+
+-/* Use the resource API to register a call to kfree(to_free).
++static void free_suite_set(void *suite_set)
++{
++ kunit_free_suite_set(*(struct kunit_suite_set *)suite_set);
++ kfree(suite_set);
++}
++
++/* Use the resource API to register a call to free_suite_set.
+ * Since we never actually use the resource, it's safe to use on const data.
+ */
+-static void kfree_at_end(struct kunit *test, const void *to_free)
++static void free_suite_set_at_end(struct kunit *test, const void *to_free)
+ {
+- /* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
+- if (IS_ERR_OR_NULL(to_free))
++ struct kunit_suite_set *free;
++
++ if (!((struct kunit_suite_set *)to_free)->start)
+ return;
+
+- kunit_add_action(test,
+- (kunit_action_t *)kfree,
+- (void *)to_free);
++ free = kzalloc(sizeof(struct kunit_suite_set), GFP_KERNEL);
++ if (!free)
++ return;
++ *free = *(struct kunit_suite_set *)to_free;
++
++ kunit_add_action(test, free_suite_set, (void *)free);
+ }
+
+ static struct kunit_suite *alloc_fake_suite(struct kunit *test,
+diff --git a/mm/cma.c b/mm/cma.c
+index da2967c6a2238..2b2494fd6b59a 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -505,7 +505,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
+ */
+ if (page) {
+ for (i = 0; i < count; i++)
+- page_kasan_tag_reset(page + i);
++ page_kasan_tag_reset(nth_page(page, i));
+ }
+
+ if (ret && !no_warn) {
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index bcd2bd9d6c104..fd5be73f699f4 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -476,20 +476,14 @@ static unsigned int damon_age_for_new_attrs(unsigned int age,
+ static unsigned int damon_accesses_bp_to_nr_accesses(
+ unsigned int accesses_bp, struct damon_attrs *attrs)
+ {
+- unsigned int max_nr_accesses =
+- attrs->aggr_interval / attrs->sample_interval;
+-
+- return accesses_bp * max_nr_accesses / 10000;
++ return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
+ }
+
+ /* convert nr_accesses to access ratio in bp (per 10,000) */
+ static unsigned int damon_nr_accesses_to_accesses_bp(
+ unsigned int nr_accesses, struct damon_attrs *attrs)
+ {
+- unsigned int max_nr_accesses =
+- attrs->aggr_interval / attrs->sample_interval;
+-
+- return nr_accesses * 10000 / max_nr_accesses;
++ return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
+ }
+
+ static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
+@@ -920,7 +914,7 @@ static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
+ matched = true;
+ break;
+ default:
+- break;
++ return false;
+ }
+
+ return matched == filter->matching;
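/*
 * Editor's note (not part of the patch): the mm/damon/core.c hunks
 * above fold the repeated "aggr_interval / sample_interval" expression
 * into a damon_max_nr_accesses() helper, which presumably also
 * centralizes any guard for sample_interval == 0. The arithmetic being
 * shared is a ratio plus two linear conversions; worked numbers:
 */
#include <stdio.h>

int main(void)
{
	unsigned int sample_us = 5000, aggr_us = 100000;
	unsigned int max = aggr_us / sample_us;	/* max_nr_accesses = 20 */

	/* accesses_bp (parts per 10,000) -> nr_accesses: 50% -> 10 */
	printf("bp 5000 -> nr_accesses %u\n", 5000 * max / 10000);
	/* nr_accesses -> accesses_bp: 10 -> 5000 */
	printf("nr_accesses 10 -> bp %u\n", 10 * 10000 / max);
	return 0;
}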
+diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
+index 7b8fce2f67a8d..3071e08e8b8f8 100644
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -193,9 +193,7 @@ static int damon_lru_sort_apply_parameters(void)
+ if (err)
+ return err;
+
+- /* aggr_interval / sample_interval is the maximum nr_accesses */
+- hot_thres = damon_lru_sort_mon_attrs.aggr_interval /
+- damon_lru_sort_mon_attrs.sample_interval *
++ hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
+ hot_thres_access_freq / 1000;
+ scheme = damon_lru_sort_new_hot_scheme(hot_thres);
+ if (!scheme)
+diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
+index ac1c3fa80f984..d25d99cb5f2bb 100644
+--- a/mm/damon/ops-common.c
++++ b/mm/damon/ops-common.c
+@@ -73,7 +73,6 @@ void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr
+ int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
+ struct damos *s)
+ {
+- unsigned int max_nr_accesses;
+ int freq_subscore;
+ unsigned int age_in_sec;
+ int age_in_log, age_subscore;
+@@ -81,8 +80,8 @@ int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
+ unsigned int age_weight = s->quota.weight_age;
+ int hotness;
+
+- max_nr_accesses = c->attrs.aggr_interval / c->attrs.sample_interval;
+- freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;
++ freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE /
++ damon_max_nr_accesses(&c->attrs);
+
+ age_in_sec = (unsigned long)r->age * c->attrs.aggr_interval / 1000000;
+ for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
+diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
+index 527e7d17eb3b2..36dcd881a19c0 100644
+--- a/mm/damon/sysfs-schemes.c
++++ b/mm/damon/sysfs-schemes.c
+@@ -126,6 +126,9 @@ damon_sysfs_scheme_regions_alloc(void)
+ struct damon_sysfs_scheme_regions *regions = kmalloc(sizeof(*regions),
+ GFP_KERNEL);
+
++ if (!regions)
++ return NULL;
++
+ regions->kobj = (struct kobject){};
+ INIT_LIST_HEAD(&regions->regions_list);
+ regions->nr_regions = 0;
+@@ -1752,6 +1755,8 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
+ return 0;
+
+ region = damon_sysfs_scheme_region_alloc(r);
++ if (!region)
++ return 0;
+ list_add_tail(&region->list, &sysfs_regions->regions_list);
+ sysfs_regions->nr_regions++;
+ if (kobject_init_and_add(&region->kobj,
+diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
+index f60e56150feb6..faaef5098e264 100644
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -1150,58 +1150,75 @@ destroy_targets_out:
+ return err;
+ }
+
+-/*
+- * Search a target in a context that corresponds to the sysfs target input.
+- *
+- * Return: pointer to the target if found, NULL if not found, or negative
+- * error code if the search failed.
+- */
+-static struct damon_target *damon_sysfs_existing_target(
+- struct damon_sysfs_target *sys_target, struct damon_ctx *ctx)
++static int damon_sysfs_update_target_pid(struct damon_target *target, int pid)
+ {
+- struct pid *pid;
+- struct damon_target *t;
++ struct pid *pid_new;
+
+- if (!damon_target_has_pid(ctx)) {
+- /* Up to only one target for paddr could exist */
+- damon_for_each_target(t, ctx)
+- return t;
+- return NULL;
++ pid_new = find_get_pid(pid);
++ if (!pid_new)
++ return -EINVAL;
++
++ if (pid_new == target->pid) {
++ put_pid(pid_new);
++ return 0;
+ }
+
+- /* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */
+- pid = find_get_pid(sys_target->pid);
+- if (!pid)
+- return ERR_PTR(-EINVAL);
+- damon_for_each_target(t, ctx) {
+- if (t->pid == pid) {
+- put_pid(pid);
+- return t;
+- }
++ put_pid(target->pid);
++ target->pid = pid_new;
++ return 0;
++}
++
++static int damon_sysfs_update_target(struct damon_target *target,
++ struct damon_ctx *ctx,
++ struct damon_sysfs_target *sys_target)
++{
++ int err = 0;
++
++ if (damon_target_has_pid(ctx)) {
++ err = damon_sysfs_update_target_pid(target, sys_target->pid);
++ if (err)
++ return err;
+ }
+- put_pid(pid);
+- return NULL;
++
++ /*
++ * Update the monitoring target region boundaries only if the user
++ * has set one or more regions. This keeps the current monitoring
++ * results and ranges intact, which matters especially for ops that
++ * update target regions dynamically, such as 'vaddr'.
++ */
++ if (sys_target->regions->nr)
++ err = damon_sysfs_set_regions(target, sys_target->regions);
++ return err;
+ }
+
+ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
+ struct damon_sysfs_targets *sysfs_targets)
+ {
+- int i, err;
++ struct damon_target *t, *next;
++ int i = 0, err;
+
+ /* Multiple physical address space monitoring targets makes no sense */
+ if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
+ return -EINVAL;
+
+- for (i = 0; i < sysfs_targets->nr; i++) {
++ damon_for_each_target_safe(t, next, ctx) {
++ if (i < sysfs_targets->nr) {
++ err = damon_sysfs_update_target(t, ctx,
++ sysfs_targets->targets_arr[i]);
++ if (err)
++ return err;
++ } else {
++ if (damon_target_has_pid(ctx))
++ put_pid(t->pid);
++ damon_destroy_target(t);
++ }
++ i++;
++ }
++
++ for (; i < sysfs_targets->nr; i++) {
+ struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
+- struct damon_target *t = damon_sysfs_existing_target(st, ctx);
+-
+- if (IS_ERR(t))
+- return PTR_ERR(t);
+- if (!t)
+- err = damon_sysfs_add_target(st, ctx);
+- else
+- err = damon_sysfs_set_regions(t, st->regions);
++
++ err = damon_sysfs_add_target(st, ctx);
+ if (err)
+ return err;
+ }
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 064fbd90822b4..874000f97bfc1 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2737,13 +2737,15 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ int nr = folio_nr_pages(folio);
+
+ xas_split(&xas, folio, folio_order(folio));
+- if (folio_test_swapbacked(folio)) {
+- __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
+- -nr);
+- } else {
+- __lruvec_stat_mod_folio(folio, NR_FILE_THPS,
+- -nr);
+- filemap_nr_thps_dec(mapping);
++ if (folio_test_pmd_mappable(folio)) {
++ if (folio_test_swapbacked(folio)) {
++ __lruvec_stat_mod_folio(folio,
++ NR_SHMEM_THPS, -nr);
++ } else {
++ __lruvec_stat_mod_folio(folio,
++ NR_FILE_THPS, -nr);
++ filemap_nr_thps_dec(mapping);
++ }
+ }
+ }
+
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 1301ba7b2c9a9..5f0adffeceb1d 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -6520,7 +6520,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
+ }
+ }
+
+- page += ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
++ page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT));
+
+ /*
+ * Note that page may be a sub-page, and with vmemmap
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 5b009b233ab89..8a881ab21f6cb 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2864,7 +2864,8 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
+ * Moreover, it should not come from DMA buffer and is not readily
+ * reclaimable. So those GFP bits should be masked off.
+ */
+-#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
++#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
++ __GFP_ACCOUNT | __GFP_NOFAIL)
+
+ /*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 1b03f4ec6fd21..3b301c4023ffc 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1689,7 +1689,7 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
+ */
+ if (HPageMigratable(head))
+ goto found;
+- skip = compound_nr(head) - (page - head);
++ skip = compound_nr(head) - (pfn - page_to_pfn(head));
+ pfn += skip - 1;
+ }
+ return -ENOENT;
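/*
 * Editor's note (not part of the patch): "page - head" is pointer
 * arithmetic on struct page, which is only valid while the memmap is
 * virtually contiguous; with SPARSEMEM (without VMEMMAP) that breaks
 * across section boundaries. PFNs are always linear, so the fixed
 * hunk measures progress in PFN space instead, in the same spirit as
 * the nth_page() fixes in the mm/cma.c and mm/hugetlb.c hunks above.
 * Worked numbers for the skip computation:
 */
#include <stdio.h>

int main(void)
{
	unsigned long head_pfn = 0x1000, nr = 512;	/* a 2MB THP */
	unsigned long pfn = head_pfn + 3;	/* scan entered mid-page */
	unsigned long skip = nr - (pfn - head_pfn);

	/* caller does pfn += skip - 1, then the for-loop adds one more */
	printf("resume at pfn %#lx, first pfn past tail %#lx\n",
	       pfn + skip, head_pfn + nr);	/* both print 0x1200 */
	return 0;
}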
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 29ebf1e7898cf..e52e3a0b8f2e6 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -131,22 +131,26 @@ static struct mempolicy default_policy = {
+ static struct mempolicy preferred_node_policy[MAX_NUMNODES];
+
+ /**
+- * numa_map_to_online_node - Find closest online node
++ * numa_nearest_node - Find nearest node by state
+ * @node: Node id to start the search
++ * @state: State to filter the search
+ *
+- * Lookup the next closest node by distance if @nid is not online.
++ * Lookup the closest node by distance if @nid is not in state.
+ *
+- * Return: this @node if it is online, otherwise the closest node by distance
++ * Return: this @node if it is in state, otherwise the closest node by distance
+ */
+-int numa_map_to_online_node(int node)
++int numa_nearest_node(int node, unsigned int state)
+ {
+ int min_dist = INT_MAX, dist, n, min_node;
+
+- if (node == NUMA_NO_NODE || node_online(node))
++ if (state >= NR_NODE_STATES)
++ return -EINVAL;
++
++ if (node == NUMA_NO_NODE || node_state(node, state))
+ return node;
+
+ min_node = node;
+- for_each_online_node(n) {
++ for_each_node_state(n, state) {
+ dist = node_distance(node, n);
+ if (dist < min_dist) {
+ min_dist = dist;
+@@ -156,7 +160,7 @@ int numa_map_to_online_node(int node)
+
+ return min_node;
+ }
+-EXPORT_SYMBOL_GPL(numa_map_to_online_node);
++EXPORT_SYMBOL_GPL(numa_nearest_node);
+
+ struct mempolicy *get_task_policy(struct task_struct *p)
+ {
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index b8d3d7040a506..4656534b8f5cc 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -3110,7 +3110,7 @@ EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
+ */
+ void folio_wait_stable(struct folio *folio)
+ {
+- if (folio_inode(folio)->i_sb->s_iflags & SB_I_STABLE_WRITES)
++ if (mapping_stable_writes(folio_mapping(folio)))
+ folio_wait_writeback(folio);
+ }
+ EXPORT_SYMBOL_GPL(folio_wait_stable);
+diff --git a/mm/readahead.c b/mm/readahead.c
+index e815c114de21e..6925e6959fd3f 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -735,7 +735,8 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
+ */
+ ret = -EINVAL;
+ if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
+- !S_ISREG(file_inode(f.file)->i_mode))
++ (!S_ISREG(file_inode(f.file)->i_mode) &&
++ !S_ISBLK(file_inode(f.file)->i_mode)))
+ goto out;
+
+ ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
+diff --git a/mm/util.c b/mm/util.c
+index 8cbbfd3a3d598..be798981acc7d 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -414,6 +414,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
+
+ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+ {
++#ifdef CONFIG_STACK_GROWSUP
++ /*
++ * For an upwards growing stack the calculation is much simpler.
++ * Memory for the maximum stack size is reserved at the top of the
++ * task. mmap_base starts directly below the stack and grows
++ * downwards.
++ */
++ return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
++#else
+ unsigned long gap = rlim_stack->rlim_cur;
+ unsigned long pad = stack_guard_gap;
+
+@@ -431,6 +440,7 @@ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+ gap = MAX_GAP;
+
+ return PAGE_ALIGN(STACK_TOP - gap - rnd);
++#endif
+ }
+
+ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 86bbc7147fc14..e265a0ca6bddd 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -540,12 +540,14 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
+ return 0;
+
+ if (!p9_is_proto_dotl(c)) {
+- char *ename;
++ char *ename = NULL;
+
+ err = p9pdu_readf(&req->rc, c->proto_version, "s?d",
+ &ename, &ecode);
+- if (err)
++ if (err) {
++ kfree(ename);
+ goto out_err;
++ }
+
+ if (p9_is_proto_dotu(c) && ecode < 512)
+ err = -ecode;
+@@ -1979,7 +1981,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
+ goto error;
+ }
+ p9_debug(P9_DEBUG_9P,
+- ">>> TXATTRWALK file_fid %d, attr_fid %d name %s\n",
++ ">>> TXATTRWALK file_fid %d, attr_fid %d name '%s'\n",
+ file_fid->fid, attr_fid->fid, attr_name);
+
+ req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds",
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index c4015f30f9fa7..d0eb03ada704d 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -832,14 +832,21 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
+ goto out_free_ts;
+ if (!(ts->rd->f_mode & FMODE_READ))
+ goto out_put_rd;
+- /* prevent workers from hanging on IO when fd is a pipe */
+- ts->rd->f_flags |= O_NONBLOCK;
++ /* Prevent workers from hanging on IO when fd is a pipe.
++ * It's technically possible for userspace or concurrent mounts to
++ * modify this flag concurrently, which will likely result in a
++ * broken filesystem. However, just having bad flags here should
++ * not crash the kernel or cause any other sort of bug, so mark this
++ * particular data race as intentional so that tooling (like KCSAN)
++ * can allow it and detect further problems.
++ */
++ data_race(ts->rd->f_flags |= O_NONBLOCK);
+ ts->wr = fget(wfd);
+ if (!ts->wr)
+ goto out_put_rd;
+ if (!(ts->wr->f_mode & FMODE_WRITE))
+ goto out_put_wr;
+- ts->wr->f_flags |= O_NONBLOCK;
++ data_race(ts->wr->f_flags |= O_NONBLOCK);
+
+ client->trans = ts;
+ client->status = Connected;
+diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
+index 2134f92bd7ac2..5d698f19868c5 100644
+--- a/net/bluetooth/amp.c
++++ b/net/bluetooth/amp.c
+@@ -109,7 +109,7 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
+ struct hci_conn *hcon;
+ u8 role = out ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE;
+
+- hcon = hci_conn_add(hdev, AMP_LINK, dst, role);
++ hcon = hci_conn_add(hdev, AMP_LINK, dst, role, __next_handle(mgr));
+ if (!hcon)
+ return NULL;
+
+@@ -117,7 +117,6 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
+
+ hcon->state = BT_CONNECT;
+ hcon->attempt++;
+- hcon->handle = __next_handle(mgr);
+ hcon->remote_id = remote_id;
+ hcon->amp_mgr = amp_mgr_get(mgr);
+
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 73470cc3518a7..f3139c4c20fc0 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -153,6 +153,9 @@ static void hci_conn_cleanup(struct hci_conn *conn)
+
+ hci_conn_hash_del(hdev, conn);
+
++ if (HCI_CONN_HANDLE_UNSET(conn->handle))
++ ida_free(&hdev->unset_handle_ida, conn->handle);
++
+ if (conn->cleanup)
+ conn->cleanup(conn);
+
+@@ -169,13 +172,11 @@ static void hci_conn_cleanup(struct hci_conn *conn)
+ hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
+ }
+
+- hci_conn_del_sysfs(conn);
+-
+ debugfs_remove_recursive(conn->debugfs);
+
+- hci_dev_put(hdev);
++ hci_conn_del_sysfs(conn);
+
+- hci_conn_put(conn);
++ hci_dev_put(hdev);
+ }
+
+ static void hci_acl_create_connection(struct hci_conn *conn)
+@@ -928,31 +929,18 @@ static void cis_cleanup(struct hci_conn *conn)
+ hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
+ }
+
+-static u16 hci_conn_hash_alloc_unset(struct hci_dev *hdev)
++static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
+ {
+- struct hci_conn_hash *h = &hdev->conn_hash;
+- struct hci_conn *c;
+- u16 handle = HCI_CONN_HANDLE_MAX + 1;
+-
+- rcu_read_lock();
+-
+- list_for_each_entry_rcu(c, &h->list, list) {
+- /* Find the first unused handle */
+- if (handle == 0xffff || c->handle != handle)
+- break;
+- handle++;
+- }
+- rcu_read_unlock();
+-
+- return handle;
++ return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
++ U16_MAX, GFP_ATOMIC);
+ }
+
+ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+- u8 role)
++ u8 role, u16 handle)
+ {
+ struct hci_conn *conn;
+
+- BT_DBG("%s dst %pMR", hdev->name, dst);
++ bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
+
+ conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+ if (!conn)
+@@ -960,7 +948,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+
+ bacpy(&conn->dst, dst);
+ bacpy(&conn->src, &hdev->bdaddr);
+- conn->handle = hci_conn_hash_alloc_unset(hdev);
++ conn->handle = handle;
+ conn->hdev = hdev;
+ conn->type = type;
+ conn->role = role;
+@@ -973,6 +961,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ conn->rssi = HCI_RSSI_INVALID;
+ conn->tx_power = HCI_TX_POWER_INVALID;
+ conn->max_tx_power = HCI_TX_POWER_INVALID;
++ conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
+
+ set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+@@ -1044,6 +1033,20 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ return conn;
+ }
+
++struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
++ bdaddr_t *dst, u8 role)
++{
++ int handle;
++
++ bt_dev_dbg(hdev, "dst %pMR", dst);
++
++ handle = hci_conn_hash_alloc_unset(hdev);
++ if (unlikely(handle < 0))
++ return NULL;
++
++ return hci_conn_add(hdev, type, dst, role, handle);
++}
++
+ static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
+ {
+ if (!reason)
+@@ -1274,6 +1277,9 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
+ if (conn->abort_reason)
+ return conn->abort_reason;
+
++ if (HCI_CONN_HANDLE_UNSET(conn->handle))
++ ida_free(&hdev->unset_handle_ida, conn->handle);
++
+ conn->handle = handle;
+
+ return 0;
+@@ -1381,7 +1387,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ if (conn) {
+ bacpy(&conn->dst, dst);
+ } else {
+- conn = hci_conn_add(hdev, LE_LINK, dst, role);
++ conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
+ if (!conn)
+ return ERR_PTR(-ENOMEM);
+ hci_conn_hold(conn);
+@@ -1546,7 +1552,7 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ memcmp(conn->le_per_adv_data, base, base_len)))
+ return ERR_PTR(-EADDRINUSE);
+
+- conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
++ conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
+ if (!conn)
+ return ERR_PTR(-ENOMEM);
+
+@@ -1590,7 +1596,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+
+ BT_DBG("requesting refresh of dst_addr");
+
+- conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
++ conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
+ if (!conn)
+ return ERR_PTR(-ENOMEM);
+
+@@ -1638,7 +1644,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+
+ acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ if (!acl) {
+- acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
++ acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
+ if (!acl)
+ return ERR_PTR(-ENOMEM);
+ }
+@@ -1698,7 +1704,7 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+
+ sco = hci_conn_hash_lookup_ba(hdev, type, dst);
+ if (!sco) {
+- sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
++ sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
+ if (!sco) {
+ hci_conn_drop(acl);
+ return ERR_PTR(-ENOMEM);
+@@ -1890,7 +1896,7 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
+ qos->ucast.cis);
+ if (!cis) {
+- cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
++ cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
+ if (!cis)
+ return ERR_PTR(-ENOMEM);
+ cis->cleanup = cis_cleanup;
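/*
 * Editor's sketch (not part of the patch): the hci_conn.c hunks above
 * replace the list scan in hci_conn_hash_alloc_unset() with an IDA so
 * that placeholder handles (> HCI_CONN_HANDLE_MAX) stay unique until
 * they are released, either on cleanup or when a real controller
 * handle is assigned in hci_conn_set_handle(). The generic pattern,
 * stripped of the HCI specifics (names hypothetical):
 */
#include <linux/idr.h>

static DEFINE_IDA(placeholder_ida);	/* cf. hdev->unset_handle_ida */

static int placeholder_get(unsigned int lo, unsigned int hi)
{
	/* an unused id in [lo, hi], or a negative errno on exhaustion */
	return ida_alloc_range(&placeholder_ida, lo, hi, GFP_ATOMIC);
}

static void placeholder_put(int id)
{
	ida_free(&placeholder_ida, id);	/* id may now be reused */
}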
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 195aea2198a96..65601aa52e0d8 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2535,6 +2535,8 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
+ mutex_init(&hdev->lock);
+ mutex_init(&hdev->req_lock);
+
++ ida_init(&hdev->unset_handle_ida);
++
+ INIT_LIST_HEAD(&hdev->mesh_pending);
+ INIT_LIST_HEAD(&hdev->mgmt_pending);
+ INIT_LIST_HEAD(&hdev->reject_list);
+@@ -2789,6 +2791,7 @@ void hci_release_dev(struct hci_dev *hdev)
+ hci_codec_list_clear(&hdev->local_codecs);
+ hci_dev_unlock(hdev);
+
++ ida_destroy(&hdev->unset_handle_ida);
+ ida_simple_remove(&hci_index_ida, hdev->id);
+ kfree_skb(hdev->sent_cmd);
+ kfree_skb(hdev->recv_event);
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 1e1c9147356c3..f6d3150bcbb03 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -2335,8 +2335,8 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
+ }
+ } else {
+ if (!conn) {
+- conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
+- HCI_ROLE_MASTER);
++ conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
++ HCI_ROLE_MASTER);
+ if (!conn)
+ bt_dev_err(hdev, "no memory for new connection");
+ }
+@@ -3151,8 +3151,8 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
+ hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
+ &ev->bdaddr,
+ BDADDR_BREDR)) {
+- conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
+- HCI_ROLE_SLAVE);
++ conn = hci_conn_add_unset(hdev, ev->link_type,
++ &ev->bdaddr, HCI_ROLE_SLAVE);
+ if (!conn) {
+ bt_dev_err(hdev, "no memory for new conn");
+ goto unlock;
+@@ -3317,8 +3317,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
+ conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
+ &ev->bdaddr);
+ if (!conn) {
+- conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
+- HCI_ROLE_SLAVE);
++ conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
++ HCI_ROLE_SLAVE);
+ if (!conn) {
+ bt_dev_err(hdev, "no memory for new connection");
+ goto unlock;
+@@ -5890,7 +5890,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ if (status)
+ goto unlock;
+
+- conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
++ conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
+ if (!conn) {
+ bt_dev_err(hdev, "no memory for new connection");
+ goto unlock;
+@@ -5952,17 +5952,11 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+
+ conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
+
+- if (handle > HCI_CONN_HANDLE_MAX) {
+- bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
+- HCI_CONN_HANDLE_MAX);
+- status = HCI_ERROR_INVALID_PARAMETERS;
+- }
+-
+ /* All connection failure handling is taken care of by the
+ * hci_conn_failed function which is triggered by the HCI
+ * request completion callbacks used for connecting.
+ */
+- if (status)
++ if (status || hci_conn_set_handle(conn, handle))
+ goto unlock;
+
+ /* Drop the connection if it has been aborted */
+@@ -5986,7 +5980,6 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ mgmt_device_connected(hdev, conn, NULL, 0);
+
+ conn->sec_level = BT_SECURITY_LOW;
+- conn->handle = handle;
+ conn->state = BT_CONFIG;
+
+ /* Store current advertising instance as connection advertising instance
+@@ -6603,7 +6596,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
+ struct hci_ev_le_pa_sync_established *ev = data;
+ int mask = hdev->link_mode;
+ __u8 flags = 0;
+- struct hci_conn *bis;
++ struct hci_conn *pa_sync;
+
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+
+@@ -6620,20 +6613,19 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
+ if (!(flags & HCI_PROTO_DEFER))
+ goto unlock;
+
+- /* Add connection to indicate the PA sync event */
+- bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
+- HCI_ROLE_SLAVE);
++ if (ev->status) {
++ /* Add connection to indicate the failed PA sync event */
++ pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
++ HCI_ROLE_SLAVE);
+
+- if (!bis)
+- goto unlock;
++ if (!pa_sync)
++ goto unlock;
+
+- if (ev->status)
+- set_bit(HCI_CONN_PA_SYNC_FAILED, &bis->flags);
+- else
+- set_bit(HCI_CONN_PA_SYNC, &bis->flags);
++ set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
+
+- /* Notify connection to iso layer */
+- hci_connect_cfm(bis, ev->status);
++ /* Notify iso layer */
++ hci_connect_cfm(pa_sync, ev->status);
++ }
+
+ unlock:
+ hci_dev_unlock(hdev);
+@@ -7020,12 +7012,12 @@ static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
+
+ cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
+ if (!cis) {
+- cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
++ cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
++ cis_handle);
+ if (!cis) {
+ hci_le_reject_cis(hdev, ev->cis_handle);
+ goto unlock;
+ }
+- cis->handle = cis_handle;
+ }
+
+ cis->iso_qos.ucast.cig = ev->cig_id;
+@@ -7125,7 +7117,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ hci_dev_lock(hdev);
+
+ if (!ev->status) {
+- pa_sync = hci_conn_hash_lookup_pa_sync(hdev, ev->handle);
++ pa_sync = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
+ if (pa_sync)
+ /* Also mark the BIG sync established event on the
+ * associated PA sync hcon
+@@ -7140,10 +7132,9 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ bis = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!bis) {
+ bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
+- HCI_ROLE_SLAVE);
++ HCI_ROLE_SLAVE, handle);
+ if (!bis)
+ continue;
+- bis->handle = handle;
+ }
+
+ if (ev->status != 0x42)
+@@ -7186,15 +7177,42 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
+ struct hci_evt_le_big_info_adv_report *ev = data;
+ int mask = hdev->link_mode;
+ __u8 flags = 0;
++ struct hci_conn *pa_sync;
+
+ bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
+
+ hci_dev_lock(hdev);
+
+ mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
+- if (!(mask & HCI_LM_ACCEPT))
++ if (!(mask & HCI_LM_ACCEPT)) {
+ hci_le_pa_term_sync(hdev, ev->sync_handle);
++ goto unlock;
++ }
++
++ if (!(flags & HCI_PROTO_DEFER))
++ goto unlock;
++
++ pa_sync = hci_conn_hash_lookup_pa_sync_handle
++ (hdev,
++ le16_to_cpu(ev->sync_handle));
++
++ if (pa_sync)
++ goto unlock;
+
++ /* Add connection to indicate the PA sync event */
++ pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
++ HCI_ROLE_SLAVE);
++
++ if (!pa_sync)
++ goto unlock;
++
++ pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
++ set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
++
++ /* Notify iso layer */
++ hci_connect_cfm(pa_sync, 0x00);
++
++unlock:
+ hci_dev_unlock(hdev);
+ }
+
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index a15ab0b874a9d..9e71362c04b48 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -152,7 +152,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ struct sk_buff *skb;
+ int err = 0;
+
+- bt_dev_dbg(hdev, "Opcode 0x%4x", opcode);
++ bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
+
+ hci_req_init(&req, hdev);
+
+@@ -248,7 +248,7 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
+ if (IS_ERR(skb)) {
+ if (!event)
+- bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
++ bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
+index 15b33579007cb..367e32fe30eb8 100644
+--- a/net/bluetooth/hci_sysfs.c
++++ b/net/bluetooth/hci_sysfs.c
+@@ -35,7 +35,7 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
+ {
+ struct hci_dev *hdev = conn->hdev;
+
+- BT_DBG("conn %p", conn);
++ bt_dev_dbg(hdev, "conn %p", conn);
+
+ conn->dev.type = &bt_link;
+ conn->dev.class = &bt_class;
+@@ -48,27 +48,30 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
+ {
+ struct hci_dev *hdev = conn->hdev;
+
+- BT_DBG("conn %p", conn);
++ bt_dev_dbg(hdev, "conn %p", conn);
+
+ if (device_is_registered(&conn->dev))
+ return;
+
+ dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
+
+- if (device_add(&conn->dev) < 0) {
++ if (device_add(&conn->dev) < 0)
+ bt_dev_err(hdev, "failed to register connection device");
+- return;
+- }
+-
+- hci_dev_hold(hdev);
+ }
+
+ void hci_conn_del_sysfs(struct hci_conn *conn)
+ {
+ struct hci_dev *hdev = conn->hdev;
+
+- if (!device_is_registered(&conn->dev))
++ bt_dev_dbg(hdev, "conn %p", conn);
++
++ if (!device_is_registered(&conn->dev)) {
++ /* If device_add() has *not* succeeded, use *only* put_device()
++ * to drop the reference count.
++ */
++ put_device(&conn->dev);
+ return;
++ }
+
+ while (1) {
+ struct device *dev;
+@@ -80,9 +83,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
+ put_device(dev);
+ }
+
+- device_del(&conn->dev);
+-
+- hci_dev_put(hdev);
++ device_unregister(&conn->dev);
+ }
+
+ static void bt_host_release(struct device *dev)
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 71248163ce9a5..2132a16be93cd 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -77,6 +77,7 @@ static struct bt_iso_qos default_qos;
+ static bool check_ucast_qos(struct bt_iso_qos *qos);
+ static bool check_bcast_qos(struct bt_iso_qos *qos);
+ static bool iso_match_sid(struct sock *sk, void *data);
++static bool iso_match_sync_handle(struct sock *sk, void *data);
+ static void iso_sock_disconn(struct sock *sk);
+
+ /* ---- ISO timers ---- */
+@@ -1202,7 +1203,6 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ test_bit(HCI_CONN_PA_SYNC, &pi->conn->hcon->flags)) {
+ iso_conn_big_sync(sk);
+ sk->sk_state = BT_LISTEN;
+- set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags);
+ } else {
+ iso_conn_defer_accept(pi->conn->hcon);
+ sk->sk_state = BT_CONFIG;
+@@ -1579,6 +1579,7 @@ static void iso_conn_ready(struct iso_conn *conn)
+ struct sock *sk = conn->sk;
+ struct hci_ev_le_big_sync_estabilished *ev = NULL;
+ struct hci_ev_le_pa_sync_established *ev2 = NULL;
++ struct hci_evt_le_big_info_adv_report *ev3 = NULL;
+ struct hci_conn *hcon;
+
+ BT_DBG("conn %p", conn);
+@@ -1603,14 +1604,20 @@ static void iso_conn_ready(struct iso_conn *conn)
+ parent = iso_get_sock_listen(&hcon->src,
+ &hcon->dst,
+ iso_match_big, ev);
+- } else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags) ||
+- test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) {
++ } else if (test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) {
+ ev2 = hci_recv_event_data(hcon->hdev,
+ HCI_EV_LE_PA_SYNC_ESTABLISHED);
+ if (ev2)
+ parent = iso_get_sock_listen(&hcon->src,
+ &hcon->dst,
+ iso_match_sid, ev2);
++ } else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) {
++ ev3 = hci_recv_event_data(hcon->hdev,
++ HCI_EVT_LE_BIG_INFO_ADV_REPORT);
++ if (ev3)
++ parent = iso_get_sock_listen(&hcon->src,
++ &hcon->dst,
++ iso_match_sync_handle, ev3);
+ }
+
+ if (!parent)
+@@ -1650,11 +1657,13 @@ static void iso_conn_ready(struct iso_conn *conn)
+ hcon->sync_handle = iso_pi(parent)->sync_handle;
+ }
+
+- if (ev2 && !ev2->status) {
+- iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle;
++ if (ev3) {
+ iso_pi(sk)->qos = iso_pi(parent)->qos;
++ iso_pi(sk)->qos.bcast.encryption = ev3->encryption;
++ hcon->iso_qos = iso_pi(sk)->qos;
+ iso_pi(sk)->bc_num_bis = iso_pi(parent)->bc_num_bis;
+ memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis, ISO_MAX_NUM_BIS);
++ set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags);
+ }
+
+ bacpy(&iso_pi(sk)->dst, &hcon->dst);
+diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
+index 71056ee847736..0fcf357ea7ad3 100644
+--- a/net/bridge/netfilter/nf_conntrack_bridge.c
++++ b/net/bridge/netfilter/nf_conntrack_bridge.c
+@@ -37,7 +37,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
+ ktime_t tstamp = skb->tstamp;
+ struct ip_frag_state state;
+ struct iphdr *iph;
+- int err;
++ int err = 0;
+
+ /* for offloaded checksums cleanup checksum before fragmentation */
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 9f3f8930c6914..9bf90b2a75b6a 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10050,6 +10050,54 @@ void netif_tx_stop_all_queues(struct net_device *dev)
+ }
+ EXPORT_SYMBOL(netif_tx_stop_all_queues);
+
++static int netdev_do_alloc_pcpu_stats(struct net_device *dev)
++{
++ void __percpu *v;
++
++ /* Drivers implementing ndo_get_peer_dev must support tstat
++ * accounting, so that skb_do_redirect() can bump the dev's
++ * RX stats upon network namespace switch.
++ */
++ if (dev->netdev_ops->ndo_get_peer_dev &&
++ dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS)
++ return -EOPNOTSUPP;
++
++ switch (dev->pcpu_stat_type) {
++ case NETDEV_PCPU_STAT_NONE:
++ return 0;
++ case NETDEV_PCPU_STAT_LSTATS:
++ v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
++ break;
++ case NETDEV_PCPU_STAT_TSTATS:
++ v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
++ break;
++ case NETDEV_PCPU_STAT_DSTATS:
++ v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return v ? 0 : -ENOMEM;
++}
++
++static void netdev_do_free_pcpu_stats(struct net_device *dev)
++{
++ switch (dev->pcpu_stat_type) {
++ case NETDEV_PCPU_STAT_NONE:
++ return;
++ case NETDEV_PCPU_STAT_LSTATS:
++ free_percpu(dev->lstats);
++ break;
++ case NETDEV_PCPU_STAT_TSTATS:
++ free_percpu(dev->tstats);
++ break;
++ case NETDEV_PCPU_STAT_DSTATS:
++ free_percpu(dev->dstats);
++ break;
++ }
++}
++
+ /**
+ * register_netdevice() - register a network device
+ * @dev: device to register
+@@ -10110,9 +10158,13 @@ int register_netdevice(struct net_device *dev)
+ goto err_uninit;
+ }
+
++ ret = netdev_do_alloc_pcpu_stats(dev);
++ if (ret)
++ goto err_uninit;
++
+ ret = dev_index_reserve(net, dev->ifindex);
+ if (ret < 0)
+- goto err_uninit;
++ goto err_free_pcpu;
+ dev->ifindex = ret;
+
+ /* Transfer changeable features to wanted_features and enable
+@@ -10218,6 +10270,8 @@ err_uninit_notify:
+ call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
+ err_ifindex_release:
+ dev_index_release(net, dev->ifindex);
++err_free_pcpu:
++ netdev_do_free_pcpu_stats(dev);
+ err_uninit:
+ if (dev->netdev_ops->ndo_uninit)
+ dev->netdev_ops->ndo_uninit(dev);
+@@ -10470,6 +10524,7 @@ void netdev_run_todo(void)
+ WARN_ON(rcu_access_pointer(dev->ip_ptr));
+ WARN_ON(rcu_access_pointer(dev->ip6_ptr));
+
++ netdev_do_free_pcpu_stats(dev);
+ if (dev->priv_destructor)
+ dev->priv_destructor(dev);
+ if (dev->needs_free_netdev)
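/*
 * Editor's sketch (not part of the patch): with the net/core/dev.c
 * hunks above, the core allocates and frees the per-CPU stats chosen
 * by dev->pcpu_stat_type, so a driver only declares which flavor it
 * uses before registration. A hypothetical driver setup:
 */
#include <linux/netdevice.h>

static void example_setup(struct net_device *dev)
{
	/* core allocates dev->tstats in register_netdevice() and
	 * frees it from netdev_run_todo() after unregistration
	 */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}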
+diff --git a/net/core/filter.c b/net/core/filter.c
+index a094694899c99..b149a165c405c 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2489,6 +2489,7 @@ int skb_do_redirect(struct sk_buff *skb)
+ net_eq(net, dev_net(dev))))
+ goto out_drop;
+ skb->dev = dev;
++ dev_sw_netstats_rx_add(dev, skb->len);
+ return -EAGAIN;
+ }
+ return flags & BPF_F_NEIGH ?
+diff --git a/net/core/page_pool.c b/net/core/page_pool.c
+index 77cb75e63aca1..31f923e7b5c40 100644
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -221,8 +221,12 @@ static int page_pool_init(struct page_pool *pool,
+ return -ENOMEM;
+ #endif
+
+- if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
++ if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
++#ifdef CONFIG_PAGE_POOL_STATS
++ free_percpu(pool->recycle_stats);
++#endif
+ return -ENOMEM;
++ }
+
+ atomic_set(&pool->pages_state_release_cnt, 0);
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 4eaf7ed0d1f44..97b4a42e6e347 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4254,6 +4254,7 @@ static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
+ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ unsigned int to, struct ts_config *config)
+ {
++ unsigned int patlen = config->ops->get_pattern_len(config);
+ struct ts_state state;
+ unsigned int ret;
+
+@@ -4265,7 +4266,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
+
+ ret = textsearch_find(config, &state);
+- return (ret <= to - from ? ret : UINT_MAX);
++ return (ret + patlen <= to - from ? ret : UINT_MAX);
+ }
+ EXPORT_SYMBOL(skb_find_text);
+
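/*
 * Editor's note (not part of the patch): the skb_find_text() fix above
 * rejects matches whose final byte falls outside the [from, to) search
 * window; get_pattern_len() is a textsearch op that this same patch
 * presumably adds elsewhere. The bound check in isolation:
 */
#include <stdio.h>

#define NOT_FOUND 0xffffffffu

static unsigned int check(unsigned int ret, unsigned int patlen,
			  unsigned int from, unsigned int to)
{
	/* fixed test: the whole pattern must end inside the window */
	return ret + patlen <= to - from ? ret : NOT_FOUND;
}

int main(void)
{
	/* 4-byte pattern at offset 8 of a 10-byte window: the old
	 * "ret <= to - from" test wrongly accepted this partial match
	 */
	printf("%#x\n", check(8, 4, 0, 10));	/* 0xffffffff */
	printf("%#x\n", check(6, 4, 0, 10));	/* 0x6 */
	return 0;
}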
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 6c31eefbd7778..93ecfceac1bc4 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -826,6 +826,8 @@ static void sk_psock_destroy(struct work_struct *work)
+
+ if (psock->sk_redir)
+ sock_put(psock->sk_redir);
++ if (psock->sk_pair)
++ sock_put(psock->sk_pair);
+ sock_put(psock->sk);
+ kfree(psock);
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 16584e2dd6481..bfaf47b3f3c7c 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -600,7 +600,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
+ INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
+ dst, cookie) == NULL) {
+ sk_tx_queue_clear(sk);
+- sk->sk_dst_pending_confirm = 0;
++ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
+ dst_release(dst);
+ return NULL;
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 69453b936bd55..524b7e581a036 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -629,9 +629,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ if (dccp_parse_options(sk, dreq, skb))
+ goto drop_and_free;
+
+- if (security_inet_conn_request(sk, skb, req))
+- goto drop_and_free;
+-
+ ireq = inet_rsk(req);
+ sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+ sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+@@ -639,6 +636,9 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ ireq->ireq_family = AF_INET;
+ ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if);
+
++ if (security_inet_conn_request(sk, skb, req))
++ goto drop_and_free;
++
+ /*
+ * Step 3: Process LISTEN state
+ *
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index c693a570682fb..6f5a556f4f6d7 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -360,15 +360,15 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ if (dccp_parse_options(sk, dreq, skb))
+ goto drop_and_free;
+
+- if (security_inet_conn_request(sk, skb, req))
+- goto drop_and_free;
+-
+ ireq = inet_rsk(req);
+ ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+ ireq->ireq_family = AF_INET6;
+ ireq->ir_mark = inet_request_mark(sk, skb);
+
++ if (security_inet_conn_request(sk, skb, req))
++ goto drop_and_free;
++
+ if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
+ np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+ np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
+diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
+index 3bbd5afb7b31c..fe3553f60bf39 100644
+--- a/net/ethtool/netlink.c
++++ b/net/ethtool/netlink.c
+@@ -505,6 +505,7 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
+ ret = skb->len;
+ break;
+ }
++ ret = 0;
+ }
+ rtnl_unlock();
+
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index b71dab630a873..80cdc6f6b34c9 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -342,9 +342,7 @@ struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
+ skb = skb_copy_expand(frame->skb_std, 0,
+ skb_tailroom(frame->skb_std) + HSR_HLEN,
+ GFP_ATOMIC);
+- prp_fill_rct(skb, frame, port);
+-
+- return skb;
++ return prp_fill_rct(skb, frame, port);
+ }
+
+ static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 418e5fb58fd3f..d515881d02a6f 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -216,8 +216,10 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
+ int tv = get_random_u32_below(max_delay);
+
+ im->tm_running = 1;
+- if (!mod_timer(&im->timer, jiffies+tv+2))
+- refcount_inc(&im->refcnt);
++ if (refcount_inc_not_zero(&im->refcnt)) {
++ if (mod_timer(&im->timer, jiffies + tv + 2))
++ ip_ma_put(im);
++ }
+ }
+
+ static void igmp_gq_start_timer(struct in_device *in_dev)
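/*
 * Editor's sketch (not part of the patch): the igmp.c hunk above takes
 * the reference *before* arming the timer, and uses
 * refcount_inc_not_zero() so a dying object is never resurrected. If
 * mod_timer() reports the timer was already pending, a reference is
 * already held for it, so the extra one is dropped. The same pattern
 * in generic form (struct obj and obj_put() are hypothetical):
 */
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/timer.h>

struct obj {
	refcount_t refcnt;
	struct timer_list timer;
};

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->refcnt))
		kfree(o);
}

static void obj_start_timer(struct obj *o, unsigned long expires)
{
	if (refcount_inc_not_zero(&o->refcnt)) {
		/* nonzero return: timer was pending and already owns
		 * a reference, so give back the one we just took
		 */
		if (mod_timer(&o->timer, expires))
			obj_put(o);
	}
}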
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 598c1b114d2c2..a532f749e4778 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -751,12 +751,12 @@ int __inet_hash(struct sock *sk, struct sock *osk)
+ if (err)
+ goto unlock;
+ }
++ sock_set_flag(sk, SOCK_RCU_FREE);
+ if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+ sk->sk_family == AF_INET6)
+ __sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
+ else
+ __sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
+- sock_set_flag(sk, SOCK_RCU_FREE);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ unlock:
+ spin_unlock(&ilb2->lock);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index b214b5a2e045f..3bad9aa066db3 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -780,7 +780,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
+ goto reject_redirect;
+ }
+
+- n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
++ n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
+ if (!n)
+ n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
+ if (!IS_ERR(n)) {
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index dc478a0574cbe..3b4dafefb4b03 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -41,7 +41,6 @@ static siphash_aligned_key_t syncookie_secret[2];
+ * requested/supported by the syn/synack exchange.
+ */
+ #define TSBITS 6
+-#define TSMASK (((__u32)1 << TSBITS) - 1)
+
+ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
+ u32 count, int c)
+@@ -62,27 +61,22 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
+ */
+ u64 cookie_init_timestamp(struct request_sock *req, u64 now)
+ {
+- struct inet_request_sock *ireq;
+- u32 ts, ts_now = tcp_ns_to_ts(now);
++ const struct inet_request_sock *ireq = inet_rsk(req);
++ u64 ts, ts_now = tcp_ns_to_ts(now);
+ u32 options = 0;
+
+- ireq = inet_rsk(req);
+-
+ options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK;
+ if (ireq->sack_ok)
+ options |= TS_OPT_SACK;
+ if (ireq->ecn_ok)
+ options |= TS_OPT_ECN;
+
+- ts = ts_now & ~TSMASK;
++ ts = (ts_now >> TSBITS) << TSBITS;
+ ts |= options;
+- if (ts > ts_now) {
+- ts >>= TSBITS;
+- ts--;
+- ts <<= TSBITS;
+- ts |= options;
+- }
+- return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ);
++ if (ts > ts_now)
++ ts -= (1UL << TSBITS);
++
++ return ts * (NSEC_PER_SEC / TCP_TS_HZ);
+ }
+
+
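/*
 * Editor's note (not part of the patch): cookie_init_timestamp() now
 * keeps the timestamp in a u64, so the old 32-bit TSMASK trick is
 * replaced by shift arithmetic: round ts_now down to a multiple of
 * 2^TSBITS, stash the option bits in the low TSBITS, and step back one
 * whole granule if that produced a value in the future. Standalone:
 */
#include <stdint.h>
#include <stdio.h>

#define TSBITS 6

static uint64_t encode(uint64_t ts_now, uint32_t options)
{
	uint64_t ts = (ts_now >> TSBITS) << TSBITS;	/* clear low bits */

	ts |= options;			/* wscale/SACK/ECN option bits */
	if (ts > ts_now)		/* never claim a future timestamp */
		ts -= 1UL << TSBITS;
	return ts;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)encode(1000, 63));	/* 959 */
	printf("%llu\n", (unsigned long long)encode(1000, 7));	/* 967 */
	return 0;
}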
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 804821d6bd4d4..1f9d1d445fb3b 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -6450,22 +6450,23 @@ reset_and_undo:
+
+ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
+ {
++ struct tcp_sock *tp = tcp_sk(sk);
+ struct request_sock *req;
+
+ /* If we are still handling the SYNACK RTO, see if timestamp ECR allows
+ * undo. If peer SACKs triggered fast recovery, we can't undo here.
+ */
+- if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
+- tcp_try_undo_loss(sk, false);
++ if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out)
++ tcp_try_undo_recovery(sk);
+
+ /* Reset rtx states to prevent spurious retransmits_timed_out() */
+- tcp_sk(sk)->retrans_stamp = 0;
++ tp->retrans_stamp = 0;
+ inet_csk(sk)->icsk_retransmits = 0;
+
+ /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
+ * we no longer need req so release it.
+ */
+- req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
++ req = rcu_dereference_protected(tp->fastopen_rsk,
+ lockdep_sock_is_held(sk));
+ reqsk_fastopen_remove(sk, req, false);
+
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index c196759f1d3bd..7aca12c59c184 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -470,11 +470,15 @@ void tcp_init_metrics(struct sock *sk)
+ u32 val, crtt = 0; /* cached RTT scaled by 8 */
+
+ sk_dst_confirm(sk);
++ /* ssthresh may have been reduced unnecessarily during the
++ * 3WHS. Restore it back to its initial default.
++ */
++ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ if (!dst)
+ goto reset;
+
+ rcu_read_lock();
+- tm = tcp_get_metrics(sk, dst, true);
++ tm = tcp_get_metrics(sk, dst, false);
+ if (!tm) {
+ rcu_read_unlock();
+ goto reset;
+@@ -489,11 +493,6 @@ void tcp_init_metrics(struct sock *sk)
+ tp->snd_ssthresh = val;
+ if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
+ tp->snd_ssthresh = tp->snd_cwnd_clamp;
+- } else {
+- /* ssthresh may have been reduced unnecessarily during.
+- * 3WHS. Restore it back to its initial default.
+- */
+- tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ }
+ val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
+ if (val && tp->reordering != val)
+@@ -908,7 +907,7 @@ static void tcp_metrics_flush_all(struct net *net)
+ match = net ? net_eq(tm_net(tm), net) :
+ !refcount_read(&tm_net(tm)->ns.count);
+ if (match) {
+- *pp = tm->tcpm_next;
++ rcu_assign_pointer(*pp, tm->tcpm_next);
+ kfree_rcu(tm, rcu_head);
+ } else {
+ pp = &tm->tcpm_next;
+@@ -949,7 +948,7 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
+ if (addr_same(&tm->tcpm_daddr, &daddr) &&
+ (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
+ net_eq(tm_net(tm), net)) {
+- *pp = tm->tcpm_next;
++ rcu_assign_pointer(*pp, tm->tcpm_next);
+ kfree_rcu(tm, rcu_head);
+ found = true;
+ } else {
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index f0723460753c5..9ccfdc825004d 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1331,7 +1331,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
+ refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+
+- skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
++ skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));
+
+ /* Build TCP header and checksum it. */
+ th = (struct tcphdr *)skb->data;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index f39b9c8445808..c3ff984b63547 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -714,7 +714,7 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
+ iph->saddr, uh->source, skb->dev->ifindex,
+ inet_sdif(skb), udptable, NULL);
+
+- if (!sk || udp_sk(sk)->encap_type) {
++ if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
+ /* No socket for error: try tunnels before discarding */
+ if (static_branch_unlikely(&udp_encap_needed_key)) {
+ sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
+@@ -1051,7 +1051,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ u8 tos, scope;
+ __be16 dport;
+ int err, is_udplite = IS_UDPLITE(sk);
+- int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
++ int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
+ int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+ struct sk_buff *skb;
+ struct ip_options_data opt_copy;
+@@ -1315,11 +1315,11 @@ void udp_splice_eof(struct socket *sock)
+ struct sock *sk = sock->sk;
+ struct udp_sock *up = udp_sk(sk);
+
+- if (!up->pending || READ_ONCE(up->corkflag))
++ if (!up->pending || udp_test_bit(CORK, sk))
+ return;
+
+ lock_sock(sk);
+- if (up->pending && !READ_ONCE(up->corkflag))
++ if (up->pending && !udp_test_bit(CORK, sk))
+ udp_push_pending_frames(sk);
+ release_sock(sk);
+ }
+@@ -1868,7 +1868,7 @@ try_again:
+ (struct sockaddr *)sin);
+ }
+
+- if (udp_sk(sk)->gro_enabled)
++ if (udp_test_bit(GRO_ENABLED, sk))
+ udp_cmsg_recv(msg, sk, skb);
+
+ if (inet_cmsg_flags(inet))
+@@ -2081,7 +2081,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ }
+ nf_reset_ct(skb);
+
+- if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
++ if (static_branch_unlikely(&udp_encap_needed_key) &&
++ READ_ONCE(up->encap_type)) {
+ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+
+ /*
+@@ -2119,7 +2120,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ /*
+ * UDP-Lite specific tests, ignored on UDP sockets
+ */
+- if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
++ if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
++ u16 pcrlen = READ_ONCE(up->pcrlen);
+
+ /*
+ * MIB statistics other than incrementing the error count are
+@@ -2132,7 +2134,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ * delivery of packets with coverage values less than a value
+ * provided by the application."
+ */
+- if (up->pcrlen == 0) { /* full coverage was set */
++ if (pcrlen == 0) { /* full coverage was set */
+ net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
+ UDP_SKB_CB(skb)->cscov, skb->len);
+ goto drop;
+@@ -2143,9 +2145,9 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ * that it wants x while sender emits packets of smaller size y.
+ * Therefore the above ...()->partial_cov statement is essential.
+ */
+- if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
++ if (UDP_SKB_CB(skb)->cscov < pcrlen) {
+ net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
+- UDP_SKB_CB(skb)->cscov, up->pcrlen);
++ UDP_SKB_CB(skb)->cscov, pcrlen);
+ goto drop;
+ }
+ }
+@@ -2618,7 +2620,7 @@ void udp_destroy_sock(struct sock *sk)
+ if (encap_destroy)
+ encap_destroy(sk);
+ }
+- if (up->encap_enabled)
++ if (udp_test_bit(ENCAP_ENABLED, sk))
+ static_branch_dec(&udp_encap_needed_key);
+ }
+ }
+@@ -2658,9 +2660,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ switch (optname) {
+ case UDP_CORK:
+ if (val != 0) {
+- WRITE_ONCE(up->corkflag, 1);
++ udp_set_bit(CORK, sk);
+ } else {
+- WRITE_ONCE(up->corkflag, 0);
++ udp_clear_bit(CORK, sk);
+ lock_sock(sk);
+ push_pending_frames(sk);
+ release_sock(sk);
+@@ -2675,17 +2677,17 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ case UDP_ENCAP_ESPINUDP_NON_IKE:
+ #if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+- up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv;
++ WRITE_ONCE(up->encap_rcv,
++ ipv6_stub->xfrm6_udp_encap_rcv);
+ else
+ #endif
+- up->encap_rcv = xfrm4_udp_encap_rcv;
++ WRITE_ONCE(up->encap_rcv,
++ xfrm4_udp_encap_rcv);
+ #endif
+ fallthrough;
+ case UDP_ENCAP_L2TPINUDP:
+- up->encap_type = val;
+- lock_sock(sk);
+- udp_tunnel_encap_enable(sk->sk_socket);
+- release_sock(sk);
++ WRITE_ONCE(up->encap_type, val);
++ udp_tunnel_encap_enable(sk);
+ break;
+ default:
+ err = -ENOPROTOOPT;
+@@ -2694,11 +2696,11 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ break;
+
+ case UDP_NO_CHECK6_TX:
+- up->no_check6_tx = valbool;
++ udp_set_no_check6_tx(sk, valbool);
+ break;
+
+ case UDP_NO_CHECK6_RX:
+- up->no_check6_rx = valbool;
++ udp_set_no_check6_rx(sk, valbool);
+ break;
+
+ case UDP_SEGMENT:
+@@ -2708,14 +2710,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ break;
+
+ case UDP_GRO:
+- lock_sock(sk);
+
+ /* when enabling GRO, accept the related GSO packet type */
+ if (valbool)
+- udp_tunnel_encap_enable(sk->sk_socket);
+- up->gro_enabled = valbool;
+- up->accept_udp_l4 = valbool;
+- release_sock(sk);
++ udp_tunnel_encap_enable(sk);
++ udp_assign_bit(GRO_ENABLED, sk, valbool);
++ udp_assign_bit(ACCEPT_L4, sk, valbool);
+ break;
+
+ /*
+@@ -2730,8 +2730,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ val = 8;
+ else if (val > USHRT_MAX)
+ val = USHRT_MAX;
+- up->pcslen = val;
+- up->pcflag |= UDPLITE_SEND_CC;
++ WRITE_ONCE(up->pcslen, val);
++ udp_set_bit(UDPLITE_SEND_CC, sk);
+ break;
+
+ /* The receiver specifies a minimum checksum coverage value. To make
+@@ -2744,8 +2744,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ val = 8;
+ else if (val > USHRT_MAX)
+ val = USHRT_MAX;
+- up->pcrlen = val;
+- up->pcflag |= UDPLITE_RECV_CC;
++ WRITE_ONCE(up->pcrlen, val);
++ udp_set_bit(UDPLITE_RECV_CC, sk);
+ break;
+
+ default:
+@@ -2783,19 +2783,19 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+
+ switch (optname) {
+ case UDP_CORK:
+- val = READ_ONCE(up->corkflag);
++ val = udp_test_bit(CORK, sk);
+ break;
+
+ case UDP_ENCAP:
+- val = up->encap_type;
++ val = READ_ONCE(up->encap_type);
+ break;
+
+ case UDP_NO_CHECK6_TX:
+- val = up->no_check6_tx;
++ val = udp_get_no_check6_tx(sk);
+ break;
+
+ case UDP_NO_CHECK6_RX:
+- val = up->no_check6_rx;
++ val = udp_get_no_check6_rx(sk);
+ break;
+
+ case UDP_SEGMENT:
+@@ -2803,17 +2803,17 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+ break;
+
+ case UDP_GRO:
+- val = up->gro_enabled;
++ val = udp_test_bit(GRO_ENABLED, sk);
+ break;
+
+ /* The following two cannot be changed on UDP sockets, the return is
+ * always 0 (which corresponds to the full checksum coverage of UDP). */
+ case UDPLITE_SEND_CSCOV:
+- val = up->pcslen;
++ val = READ_ONCE(up->pcslen);
+ break;
+
+ case UDPLITE_RECV_CSCOV:
+- val = up->pcrlen;
++ val = READ_ONCE(up->pcrlen);
+ break;
+
+ default:
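
A note on the udp.c hunks above: they all follow one pattern. Boolean fields such as corkflag, gro_enabled, accept_udp_l4 and the UDP-Lite pcflag move into a single shared flags word manipulated through udp_test_bit(), udp_set_bit(), udp_clear_bit() and udp_assign_bit(), so simple toggles no longer need lock_sock()/release_sock() pairs (visible in the UDP_GRO and UDP_ENCAP cases). Below is a minimal userspace sketch of that atomic-bitmask idiom using C11 atomics; the struct and helper names are illustrative, not the kernel implementation:

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        /* All boolean options share one atomic word. */
        struct fake_udp_sock {
                atomic_ulong flags;
        };

        enum { FLAG_CORK, FLAG_GRO_ENABLED, FLAG_UDPLITE_RECV_CC };

        static void udp_set_bit(struct fake_udp_sock *sk, int bit)
        {
                atomic_fetch_or(&sk->flags, 1UL << bit);
        }

        static void udp_clear_bit(struct fake_udp_sock *sk, int bit)
        {
                atomic_fetch_and(&sk->flags, ~(1UL << bit));
        }

        static bool udp_test_bit(struct fake_udp_sock *sk, int bit)
        {
                return atomic_load(&sk->flags) & (1UL << bit);
        }

        /* models udp_assign_bit(): set or clear depending on a value */
        static void udp_assign_bit(struct fake_udp_sock *sk, int bit, bool val)
        {
                if (val)
                        udp_set_bit(sk, bit);
                else
                        udp_clear_bit(sk, bit);
        }

        int main(void)
        {
                struct fake_udp_sock sk = { 0 };

                udp_assign_bit(&sk, FLAG_CORK, true);
                printf("cork=%d\n", udp_test_bit(&sk, FLAG_CORK));
                udp_clear_bit(&sk, FLAG_CORK);
                printf("cork=%d\n", udp_test_bit(&sk, FLAG_CORK));
                return 0;
        }

Each helper is a single atomic read-modify-write, so readers like udp_sendmsg() and udp_splice_eof() can test CORK concurrently with setsockopt() without tearing, which is what makes dropping the socket lock safe here.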
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 0f46b3c2e4ac5..6c95d28d0c4a7 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -557,10 +557,10 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ NAPI_GRO_CB(skb)->is_flist = 0;
+ if (!sk || !udp_sk(sk)->gro_receive) {
+ if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
+- NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
++ NAPI_GRO_CB(skb)->is_flist = sk ? !udp_test_bit(GRO_ENABLED, sk) : 1;
+
+ if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
+- (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
++ (sk && udp_test_bit(GRO_ENABLED, sk)) || NAPI_GRO_CB(skb)->is_flist)
+ return call_gro_receive(udp_gro_receive_segment, head, skb);
+
+ /* no GRO, be sure flush the current packet */
+diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
+index 9b18f371af0d4..1e7e4aecdc48a 100644
+--- a/net/ipv4/udp_tunnel_core.c
++++ b/net/ipv4/udp_tunnel_core.c
+@@ -78,7 +78,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
+ udp_sk(sk)->gro_receive = cfg->gro_receive;
+ udp_sk(sk)->gro_complete = cfg->gro_complete;
+
+- udp_tunnel_encap_enable(sock);
++ udp_tunnel_encap_enable(sk);
+ }
+ EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
+
+diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
+index 39ecdad1b50ce..af37af3ab727b 100644
+--- a/net/ipv4/udplite.c
++++ b/net/ipv4/udplite.c
+@@ -21,7 +21,6 @@ EXPORT_SYMBOL(udplite_table);
+ static int udplite_sk_init(struct sock *sk)
+ {
+ udp_init_sock(sk);
+- udp_sk(sk)->pcflag = UDPLITE_BIT;
+ pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, "
+ "please contact the netdev mailing list\n");
+ return 0;
+diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
+index eac206a290d05..183f6dc372429 100644
+--- a/net/ipv4/xfrm4_input.c
++++ b/net/ipv4/xfrm4_input.c
+@@ -85,11 +85,11 @@ int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
+ struct udphdr *uh;
+ struct iphdr *iph;
+ int iphlen, len;
+-
+ __u8 *udpdata;
+ __be32 *udpdata32;
+- __u16 encap_type = up->encap_type;
++ u16 encap_type;
+
++ encap_type = READ_ONCE(up->encap_type);
+ /* if this is not encapsulated socket, then just return now */
+ if (!encap_type)
+ return 1;
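
The READ_ONCE()/WRITE_ONCE() additions in these hunks (encap_type here, pcrlen and pcslen in udp.c) serve the same lockless-update scheme: once setsockopt() stops taking the socket lock, every cross-thread access to these small fields needs a marked access so the compiler emits exactly one untorn load or store. A rough userspace analogue using C11 relaxed atomics follows; it is only a model, since the kernel macros are built on volatile casts rather than _Atomic types:

        #include <stdatomic.h>
        #include <stdio.h>

        static _Atomic unsigned short encap_type;

        /* setsockopt() side: a single marked store, no lock held */
        static void set_encap(unsigned short val)
        {
                atomic_store_explicit(&encap_type, val, memory_order_relaxed);
        }

        /* receive side: snapshot once, then use the snapshot, as
         * xfrm4_udp_encap_rcv() now does with its local copy */
        static int rx_is_encapsulated(void)
        {
                unsigned short t = atomic_load_explicit(&encap_type,
                                                        memory_order_relaxed);
                return t != 0;
        }

        int main(void)
        {
                set_encap(2);
                printf("encapsulated: %d\n", rx_is_encapsulated());
                return 0;
        }

The UDP-Lite hunks use the same trick: pcrlen is snapshotted once into a local and reused for both the coverage comparison and the log message, so the two always agree even if setsockopt() races.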
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 54fc4c711f2c5..1121082901b99 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -162,7 +162,13 @@ ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
+ int err;
+
+ skb_mark_not_on_list(segs);
+- err = ip6_fragment(net, sk, segs, ip6_finish_output2);
++ /* Last GSO segment can be smaller than gso_size (and MTU).
++ * Adding a fragment header would produce an "atomic fragment",
++ * which is considered harmful (RFC-8021). Avoid that.
++ */
++ err = segs->len > mtu ?
++ ip6_fragment(net, sk, segs, ip6_finish_output2) :
++ ip6_finish_output2(net, sk, segs);
+ if (err && ret == 0)
+ ret = err;
+ }
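
For context on this ip6_output.c hunk: after software GSO segmentation the final segment is frequently shorter than gso_size, so it may already fit the MTU. Unconditionally running it through ip6_fragment() would attach a fragment header to a packet that is not actually split, producing an "atomic fragment", which RFC 8021 recommends never generating because such fragments enable spoofing-based attacks. A toy illustration of the per-segment decision, with made-up lengths:

        #include <stdio.h>

        int main(void)
        {
                const int mtu = 1280;
                /* hypothetical GSO train; only the tail segment fits */
                const int seg_len[] = { 1400, 1400, 412 };
                unsigned int i;

                for (i = 0; i < sizeof(seg_len) / sizeof(seg_len[0]); i++) {
                        if (seg_len[i] > mtu)
                                printf("seg %u: fragment\n", i);
                        else
                                printf("seg %u: send intact, no atomic fragment\n", i);
                }
                return 0;
        }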
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index 5014aa6634527..8698b49dfc8de 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -180,14 +180,15 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+ treq = tcp_rsk(req);
+ treq->tfo_listener = false;
+
+- if (security_inet_conn_request(sk, skb, req))
+- goto out_free;
+-
+ req->mss = mss;
+ ireq->ir_rmt_port = th->source;
+ ireq->ir_num = ntohs(th->dest);
+ ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
++
++ if (security_inet_conn_request(sk, skb, req))
++ goto out_free;
++
+ if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
+ np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+ np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 86b5d509a4688..f60ba42954352 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -413,7 +413,7 @@ try_again:
+ (struct sockaddr *)sin6);
+ }
+
+- if (udp_sk(sk)->gro_enabled)
++ if (udp_test_bit(GRO_ENABLED, sk))
+ udp_cmsg_recv(msg, sk, skb);
+
+ if (np->rxopt.all)
+@@ -571,7 +571,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
+ inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
+
+- if (!sk || udp_sk(sk)->encap_type) {
++ if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
+ /* No socket for error: try tunnels before discarding */
+ if (static_branch_unlikely(&udpv6_encap_needed_key)) {
+ sk = __udp6_lib_err_encap(net, hdr, offset, uh,
+@@ -688,7 +688,8 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ }
+ nf_reset_ct(skb);
+
+- if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
++ if (static_branch_unlikely(&udpv6_encap_needed_key) &&
++ READ_ONCE(up->encap_type)) {
+ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+
+ /*
+@@ -726,16 +727,17 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ /*
+ * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
+ */
+- if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
++ if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
++ u16 pcrlen = READ_ONCE(up->pcrlen);
+
+- if (up->pcrlen == 0) { /* full coverage was set */
++ if (pcrlen == 0) { /* full coverage was set */
+ net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
+ UDP_SKB_CB(skb)->cscov, skb->len);
+ goto drop;
+ }
+- if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
++ if (UDP_SKB_CB(skb)->cscov < pcrlen) {
+ net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
+- UDP_SKB_CB(skb)->cscov, up->pcrlen);
++ UDP_SKB_CB(skb)->cscov, pcrlen);
+ goto drop;
+ }
+ }
+@@ -858,7 +860,7 @@ start_lookup:
+ /* If zero checksum and no_check is not on for
+ * the socket then skip it.
+ */
+- if (!uh->check && !udp_sk(sk)->no_check6_rx)
++ if (!uh->check && !udp_get_no_check6_rx(sk))
+ continue;
+ if (!first) {
+ first = sk;
+@@ -980,7 +982,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
+ udp6_sk_rx_dst_set(sk, dst);
+
+- if (!uh->check && !udp_sk(sk)->no_check6_rx) {
++ if (!uh->check && !udp_get_no_check6_rx(sk)) {
+ if (refcounted)
+ sock_put(sk);
+ goto report_csum_error;
+@@ -1002,7 +1004,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ /* Unicast */
+ sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+ if (sk) {
+- if (!uh->check && !udp_sk(sk)->no_check6_rx)
++ if (!uh->check && !udp_get_no_check6_rx(sk))
+ goto report_csum_error;
+ return udp6_unicast_rcv_skb(sk, skb, uh);
+ }
+@@ -1241,7 +1243,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+- if (udp_sk(sk)->no_check6_tx) {
++ if (udp_get_no_check6_tx(sk)) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -1262,7 +1264,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+
+ if (is_udplite)
+ csum = udplite_csum(skb);
+- else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */
++ else if (udp_get_no_check6_tx(sk)) { /* UDP csum disabled */
+ skb->ip_summed = CHECKSUM_NONE;
+ goto send;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
+@@ -1332,7 +1334,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ int addr_len = msg->msg_namelen;
+ bool connected = false;
+ int ulen = len;
+- int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
++ int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
+ int err;
+ int is_udplite = IS_UDPLITE(sk);
+ int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+@@ -1644,11 +1646,11 @@ static void udpv6_splice_eof(struct socket *sock)
+ struct sock *sk = sock->sk;
+ struct udp_sock *up = udp_sk(sk);
+
+- if (!up->pending || READ_ONCE(up->corkflag))
++ if (!up->pending || udp_test_bit(CORK, sk))
+ return;
+
+ lock_sock(sk);
+- if (up->pending && !READ_ONCE(up->corkflag))
++ if (up->pending && !udp_test_bit(CORK, sk))
+ udp_v6_push_pending_frames(sk);
+ release_sock(sk);
+ }
+@@ -1670,7 +1672,7 @@ void udpv6_destroy_sock(struct sock *sk)
+ if (encap_destroy)
+ encap_destroy(sk);
+ }
+- if (up->encap_enabled) {
++ if (udp_test_bit(ENCAP_ENABLED, sk)) {
+ static_branch_dec(&udpv6_encap_needed_key);
+ udp_encap_disable();
+ }
+diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
+index 267d491e97075..a60bec9b14f14 100644
+--- a/net/ipv6/udplite.c
++++ b/net/ipv6/udplite.c
+@@ -17,7 +17,6 @@
+ static int udplitev6_sk_init(struct sock *sk)
+ {
+ udpv6_init_sock(sk);
+- udp_sk(sk)->pcflag = UDPLITE_BIT;
+ pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, "
+ "please contact the netdev mailing list\n");
+ return 0;
+diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
+index 4907ab241d6be..4156387248e40 100644
+--- a/net/ipv6/xfrm6_input.c
++++ b/net/ipv6/xfrm6_input.c
+@@ -81,14 +81,14 @@ int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
+ struct ipv6hdr *ip6h;
+ int len;
+ int ip6hlen = sizeof(struct ipv6hdr);
+-
+ __u8 *udpdata;
+ __be32 *udpdata32;
+- __u16 encap_type = up->encap_type;
++ u16 encap_type;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ return xfrm4_udp_encap_rcv(sk, skb);
+
++ encap_type = READ_ONCE(up->encap_type);
+ /* if this is not encapsulated socket, then just return now */
+ if (!encap_type)
+ return 1;
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 03608d3ded4b8..8d21ff25f1602 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1139,9 +1139,9 @@ static void l2tp_tunnel_destruct(struct sock *sk)
+ switch (tunnel->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ /* No longer an encapsulation socket. See net/ipv4/udp.c */
+- (udp_sk(sk))->encap_type = 0;
+- (udp_sk(sk))->encap_rcv = NULL;
+- (udp_sk(sk))->encap_destroy = NULL;
++ WRITE_ONCE(udp_sk(sk)->encap_type, 0);
++ udp_sk(sk)->encap_rcv = NULL;
++ udp_sk(sk)->encap_destroy = NULL;
+ break;
+ case L2TP_ENCAPTYPE_IP:
+ break;
+diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
+index 7cac441862e21..51bccfb00a9cd 100644
+--- a/net/llc/llc_input.c
++++ b/net/llc/llc_input.c
+@@ -127,8 +127,14 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
+ skb->transport_header += llc_len;
+ skb_pull(skb, llc_len);
+ if (skb->protocol == htons(ETH_P_802_2)) {
+- __be16 pdulen = eth_hdr(skb)->h_proto;
+- s32 data_size = ntohs(pdulen) - llc_len;
++ __be16 pdulen;
++ s32 data_size;
++
++ if (skb->mac_len < ETH_HLEN)
++ return 0;
++
++ pdulen = eth_hdr(skb)->h_proto;
++ data_size = ntohs(pdulen) - llc_len;
+
+ if (data_size < 0 ||
+ !pskb_may_pull(skb, data_size))
+diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
+index 79d1cef8f15a9..06fb8e6944b06 100644
+--- a/net/llc/llc_s_ac.c
++++ b/net/llc/llc_s_ac.c
+@@ -153,6 +153,9 @@ int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
+ int rc = 1;
+ u32 data_size;
+
++ if (skb->mac_len < ETH_HLEN)
++ return 1;
++
+ llc_pdu_decode_sa(skb, mac_da);
+ llc_pdu_decode_da(skb, mac_sa);
+ llc_pdu_decode_ssap(skb, &dsap);
+diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
+index 05c6ae0920534..f506542925109 100644
+--- a/net/llc/llc_station.c
++++ b/net/llc/llc_station.c
+@@ -76,6 +76,9 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
+ u32 data_size;
+ struct sk_buff *nskb;
+
++ if (skb->mac_len < ETH_HLEN)
++ goto out;
++
+ /* The test request command is type U (llc_len = 3) */
+ data_size = ntohs(eth_hdr(skb)->h_proto) - 3;
+ nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
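
The three llc_* hunks above add the same guard: before ntohs(eth_hdr(skb)->h_proto) is trusted as an 802.3 length field, the frame must actually contain a complete Ethernet header, i.e. skb->mac_len >= ETH_HLEN, otherwise a runt frame leads to reading past the captured header. The general shape of the fix, proving a header is present before dereferencing it, in a standalone sketch (the struct and constants are illustrative):

        #include <stdint.h>
        #include <stdio.h>

        #define ETH_HLEN 14

        struct frame {
                const uint8_t *data;
                size_t mac_len;         /* MAC header bytes actually present */
        };

        /* Return the 802.3 length field, or -1 for a runt frame. */
        static int frame_pdulen(const struct frame *f)
        {
                if (f->mac_len < ETH_HLEN)
                        return -1;      /* too short to hold h_proto */
                /* h_proto lives at offset 12, big-endian on the wire */
                return (f->data[12] << 8) | f->data[13];
        }

        int main(void)
        {
                uint8_t hdr[ETH_HLEN] = { [12] = 0x00, [13] = 0x2e };
                struct frame ok = { hdr, sizeof(hdr) };
                struct frame runt = { hdr, 6 };

                printf("ok=%d runt=%d\n", frame_pdulen(&ok), frame_pdulen(&runt));
                return 0;
        }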
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 0e3a1753a51c6..715da615f0359 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -3121,6 +3121,10 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy,
+ else
+ *dbm = sdata->vif.bss_conf.txpower;
+
++ /* INT_MIN indicates no power level was set yet */
++ if (*dbm == INT_MIN)
++ return -EINVAL;
++
+ return 0;
+ }
+
+diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
+index 30cd0c905a24f..aa37a1410f377 100644
+--- a/net/mac80211/driver-ops.c
++++ b/net/mac80211/driver-ops.c
+@@ -510,10 +510,13 @@ int drv_change_vif_links(struct ieee80211_local *local,
+ if (ret)
+ return ret;
+
+- for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+- link = rcu_access_pointer(sdata->link[link_id]);
++ if (!local->in_reconfig) {
++ for_each_set_bit(link_id, &links_to_add,
++ IEEE80211_MLD_MAX_NUM_LINKS) {
++ link = rcu_access_pointer(sdata->link[link_id]);
+
+- ieee80211_link_debugfs_drv_add(link);
++ ieee80211_link_debugfs_drv_add(link);
++ }
+ }
+
+ return 0;
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index c4505593ba7a6..2bc2fbe58f944 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -23,7 +23,7 @@
+ static inline struct ieee80211_sub_if_data *
+ get_bss_sdata(struct ieee80211_sub_if_data *sdata)
+ {
+- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
++ if (sdata && sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
+ u.ap);
+
+@@ -638,10 +638,13 @@ static inline void drv_flush(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u32 queues, bool drop)
+ {
+- struct ieee80211_vif *vif = sdata ? &sdata->vif : NULL;
++ struct ieee80211_vif *vif;
+
+ might_sleep();
+
++ sdata = get_bss_sdata(sdata);
++ vif = sdata ? &sdata->vif : NULL;
++
+ if (sdata && !check_sdata_in_driver(sdata))
+ return;
+
+@@ -657,6 +660,8 @@ static inline void drv_flush_sta(struct ieee80211_local *local,
+ {
+ might_sleep();
+
++ sdata = get_bss_sdata(sdata);
++
+ if (sdata && !check_sdata_in_driver(sdata))
+ return;
+
+diff --git a/net/mac80211/drop.h b/net/mac80211/drop.h
+index 49dc809cab290..1570fac8411f4 100644
+--- a/net/mac80211/drop.h
++++ b/net/mac80211/drop.h
+@@ -53,4 +53,7 @@ enum mac80211_drop_reason {
+ #undef DEF
+ };
+
++#define RX_RES_IS_UNUSABLE(result) \
++ (((__force u32)(result) & SKB_DROP_REASON_SUBSYS_MASK) == ___RX_DROP_UNUSABLE)
++
+ #endif /* MAC80211_DROP_H */
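
RX_RES_IS_UNUSABLE exists because rx result codes pack a reason value into a masked bit-field; the old test in the rx.c hunk further down, `result & RX_DROP_UNUSABLE`, also matched unrelated codes that merely share set bits with RX_DROP_UNUSABLE. Comparing the masked field for equality is the correct form. A compact demonstration with an invented encoding (the real mask is SKB_DROP_REASON_SUBSYS_MASK):

        #include <stdint.h>
        #include <stdio.h>

        #define SUBSYS_MASK     0x00ff0000u
        #define DROP_UNUSABLE   0x00010000u     /* invented code */
        #define DROP_MONITOR    0x00030000u     /* shares bit 16 */

        static int is_unusable(uint32_t result)
        {
                return (result & SUBSYS_MASK) == DROP_UNUSABLE;
        }

        int main(void)
        {
                printf("unusable:   %d\n", is_unusable(DROP_UNUSABLE | 0x7));
                printf("monitor:    %d\n", is_unusable(DROP_MONITOR | 0x7));
                /* the buggy AND test wrongly matches MONITOR too */
                printf("buggy test: %d\n",
                       !!((DROP_MONITOR | 0x7) & DROP_UNUSABLE));
                return 0;
        }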
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 98ef1fe1226e7..07beb72ddd25a 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1406,7 +1406,7 @@ struct ieee80211_local {
+ /* wowlan is enabled -- don't reconfig on resume */
+ bool wowlan;
+
+- struct work_struct radar_detected_work;
++ struct wiphy_work radar_detected_work;
+
+ /* number of RX chains the hardware has */
+ u8 rx_chains;
+@@ -1483,14 +1483,14 @@ struct ieee80211_local {
+ int hw_scan_ies_bufsize;
+ struct cfg80211_scan_info scan_info;
+
+- struct work_struct sched_scan_stopped_work;
++ struct wiphy_work sched_scan_stopped_work;
+ struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
+ struct cfg80211_sched_scan_request __rcu *sched_scan_req;
+ u8 scan_addr[ETH_ALEN];
+
+ unsigned long leave_oper_channel_time;
+ enum mac80211_scan_state next_scan_state;
+- struct delayed_work scan_work;
++ struct wiphy_delayed_work scan_work;
+ struct ieee80211_sub_if_data __rcu *scan_sdata;
+ /* For backward compatibility only -- do not use */
+ struct cfg80211_chan_def _oper_chandef;
+@@ -1583,9 +1583,9 @@ struct ieee80211_local {
+ /*
+ * Remain-on-channel support
+ */
+- struct delayed_work roc_work;
++ struct wiphy_delayed_work roc_work;
+ struct list_head roc_list;
+- struct work_struct hw_roc_start, hw_roc_done;
++ struct wiphy_work hw_roc_start, hw_roc_done;
+ unsigned long hw_roc_start_time;
+ u64 roc_cookie_counter;
+
+@@ -1929,7 +1929,7 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata,
+ u64 *changed);
+
+ /* scan/BSS handling */
+-void ieee80211_scan_work(struct work_struct *work);
++void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work);
+ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
+ const u8 *ssid, u8 ssid_len,
+ struct ieee80211_channel **channels,
+@@ -1962,7 +1962,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_sched_scan_request *req);
+ int ieee80211_request_sched_scan_stop(struct ieee80211_local *local);
+ void ieee80211_sched_scan_end(struct ieee80211_local *local);
+-void ieee80211_sched_scan_stopped_work(struct work_struct *work);
++void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy,
++ struct wiphy_work *work);
+
+ /* off-channel/mgmt-tx */
+ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
+@@ -2566,7 +2567,8 @@ bool ieee80211_is_radar_required(struct ieee80211_local *local);
+
+ void ieee80211_dfs_cac_timer_work(struct work_struct *work);
+ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
+-void ieee80211_dfs_radar_detected_work(struct work_struct *work);
++void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
++ struct wiphy_work *work);
+ int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_csa_settings *csa_settings);
+
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index be586bc0b5b7d..6e3bfb46af44d 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -691,7 +691,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
+ ieee80211_recalc_ps(local);
+
+ if (cancel_scan)
+- flush_delayed_work(&local->scan_work);
++ wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
+
+ if (local->open_count == 0) {
+ ieee80211_stop_device(local);
+diff --git a/net/mac80211/link.c b/net/mac80211/link.c
+index 6148208b320e3..16cbaea93fc32 100644
+--- a/net/mac80211/link.c
++++ b/net/mac80211/link.c
+@@ -195,7 +195,7 @@ static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata,
+
+ memset(to_free, 0, sizeof(links));
+
+- if (old_links == new_links)
++ if (old_links == new_links && dormant_links == sdata->vif.dormant_links)
+ return 0;
+
+ /* if there were no old links, need to clear the pointers to deflink */
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 24315d7b31263..4548f84451095 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -335,10 +335,7 @@ static void ieee80211_restart_work(struct work_struct *work)
+ struct ieee80211_sub_if_data *sdata;
+ int ret;
+
+- /* wait for scan work complete */
+ flush_workqueue(local->workqueue);
+- flush_work(&local->sched_scan_stopped_work);
+- flush_work(&local->radar_detected_work);
+
+ rtnl_lock();
+ /* we might do interface manipulations, so need both */
+@@ -379,8 +376,8 @@ static void ieee80211_restart_work(struct work_struct *work)
+ ieee80211_scan_cancel(local);
+
+ /* make sure any new ROC will consider local->in_reconfig */
+- flush_delayed_work(&local->roc_work);
+- flush_work(&local->hw_roc_done);
++ wiphy_delayed_work_flush(local->hw.wiphy, &local->roc_work);
++ wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done);
+
+ /* wait for all packet processing to be done */
+ synchronize_net();
+@@ -809,12 +806,12 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ INIT_LIST_HEAD(&local->chanctx_list);
+ mutex_init(&local->chanctx_mtx);
+
+- INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
++ wiphy_delayed_work_init(&local->scan_work, ieee80211_scan_work);
+
+ INIT_WORK(&local->restart_work, ieee80211_restart_work);
+
+- INIT_WORK(&local->radar_detected_work,
+- ieee80211_dfs_radar_detected_work);
++ wiphy_work_init(&local->radar_detected_work,
++ ieee80211_dfs_radar_detected_work);
+
+ INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
+ local->smps_mode = IEEE80211_SMPS_OFF;
+@@ -825,8 +822,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ ieee80211_dynamic_ps_disable_work);
+ timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0);
+
+- INIT_WORK(&local->sched_scan_stopped_work,
+- ieee80211_sched_scan_stopped_work);
++ wiphy_work_init(&local->sched_scan_stopped_work,
++ ieee80211_sched_scan_stopped_work);
+
+ spin_lock_init(&local->ack_status_lock);
+ idr_init(&local->ack_status_frames);
+@@ -1482,13 +1479,15 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
+ */
+ ieee80211_remove_interfaces(local);
+
++ wiphy_lock(local->hw.wiphy);
++ wiphy_delayed_work_cancel(local->hw.wiphy, &local->roc_work);
++ wiphy_work_cancel(local->hw.wiphy, &local->sched_scan_stopped_work);
++ wiphy_work_cancel(local->hw.wiphy, &local->radar_detected_work);
++ wiphy_unlock(local->hw.wiphy);
+ rtnl_unlock();
+
+- cancel_delayed_work_sync(&local->roc_work);
+ cancel_work_sync(&local->restart_work);
+ cancel_work_sync(&local->reconfig_filter);
+- flush_work(&local->sched_scan_stopped_work);
+- flush_work(&local->radar_detected_work);
+
+ ieee80211_clear_tx_pending(local);
+ rate_control_deinitialize(local);
+diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
+index d32e304eeb4ba..3e52aaa57b1fc 100644
+--- a/net/mac80211/mesh_pathtbl.c
++++ b/net/mac80211/mesh_pathtbl.c
+@@ -648,7 +648,7 @@ void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
+
+ cache = &sdata->u.mesh.tx_cache;
+ spin_lock_bh(&cache->walk_lock);
+- entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
++ entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params);
+ if (entry)
+ mesh_fast_tx_entry_free(cache, entry);
+ spin_unlock_bh(&cache->walk_lock);
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index cdf991e74ab99..5bedd9cef414d 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -230,7 +230,7 @@ static bool ieee80211_recalc_sw_work(struct ieee80211_local *local,
+ if (dur == LONG_MAX)
+ return false;
+
+- mod_delayed_work(local->workqueue, &local->roc_work, dur);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, dur);
+ return true;
+ }
+
+@@ -258,7 +258,7 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
+ roc->notified = true;
+ }
+
+-static void ieee80211_hw_roc_start(struct work_struct *work)
++static void ieee80211_hw_roc_start(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, hw_roc_start);
+@@ -285,7 +285,7 @@ void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
+
+ trace_api_ready_on_channel(local);
+
+- ieee80211_queue_work(hw, &local->hw_roc_start);
++ wiphy_work_queue(hw->wiphy, &local->hw_roc_start);
+ }
+ EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
+
+@@ -338,7 +338,7 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
+ tmp->started = true;
+ tmp->abort = true;
+ }
+- ieee80211_queue_work(&local->hw, &local->hw_roc_done);
++ wiphy_work_queue(local->hw.wiphy, &local->hw_roc_done);
+ return;
+ }
+
+@@ -368,8 +368,8 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
+ ieee80211_hw_config(local, 0);
+ }
+
+- ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
+- msecs_to_jiffies(min_dur));
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
++ msecs_to_jiffies(min_dur));
+
+ /* tell userspace or send frame(s) */
+ list_for_each_entry(tmp, &local->roc_list, list) {
+@@ -407,8 +407,8 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
+ _ieee80211_start_next_roc(local);
+ } else {
+ /* delay it a bit */
+- ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
+- round_jiffies_relative(HZ/2));
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
++ round_jiffies_relative(HZ / 2));
+ }
+ }
+
+@@ -451,7 +451,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local)
+ }
+ }
+
+-static void ieee80211_roc_work(struct work_struct *work)
++static void ieee80211_roc_work(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, roc_work.work);
+@@ -461,7 +461,7 @@ static void ieee80211_roc_work(struct work_struct *work)
+ mutex_unlock(&local->mtx);
+ }
+
+-static void ieee80211_hw_roc_done(struct work_struct *work)
++static void ieee80211_hw_roc_done(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, hw_roc_done);
+@@ -482,7 +482,7 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
+
+ trace_api_remain_on_channel_expired(local);
+
+- ieee80211_queue_work(hw, &local->hw_roc_done);
++ wiphy_work_queue(hw->wiphy, &local->hw_roc_done);
+ }
+ EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
+
+@@ -586,8 +586,8 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
+ /* if not HW assist, just queue & schedule work */
+ if (!local->ops->remain_on_channel) {
+ list_add_tail(&roc->list, &local->roc_list);
+- ieee80211_queue_delayed_work(&local->hw,
+- &local->roc_work, 0);
++ wiphy_delayed_work_queue(local->hw.wiphy,
++ &local->roc_work, 0);
+ } else {
+ /* otherwise actually kick it off here
+ * (for error handling)
+@@ -695,7 +695,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
+ if (!cookie)
+ return -ENOENT;
+
+- flush_work(&local->hw_roc_start);
++ wiphy_work_flush(local->hw.wiphy, &local->hw_roc_start);
+
+ mutex_lock(&local->mtx);
+ list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+@@ -745,7 +745,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
+ } else {
+ /* go through work struct to return to the operating channel */
+ found->abort = true;
+- mod_delayed_work(local->workqueue, &local->roc_work, 0);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, 0);
+ }
+
+ out_unlock:
+@@ -994,9 +994,9 @@ int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+
+ void ieee80211_roc_setup(struct ieee80211_local *local)
+ {
+- INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
+- INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
+- INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work);
++ wiphy_work_init(&local->hw_roc_start, ieee80211_hw_roc_start);
++ wiphy_work_init(&local->hw_roc_done, ieee80211_hw_roc_done);
++ wiphy_delayed_work_init(&local->roc_work, ieee80211_roc_work);
+ INIT_LIST_HEAD(&local->roc_list);
+ }
+
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 8f6b6f56b65b4..26ca2f5dc52b2 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2112,7 +2112,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
+ /* either the frame has been decrypted or will be dropped */
+ status->flag |= RX_FLAG_DECRYPTED;
+
+- if (unlikely(ieee80211_is_beacon(fc) && (result & RX_DROP_UNUSABLE) &&
++ if (unlikely(ieee80211_is_beacon(fc) && RX_RES_IS_UNUSABLE(result) &&
+ rx->sdata->dev))
+ cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
+ skb->data, skb->len);
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 0805aa8603c61..68ec2124c3db5 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -274,8 +274,8 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
+ * the beacon/proberesp rx gives us an opportunity to upgrade
+ * to active scan
+ */
+- set_bit(SCAN_BEACON_DONE, &local->scanning);
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
++ set_bit(SCAN_BEACON_DONE, &local->scanning);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+ }
+
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+@@ -505,7 +505,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw,
+
+ memcpy(&local->scan_info, info, sizeof(*info));
+
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+ }
+ EXPORT_SYMBOL(ieee80211_scan_completed);
+
+@@ -545,8 +545,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
+ /* We need to set power level at maximum rate for scanning. */
+ ieee80211_hw_config(local, 0);
+
+- ieee80211_queue_delayed_work(&local->hw,
+- &local->scan_work, 0);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+
+ return 0;
+ }
+@@ -603,8 +602,8 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local)
+ lockdep_is_held(&local->mtx))))
+ return;
+
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+- round_jiffies_relative(0));
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++ round_jiffies_relative(0));
+ }
+
+ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
+@@ -795,8 +794,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
+ }
+
+ /* Now, just wait a bit and we are all done! */
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+- next_delay);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++ next_delay);
+ return 0;
+ } else {
+ /* Do normal software scan */
+@@ -1043,7 +1042,7 @@ static void ieee80211_scan_state_resume(struct ieee80211_local *local,
+ local->next_scan_state = SCAN_SET_CHANNEL;
+ }
+
+-void ieee80211_scan_work(struct work_struct *work)
++void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, scan_work.work);
+@@ -1137,7 +1136,8 @@ void ieee80211_scan_work(struct work_struct *work)
+ }
+ } while (next_delay == 0);
+
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++ next_delay);
+ goto out;
+
+ out_complete:
+@@ -1280,12 +1280,7 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
+ goto out;
+ }
+
+- /*
+- * If the work is currently running, it must be blocked on
+- * the mutex, but we'll set scan_sdata = NULL and it'll
+- * simply exit once it acquires the mutex.
+- */
+- cancel_delayed_work(&local->scan_work);
++ wiphy_delayed_work_cancel(local->hw.wiphy, &local->scan_work);
+ /* and clean up */
+ memset(&local->scan_info, 0, sizeof(local->scan_info));
+ __ieee80211_scan_completed(&local->hw, true);
+@@ -1427,10 +1422,11 @@ void ieee80211_sched_scan_end(struct ieee80211_local *local)
+
+ mutex_unlock(&local->mtx);
+
+- cfg80211_sched_scan_stopped(local->hw.wiphy, 0);
++ cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0);
+ }
+
+-void ieee80211_sched_scan_stopped_work(struct work_struct *work)
++void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy,
++ struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local,
+@@ -1453,6 +1449,6 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
+ if (local->in_reconfig)
+ return;
+
+- schedule_work(&local->sched_scan_stopped_work);
++ wiphy_work_queue(hw->wiphy, &local->sched_scan_stopped_work);
+ }
+ EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 7751f8ba960ee..0c5cc75857e4f 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -2990,7 +2990,7 @@ void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta,
+ WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB) << 1;
+
+ if (val)
+- sta->sta.max_amsdu_subframes = 4 << val;
++ sta->sta.max_amsdu_subframes = 4 << (4 - val);
+ }
+
+ #ifdef CONFIG_LOCKDEP
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 8a6917cf63cf9..172173b2a9eb8 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -2340,8 +2340,8 @@ static void ieee80211_flush_completed_scan(struct ieee80211_local *local,
+ */
+ if (aborted)
+ set_bit(SCAN_ABORTED, &local->scanning);
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+- flush_delayed_work(&local->scan_work);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
++ wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
+ }
+ }
+
+@@ -4356,7 +4356,8 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
+ mutex_unlock(&local->mtx);
+ }
+
+-void ieee80211_dfs_radar_detected_work(struct work_struct *work)
++void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
++ struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, radar_detected_work);
+@@ -4374,9 +4375,7 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work)
+ }
+ mutex_unlock(&local->chanctx_mtx);
+
+- wiphy_lock(local->hw.wiphy);
+ ieee80211_dfs_cac_cancel(local);
+- wiphy_unlock(local->hw.wiphy);
+
+ if (num_chanctx > 1)
+ /* XXX: multi-channel is not supported yet */
+@@ -4391,7 +4390,7 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw)
+
+ trace_api_radar_detected(local);
+
+- schedule_work(&local->radar_detected_work);
++ wiphy_work_queue(hw->wiphy, &local->radar_detected_work);
+ }
+ EXPORT_SYMBOL(ieee80211_radar_detected);
+
+diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
+index bceaab8dd8e46..74698582a2859 100644
+--- a/net/mptcp/fastopen.c
++++ b/net/mptcp/fastopen.c
+@@ -52,6 +52,7 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf
+
+ mptcp_set_owner_r(skb, sk);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
++ mptcp_sk(sk)->bytes_received += skb->len;
+
+ sk->sk_data_ready(sk);
+
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 9661f38126826..3011bc378462b 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1538,8 +1538,9 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
+ struct mptcp_pm_addr_entry *entry;
+
+ list_for_each_entry(entry, rm_list, list) {
+- remove_anno_list_by_saddr(msk, &entry->addr);
+- if (alist.nr < MPTCP_RM_IDS_MAX)
++ if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
++ lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
++ alist.nr < MPTCP_RM_IDS_MAX)
+ alist.ids[alist.nr++] = entry->addr.id;
+ }
+
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 886ab689a8aea..c1527f520dce3 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1231,6 +1231,8 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
+ mptcp_do_fallback(ssk);
+ }
+
++#define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
++
+ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ struct mptcp_data_frag *dfrag,
+ struct mptcp_sendmsg_info *info)
+@@ -1257,6 +1259,8 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ return -EAGAIN;
+
+ /* compute send limit */
++ if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE))
++ ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE;
+ info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
+ copy = info->size_goal;
+
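
About MPTCP_MAX_GSO_SIZE above: the cap keeps payload plus a worst-case TCP header inside the legacy 64 KiB GSO envelope, so the size_goal derived from sk_gso_max_size a few lines later cannot produce an oversized skb. The arithmetic in miniature, with an assumed header budget (the real MAX_TCP_HEADER is config-dependent):

        #include <stdio.h>

        #define GSO_LEGACY_MAX_SIZE     65536
        #define MAX_TCP_HEADER          320     /* assumed */
        #define MPTCP_MAX_GSO_SIZE      (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))

        int main(void)
        {
                int gso_max_size = 65536;       /* inherited from the device */

                if (gso_max_size > MPTCP_MAX_GSO_SIZE)
                        gso_max_size = MPTCP_MAX_GSO_SIZE;
                printf("clamped gso size:  %d\n", gso_max_size);
                printf("payload + header:  %d\n", gso_max_size + MAX_TCP_HEADER);
                return 0;
        }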
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index 8260202c00669..7539b9c8c2fb4 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -737,8 +737,11 @@ static int mptcp_setsockopt_v4_set_tos(struct mptcp_sock *msk, int optname,
+ val = inet_sk(sk)->tos;
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++ bool slow;
+
++ slow = lock_sock_fast(ssk);
+ __ip_sock_set_tos(ssk, val);
++ unlock_sock_fast(ssk, slow);
+ }
+ release_sock(sk);
+
+diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
+index f8854bff286cb..62fb1031763d1 100644
+--- a/net/ncsi/ncsi-aen.c
++++ b/net/ncsi/ncsi-aen.c
+@@ -89,11 +89,6 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
+ if ((had_link == has_link) || chained)
+ return 0;
+
+- if (had_link)
+- netif_carrier_off(ndp->ndev.dev);
+- else
+- netif_carrier_on(ndp->ndev.dev);
+-
+ if (!ndp->multi_package && !nc->package->multi_channel) {
+ if (had_link) {
+ ndp->flags |= NCSI_DEV_RESHUFFLE;
+diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
+index 6616ba5d0b049..5b37487d9d11f 100644
+--- a/net/netfilter/nf_nat_redirect.c
++++ b/net/netfilter/nf_nat_redirect.c
+@@ -80,6 +80,26 @@ EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4);
+
+ static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
+
++static bool nf_nat_redirect_ipv6_usable(const struct inet6_ifaddr *ifa, unsigned int scope)
++{
++ unsigned int ifa_addr_type = ipv6_addr_type(&ifa->addr);
++
++ if (ifa_addr_type & IPV6_ADDR_MAPPED)
++ return false;
++
++ if ((ifa->flags & IFA_F_TENTATIVE) && (!(ifa->flags & IFA_F_OPTIMISTIC)))
++ return false;
++
++ if (scope) {
++ unsigned int ifa_scope = ifa_addr_type & IPV6_ADDR_SCOPE_MASK;
++
++ if (!(scope & ifa_scope))
++ return false;
++ }
++
++ return true;
++}
++
+ unsigned int
+ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ unsigned int hooknum)
+@@ -89,14 +109,19 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ if (hooknum == NF_INET_LOCAL_OUT) {
+ newdst.in6 = loopback_addr;
+ } else {
++ unsigned int scope = ipv6_addr_scope(&ipv6_hdr(skb)->daddr);
+ struct inet6_dev *idev;
+- struct inet6_ifaddr *ifa;
+ bool addr = false;
+
+ idev = __in6_dev_get(skb->dev);
+ if (idev != NULL) {
++ const struct inet6_ifaddr *ifa;
++
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
++ if (!nf_nat_redirect_ipv6_usable(ifa, scope))
++ continue;
++
+ newdst.in6 = ifa->addr;
+ addr = true;
+ break;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 29c651804cb22..4a450f6d12a59 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3465,10 +3465,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
+ goto cont_skip;
+ if (*idx < s_idx)
+ goto cont;
+- if (*idx > s_idx) {
+- memset(&cb->args[1], 0,
+- sizeof(cb->args) - sizeof(cb->args[0]));
+- }
+ if (prule)
+ handle = prule->handle;
+ else
+@@ -6468,6 +6464,12 @@ static int nft_setelem_deactivate(const struct net *net,
+ return ret;
+ }
+
++static void nft_setelem_catchall_destroy(struct nft_set_elem_catchall *catchall)
++{
++ list_del_rcu(&catchall->list);
++ kfree_rcu(catchall, rcu);
++}
++
+ static void nft_setelem_catchall_remove(const struct net *net,
+ const struct nft_set *set,
+ const struct nft_set_elem *elem)
+@@ -6476,8 +6478,7 @@ static void nft_setelem_catchall_remove(const struct net *net,
+
+ list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
+ if (catchall->elem == elem->priv) {
+- list_del_rcu(&catchall->list);
+- kfree_rcu(catchall, rcu);
++ nft_setelem_catchall_destroy(catchall);
+ break;
+ }
+ }
+@@ -7209,10 +7210,11 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
+
+ if (err < 0) {
+ NL_SET_BAD_ATTR(extack, attr);
+- break;
++ return err;
+ }
+ }
+- return err;
++
++ return 0;
+ }
+
+ /*
+@@ -9638,9 +9640,8 @@ void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans)
+ call_rcu(&trans->rcu, nft_trans_gc_trans_free);
+ }
+
+-static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+- unsigned int gc_seq,
+- bool sync)
++struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
++ unsigned int gc_seq)
+ {
+ struct nft_set_elem_catchall *catchall;
+ const struct nft_set *set = gc->set;
+@@ -9656,11 +9657,7 @@ static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+
+ nft_set_elem_dead(ext);
+ dead_elem:
+- if (sync)
+- gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+- else
+- gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+-
++ gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+ if (!gc)
+ return NULL;
+
+@@ -9670,15 +9667,34 @@ dead_elem:
+ return gc;
+ }
+
+-struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
+- unsigned int gc_seq)
+-{
+- return nft_trans_gc_catchall(gc, gc_seq, false);
+-}
+-
+ struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
+ {
+- return nft_trans_gc_catchall(gc, 0, true);
++ struct nft_set_elem_catchall *catchall, *next;
++ const struct nft_set *set = gc->set;
++ struct nft_set_elem elem;
++ struct nft_set_ext *ext;
++
++ WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net));
++
++ list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
++ ext = nft_set_elem_ext(set, catchall->elem);
++
++ if (!nft_set_elem_expired(ext))
++ continue;
++
++ gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
++ if (!gc)
++ return NULL;
++
++ memset(&elem, 0, sizeof(elem));
++ elem.priv = catchall->elem;
++
++ nft_setelem_data_deactivate(gc->net, gc->set, &elem);
++ nft_setelem_catchall_destroy(catchall);
++ nft_trans_gc_elem_add(gc, elem.priv);
++ }
++
++ return gc;
+ }
+
+ static void nf_tables_module_autoload_cleanup(struct net *net)
+diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
+index e596d1a842f70..f6e791a681015 100644
+--- a/net/netfilter/nft_byteorder.c
++++ b/net/netfilter/nft_byteorder.c
+@@ -38,13 +38,14 @@ void nft_byteorder_eval(const struct nft_expr *expr,
+
+ switch (priv->size) {
+ case 8: {
++ u64 *dst64 = (void *)dst;
+ u64 src64;
+
+ switch (priv->op) {
+ case NFT_BYTEORDER_NTOH:
+ for (i = 0; i < priv->len / 8; i++) {
+ src64 = nft_reg_load64(&src[i]);
+- nft_reg_store64(&dst[i],
++ nft_reg_store64(&dst64[i],
+ be64_to_cpu((__force __be64)src64));
+ }
+ break;
+@@ -52,7 +53,7 @@ void nft_byteorder_eval(const struct nft_expr *expr,
+ for (i = 0; i < priv->len / 8; i++) {
+ src64 = (__force __u64)
+ cpu_to_be64(nft_reg_load64(&src[i]));
+- nft_reg_store64(&dst[i], src64);
++ nft_reg_store64(&dst64[i], src64);
+ }
+ break;
+ }
+diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
+index f7da7c43333b5..ba0d3683a45d3 100644
+--- a/net/netfilter/nft_meta.c
++++ b/net/netfilter/nft_meta.c
+@@ -63,7 +63,7 @@ nft_meta_get_eval_time(enum nft_meta_keys key,
+ {
+ switch (key) {
+ case NFT_META_TIME_NS:
+- nft_reg_store64(dest, ktime_get_real_ns());
++ nft_reg_store64((u64 *)dest, ktime_get_real_ns());
+ break;
+ case NFT_META_TIME_DAY:
+ nft_reg_store8(dest, nft_meta_weekday());
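
Both netfilter hunks above fix the same type confusion: the register file is an array of u32, but nft_reg_store64() writes through a u64 *. With the old `&dst[i]`, pointer arithmetic advanced 4 bytes per index, so consecutive 64-bit stores overlapped; casting to a u64 view first makes the stride 8 bytes. The stride difference in isolation:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t regs[8];
                uint64_t *regs64 = (uint64_t *)regs;

                /* &regs[1] steps sizeof(u32); &regs64[1] steps sizeof(u64) */
                printf("u32 stride: %td bytes\n",
                       (char *)&regs[1] - (char *)&regs[0]);
                printf("u64 stride: %td bytes\n",
                       (char *)&regs64[1] - (char *)&regs64[0]);
                return 0;
        }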
+diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
+index 7ddb9a78e3fc8..ef93e0d3bee04 100644
+--- a/net/netfilter/xt_recent.c
++++ b/net/netfilter/xt_recent.c
+@@ -561,7 +561,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
+ {
+ struct recent_table *t = pde_data(file_inode(file));
+ struct recent_entry *e;
+- char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
++ char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:255.255.255.255")];
+ const char *c = buf;
+ union nf_inet_addr addr = {};
+ u_int16_t family;
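
The xt_recent buffer is sized with sizeof() over a worst-case literal, and the worst case for IPv6 text is not all-hex groups: RFC 4291 allows an address to end in an embedded dotted-quad (x:x:x:x:x:x:d.d.d.d), which is longer than the purely hexadecimal spelling the old buffer assumed. Ordinary userspace code gets the same guarantee from INET6_ADDRSTRLEN:

        #include <arpa/inet.h>
        #include <stdio.h>

        int main(void)
        {
                /* INET6_ADDRSTRLEN (46) covers mapped forms such as
                 * "::ffff:255.255.255.255" plus the trailing NUL. */
                char buf[INET6_ADDRSTRLEN];
                struct in6_addr a;

                inet_pton(AF_INET6, "::ffff:192.0.2.1", &a);
                inet_ntop(AF_INET6, &a, buf, sizeof(buf));
                printf("%s fits in %zu bytes\n", buf, sizeof(buf));
                return 0;
        }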
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 0b9a785dea459..3019a4406ca4f 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -985,7 +985,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
+ if (err)
+ return err;
+
+- nf_conn_act_ct_ext_add(ct);
++ nf_conn_act_ct_ext_add(skb, ct, ctinfo);
+ } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+ labels_nonzero(&info->labels.mask)) {
+ err = ovs_ct_set_labels(ct, key, &info->labels.value,
+diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
+index ac85d4644a3c3..df8a271948a1c 100644
+--- a/net/rxrpc/conn_object.c
++++ b/net/rxrpc/conn_object.c
+@@ -212,7 +212,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
+ conn->idle_timestamp = jiffies;
+ if (atomic_dec_and_test(&conn->active))
+ rxrpc_set_service_reap_timer(conn->rxnet,
+- jiffies + rxrpc_connection_expiry);
++ jiffies + rxrpc_connection_expiry * HZ);
+ }
+
+ rxrpc_put_call(call, rxrpc_call_put_io_thread);
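
The conn_object.c change is a plain unit bug: rxrpc_connection_expiry is kept in seconds while timers are armed in jiffies, so without the HZ scaling the service reaper was scheduled roughly HZ times too early. The conversion in miniature (the HZ value is assumed; kernels are configured anywhere from 100 to 1000):

        #include <stdio.h>

        #define HZ 250  /* assumed tick rate */

        static unsigned long secs_to_jiffies(unsigned long secs)
        {
                return secs * HZ;
        }

        int main(void)
        {
                unsigned long now = 1000000;    /* pretend current jiffies */
                unsigned long expiry_secs = 60;

                printf("buggy deadline:  %lu\n", now + expiry_secs);
                printf("scaled deadline: %lu\n",
                       now + secs_to_jiffies(expiry_secs));
                return 0;
        }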
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index 030d64f282f37..92495e73b8699 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -643,12 +643,8 @@ static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
+ clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+ smp_mb(); /* Read data before setting avail bit */
+ set_bit(i, &call->rtt_avail);
+- if (type != rxrpc_rtt_rx_cancel)
+- rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
+- sent_at, resp_time);
+- else
+- trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i,
+- orig_serial, acked_serial, 0, 0);
++ rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
++ sent_at, resp_time);
+ matched = true;
+ }
+
+@@ -801,28 +797,21 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ summary.ack_reason, nr_acks);
+ rxrpc_inc_stat(call->rxnet, stat_rx_acks[ack.reason]);
+
+- switch (ack.reason) {
+- case RXRPC_ACK_PING_RESPONSE:
+- rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+- rxrpc_rtt_rx_ping_response);
+- break;
+- case RXRPC_ACK_REQUESTED:
+- rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+- rxrpc_rtt_rx_requested_ack);
+- break;
+- default:
+- if (acked_serial != 0)
++ if (acked_serial != 0) {
++ switch (ack.reason) {
++ case RXRPC_ACK_PING_RESPONSE:
+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+- rxrpc_rtt_rx_cancel);
+- break;
+- }
+-
+- if (ack.reason == RXRPC_ACK_PING) {
+- rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
+- rxrpc_propose_ack_respond_to_ping);
+- } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
+- rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, ack_serial,
+- rxrpc_propose_ack_respond_to_ack);
++ rxrpc_rtt_rx_ping_response);
++ break;
++ case RXRPC_ACK_REQUESTED:
++ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
++ rxrpc_rtt_rx_requested_ack);
++ break;
++ default:
++ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
++ rxrpc_rtt_rx_other_ack);
++ break;
++ }
+ }
+
+ /* If we get an EXCEEDS_WINDOW ACK from the server, it probably
+@@ -835,7 +824,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ rxrpc_is_client_call(call)) {
+ rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
+ 0, -ENETRESET);
+- return;
++ goto send_response;
+ }
+
+ /* If we get an OUT_OF_SEQUENCE ACK from the server, that can also
+@@ -849,7 +838,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ rxrpc_is_client_call(call)) {
+ rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
+ 0, -ENETRESET);
+- return;
++ goto send_response;
+ }
+
+ /* Discard any out-of-order or duplicate ACKs (outside lock). */
+@@ -857,7 +846,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
+ first_soft_ack, call->acks_first_seq,
+ prev_pkt, call->acks_prev_seq);
+- return;
++ goto send_response;
+ }
+
+ info.rxMTU = 0;
+@@ -897,7 +886,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ case RXRPC_CALL_SERVER_AWAIT_ACK:
+ break;
+ default:
+- return;
++ goto send_response;
+ }
+
+ if (before(hard_ack, call->acks_hard_ack) ||
+@@ -909,7 +898,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ if (after(hard_ack, call->acks_hard_ack)) {
+ if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
+ rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack);
+- return;
++ goto send_response;
+ }
+ }
+
+@@ -927,6 +916,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ rxrpc_propose_ack_ping_for_lost_reply);
+
+ rxrpc_congestion_management(call, skb, &summary, acked_serial);
++
++send_response:
++ if (ack.reason == RXRPC_ACK_PING)
++ rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
++ rxrpc_propose_ack_respond_to_ping);
++ else if (sp->hdr.flags & RXRPC_REQUEST_ACK)
++ rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, ack_serial,
++ rxrpc_propose_ack_respond_to_ack);
+ }
+
+ /*
+diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
+index 7d910aee4f8cb..c553a30e9c838 100644
+--- a/net/rxrpc/local_object.c
++++ b/net/rxrpc/local_object.c
+@@ -87,7 +87,7 @@ static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
+ struct rxrpc_local *local =
+ container_of(timer, struct rxrpc_local, client_conn_reap_timer);
+
+- if (local->kill_all_client_conns &&
++ if (!local->kill_all_client_conns &&
+ test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags))
+ rxrpc_wake_up_io_thread(local);
+ }
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index fb52d6f9aff93..6dcc4585576e8 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -376,6 +376,17 @@ static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
+ entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
+ }
+
++static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
++{
++ struct nf_conn_act_ct_ext *act_ct_ext;
++
++ act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
++ if (act_ct_ext) {
++ tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
++ tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
++ }
++}
++
+ static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
+ struct nf_conn *ct,
+ bool tcp, bool bidirectional)
+@@ -671,6 +682,8 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
+ else
+ ctinfo = IP_CT_ESTABLISHED_REPLY;
+
++ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
++ tcf_ct_flow_ct_ext_ifidx_update(flow);
+ flow_offload_refresh(nf_ft, flow, force_refresh);
+ if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
+ /* Process this flow in SW to allow promoting to ASSURED */
+@@ -1030,7 +1043,7 @@ do_nat:
+ tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
+
+ if (!nf_ct_is_confirmed(ct))
+- nf_conn_act_ct_ext_add(ct);
++ nf_conn_act_ct_ext_add(skb, ct, ctinfo);
+
+ /* This will take care of sending queued events
+ * even if the connection is already confirmed.
+@@ -1522,6 +1535,9 @@ static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
++ if (tcf_ct_helper(act))
++ return -EOPNOTSUPP;
++
+ entry->id = FLOW_ACTION_CT;
+ entry->ct.action = tcf_ct_action(act);
+ entry->ct.zone = tcf_ct_zone(act);
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 35ddebae88941..741339ac94833 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -275,7 +275,7 @@ static int __smc_release(struct smc_sock *smc)
+
+ if (!smc->use_fallback) {
+ rc = smc_close_active(smc);
+- sock_set_flag(sk, SOCK_DEAD);
++ smc_sock_set_flag(sk, SOCK_DEAD);
+ sk->sk_shutdown |= SHUTDOWN_MASK;
+ } else {
+ if (sk->sk_state != SMC_CLOSED) {
+@@ -598,8 +598,12 @@ static int smcr_clnt_conf_first_link(struct smc_sock *smc)
+ struct smc_llc_qentry *qentry;
+ int rc;
+
+- /* receive CONFIRM LINK request from server over RoCE fabric */
+- qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
++ /* Receive CONFIRM LINK request from server over RoCE fabric.
++ * Increasing the client's timeout by twice as much as the server's
++ * timeout by default can temporarily avoid decline messages of
++ * both sides crossing or colliding
++ */
++ qentry = smc_llc_wait(link->lgr, NULL, 2 * SMC_LLC_WAIT_TIME,
+ SMC_LLC_CONFIRM_LINK);
+ if (!qentry) {
+ struct smc_clc_msg_decline dclc;
+@@ -1743,7 +1747,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
+ if (new_clcsock)
+ sock_release(new_clcsock);
+ new_sk->sk_state = SMC_CLOSED;
+- sock_set_flag(new_sk, SOCK_DEAD);
++ smc_sock_set_flag(new_sk, SOCK_DEAD);
+ sock_put(new_sk); /* final */
+ *new_smc = NULL;
+ goto out;
+diff --git a/net/smc/smc.h b/net/smc/smc.h
+index 24745fde4ac26..e377980b84145 100644
+--- a/net/smc/smc.h
++++ b/net/smc/smc.h
+@@ -377,4 +377,9 @@ int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb);
+ int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
+ int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
+
++static inline void smc_sock_set_flag(struct sock *sk, enum sock_flags flag)
++{
++ set_bit(flag, &sk->sk_flags);
++}
++
+ #endif /* __SMC_H */
+diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
+index 89105e95b4523..3c06625ceb200 100644
+--- a/net/smc/smc_cdc.c
++++ b/net/smc/smc_cdc.c
+@@ -28,13 +28,15 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
+ {
+ struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
+ struct smc_connection *conn = cdcpend->conn;
++ struct smc_buf_desc *sndbuf_desc;
+ struct smc_sock *smc;
+ int diff;
+
++ sndbuf_desc = conn->sndbuf_desc;
+ smc = container_of(conn, struct smc_sock, conn);
+ bh_lock_sock(&smc->sk);
+- if (!wc_status) {
+- diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
++ if (!wc_status && sndbuf_desc) {
++ diff = smc_curs_diff(sndbuf_desc->len,
+ &cdcpend->conn->tx_curs_fin,
+ &cdcpend->cursor);
+ /* sndbuf_space is decreased in smc_sendmsg */
+@@ -114,9 +116,6 @@ int smc_cdc_msg_send(struct smc_connection *conn,
+ union smc_host_cursor cfed;
+ int rc;
+
+- if (unlikely(!READ_ONCE(conn->sndbuf_desc)))
+- return -ENOBUFS;
+-
+ smc_cdc_add_pending_send(conn, pend);
+
+ conn->tx_cdc_seq++;
+@@ -385,7 +384,7 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
+ smc->sk.sk_shutdown |= RCV_SHUTDOWN;
+ if (smc->clcsock && smc->clcsock->sk)
+ smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
+- sock_set_flag(&smc->sk, SOCK_DONE);
++ smc_sock_set_flag(&smc->sk, SOCK_DONE);
+ sock_hold(&smc->sk); /* sock_put in close_work */
+ if (!queue_work(smc_close_wq, &conn->close_work))
+ sock_put(&smc->sk);
+diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
+index dbdf03e8aa5b5..10219f55aad14 100644
+--- a/net/smc/smc_close.c
++++ b/net/smc/smc_close.c
+@@ -116,7 +116,8 @@ static void smc_close_cancel_work(struct smc_sock *smc)
+ struct sock *sk = &smc->sk;
+
+ release_sock(sk);
+- cancel_work_sync(&smc->conn.close_work);
++ if (cancel_work_sync(&smc->conn.close_work))
++ sock_put(sk);
+ cancel_delayed_work_sync(&smc->conn.tx_work);
+ lock_sock(sk);
+ }
+@@ -173,7 +174,7 @@ void smc_close_active_abort(struct smc_sock *smc)
+ break;
+ }
+
+- sock_set_flag(sk, SOCK_DEAD);
++ smc_sock_set_flag(sk, SOCK_DEAD);
+ sk->sk_state_change(sk);
+
+ if (release_clcsock) {
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 9c210273d06b7..339dfc5b92246 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -111,7 +111,8 @@ static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
+
+ pipefs_sb = rpc_get_sb_net(net);
+ if (pipefs_sb) {
+- __rpc_clnt_remove_pipedir(clnt);
++ if (pipefs_sb == clnt->pipefs_sb)
++ __rpc_clnt_remove_pipedir(clnt);
+ rpc_put_sb_net(net);
+ }
+ }
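Paired with the assignment added to rpc_setup_pipedir() in the next hunk, the client now records which rpc_pipefs superblock its pipe directory was created on and only removes it when tearing down against that same superblock, so a client outliving a pipefs remount does not reach into a directory it never created there. The record-at-setup, compare-at-teardown shape in a hypothetical sketch:

    #include <stdio.h>

    struct sb { int id; };
    struct client { struct sb *pipefs_sb; };

    static void setup_pipedir(struct client *c, struct sb *sb)
    {
            c->pipefs_sb = sb;      /* remember where we registered */
    }

    static void remove_pipedir(struct client *c, struct sb *current_sb)
    {
            /* only undo the registration on the superblock it was made on */
            if (current_sb == c->pipefs_sb)
                    printf("removing dir on sb %d\n", current_sb->id);
    }

    int main(void)
    {
            struct sb old = { 1 }, fresh = { 2 };
            struct client c;

            setup_pipedir(&c, &old);
            remove_pipedir(&c, &fresh);     /* no-op: different superblock */
            remove_pipedir(&c, &old);       /* removed */
            return 0;
    }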
+@@ -151,6 +152,8 @@ rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
+ {
+ struct dentry *dentry;
+
++ clnt->pipefs_sb = pipefs_sb;
++
+ if (clnt->cl_program->pipe_dir_name != NULL) {
+ dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
+ if (IS_ERR(dentry))
+@@ -2171,6 +2174,7 @@ call_connect_status(struct rpc_task *task)
+ task->tk_status = 0;
+ switch (status) {
+ case -ECONNREFUSED:
++ case -ECONNRESET:
+ /* A positive refusal suggests a rebind is needed. */
+ if (RPC_IS_SOFTCONN(task))
+ break;
+@@ -2179,7 +2183,6 @@ call_connect_status(struct rpc_task *task)
+ goto out_retry;
+ }
+ fallthrough;
+- case -ECONNRESET:
+ case -ECONNABORTED:
+ case -ENETDOWN:
+ case -ENETUNREACH:
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 5988a5c5ff3f0..102c3818bc54d 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -769,6 +769,10 @@ void rpcb_getport_async(struct rpc_task *task)
+
+ child = rpcb_call_async(rpcb_clnt, map, proc);
+ rpc_release_client(rpcb_clnt);
++ if (IS_ERR(child)) {
++ /* rpcb_map_release() has freed the arguments */
++ return;
++ }
+
+ xprt->stat.bind_count++;
+ rpc_put_task(child);
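rpcb_call_async() returns an ERR_PTR on failure, and by then the map-release callback has already freed the mapping arguments, so the caller must return without touching them or the would-be child task. The kernel encodes such failures as small negative errno values squeezed into the top of the pointer range; a self-contained sketch of that convention:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)
    {
            return (void *)(intptr_t)error; /* -ENOMEM -> 0xff...f4 */
    }

    static inline long PTR_ERR(const void *ptr)
    {
            return (long)(intptr_t)ptr;
    }

    static inline int IS_ERR(const void *ptr)
    {
            return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    int main(void)
    {
            void *child = ERR_PTR(-ENOMEM); /* say the call failed */

            if (IS_ERR(child)) {
                    /* the failure path already freed the arguments:
                     * bail out, do not dereference or release child */
                    printf("failed: %ld\n", PTR_ERR(child));
                    return 1;
            }
            return 0;
    }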
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index 85c8bcaebb80f..3b05f90a3e50d 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -852,7 +852,8 @@ out_readfail:
+ if (ret == -EINVAL)
+ svc_rdma_send_error(rdma_xprt, ctxt, ret);
+ svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
+- return ret;
++ svc_xprt_deferred_close(xprt);
++ return -ENOTCONN;
+
+ out_backchannel:
+ svc_rdma_handle_bc_reply(rqstp, ctxt);
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index e33b4f29f77cf..d0143823658d5 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -1446,7 +1446,7 @@ u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
+ p = (struct tipc_gap_ack_blks *)msg_data(hdr);
+ sz = ntohs(p->len);
+ /* Sanity check */
+- if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
++ if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) {
+ /* Good, check if the desired type exists */
+ if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
+ goto ok;
+@@ -1533,7 +1533,7 @@ static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
+ __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
+
+ /* Total len */
+- len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
++ len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt));
+ ga->len = htons(len);
+ return len;
+ }
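Both gap-ACK length computations switch the open-coded a + b inside struct_size() to size_add(). The two counts are u16s taken from the wire; size_add() does the sum in size_t and saturates at SIZE_MAX on overflow, so a hostile pair of counts yields a length that every later bounds check or allocation rejects, rather than a small wrapped value. Roughly how <linux/overflow.h> defines it:

    #include <stdint.h>
    #include <stdio.h>

    /* saturate to SIZE_MAX instead of wrapping around */
    static inline size_t size_add(size_t a, size_t b)
    {
            size_t sum;

            if (__builtin_add_overflow(a, b, &sum))
                    return SIZE_MAX;
            return sum;
    }

    int main(void)
    {
            uint16_t ugack = 0xffff, bgack = 0xffff;

            /* worst-case wire counts stay well-defined in size_t */
            printf("%zu\n", size_add(ugack, bgack));        /* 131070 */
            /* and a genuine overflow pins at SIZE_MAX */
            printf("%zu\n", size_add(SIZE_MAX, 1));
            return 0;
    }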
+diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
+index e8fd257c0e688..1a9a5bdaccf4f 100644
+--- a/net/tipc/netlink.c
++++ b/net/tipc/netlink.c
+@@ -88,7 +88,7 @@ const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
+
+ const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
+ [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
+- [TIPC_NLA_LINK_NAME] = { .type = NLA_STRING,
++ [TIPC_NLA_LINK_NAME] = { .type = NLA_NUL_STRING,
+ .len = TIPC_MAX_LINK_NAME },
+ [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
+ [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
+@@ -125,7 +125,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
+
+ const struct nla_policy tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = {
+ [TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC },
+- [TIPC_NLA_BEARER_NAME] = { .type = NLA_STRING,
++ [TIPC_NLA_BEARER_NAME] = { .type = NLA_NUL_STRING,
+ .len = TIPC_MAX_BEARER_NAME },
+ [TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED },
+ [TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 }
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index 5bc076f2fa74a..c763008a8adba 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -102,6 +102,7 @@ static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
+ return -EMSGSIZE;
+
+ skb_put(skb, TLV_SPACE(len));
++ memset(tlv, 0, TLV_SPACE(len));
+ tlv->tlv_type = htons(type);
+ tlv->tlv_len = htons(TLV_LENGTH(len));
+ if (len && data)
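skb_put() returns uninitialized skb memory, TLV_SPACE(len) includes alignment padding beyond the header, and the payload copy guarded by the if (len && data) above covers only len bytes, so without the memset the padding leaked stale kernel heap bytes to userspace. The shape of the fix in a hypothetical userspace sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define ALIGN4(x) (((x) + 3u) & ~3u)

    struct tlv { unsigned short type, len; };

    /* reserve header + payload + alignment padding from 'buf' */
    static struct tlv *add_tlv(char *buf, unsigned short type,
                               const void *data, unsigned short len)
    {
            struct tlv *t = (struct tlv *)buf;
            size_t space = ALIGN4(sizeof(*t) + len);

            memset(t, 0, space);    /* the fix: padding holds no stale bytes */
            t->type = type;
            t->len = sizeof(*t) + len;
            if (len && data)
                    memcpy(t + 1, data, len);       /* covers len only */
            return t;
    }

    int main(void)
    {
            char *buf = malloc(64);

            add_tlv(buf, 1, "abc", 3);      /* 1 pad byte, now zeroed */
            free(buf);
            return 0;
    }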
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index e9d1e83a859d1..779815b885e94 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1232,11 +1232,14 @@ void tls_sw_splice_eof(struct socket *sock)
+ lock_sock(sk);
+
+ retry:
++ /* same checks as in tls_sw_push_pending_record() */
+ rec = ctx->open_rec;
+ if (!rec)
+ goto unlock;
+
+ msg_pl = &rec->msg_plaintext;
++ if (msg_pl->sg.size == 0)
++ goto unlock;
+
+ /* Check the BPF advisor and perform transmission. */
+ ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
+@@ -1491,7 +1494,7 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ */
+ aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
+ aead_size = ALIGN(aead_size, __alignof__(*dctx));
+- mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
++ mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
+ sk->sk_allocation);
+ if (!mem) {
+ err = -ENOMEM;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 3e8a04a136688..1e1a88bd4e688 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -212,8 +212,6 @@ static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
+ }
+ #endif /* CONFIG_SECURITY_NETWORK */
+
+-#define unix_peer(sk) (unix_sk(sk)->peer)
+-
+ static inline int unix_our_peer(struct sock *sk, struct sock *osk)
+ {
+ return unix_peer(osk) == sk;
+@@ -2553,15 +2551,16 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+
+ if (!(state->flags & MSG_PEEK))
+ WRITE_ONCE(u->oob_skb, NULL);
+-
++ else
++ skb_get(oob_skb);
+ unix_state_unlock(sk);
+
+ chunk = state->recv_actor(oob_skb, 0, chunk, state);
+
+- if (!(state->flags & MSG_PEEK)) {
++ if (!(state->flags & MSG_PEEK))
+ UNIXCB(oob_skb).consumed += 1;
+- kfree_skb(oob_skb);
+- }
++
++ consume_skb(oob_skb);
+
+ mutex_unlock(&u->iolock);
+
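The MSG_PEEK fix is about skb reference balance: a peek leaves oob_skb on the queue, so an extra reference is taken under the state lock, and the final free becomes an unconditional consume_skb() (consume, not kfree, since no data was dropped on the floor). The invariant, every path releasing exactly the references it acquired, in a toy model:

    #include <stdbool.h>
    #include <stdio.h>

    struct skb { int users; };

    static void skb_get(struct skb *s)     { s->users++; }
    static void consume_skb(struct skb *s) { s->users--; }

    static void recv_urg(struct skb *oob, bool peek, struct skb **queue_slot)
    {
            if (!peek)
                    *queue_slot = 0;        /* the queue's ref becomes ours */
            else
                    skb_get(oob);           /* queue keeps its ref; take our own */

            /* ... copy the byte out to the caller ... */

            consume_skb(oob);               /* drop exactly the ref we own */
    }

    int main(void)
    {
            struct skb oob = { .users = 1 };        /* the queue's reference */
            struct skb *slot = &oob;

            recv_urg(&oob, true, &slot);    /* peek: users back to 1, still queued */
            recv_urg(&oob, false, &slot);   /* read: users 0, dequeued */
            printf("users=%d queued=%d\n", oob.users, slot != 0);
            return 0;
    }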
+diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
+index 2f9d8271c6ec7..7ea7c3a0d0d06 100644
+--- a/net/unix/unix_bpf.c
++++ b/net/unix/unix_bpf.c
+@@ -159,12 +159,17 @@ int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool re
+
+ int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
+ {
++ struct sock *sk_pair;
++
+ if (restore) {
+ sk->sk_write_space = psock->saved_write_space;
+ sock_replace_proto(sk, psock->sk_proto);
+ return 0;
+ }
+
++ sk_pair = unix_peer(sk);
++ sock_hold(sk_pair);
++ psock->sk_pair = sk_pair;
+ unix_stream_bpf_check_needs_rebuild(psock->sk_proto);
+ sock_replace_proto(sk, &unix_stream_bpf_prot);
+ return 0;
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 020cf17ab7e47..ccd8cefeea7ba 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -89,6 +89,7 @@
+ #include <linux/types.h>
+ #include <linux/bitops.h>
+ #include <linux/cred.h>
++#include <linux/errqueue.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+@@ -110,6 +111,7 @@
+ #include <linux/workqueue.h>
+ #include <net/sock.h>
+ #include <net/af_vsock.h>
++#include <uapi/linux/vm_sockets.h>
+
+ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
+ static void vsock_sk_destruct(struct sock *sk);
+@@ -2134,6 +2136,10 @@ vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int err;
+
+ sk = sock->sk;
++
++ if (unlikely(flags & MSG_ERRQUEUE))
++ return sock_recv_errqueue(sk, msg, len, SOL_VSOCK, VSOCK_RECVERR);
++
+ vsk = vsock_sk(sk);
+ err = 0;
+
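This gives AF_VSOCK an error queue in the style of inet sockets: completions are read with recvmsg(MSG_ERRQUEUE) and delivered as a control message at level SOL_VSOCK, type VSOCK_RECVERR. A hedged userspace sketch of draining it; the payload is assumed to be a struct sock_extended_err per the inet convention, and the fallback constants below are assumptions for older uapi headers:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/errqueue.h>

    #ifndef SOL_VSOCK
    #define SOL_VSOCK 287           /* assumed to match the kernel definition */
    #endif
    #ifndef VSOCK_RECVERR
    #define VSOCK_RECVERR 1         /* assumed, from the same uapi series */
    #endif

    /* drain one batch of completions from a vsock error queue */
    static void drain_errqueue(int fd)
    {
            char control[128];
            struct msghdr msg = {
                    .msg_control = control,
                    .msg_controllen = sizeof(control),
            };
            struct cmsghdr *cm;

            if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                    return;         /* EAGAIN: nothing queued */

            for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                    struct sock_extended_err serr;

                    if (cm->cmsg_level != SOL_VSOCK ||
                        cm->cmsg_type != VSOCK_RECVERR)
                            continue;
                    memcpy(&serr, CMSG_DATA(cm), sizeof(serr));
                    printf("ee_errno=%u ee_origin=%u\n",
                           serr.ee_errno, serr.ee_origin);
            }
    }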
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 352d042b130b5..8bc272b6003bb 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -68,6 +68,8 @@ virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
+ hdr->dst_port = cpu_to_le32(dst_port);
+ hdr->flags = cpu_to_le32(info->flags);
+ hdr->len = cpu_to_le32(len);
++ hdr->buf_alloc = cpu_to_le32(0);
++ hdr->fwd_cnt = cpu_to_le32(0);
+
+ if (info->msg && len > 0) {
+ payload = skb_put(skb, len);
+@@ -1204,11 +1206,17 @@ virtio_transport_recv_connected(struct sock *sk,
+ vsk->peer_shutdown |= RCV_SHUTDOWN;
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
+ vsk->peer_shutdown |= SEND_SHUTDOWN;
+- if (vsk->peer_shutdown == SHUTDOWN_MASK &&
+- vsock_stream_has_data(vsk) <= 0 &&
+- !sock_flag(sk, SOCK_DONE)) {
+- (void)virtio_transport_reset(vsk, NULL);
+- virtio_transport_do_close(vsk, true);
++ if (vsk->peer_shutdown == SHUTDOWN_MASK) {
++ if (vsock_stream_has_data(vsk) <= 0 && !sock_flag(sk, SOCK_DONE)) {
++ (void)virtio_transport_reset(vsk, NULL);
++ virtio_transport_do_close(vsk, true);
++ }
++ /* Remove this socket anyway because the remote peer sent
++ * the shutdown. This way a new connection will succeed
++ * if the remote peer uses the same source port,
++ * even if the old socket is still unreleased, but now disconnected.
++ */
++ vsock_remove_sock(vsk);
+ }
+ if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
+ sk->sk_state_change(sk);
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index acec41c1809a8..563cfbe3237c9 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1049,7 +1049,8 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy)
+ }
+ EXPORT_SYMBOL(wiphy_rfkill_start_polling);
+
+-void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev)
++void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
++ struct wiphy_work *end)
+ {
+ unsigned int runaway_limit = 100;
+ unsigned long flags;
+@@ -1068,6 +1069,10 @@ void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev)
+ wk->func(&rdev->wiphy, wk);
+
+ spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++
++ if (wk == end)
++ break;
++
+ if (WARN_ON(--runaway_limit == 0))
+ INIT_LIST_HEAD(&rdev->wiphy_work_list);
+ }
+@@ -1118,7 +1123,7 @@ void wiphy_unregister(struct wiphy *wiphy)
+ #endif
+
+ /* surely nothing is reachable now, clean up work */
+- cfg80211_process_wiphy_works(rdev);
++ cfg80211_process_wiphy_works(rdev, NULL);
+ wiphy_unlock(&rdev->wiphy);
+ rtnl_unlock();
+
+@@ -1640,6 +1645,21 @@ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work)
+ }
+ EXPORT_SYMBOL_GPL(wiphy_work_cancel);
+
++void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work)
++{
++ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
++ unsigned long flags;
++ bool run;
++
++ spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++ run = !work || !list_empty(&work->entry);
++ spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
++
++ if (run)
++ cfg80211_process_wiphy_works(rdev, work);
++}
++EXPORT_SYMBOL_GPL(wiphy_work_flush);
++
+ void wiphy_delayed_work_timer(struct timer_list *t)
+ {
+ struct wiphy_delayed_work *dwork = from_timer(dwork, t, timer);
+@@ -1672,6 +1692,16 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+ }
+ EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel);
+
++void wiphy_delayed_work_flush(struct wiphy *wiphy,
++ struct wiphy_delayed_work *dwork)
++{
++ lockdep_assert_held(&wiphy->mtx);
++
++ del_timer_sync(&dwork->timer);
++ wiphy_work_flush(wiphy, &dwork->work);
++}
++EXPORT_SYMBOL_GPL(wiphy_delayed_work_flush);
++
+ static int __init cfg80211_init(void)
+ {
+ int err;
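The new flush primitives run the wiphy work list synchronously up to and including a given item (NULL drains everything, which is why the existing callers pass NULL), and the delayed variant first kills the timer so a not-yet-queued item is pushed through immediately. The run-until-end semantics of the "if (wk == end) break;" added above, in a toy list:

    #include <stdio.h>

    struct work {
            void (*fn)(struct work *);
            struct work *next;
    };

    /* pop and run queued items in order; stop after 'end'
     * (end == NULL drains the whole list) */
    static void flush(struct work **head, struct work *end)
    {
            while (*head) {
                    struct work *wk = *head;

                    *head = wk->next;
                    wk->fn(wk);
                    if (wk == end)
                            break;  /* later items stay queued */
            }
    }

    static void run(struct work *w) { printf("ran %p\n", (void *)w); }

    int main(void)
    {
            struct work c = { run, 0 };
            struct work b = { run, &c };
            struct work a = { run, &b };
            struct work *head = &a;

            flush(&head, &b);       /* runs a and b; c remains queued */
            printf("still queued: %p\n", (void *)head);
            return 0;
    }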
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index ba9c7170afa44..e536c0b615a09 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -464,7 +464,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, enum nl80211_iftype ntype,
+ struct vif_params *params);
+ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
+-void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev);
++void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
++ struct wiphy_work *end);
+ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
+
+ bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 8210a6090ac16..e4cc6209c7b9b 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -2358,8 +2358,8 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
+
+ /* elem might be invalid after the memmove */
+ next = (void *)(elem->data + elem->datalen);
+-
+ elem_datalen = elem->datalen;
++
+ if (elem->id == WLAN_EID_EXTENSION) {
+ copied = elem->datalen - 1;
+ if (copied > data_len)
+@@ -2380,7 +2380,7 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
+
+ for (elem = next;
+ elem->data < ies + ieslen &&
+- elem->data + elem->datalen < ies + ieslen;
++ elem->data + elem->datalen <= ies + ieslen;
+ elem = next) {
+ /* elem might be invalid after the memmove */
+ next = (void *)(elem->data + elem->datalen);
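The loop condition change is an off-by-one fix: a fragment whose data runs exactly to the end of the IE buffer is still valid, so the upper bound on elem->data + elem->datalen must be <=, not <. The arithmetic in two lines:

    #include <stdio.h>

    int main(void)
    {
            /* a 10-byte buffer whose final element's data fills bytes 2..9 */
            const unsigned char ies[10] = { 0 };
            const unsigned char *data = ies + 2;
            unsigned char datalen = 8;

            /* old: 10 < 10 is false, so the valid last fragment was skipped */
            printf("old: %d\n", data + datalen <  ies + sizeof(ies));
            /* new: 10 <= 10 is true, the fragment is processed */
            printf("new: %d\n", data + datalen <= ies + sizeof(ies));
            return 0;
    }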
+diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
+index c629bac3f2983..565511a3f461e 100644
+--- a/net/wireless/sysfs.c
++++ b/net/wireless/sysfs.c
+@@ -105,14 +105,14 @@ static int wiphy_suspend(struct device *dev)
+ cfg80211_leave_all(rdev);
+ cfg80211_process_rdev_events(rdev);
+ }
+- cfg80211_process_wiphy_works(rdev);
++ cfg80211_process_wiphy_works(rdev, NULL);
+ if (rdev->ops->suspend)
+ ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
+ if (ret == 1) {
+ /* Driver refuse to configure wowlan */
+ cfg80211_leave_all(rdev);
+ cfg80211_process_rdev_events(rdev);
+- cfg80211_process_wiphy_works(rdev);
++ cfg80211_process_wiphy_works(rdev, NULL);
+ ret = rdev_suspend(rdev, NULL);
+ }
+ if (ret == 0)
+diff --git a/samples/bpf/syscall_tp_user.c b/samples/bpf/syscall_tp_user.c
+index 7a788bb837fc1..7a09ac74fac07 100644
+--- a/samples/bpf/syscall_tp_user.c
++++ b/samples/bpf/syscall_tp_user.c
+@@ -17,9 +17,9 @@
+
+ static void usage(const char *cmd)
+ {
+- printf("USAGE: %s [-i num_progs] [-h]\n", cmd);
+- printf(" -i num_progs # number of progs of the test\n");
+- printf(" -h # help\n");
++ printf("USAGE: %s [-i nr_tests] [-h]\n", cmd);
++ printf(" -i nr_tests # rounds of test to run\n");
++ printf(" -h # help\n");
+ }
+
+ static void verify_map(int map_id)
+@@ -45,14 +45,14 @@ static void verify_map(int map_id)
+ }
+ }
+
+-static int test(char *filename, int num_progs)
++static int test(char *filename, int nr_tests)
+ {
+- int map0_fds[num_progs], map1_fds[num_progs], fd, i, j = 0;
+- struct bpf_link *links[num_progs * 4];
+- struct bpf_object *objs[num_progs];
++ int map0_fds[nr_tests], map1_fds[nr_tests], fd, i, j = 0;
++ struct bpf_link **links = NULL;
++ struct bpf_object *objs[nr_tests];
+ struct bpf_program *prog;
+
+- for (i = 0; i < num_progs; i++) {
++ for (i = 0; i < nr_tests; i++) {
+ objs[i] = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(objs[i])) {
+ fprintf(stderr, "opening BPF object file failed\n");
+@@ -60,6 +60,19 @@ static int test(char *filename, int num_progs)
+ goto cleanup;
+ }
+
++ /* One-time initialization */
++ if (!links) {
++ int nr_progs = 0;
++
++ bpf_object__for_each_program(prog, objs[i])
++ nr_progs += 1;
++
++ links = calloc(nr_progs * nr_tests, sizeof(struct bpf_link *));
++
++ if (!links)
++ goto cleanup;
++ }
++
+ /* load BPF program */
+ if (bpf_object__load(objs[i])) {
+ fprintf(stderr, "loading BPF object file failed\n");
+@@ -101,14 +114,18 @@ static int test(char *filename, int num_progs)
+ close(fd);
+
+ /* verify the map */
+- for (i = 0; i < num_progs; i++) {
++ for (i = 0; i < nr_tests; i++) {
+ verify_map(map0_fds[i]);
+ verify_map(map1_fds[i]);
+ }
+
+ cleanup:
+- for (j--; j >= 0; j--)
+- bpf_link__destroy(links[j]);
++ if (links) {
++ for (j--; j >= 0; j--)
++ bpf_link__destroy(links[j]);
++
++ free(links);
++ }
+
+ for (i--; i >= 0; i--)
+ bpf_object__close(objs[i]);
+@@ -117,13 +134,13 @@ cleanup:
+
+ int main(int argc, char **argv)
+ {
+- int opt, num_progs = 1;
++ int opt, nr_tests = 1;
+ char filename[256];
+
+ while ((opt = getopt(argc, argv, "i:h")) != -1) {
+ switch (opt) {
+ case 'i':
+- num_progs = atoi(optarg);
++ nr_tests = atoi(optarg);
+ break;
+ case 'h':
+ default:
+@@ -134,5 +151,5 @@ int main(int argc, char **argv)
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+- return test(filename, num_progs);
++ return test(filename, nr_tests);
+ }
+diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux
+index 3cd6ca15f390d..c9f3e03124d7f 100644
+--- a/scripts/Makefile.vmlinux
++++ b/scripts/Makefile.vmlinux
+@@ -19,6 +19,7 @@ quiet_cmd_cc_o_c = CC $@
+
+ ifdef CONFIG_MODULES
+ KASAN_SANITIZE_.vmlinux.export.o := n
++KCSAN_SANITIZE_.vmlinux.export.o := n
+ GCOV_PROFILE_.vmlinux.export.o := n
+ targets += .vmlinux.export.o
+ vmlinux: .vmlinux.export.o
+diff --git a/scripts/Makefile.vmlinux_o b/scripts/Makefile.vmlinux_o
+index 0edfdb40364b8..25b3b587d37c0 100644
+--- a/scripts/Makefile.vmlinux_o
++++ b/scripts/Makefile.vmlinux_o
+@@ -37,7 +37,8 @@ objtool-enabled := $(or $(delay-objtool),$(CONFIG_NOINSTR_VALIDATION))
+
+ vmlinux-objtool-args-$(delay-objtool) += $(objtool-args-y)
+ vmlinux-objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable
+-vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr $(if $(CONFIG_CPU_UNRET_ENTRY), --unret)
++vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr \
++ $(if $(or $(CONFIG_CPU_UNRET_ENTRY),$(CONFIG_CPU_SRSO)), --unret)
+
+ objtool-args = $(vmlinux-objtool-args-y) --link
+
+diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
+index 951b74ba1b242..910bd21d08f48 100644
+--- a/scripts/gcc-plugins/randomize_layout_plugin.c
++++ b/scripts/gcc-plugins/randomize_layout_plugin.c
+@@ -191,12 +191,14 @@ static void partition_struct(tree *fields, unsigned long length, struct partitio
+
+ static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
+ {
+- unsigned long i, x;
++ unsigned long i, x, index;
+ struct partition_group size_group[length];
+ unsigned long num_groups = 0;
+ unsigned long randnum;
+
+ partition_struct(newtree, length, (struct partition_group *)&size_group, &num_groups);
++
++ /* FIXME: this group shuffle is currently a no-op. */
+ for (i = num_groups - 1; i > 0; i--) {
+ struct partition_group tmp;
+ randnum = ranval(prng_state) % (i + 1);
+@@ -206,11 +208,14 @@ static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prn
+ }
+
+ for (x = 0; x < num_groups; x++) {
+- for (i = size_group[x].start + size_group[x].length - 1; i > size_group[x].start; i--) {
++ for (index = size_group[x].length - 1; index > 0; index--) {
+ tree tmp;
++
++ i = size_group[x].start + index;
+ if (DECL_BIT_FIELD_TYPE(newtree[i]))
+ continue;
+- randnum = ranval(prng_state) % (i + 1);
++ randnum = ranval(prng_state) % (index + 1);
++ randnum += size_group[x].start;
+ // we could handle this case differently if desired
+ if (DECL_BIT_FIELD_TYPE(newtree[randnum]))
+ continue;
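The inner loop is a Fisher-Yates shuffle that must stay within one size group. Previously the swap partner was drawn modulo the absolute position i, so elements could be exchanged with slots before the group's start, mixing groups; the fix draws modulo the in-group index and rebases by the group start, keeping every swap inside [start, start + length). The corrected pattern as a standalone sketch, with rand() standing in for the plugin's PRNG:

    #include <stdio.h>
    #include <stdlib.h>

    /* Fisher-Yates over arr[start .. start+len-1] only */
    static void shuffle_range(int *arr, unsigned long start, unsigned long len)
    {
            if (len < 2)
                    return;
            for (unsigned long index = len - 1; index > 0; index--) {
                    unsigned long i = start + index;
                    /* pick within the group, then rebase into the array */
                    unsigned long j = start + rand() % (index + 1);
                    int tmp = arr[i];

                    arr[i] = arr[j];
                    arr[j] = tmp;
            }
    }

    int main(void)
    {
            int a[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

            shuffle_range(a, 2, 4); /* only slots 2..5 may move */
            for (int i = 0; i < 8; i++)
                    printf("%d ", a[i]);
            printf("\n");
            return 0;
    }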
+@@ -273,8 +278,6 @@ static bool is_flexible_array(const_tree field)
+ {
+ const_tree fieldtype;
+ const_tree typesize;
+- const_tree elemtype;
+- const_tree elemsize;
+
+ fieldtype = TREE_TYPE(field);
+ typesize = TYPE_SIZE(fieldtype);
+@@ -282,20 +285,12 @@ static bool is_flexible_array(const_tree field)
+ if (TREE_CODE(fieldtype) != ARRAY_TYPE)
+ return false;
+
+- elemtype = TREE_TYPE(fieldtype);
+- elemsize = TYPE_SIZE(elemtype);
+-
+ /* size of type is represented in bits */
+
+ if (typesize == NULL_TREE && TYPE_DOMAIN(fieldtype) != NULL_TREE &&
+ TYPE_MAX_VALUE(TYPE_DOMAIN(fieldtype)) == NULL_TREE)
+ return true;
+
+- if (typesize != NULL_TREE &&
+- (TREE_CONSTANT(typesize) && (!tree_to_uhwi(typesize) ||
+- tree_to_uhwi(typesize) == tree_to_uhwi(elemsize))))
+- return true;
+-
+ return false;
+ }
+
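After this hunk only a true C99 flexible array member (an array with neither a computed size nor a domain upper bound) counts as flexible; the removed branch had also accepted the older zero-length and, by size equality, one-element array idioms, which the kernel has been converting away from. The three shapes side by side:

    #include <stddef.h>
    #include <stdio.h>

    struct new_style {
            size_t len;
            char data[];    /* flexible array member: still recognized */
    };

    struct old_zero {
            size_t len;
            char data[0];   /* GNU zero-length hack: no longer special-cased */
    };

    struct old_one {
            size_t len;
            char data[1];   /* pre-C99 idiom: likewise just a normal array */
    };

    int main(void)
    {
            printf("payload at offset %zu\n", offsetof(struct new_style, data));
            return 0;
    }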
+diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
+index e3517d4ab8ec9..e810e0c27ff18 100644
+--- a/scripts/gdb/linux/constants.py.in
++++ b/scripts/gdb/linux/constants.py.in
+@@ -66,10 +66,11 @@ LX_GDBPARSED(IRQD_LEVEL)
+ LX_GDBPARSED(IRQ_HIDDEN)
+
+ /* linux/module.h */
+-LX_GDBPARSED(MOD_TEXT)
+-LX_GDBPARSED(MOD_DATA)
+-LX_GDBPARSED(MOD_RODATA)
+-LX_GDBPARSED(MOD_RO_AFTER_INIT)
++if IS_BUILTIN(CONFIG_MODULES):
++ LX_GDBPARSED(MOD_TEXT)
++ LX_GDBPARSED(MOD_DATA)
++ LX_GDBPARSED(MOD_RODATA)
++ LX_GDBPARSED(MOD_RO_AFTER_INIT)
+
+ /* linux/mount.h */
+ LX_VALUE(MNT_NOSUID)
+@@ -157,3 +158,4 @@ LX_CONFIG(CONFIG_STACKDEPOT)
+ LX_CONFIG(CONFIG_PAGE_OWNER)
+ LX_CONFIG(CONFIG_SLUB_DEBUG)
+ LX_CONFIG(CONFIG_SLAB_FREELIST_HARDENED)
++LX_CONFIG(CONFIG_MMU)
+diff --git a/scripts/gdb/linux/vmalloc.py b/scripts/gdb/linux/vmalloc.py
+index 48e4a4fae7bbf..d3c8a0274d1ed 100644
+--- a/scripts/gdb/linux/vmalloc.py
++++ b/scripts/gdb/linux/vmalloc.py
+@@ -10,8 +10,9 @@ import gdb
+ import re
+ from linux import lists, utils, stackdepot, constants, mm
+
+-vmap_area_type = utils.CachedType('struct vmap_area')
+-vmap_area_ptr_type = vmap_area_type.get_type().pointer()
++if constants.LX_CONFIG_MMU:
++ vmap_area_type = utils.CachedType('struct vmap_area')
++ vmap_area_ptr_type = vmap_area_type.get_type().pointer()
+
+ def is_vmalloc_addr(x):
+ pg_ops = mm.page_ops().ops
+@@ -25,6 +26,9 @@ class LxVmallocInfo(gdb.Command):
+ super(LxVmallocInfo, self).__init__("lx-vmallocinfo", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
++ if not constants.LX_CONFIG_MMU:
++ raise gdb.GdbError("Requires MMU support")
++
+ vmap_area_list = gdb.parse_and_eval('vmap_area_list')
+ for vmap_area in lists.list_for_each_entry(vmap_area_list, vmap_area_ptr_type, "list"):
+ if not vmap_area['vm']:
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 7056751c29b1f..6583b36dbe694 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -1348,13 +1348,13 @@ static int do_typec_entry(const char *filename, void *symval, char *alias)
+ /* Looks like: tee:uuid */
+ static int do_tee_entry(const char *filename, void *symval, char *alias)
+ {
+- DEF_FIELD(symval, tee_client_device_id, uuid);
++ DEF_FIELD_ADDR(symval, tee_client_device_id, uuid);
+
+ sprintf(alias, "tee:%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+- uuid.b[0], uuid.b[1], uuid.b[2], uuid.b[3], uuid.b[4],
+- uuid.b[5], uuid.b[6], uuid.b[7], uuid.b[8], uuid.b[9],
+- uuid.b[10], uuid.b[11], uuid.b[12], uuid.b[13], uuid.b[14],
+- uuid.b[15]);
++ uuid->b[0], uuid->b[1], uuid->b[2], uuid->b[3], uuid->b[4],
++ uuid->b[5], uuid->b[6], uuid->b[7], uuid->b[8], uuid->b[9],
++ uuid->b[10], uuid->b[11], uuid->b[12], uuid->b[13], uuid->b[14],
++ uuid->b[15]);
+
+ add_wildcard(alias);
+ return 1;
+@@ -1401,10 +1401,10 @@ static int do_mhi_ep_entry(const char *filename, void *symval, char *alias)
+ /* Looks like: ishtp:{guid} */
+ static int do_ishtp_entry(const char *filename, void *symval, char *alias)
+ {
+- DEF_FIELD(symval, ishtp_device_id, guid);
++ DEF_FIELD_ADDR(symval, ishtp_device_id, guid);
+
+ strcpy(alias, ISHTP_MODULE_PREFIX "{");
+- add_guid(alias, guid);
++ add_guid(alias, *guid);
+ strcat(alias, "}");
+
+ return 1;
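DEF_FIELD() declares a local of the field's type and reads it by value out of the device-table image, which only works for integer fields (the read goes through an endianness conversion on a scalar, and an array field would not even be assignable). For the 16-byte uuid/guid fields that produced garbage, so these handlers switch to DEF_FIELD_ADDR(), which just forms a pointer to the field. The idea of the two macros, reduced to a hypothetical sketch:

    #include <stdio.h>
    #include <string.h>

    struct id { unsigned char b[16]; };

    /* by value: fine for scalars, impossible for arrays */
    #define DEF_FIELD(ptr, type, field) \
            typeof(((type *)0)->field) field = ((type *)(ptr))->field

    /* by address: works for any field type */
    #define DEF_FIELD_ADDR(ptr, type, field) \
            typeof(((type *)0)->field) *field = &((type *)(ptr))->field

    int main(void)
    {
            struct id rec;

            memset(rec.b, 0xab, sizeof(rec.b));
            DEF_FIELD_ADDR(&rec, struct id, b);     /* 'b' is a pointer now */
            printf("%02x\n", (*b)[0]);              /* prints ab */
            return 0;
    }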
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index bd6a910f65282..261cef4c622fb 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -423,7 +423,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size,
+ /* high level check about policy management - fine grained in
+ * below after unpack
+ */
+- error = aa_may_manage_policy(label, ns, mask);
++ error = aa_may_manage_policy(current_cred(), label, ns, mask);
+ if (error)
+ goto end_section;
+
+@@ -486,7 +486,8 @@ static ssize_t profile_remove(struct file *f, const char __user *buf,
+ /* high level check about policy management - fine grained in
+ * below after unpack
+ */
+- error = aa_may_manage_policy(label, ns, AA_MAY_REMOVE_POLICY);
++ error = aa_may_manage_policy(current_cred(), label, ns,
++ AA_MAY_REMOVE_POLICY);
+ if (error)
+ goto out;
+
+@@ -1805,7 +1806,8 @@ static int ns_mkdir_op(struct mnt_idmap *idmap, struct inode *dir,
+ int error;
+
+ label = begin_current_label_crit_section();
+- error = aa_may_manage_policy(label, NULL, AA_MAY_LOAD_POLICY);
++ error = aa_may_manage_policy(current_cred(), label, NULL,
++ AA_MAY_LOAD_POLICY);
+ end_current_label_crit_section(label);
+ if (error)
+ return error;
+@@ -1854,7 +1856,8 @@ static int ns_rmdir_op(struct inode *dir, struct dentry *dentry)
+ int error;
+
+ label = begin_current_label_crit_section();
+- error = aa_may_manage_policy(label, NULL, AA_MAY_LOAD_POLICY);
++ error = aa_may_manage_policy(current_cred(), label, NULL,
++ AA_MAY_LOAD_POLICY);
+ end_current_label_crit_section(label);
+ if (error)
+ return error;
+diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
+index 5a7978aa4b19e..6933cb2f679b0 100644
+--- a/security/apparmor/audit.c
++++ b/security/apparmor/audit.c
+@@ -85,37 +85,36 @@ static const char *const aa_class_names[] = {
+ /**
+ * audit_pre() - core AppArmor function.
+ * @ab: audit buffer to fill (NOT NULL)
+- * @ca: audit structure containing data to audit (NOT NULL)
++ * @va: audit structure containing data to audit (NOT NULL)
+ *
+- * Record common AppArmor audit data from @sa
++ * Record common AppArmor audit data from @va
+ */
+-static void audit_pre(struct audit_buffer *ab, void *ca)
++static void audit_pre(struct audit_buffer *ab, void *va)
+ {
+- struct common_audit_data *sa = ca;
++ struct apparmor_audit_data *ad = aad_of_va(va);
+
+ if (aa_g_audit_header) {
+ audit_log_format(ab, "apparmor=\"%s\"",
+- aa_audit_type[aad(sa)->type]);
++ aa_audit_type[ad->type]);
+ }
+
+- if (aad(sa)->op) {
+- audit_log_format(ab, " operation=\"%s\"", aad(sa)->op);
+- }
++ if (ad->op)
++ audit_log_format(ab, " operation=\"%s\"", ad->op);
+
+- if (aad(sa)->class)
++ if (ad->class)
+ audit_log_format(ab, " class=\"%s\"",
+- aad(sa)->class <= AA_CLASS_LAST ?
+- aa_class_names[aad(sa)->class] :
++ ad->class <= AA_CLASS_LAST ?
++ aa_class_names[ad->class] :
+ "unknown");
+
+- if (aad(sa)->info) {
+- audit_log_format(ab, " info=\"%s\"", aad(sa)->info);
+- if (aad(sa)->error)
+- audit_log_format(ab, " error=%d", aad(sa)->error);
++ if (ad->info) {
++ audit_log_format(ab, " info=\"%s\"", ad->info);
++ if (ad->error)
++ audit_log_format(ab, " error=%d", ad->error);
+ }
+
+- if (aad(sa)->label) {
+- struct aa_label *label = aad(sa)->label;
++ if (ad->subj_label) {
++ struct aa_label *label = ad->subj_label;
+
+ if (label_isprofile(label)) {
+ struct aa_profile *profile = labels_profile(label);
+@@ -134,42 +133,44 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
+ }
+ }
+
+- if (aad(sa)->name) {
++ if (ad->name) {
+ audit_log_format(ab, " name=");
+- audit_log_untrustedstring(ab, aad(sa)->name);
++ audit_log_untrustedstring(ab, ad->name);
+ }
+ }
+
+ /**
+ * aa_audit_msg - Log a message to the audit subsystem
+- * @sa: audit event structure (NOT NULL)
++ * @type: audit type for the message
++ * @ad: audit event structure (NOT NULL)
+ * @cb: optional callback fn for type specific fields (MAYBE NULL)
+ */
+-void aa_audit_msg(int type, struct common_audit_data *sa,
++void aa_audit_msg(int type, struct apparmor_audit_data *ad,
+ void (*cb) (struct audit_buffer *, void *))
+ {
+- aad(sa)->type = type;
+- common_lsm_audit(sa, audit_pre, cb);
++ ad->type = type;
++ common_lsm_audit(&ad->common, audit_pre, cb);
+ }
+
+ /**
+ * aa_audit - Log a profile based audit event to the audit subsystem
+ * @type: audit type for the message
+ * @profile: profile to check against (NOT NULL)
+- * @sa: audit event (NOT NULL)
++ * @ad: audit event (NOT NULL)
+ * @cb: optional callback fn for type specific fields (MAYBE NULL)
+ *
+ * Handle default message switching based off of audit mode flags
+ *
+ * Returns: error on failure
+ */
+-int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
++int aa_audit(int type, struct aa_profile *profile,
++ struct apparmor_audit_data *ad,
+ void (*cb) (struct audit_buffer *, void *))
+ {
+ AA_BUG(!profile);
+
+ if (type == AUDIT_APPARMOR_AUTO) {
+- if (likely(!aad(sa)->error)) {
++ if (likely(!ad->error)) {
+ if (AUDIT_MODE(profile) != AUDIT_ALL)
+ return 0;
+ type = AUDIT_APPARMOR_AUDIT;
+@@ -181,24 +182,24 @@ int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
+ if (AUDIT_MODE(profile) == AUDIT_QUIET ||
+ (type == AUDIT_APPARMOR_DENIED &&
+ AUDIT_MODE(profile) == AUDIT_QUIET_DENIED))
+- return aad(sa)->error;
++ return ad->error;
+
+ if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
+ type = AUDIT_APPARMOR_KILL;
+
+- aad(sa)->label = &profile->label;
++ ad->subj_label = &profile->label;
+
+- aa_audit_msg(type, sa, cb);
++ aa_audit_msg(type, ad, cb);
+
+- if (aad(sa)->type == AUDIT_APPARMOR_KILL)
++ if (ad->type == AUDIT_APPARMOR_KILL)
+ (void)send_sig_info(SIGKILL, NULL,
+- sa->type == LSM_AUDIT_DATA_TASK && sa->u.tsk ?
+- sa->u.tsk : current);
++ ad->common.type == LSM_AUDIT_DATA_TASK &&
++ ad->common.u.tsk ? ad->common.u.tsk : current);
+
+- if (aad(sa)->type == AUDIT_APPARMOR_ALLOWED)
+- return complain_error(aad(sa)->error);
++ if (ad->type == AUDIT_APPARMOR_ALLOWED)
++ return complain_error(ad->error);
+
+- return aad(sa)->error;
++ return ad->error;
+ }
+
+ struct aa_audit_rule {
+diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
+index 326a51838ef28..2fb6a2ea0b998 100644
+--- a/security/apparmor/capability.c
++++ b/security/apparmor/capability.c
+@@ -51,7 +51,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
+
+ /**
+ * audit_caps - audit a capability
+- * @sa: audit data
++ * @ad: audit data
+ * @profile: profile being tested for confinement (NOT NULL)
+ * @cap: capability tested
+ * @error: error code returned by test
+@@ -59,9 +59,9 @@ static void audit_cb(struct audit_buffer *ab, void *va)
+ * Do auditing of capability and handle, audit/complain/kill modes switching
+ * and duplicate message elimination.
+ *
+- * Returns: 0 or sa->error on success, error code on failure
++ * Returns: 0 or ad->error on success, error code on failure
+ */
+-static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
++static int audit_caps(struct apparmor_audit_data *ad, struct aa_profile *profile,
+ int cap, int error)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+@@ -69,7 +69,7 @@ static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
+ struct audit_cache *ent;
+ int type = AUDIT_APPARMOR_AUTO;
+
+- aad(sa)->error = error;
++ ad->error = error;
+
+ if (likely(!error)) {
+ /* test if auditing is being forced */
+@@ -101,7 +101,7 @@ static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
+ }
+ put_cpu_var(audit_cache);
+
+- return aa_audit(type, profile, sa, audit_cb);
++ return aa_audit(type, profile, ad, audit_cb);
+ }
+
+ /**
+@@ -109,12 +109,12 @@ static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
+ * @profile: profile being enforced (NOT NULL, NOT unconfined)
+ * @cap: capability to test if allowed
+ * @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated
+- * @sa: audit data (MAY BE NULL indicating no auditing)
++ * @ad: audit data (MAY BE NULL indicating no auditing)
+ *
+ * Returns: 0 if allowed else -EPERM
+ */
+ static int profile_capable(struct aa_profile *profile, int cap,
+- unsigned int opts, struct common_audit_data *sa)
++ unsigned int opts, struct apparmor_audit_data *ad)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+ typeof(*rules), list);
+@@ -132,14 +132,15 @@ static int profile_capable(struct aa_profile *profile, int cap,
+ /* audit the cap request in complain mode but note that it
+ * should be optional.
+ */
+- aad(sa)->info = "optional: no audit";
++ ad->info = "optional: no audit";
+ }
+
+- return audit_caps(sa, profile, cap, error);
++ return audit_caps(ad, profile, cap, error);
+ }
+
+ /**
+ * aa_capable - test permission to use capability
++ * @subj_cred: cred we are testing capability against
+ * @label: label being tested for capability (NOT NULL)
+ * @cap: capability to be tested
+ * @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated
+@@ -148,15 +149,17 @@ static int profile_capable(struct aa_profile *profile, int cap,
+ *
+ * Returns: 0 on success, or else an error code.
+ */
+-int aa_capable(struct aa_label *label, int cap, unsigned int opts)
++int aa_capable(const struct cred *subj_cred, struct aa_label *label,
++ int cap, unsigned int opts)
+ {
+ struct aa_profile *profile;
+ int error = 0;
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_CAP, AA_CLASS_CAP, OP_CAPABLE);
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_CAP, AA_CLASS_CAP, OP_CAPABLE);
+
+- sa.u.cap = cap;
++ ad.subj_cred = subj_cred;
++ ad.common.u.cap = cap;
+ error = fn_for_each_confined(label, profile,
+- profile_capable(profile, cap, opts, &sa));
++ profile_capable(profile, cap, opts, &ad));
+
+ return error;
+ }
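This is one instance of a tree-wide AppArmor change in this patch: the subject's credentials are captured once by the caller and threaded down explicitly (here into ad.subj_cred for the audit record) instead of every layer re-deriving current_cred(). That pins the access decision and the audit log to the same snapshot even if the task's creds change mid-check. The pattern in miniature, with invented names:

    #include <stdio.h>

    struct cred { unsigned int fsuid; };

    /* lower layers take the cred as a parameter... */
    static int check_capable(const struct cred *subj_cred, int cap)
    {
            printf("cap %d checked for fsuid %u\n", cap, subj_cred->fsuid);
            return 0;
    }

    /* ...so the entry point's one snapshot feeds both the decision
     * and whatever gets logged about it */
    static int entry_point(const struct cred *snapshot)
    {
            return check_capable(snapshot, 21);
    }

    int main(void)
    {
            struct cred c = { .fsuid = 1000 };

            return entry_point(&c);
    }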
+diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
+index f3715cda59c52..543105cf7e334 100644
+--- a/security/apparmor/domain.c
++++ b/security/apparmor/domain.c
+@@ -31,6 +31,7 @@
+
+ /**
+ * may_change_ptraced_domain - check if can change profile on ptraced task
++ * @to_cred: cred of task changing domain
+ * @to_label: profile to change to (NOT NULL)
+ * @info: message if there is an error
+ *
+@@ -39,28 +40,34 @@
+ *
+ * Returns: %0 or error if change not allowed
+ */
+-static int may_change_ptraced_domain(struct aa_label *to_label,
++static int may_change_ptraced_domain(const struct cred *to_cred,
++ struct aa_label *to_label,
+ const char **info)
+ {
+ struct task_struct *tracer;
+ struct aa_label *tracerl = NULL;
++ const struct cred *tracer_cred = NULL;
++
+ int error = 0;
+
+ rcu_read_lock();
+ tracer = ptrace_parent(current);
+- if (tracer)
++ if (tracer) {
+ /* released below */
+ tracerl = aa_get_task_label(tracer);
+-
++ tracer_cred = get_task_cred(tracer);
++ }
+ /* not ptraced */
+ if (!tracer || unconfined(tracerl))
+ goto out;
+
+- error = aa_may_ptrace(tracerl, to_label, PTRACE_MODE_ATTACH);
++ error = aa_may_ptrace(tracer_cred, tracerl, to_cred, to_label,
++ PTRACE_MODE_ATTACH);
+
+ out:
+ rcu_read_unlock();
+ aa_put_label(tracerl);
++ put_cred(tracer_cred);
+
+ if (error)
+ *info = "ptrace prevents transition";
+@@ -619,7 +626,8 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
+ return new;
+ }
+
+-static struct aa_label *profile_transition(struct aa_profile *profile,
++static struct aa_label *profile_transition(const struct cred *subj_cred,
++ struct aa_profile *profile,
+ const struct linux_binprm *bprm,
+ char *buffer, struct path_cond *cond,
+ bool *secure_exec)
+@@ -709,7 +717,8 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
+ }
+
+ audit:
+- aa_audit_file(profile, &perms, OP_EXEC, MAY_EXEC, name, target, new,
++ aa_audit_file(subj_cred, profile, &perms, OP_EXEC, MAY_EXEC, name,
++ target, new,
+ cond->uid, info, error);
+ if (!new || nonewprivs) {
+ aa_put_label(new);
+@@ -719,7 +728,8 @@ audit:
+ return new;
+ }
+
+-static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
++static int profile_onexec(const struct cred *subj_cred,
++ struct aa_profile *profile, struct aa_label *onexec,
+ bool stack, const struct linux_binprm *bprm,
+ char *buffer, struct path_cond *cond,
+ bool *secure_exec)
+@@ -787,13 +797,15 @@ static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
+ }
+
+ audit:
+- return aa_audit_file(profile, &perms, OP_EXEC, AA_MAY_ONEXEC, xname,
++ return aa_audit_file(subj_cred, profile, &perms, OP_EXEC,
++ AA_MAY_ONEXEC, xname,
+ NULL, onexec, cond->uid, info, error);
+ }
+
+ /* ensure none ns domain transitions are correctly applied with onexec */
+
+-static struct aa_label *handle_onexec(struct aa_label *label,
++static struct aa_label *handle_onexec(const struct cred *subj_cred,
++ struct aa_label *label,
+ struct aa_label *onexec, bool stack,
+ const struct linux_binprm *bprm,
+ char *buffer, struct path_cond *cond,
+@@ -810,26 +822,28 @@ static struct aa_label *handle_onexec(struct aa_label *label,
+
+ if (!stack) {
+ error = fn_for_each_in_ns(label, profile,
+- profile_onexec(profile, onexec, stack,
++ profile_onexec(subj_cred, profile, onexec, stack,
+ bprm, buffer, cond, unsafe));
+ if (error)
+ return ERR_PTR(error);
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
+ aa_get_newest_label(onexec),
+- profile_transition(profile, bprm, buffer,
++ profile_transition(subj_cred, profile, bprm,
++ buffer,
+ cond, unsafe));
+
+ } else {
+ /* TODO: determine how much we want to loosen this */
+ error = fn_for_each_in_ns(label, profile,
+- profile_onexec(profile, onexec, stack, bprm,
++ profile_onexec(subj_cred, profile, onexec, stack, bprm,
+ buffer, cond, unsafe));
+ if (error)
+ return ERR_PTR(error);
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
+ aa_label_merge(&profile->label, onexec,
+ GFP_KERNEL),
+- profile_transition(profile, bprm, buffer,
++ profile_transition(subj_cred, profile, bprm,
++ buffer,
+ cond, unsafe));
+ }
+
+@@ -838,7 +852,8 @@ static struct aa_label *handle_onexec(struct aa_label *label,
+
+ /* TODO: get rid of GLOBAL_ROOT_UID */
+ error = fn_for_each_in_ns(label, profile,
+- aa_audit_file(profile, &nullperms, OP_CHANGE_ONEXEC,
++ aa_audit_file(subj_cred, profile, &nullperms,
++ OP_CHANGE_ONEXEC,
+ AA_MAY_ONEXEC, bprm->filename, NULL,
+ onexec, GLOBAL_ROOT_UID,
+ "failed to build target label", -ENOMEM));
+@@ -857,6 +872,7 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
+ {
+ struct aa_task_ctx *ctx;
+ struct aa_label *label, *new = NULL;
++ const struct cred *subj_cred;
+ struct aa_profile *profile;
+ char *buffer = NULL;
+ const char *info = NULL;
+@@ -869,6 +885,7 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
+ file_inode(bprm->file)->i_mode
+ };
+
++ subj_cred = current_cred();
+ ctx = task_ctx(current);
+ AA_BUG(!cred_label(bprm->cred));
+ AA_BUG(!ctx);
+@@ -895,11 +912,12 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
+
+ /* Test for onexec first as onexec override other x transitions. */
+ if (ctx->onexec)
+- new = handle_onexec(label, ctx->onexec, ctx->token,
++ new = handle_onexec(subj_cred, label, ctx->onexec, ctx->token,
+ bprm, buffer, &cond, &unsafe);
+ else
+ new = fn_label_build(label, profile, GFP_KERNEL,
+- profile_transition(profile, bprm, buffer,
++ profile_transition(subj_cred, profile, bprm,
++ buffer,
+ &cond, &unsafe));
+
+ AA_BUG(!new);
+@@ -934,7 +952,7 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
+
+ if (bprm->unsafe & (LSM_UNSAFE_PTRACE)) {
+ /* TODO: test needs to be profile of label to new */
+- error = may_change_ptraced_domain(new, &info);
++ error = may_change_ptraced_domain(bprm->cred, new, &info);
+ if (error)
+ goto audit;
+ }
+@@ -971,7 +989,8 @@ done:
+
+ audit:
+ error = fn_for_each(label, profile,
+- aa_audit_file(profile, &nullperms, OP_EXEC, MAY_EXEC,
++ aa_audit_file(current_cred(), profile, &nullperms,
++ OP_EXEC, MAY_EXEC,
+ bprm->filename, NULL, new,
+ vfsuid_into_kuid(vfsuid), info, error));
+ aa_put_label(new);
+@@ -987,7 +1006,8 @@ audit:
+ *
+ * Returns: label for hat transition OR ERR_PTR. Does NOT return NULL
+ */
+-static struct aa_label *build_change_hat(struct aa_profile *profile,
++static struct aa_label *build_change_hat(const struct cred *subj_cred,
++ struct aa_profile *profile,
+ const char *name, bool sibling)
+ {
+ struct aa_profile *root, *hat = NULL;
+@@ -1019,7 +1039,8 @@ static struct aa_label *build_change_hat(struct aa_profile *profile,
+ aa_put_profile(root);
+
+ audit:
+- aa_audit_file(profile, &nullperms, OP_CHANGE_HAT, AA_MAY_CHANGEHAT,
++ aa_audit_file(subj_cred, profile, &nullperms, OP_CHANGE_HAT,
++ AA_MAY_CHANGEHAT,
+ name, hat ? hat->base.hname : NULL,
+ hat ? &hat->label : NULL, GLOBAL_ROOT_UID, info,
+ error);
+@@ -1035,7 +1056,8 @@ audit:
+ *
+ * Returns: label for hat transition or ERR_PTR. Does not return NULL
+ */
+-static struct aa_label *change_hat(struct aa_label *label, const char *hats[],
++static struct aa_label *change_hat(const struct cred *subj_cred,
++ struct aa_label *label, const char *hats[],
+ int count, int flags)
+ {
+ struct aa_profile *profile, *root, *hat = NULL;
+@@ -1111,7 +1133,8 @@ fail:
+ */
+ /* TODO: get rid of GLOBAL_ROOT_UID */
+ if (count > 1 || COMPLAIN_MODE(profile)) {
+- aa_audit_file(profile, &nullperms, OP_CHANGE_HAT,
++ aa_audit_file(subj_cred, profile, &nullperms,
++ OP_CHANGE_HAT,
+ AA_MAY_CHANGEHAT, name, NULL, NULL,
+ GLOBAL_ROOT_UID, info, error);
+ }
+@@ -1120,7 +1143,8 @@ fail:
+
+ build:
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
+- build_change_hat(profile, name, sibling),
++ build_change_hat(subj_cred, profile, name,
++ sibling),
+ aa_get_label(&profile->label));
+ if (!new) {
+ info = "label build failed";
+@@ -1150,7 +1174,7 @@ build:
+ */
+ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
+ {
+- const struct cred *cred;
++ const struct cred *subj_cred;
+ struct aa_task_ctx *ctx = task_ctx(current);
+ struct aa_label *label, *previous, *new = NULL, *target = NULL;
+ struct aa_profile *profile;
+@@ -1159,8 +1183,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
+ int error = 0;
+
+ /* released below */
+- cred = get_current_cred();
+- label = aa_get_newest_cred_label(cred);
++ subj_cred = get_current_cred();
++ label = aa_get_newest_cred_label(subj_cred);
+ previous = aa_get_newest_label(ctx->previous);
+
+ /*
+@@ -1180,7 +1204,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
+ }
+
+ if (count) {
+- new = change_hat(label, hats, count, flags);
++ new = change_hat(subj_cred, label, hats, count, flags);
+ AA_BUG(!new);
+ if (IS_ERR(new)) {
+ error = PTR_ERR(new);
+@@ -1189,7 +1213,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
+ goto out;
+ }
+
+- error = may_change_ptraced_domain(new, &info);
++ /* target cred is the same as current except new label */
++ error = may_change_ptraced_domain(subj_cred, new, &info);
+ if (error)
+ goto fail;
+
+@@ -1242,7 +1267,7 @@ out:
+ aa_put_label(new);
+ aa_put_label(previous);
+ aa_put_label(label);
+- put_cred(cred);
++ put_cred(subj_cred);
+
+ return error;
+
+@@ -1252,7 +1277,7 @@ kill:
+
+ fail:
+ fn_for_each_in_ns(label, profile,
+- aa_audit_file(profile, &perms, OP_CHANGE_HAT,
++ aa_audit_file(subj_cred, profile, &perms, OP_CHANGE_HAT,
+ AA_MAY_CHANGEHAT, NULL, NULL, target,
+ GLOBAL_ROOT_UID, info, error));
+
+@@ -1261,6 +1286,7 @@ fail:
+
+
+ static int change_profile_perms_wrapper(const char *op, const char *name,
++ const struct cred *subj_cred,
+ struct aa_profile *profile,
+ struct aa_label *target, bool stack,
+ u32 request, struct aa_perms *perms)
+@@ -1275,7 +1301,8 @@ static int change_profile_perms_wrapper(const char *op, const char *name,
+ rules->file.start[AA_CLASS_FILE],
+ perms);
+ if (error)
+- error = aa_audit_file(profile, perms, op, request, name,
++ error = aa_audit_file(subj_cred, profile, perms, op, request,
++ name,
+ NULL, target, GLOBAL_ROOT_UID, info,
+ error);
+
+@@ -1304,6 +1331,7 @@ int aa_change_profile(const char *fqname, int flags)
+ const char *auditname = fqname; /* retain leading & if stack */
+ bool stack = flags & AA_CHANGE_STACK;
+ struct aa_task_ctx *ctx = task_ctx(current);
++ const struct cred *subj_cred = get_current_cred();
+ int error = 0;
+ char *op;
+ u32 request;
+@@ -1381,6 +1409,7 @@ int aa_change_profile(const char *fqname, int flags)
+ */
+ error = fn_for_each_in_ns(label, profile,
+ change_profile_perms_wrapper(op, auditname,
++ subj_cred,
+ profile, target, stack,
+ request, &perms));
+ if (error)
+@@ -1391,7 +1420,7 @@ int aa_change_profile(const char *fqname, int flags)
+
+ check:
+ /* check if tracing task is allowed to trace target domain */
+- error = may_change_ptraced_domain(target, &info);
++ error = may_change_ptraced_domain(subj_cred, target, &info);
+ if (error && !fn_for_each_in_ns(label, profile,
+ COMPLAIN_MODE(profile)))
+ goto audit;
+@@ -1451,7 +1480,8 @@ check:
+
+ audit:
+ error = fn_for_each_in_ns(label, profile,
+- aa_audit_file(profile, &perms, op, request, auditname,
++ aa_audit_file(subj_cred,
++ profile, &perms, op, request, auditname,
+ NULL, new ? new : target,
+ GLOBAL_ROOT_UID, info, error));
+
+@@ -1459,6 +1489,7 @@ out:
+ aa_put_label(new);
+ aa_put_label(target);
+ aa_put_label(label);
++ put_cred(subj_cred);
+
+ return error;
+ }
+diff --git a/security/apparmor/file.c b/security/apparmor/file.c
+index 698b124e649f6..6fd21324a097f 100644
+--- a/security/apparmor/file.c
++++ b/security/apparmor/file.c
+@@ -44,38 +44,40 @@ static u32 map_mask_to_chr_mask(u32 mask)
+ static void file_audit_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
+- kuid_t fsuid = current_fsuid();
++ struct apparmor_audit_data *ad = aad(sa);
++ kuid_t fsuid = ad->subj_cred ? ad->subj_cred->fsuid : current_fsuid();
+ char str[10];
+
+- if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
++ if (ad->request & AA_AUDIT_FILE_MASK) {
+ aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs,
+- map_mask_to_chr_mask(aad(sa)->request));
++ map_mask_to_chr_mask(ad->request));
+ audit_log_format(ab, " requested_mask=\"%s\"", str);
+ }
+- if (aad(sa)->denied & AA_AUDIT_FILE_MASK) {
++ if (ad->denied & AA_AUDIT_FILE_MASK) {
+ aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs,
+- map_mask_to_chr_mask(aad(sa)->denied));
++ map_mask_to_chr_mask(ad->denied));
+ audit_log_format(ab, " denied_mask=\"%s\"", str);
+ }
+- if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
++ if (ad->request & AA_AUDIT_FILE_MASK) {
+ audit_log_format(ab, " fsuid=%d",
+ from_kuid(&init_user_ns, fsuid));
+ audit_log_format(ab, " ouid=%d",
+- from_kuid(&init_user_ns, aad(sa)->fs.ouid));
++ from_kuid(&init_user_ns, ad->fs.ouid));
+ }
+
+- if (aad(sa)->peer) {
++ if (ad->peer) {
+ audit_log_format(ab, " target=");
+- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
++ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
+ FLAG_VIEW_SUBNS, GFP_KERNEL);
+- } else if (aad(sa)->fs.target) {
++ } else if (ad->fs.target) {
+ audit_log_format(ab, " target=");
+- audit_log_untrustedstring(ab, aad(sa)->fs.target);
++ audit_log_untrustedstring(ab, ad->fs.target);
+ }
+ }
+
+ /**
+ * aa_audit_file - handle the auditing of file operations
++ * @subj_cred: cred of the subject
+ * @profile: the profile being enforced (NOT NULL)
+ * @perms: the permissions computed for the request (NOT NULL)
+ * @op: operation being mediated
+@@ -89,59 +91,74 @@ static void file_audit_cb(struct audit_buffer *ab, void *va)
+ *
+ * Returns: %0 or error on failure
+ */
+-int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
++int aa_audit_file(const struct cred *subj_cred,
++ struct aa_profile *profile, struct aa_perms *perms,
+ const char *op, u32 request, const char *name,
+ const char *target, struct aa_label *tlabel,
+ kuid_t ouid, const char *info, int error)
+ {
+ int type = AUDIT_APPARMOR_AUTO;
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_TASK, AA_CLASS_FILE, op);
+-
+- sa.u.tsk = NULL;
+- aad(&sa)->request = request;
+- aad(&sa)->name = name;
+- aad(&sa)->fs.target = target;
+- aad(&sa)->peer = tlabel;
+- aad(&sa)->fs.ouid = ouid;
+- aad(&sa)->info = info;
+- aad(&sa)->error = error;
+- sa.u.tsk = NULL;
+-
+- if (likely(!aad(&sa)->error)) {
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_TASK, AA_CLASS_FILE, op);
++
++ ad.subj_cred = subj_cred;
++ ad.request = request;
++ ad.name = name;
++ ad.fs.target = target;
++ ad.peer = tlabel;
++ ad.fs.ouid = ouid;
++ ad.info = info;
++ ad.error = error;
++ ad.common.u.tsk = NULL;
++
++ if (likely(!ad.error)) {
+ u32 mask = perms->audit;
+
+ if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
+ mask = 0xffff;
+
+ /* mask off perms that are not being force audited */
+- aad(&sa)->request &= mask;
++ ad.request &= mask;
+
+- if (likely(!aad(&sa)->request))
++ if (likely(!ad.request))
+ return 0;
+ type = AUDIT_APPARMOR_AUDIT;
+ } else {
+ /* only report permissions that were denied */
+- aad(&sa)->request = aad(&sa)->request & ~perms->allow;
+- AA_BUG(!aad(&sa)->request);
++ ad.request = ad.request & ~perms->allow;
++ AA_BUG(!ad.request);
+
+- if (aad(&sa)->request & perms->kill)
++ if (ad.request & perms->kill)
+ type = AUDIT_APPARMOR_KILL;
+
+ /* quiet known rejects, assumes quiet and kill do not overlap */
+- if ((aad(&sa)->request & perms->quiet) &&
++ if ((ad.request & perms->quiet) &&
+ AUDIT_MODE(profile) != AUDIT_NOQUIET &&
+ AUDIT_MODE(profile) != AUDIT_ALL)
+- aad(&sa)->request &= ~perms->quiet;
++ ad.request &= ~perms->quiet;
+
+- if (!aad(&sa)->request)
+- return aad(&sa)->error;
++ if (!ad.request)
++ return ad.error;
+ }
+
+- aad(&sa)->denied = aad(&sa)->request & ~perms->allow;
+- return aa_audit(type, profile, &sa, file_audit_cb);
++ ad.denied = ad.request & ~perms->allow;
++ return aa_audit(type, profile, &ad, file_audit_cb);
+ }
+
+-static int path_name(const char *op, struct aa_label *label,
++/**
++ * is_deleted - test if a file has been completely unlinked
++ * @dentry: dentry of file to test for deletion (NOT NULL)
++ *
++ * Returns: true if deleted else false
++ */
++static inline bool is_deleted(struct dentry *dentry)
++{
++ if (d_unlinked(dentry) && d_backing_inode(dentry)->i_nlink == 0)
++ return true;
++ return false;
++}
++
++static int path_name(const char *op, const struct cred *subj_cred,
++ struct aa_label *label,
+ const struct path *path, int flags, char *buffer,
+ const char **name, struct path_cond *cond, u32 request)
+ {
+@@ -153,7 +170,8 @@ static int path_name(const char *op, struct aa_label *label,
+ labels_profile(label)->disconnected);
+ if (error) {
+ fn_for_each_confined(label, profile,
+- aa_audit_file(profile, &nullperms, op, request, *name,
++ aa_audit_file(subj_cred,
++ profile, &nullperms, op, request, *name,
+ NULL, NULL, cond->uid, info, error));
+ return error;
+ }
+@@ -207,9 +225,9 @@ aa_state_t aa_str_perms(struct aa_policydb *file_rules, aa_state_t start,
+ return state;
+ }
+
+-static int __aa_path_perm(const char *op, struct aa_profile *profile,
+- const char *name, u32 request,
+- struct path_cond *cond, int flags,
++static int __aa_path_perm(const char *op, const struct cred *subj_cred,
++ struct aa_profile *profile, const char *name,
++ u32 request, struct path_cond *cond, int flags,
+ struct aa_perms *perms)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+@@ -222,12 +240,14 @@ static int __aa_path_perm(const char *op, struct aa_profile *profile,
+ name, cond, perms);
+ if (request & ~perms->allow)
+ e = -EACCES;
+- return aa_audit_file(profile, perms, op, request, name, NULL, NULL,
++ return aa_audit_file(subj_cred,
++ profile, perms, op, request, name, NULL, NULL,
+ cond->uid, NULL, e);
+ }
+
+
+-static int profile_path_perm(const char *op, struct aa_profile *profile,
++static int profile_path_perm(const char *op, const struct cred *subj_cred,
++ struct aa_profile *profile,
+ const struct path *path, char *buffer, u32 request,
+ struct path_cond *cond, int flags,
+ struct aa_perms *perms)
+@@ -238,18 +258,19 @@ static int profile_path_perm(const char *op, struct aa_profile *profile,
+ if (profile_unconfined(profile))
+ return 0;
+
+- error = path_name(op, &profile->label, path,
++ error = path_name(op, subj_cred, &profile->label, path,
+ flags | profile->path_flags, buffer, &name, cond,
+ request);
+ if (error)
+ return error;
+- return __aa_path_perm(op, profile, name, request, cond, flags,
+- perms);
++ return __aa_path_perm(op, subj_cred, profile, name, request, cond,
++ flags, perms);
+ }
+
+ /**
+ * aa_path_perm - do permissions check & audit for @path
+ * @op: operation being checked
++ * @subj_cred: subject cred
+ * @label: profile being enforced (NOT NULL)
+ * @path: path to check permissions of (NOT NULL)
+ * @flags: any additional path flags beyond what the profile specifies
+@@ -258,7 +279,8 @@ static int profile_path_perm(const char *op, struct aa_profile *profile,
+ *
+ * Returns: %0 else error if access denied or other error
+ */
+-int aa_path_perm(const char *op, struct aa_label *label,
++int aa_path_perm(const char *op, const struct cred *subj_cred,
++ struct aa_label *label,
+ const struct path *path, int flags, u32 request,
+ struct path_cond *cond)
+ {
+@@ -273,8 +295,8 @@ int aa_path_perm(const char *op, struct aa_label *label,
+ if (!buffer)
+ return -ENOMEM;
+ error = fn_for_each_confined(label, profile,
+- profile_path_perm(op, profile, path, buffer, request,
+- cond, flags, &perms));
++ profile_path_perm(op, subj_cred, profile, path, buffer,
++ request, cond, flags, &perms));
+
+ aa_put_buffer(buffer);
+
+@@ -301,7 +323,8 @@ static inline bool xindex_is_subset(u32 link, u32 target)
+ return true;
+ }
+
+-static int profile_path_link(struct aa_profile *profile,
++static int profile_path_link(const struct cred *subj_cred,
++ struct aa_profile *profile,
+ const struct path *link, char *buffer,
+ const struct path *target, char *buffer2,
+ struct path_cond *cond)
+@@ -315,13 +338,15 @@ static int profile_path_link(struct aa_profile *profile,
+ aa_state_t state;
+ int error;
+
+- error = path_name(OP_LINK, &profile->label, link, profile->path_flags,
++ error = path_name(OP_LINK, subj_cred, &profile->label, link,
++ profile->path_flags,
+ buffer, &lname, cond, AA_MAY_LINK);
+ if (error)
+ goto audit;
+
+ /* buffer2 freed below, tname is pointer in buffer2 */
+- error = path_name(OP_LINK, &profile->label, target, profile->path_flags,
++ error = path_name(OP_LINK, subj_cred, &profile->label, target,
++ profile->path_flags,
+ buffer2, &tname, cond, AA_MAY_LINK);
+ if (error)
+ goto audit;
+@@ -381,12 +406,14 @@ done_tests:
+ error = 0;
+
+ audit:
+- return aa_audit_file(profile, &lperms, OP_LINK, request, lname, tname,
++ return aa_audit_file(subj_cred,
++ profile, &lperms, OP_LINK, request, lname, tname,
+ NULL, cond->uid, info, error);
+ }
+
+ /**
+ * aa_path_link - Handle hard link permission check
++ * @subj_cred: subject cred
+ * @label: the label being enforced (NOT NULL)
+ * @old_dentry: the target dentry (NOT NULL)
+ * @new_dir: directory the new link will be created in (NOT NULL)
+@@ -403,7 +430,8 @@ audit:
+ *
+ * Returns: %0 if allowed else error
+ */
+-int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
++int aa_path_link(const struct cred *subj_cred,
++ struct aa_label *label, struct dentry *old_dentry,
+ const struct path *new_dir, struct dentry *new_dentry)
+ {
+ struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry };
+@@ -424,8 +452,8 @@ int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
+ goto out;
+
+ error = fn_for_each_confined(label, profile,
+- profile_path_link(profile, &link, buffer, &target,
+- buffer2, &cond));
++ profile_path_link(subj_cred, profile, &link, buffer,
++ &target, buffer2, &cond));
+ out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(buffer2);
+@@ -453,7 +481,8 @@ static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
+ spin_unlock(&fctx->lock);
+ }
+
+-static int __file_path_perm(const char *op, struct aa_label *label,
++static int __file_path_perm(const char *op, const struct cred *subj_cred,
++ struct aa_label *label,
+ struct aa_label *flabel, struct file *file,
+ u32 request, u32 denied, bool in_atomic)
+ {
+@@ -480,7 +509,8 @@ static int __file_path_perm(const char *op, struct aa_label *label,
+
+ /* check every profile in task label not in current cache */
+ error = fn_for_each_not_in_set(flabel, label, profile,
+- profile_path_perm(op, profile, &file->f_path, buffer,
++ profile_path_perm(op, subj_cred, profile,
++ &file->f_path, buffer,
+ request, &cond, flags, &perms));
+ if (denied && !error) {
+ /*
+@@ -493,12 +523,14 @@ static int __file_path_perm(const char *op, struct aa_label *label,
+ */
+ if (label == flabel)
+ error = fn_for_each(label, profile,
+- profile_path_perm(op, profile, &file->f_path,
++ profile_path_perm(op, subj_cred,
++ profile, &file->f_path,
+ buffer, request, &cond, flags,
+ &perms));
+ else
+ error = fn_for_each_not_in_set(label, flabel, profile,
+- profile_path_perm(op, profile, &file->f_path,
++ profile_path_perm(op, subj_cred,
++ profile, &file->f_path,
+ buffer, request, &cond, flags,
+ &perms));
+ }
+@@ -510,7 +542,8 @@ static int __file_path_perm(const char *op, struct aa_label *label,
+ return error;
+ }
+
+-static int __file_sock_perm(const char *op, struct aa_label *label,
++static int __file_sock_perm(const char *op, const struct cred *subj_cred,
++ struct aa_label *label,
+ struct aa_label *flabel, struct file *file,
+ u32 request, u32 denied)
+ {
+@@ -524,11 +557,12 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
+ return 0;
+
+ /* TODO: improve to skip profiles cached in flabel */
+- error = aa_sock_file_perm(label, op, request, sock);
++ error = aa_sock_file_perm(subj_cred, label, op, request, sock);
+ if (denied) {
+ /* TODO: improve to skip profiles checked above */
+ /* check every profile in file label to is cached */
+- last_error(error, aa_sock_file_perm(flabel, op, request, sock));
++ last_error(error, aa_sock_file_perm(subj_cred, flabel, op,
++ request, sock));
+ }
+ if (!error)
+ update_file_ctx(file_ctx(file), label, request);
+@@ -539,6 +573,7 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
+ /**
+ * aa_file_perm - do permission revalidation check & audit for @file
+ * @op: operation being checked
++ * @subj_cred: subject cred
+ * @label: label being enforced (NOT NULL)
+ * @file: file to revalidate access permissions on (NOT NULL)
+ * @request: requested permissions
+@@ -546,7 +581,8 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
+ *
+ * Returns: %0 if access allowed else error
+ */
+-int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
++int aa_file_perm(const char *op, const struct cred *subj_cred,
++ struct aa_label *label, struct file *file,
+ u32 request, bool in_atomic)
+ {
+ struct aa_file_ctx *fctx;
+@@ -582,19 +618,19 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
+ /* TODO: label cross check */
+
+ if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
+- error = __file_path_perm(op, label, flabel, file, request,
+- denied, in_atomic);
++ error = __file_path_perm(op, subj_cred, label, flabel, file,
++ request, denied, in_atomic);
+
+ else if (S_ISSOCK(file_inode(file)->i_mode))
+- error = __file_sock_perm(op, label, flabel, file, request,
+- denied);
++ error = __file_sock_perm(op, subj_cred, label, flabel, file,
++ request, denied);
+ aa_put_label(flabel);
+
+ done:
+ return error;
+ }
+
+-static void revalidate_tty(struct aa_label *label)
++static void revalidate_tty(const struct cred *subj_cred, struct aa_label *label)
+ {
+ struct tty_struct *tty;
+ int drop_tty = 0;
+@@ -612,8 +648,8 @@ static void revalidate_tty(struct aa_label *label)
+ struct tty_file_private, list);
+ file = file_priv->file;
+
+- if (aa_file_perm(OP_INHERIT, label, file, MAY_READ | MAY_WRITE,
+- IN_ATOMIC))
++ if (aa_file_perm(OP_INHERIT, subj_cred, label, file,
++ MAY_READ | MAY_WRITE, IN_ATOMIC))
+ drop_tty = 1;
+ }
+ spin_unlock(&tty->files_lock);
+@@ -623,12 +659,17 @@ static void revalidate_tty(struct aa_label *label)
+ no_tty();
+ }
+
++struct cred_label {
++ const struct cred *cred;
++ struct aa_label *label;
++};
++
+ static int match_file(const void *p, struct file *file, unsigned int fd)
+ {
+- struct aa_label *label = (struct aa_label *)p;
++ struct cred_label *cl = (struct cred_label *)p;
+
+- if (aa_file_perm(OP_INHERIT, label, file, aa_map_file_to_perms(file),
+- IN_ATOMIC))
++ if (aa_file_perm(OP_INHERIT, cl->cred, cl->label, file,
++ aa_map_file_to_perms(file), IN_ATOMIC))
+ return fd + 1;
+ return 0;
+ }
+@@ -638,13 +679,17 @@ static int match_file(const void *p, struct file *file, unsigned int fd)
+ void aa_inherit_files(const struct cred *cred, struct files_struct *files)
+ {
+ struct aa_label *label = aa_get_newest_cred_label(cred);
++ struct cred_label cl = {
++ .cred = cred,
++ .label = label,
++ };
+ struct file *devnull = NULL;
+ unsigned int n;
+
+- revalidate_tty(label);
++ revalidate_tty(cred, label);
+
+ /* Revalidate access to inherited open files. */
+- n = iterate_fd(files, 0, match_file, label);
++ n = iterate_fd(files, 0, match_file, &cl);
+ if (!n) /* none found? */
+ goto out;
+
+@@ -654,7 +699,7 @@ void aa_inherit_files(const struct cred *cred, struct files_struct *files)
+ /* replace all the matching ones with this */
+ do {
+ replace_fd(n - 1, devnull, 0);
+- } while ((n = iterate_fd(files, n, match_file, label)) != 0);
++ } while ((n = iterate_fd(files, n, match_file, &cl)) != 0);
+ if (devnull)
+ fput(devnull);
+ out:
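
The cred_label carrier above exists because iterate_fd() forwards only a single
const void * to its callback, so the cred and the label have to travel together
in one on-stack object. A sketch of the calling convention, restating the code
above with the return convention spelled out:

    struct cred_label cl = {
        .cred = cred,
        .label = label,
    };
    unsigned int fd;

    /* A non-zero return from the callback stops the walk and becomes
     * iterate_fd()'s return value; match_file() reports fd + 1 so that
     * a revalidation failure on fd 0 is distinguishable from "none". */
    fd = iterate_fd(files, 0, match_file, &cl);
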
+diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
+index c328f07f11cd8..42d701fec5a6d 100644
+--- a/security/apparmor/include/audit.h
++++ b/security/apparmor/include/audit.h
+@@ -109,7 +109,8 @@ struct apparmor_audit_data {
+ int type;
+ u16 class;
+ const char *op;
+- struct aa_label *label;
++ const struct cred *subj_cred;
++ struct aa_label *subj_label;
+ const char *name;
+ const char *info;
+ u32 request;
+@@ -152,33 +153,35 @@ struct apparmor_audit_data {
+ unsigned long flags;
+ } mnt;
+ };
++
++ struct common_audit_data common;
+ };
+
+ /* macros for dealing with apparmor_audit_data structure */
+-#define aad(SA) ((SA)->apparmor_audit_data)
++#define aad(SA) (container_of(SA, struct apparmor_audit_data, common))
++#define aad_of_va(VA) aad((struct common_audit_data *)(VA))
++
+ #define DEFINE_AUDIT_DATA(NAME, T, C, X) \
+ /* TODO: cleanup audit init so we don't need _aad = {0,} */ \
+- struct apparmor_audit_data NAME ## _aad = { \
++ struct apparmor_audit_data NAME = { \
+ .class = (C), \
+ .op = (X), \
+- }; \
+- struct common_audit_data NAME = \
+- { \
+- .type = (T), \
+- .u.tsk = NULL, \
+- }; \
+- NAME.apparmor_audit_data = &(NAME ## _aad)
+-
+-void aa_audit_msg(int type, struct common_audit_data *sa,
++ .common.type = (T), \
++ .common.u.tsk = NULL, \
++ .common.apparmor_audit_data = &NAME, \
++ };
++
++void aa_audit_msg(int type, struct apparmor_audit_data *ad,
+ void (*cb) (struct audit_buffer *, void *));
+-int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
++int aa_audit(int type, struct aa_profile *profile,
++ struct apparmor_audit_data *ad,
+ void (*cb) (struct audit_buffer *, void *));
+
+-#define aa_audit_error(ERROR, SA, CB) \
++#define aa_audit_error(ERROR, AD, CB) \
+ ({ \
+- aad((SA))->error = (ERROR); \
+- aa_audit_msg(AUDIT_APPARMOR_ERROR, (SA), (CB)); \
+- aad((SA))->error; \
++ (AD)->error = (ERROR); \
++ aa_audit_msg(AUDIT_APPARMOR_ERROR, (AD), (CB)); \
++ (AD)->error; \
+ })
+
+
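
With common_audit_data embedded as the .common member, aad() can recover the
AppArmor data with container_of() instead of chasing the old
apparmor_audit_data back-pointer; the LSM audit machinery still hands callbacks
a void * to the common struct, which is why aad_of_va() exists. An illustrative
callback and caller follow; the shape mirrors audit_signal_cb() and
audit_net_cb() elsewhere in this patch, but example_audit_cb and the "example"
string are illustrative, not from the patch:

    static void example_audit_cb(struct audit_buffer *ab, void *va)
    {
        struct common_audit_data *sa = va;
        struct apparmor_audit_data *ad = aad(sa); /* container_of, no deref */

        if (ad->info)
            audit_log_format(ab, " info=\"%s\"", ad->info);
    }

    DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, OP_EXEC);
    ad.info = "example";
    aa_audit_msg(AUDIT_APPARMOR_AUDIT, &ad, example_audit_cb);
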
+diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h
+index d420e2d10b31b..d6dcc604ec0cc 100644
+--- a/security/apparmor/include/capability.h
++++ b/security/apparmor/include/capability.h
+@@ -36,7 +36,8 @@ struct aa_caps {
+
+ extern struct aa_sfs_entry aa_sfs_entry_caps[];
+
+-int aa_capable(struct aa_label *label, int cap, unsigned int opts);
++int aa_capable(const struct cred *subj_cred, struct aa_label *label,
++ int cap, unsigned int opts);
+
+ static inline void aa_free_cap_rules(struct aa_caps *caps)
+ {
+diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
+index 5be620af33ba0..64dc6d1a7a05c 100644
+--- a/security/apparmor/include/file.h
++++ b/security/apparmor/include/file.h
+@@ -108,7 +108,8 @@ struct path_cond {
+
+ #define COMBINED_PERM_MASK(X) ((X).allow | (X).audit | (X).quiet | (X).kill)
+
+-int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
++int aa_audit_file(const struct cred *cred,
++ struct aa_profile *profile, struct aa_perms *perms,
+ const char *op, u32 request, const char *name,
+ const char *target, struct aa_label *tlabel, kuid_t ouid,
+ const char *info, int error);
+@@ -119,14 +120,16 @@ aa_state_t aa_str_perms(struct aa_policydb *file_rules, aa_state_t start,
+ const char *name, struct path_cond *cond,
+ struct aa_perms *perms);
+
+-int aa_path_perm(const char *op, struct aa_label *label,
+- const struct path *path, int flags, u32 request,
+- struct path_cond *cond);
++int aa_path_perm(const char *op, const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
++ int flags, u32 request, struct path_cond *cond);
+
+-int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
+- const struct path *new_dir, struct dentry *new_dentry);
++int aa_path_link(const struct cred *subj_cred, struct aa_label *label,
++ struct dentry *old_dentry, const struct path *new_dir,
++ struct dentry *new_dentry);
+
+-int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
++int aa_file_perm(const char *op, const struct cred *subj_cred,
++ struct aa_label *label, struct file *file,
+ u32 request, bool in_atomic);
+
+ void aa_inherit_files(const struct cred *cred, struct files_struct *files);
+diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h
+index a1ac6ffb95e9c..74d17052f76bc 100644
+--- a/security/apparmor/include/ipc.h
++++ b/security/apparmor/include/ipc.h
+@@ -13,6 +13,8 @@
+
+ #include <linux/sched.h>
+
+-int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig);
++int aa_may_signal(const struct cred *subj_cred, struct aa_label *sender,
++ const struct cred *target_cred, struct aa_label *target,
++ int sig);
+
+ #endif /* __AA_IPC_H */
+diff --git a/security/apparmor/include/mount.h b/security/apparmor/include/mount.h
+index a710683b24965..46834f8281794 100644
+--- a/security/apparmor/include/mount.h
++++ b/security/apparmor/include/mount.h
+@@ -25,26 +25,36 @@
+
+ #define AA_MS_IGNORE_MASK (MS_KERNMOUNT | MS_NOSEC | MS_ACTIVE | MS_BORN)
+
+-int aa_remount(struct aa_label *label, const struct path *path,
++int aa_remount(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
+ unsigned long flags, void *data);
+
+-int aa_bind_mount(struct aa_label *label, const struct path *path,
++int aa_bind_mount(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
+ const char *old_name, unsigned long flags);
+
+
+-int aa_mount_change_type(struct aa_label *label, const struct path *path,
++int aa_mount_change_type(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
+ unsigned long flags);
+
+-int aa_move_mount(struct aa_label *label, const struct path *path,
+- const char *old_name);
++int aa_move_mount_old(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
++ const char *old_name);
++int aa_move_mount(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *from_path,
++ const struct path *to_path);
+
+-int aa_new_mount(struct aa_label *label, const char *dev_name,
++int aa_new_mount(const struct cred *subj_cred,
++ struct aa_label *label, const char *dev_name,
+ const struct path *path, const char *type, unsigned long flags,
+ void *data);
+
+-int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags);
++int aa_umount(const struct cred *subj_cred,
++ struct aa_label *label, struct vfsmount *mnt, int flags);
+
+-int aa_pivotroot(struct aa_label *label, const struct path *old_path,
++int aa_pivotroot(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *old_path,
+ const struct path *new_path);
+
+ #endif /* __AA_MOUNT_H */
+diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
+index 6fa440b5daed8..aa8515af677f0 100644
+--- a/security/apparmor/include/net.h
++++ b/security/apparmor/include/net.h
+@@ -61,9 +61,9 @@ struct aa_sk_ctx {
+ LSM_AUDIT_DATA_NONE, \
+ AA_CLASS_NET, \
+ OP); \
+- NAME.u.net = &(NAME ## _net); \
+- aad(&NAME)->net.type = (T); \
+- aad(&NAME)->net.protocol = (P)
++ NAME.common.u.net = &(NAME ## _net); \
++ NAME.net.type = (T); \
++ NAME.net.protocol = (P)
+
+ #define DEFINE_AUDIT_SK(NAME, OP, SK) \
+ DEFINE_AUDIT_NET(NAME, OP, SK, (SK)->sk_family, (SK)->sk_type, \
+@@ -90,21 +90,24 @@ struct aa_secmark {
+ extern struct aa_sfs_entry aa_sfs_entry_network[];
+
+ void audit_net_cb(struct audit_buffer *ab, void *va);
+-int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
++int aa_profile_af_perm(struct aa_profile *profile,
++ struct apparmor_audit_data *ad,
+ u32 request, u16 family, int type);
+-int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
++int aa_af_perm(const struct cred *subj_cred, struct aa_label *label,
++ const char *op, u32 request, u16 family,
+ int type, int protocol);
+ static inline int aa_profile_af_sk_perm(struct aa_profile *profile,
+- struct common_audit_data *sa,
++ struct apparmor_audit_data *ad,
+ u32 request,
+ struct sock *sk)
+ {
+- return aa_profile_af_perm(profile, sa, request, sk->sk_family,
++ return aa_profile_af_perm(profile, ad, request, sk->sk_family,
+ sk->sk_type);
+ }
+ int aa_sk_perm(const char *op, u32 request, struct sock *sk);
+
+-int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
++int aa_sock_file_perm(const struct cred *subj_cred, struct aa_label *label,
++ const char *op, u32 request,
+ struct socket *sock);
+
+ int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
+diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
+index 797a7a00644d2..83534df8939fd 100644
+--- a/security/apparmor/include/perms.h
++++ b/security/apparmor/include/perms.h
+@@ -212,8 +212,8 @@ void aa_profile_match_label(struct aa_profile *profile,
+ int type, u32 request, struct aa_perms *perms);
+ int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
+ u32 request, int type, u32 *deny,
+- struct common_audit_data *sa);
++ struct apparmor_audit_data *ad);
+ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
+- u32 request, struct common_audit_data *sa,
++ u32 request, struct apparmor_audit_data *ad,
+ void (*cb)(struct audit_buffer *, void *));
+ #endif /* __AA_PERM_H */
+diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
+index 545f791cabdae..fa15a5c7febb8 100644
+--- a/security/apparmor/include/policy.h
++++ b/security/apparmor/include/policy.h
+@@ -370,9 +370,12 @@ static inline int AUDIT_MODE(struct aa_profile *profile)
+ return profile->audit;
+ }
+
+-bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns);
+-bool aa_policy_admin_capable(struct aa_label *label, struct aa_ns *ns);
+-int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns,
++bool aa_policy_view_capable(const struct cred *subj_cred,
++ struct aa_label *label, struct aa_ns *ns);
++bool aa_policy_admin_capable(const struct cred *subj_cred,
++ struct aa_label *label, struct aa_ns *ns);
++int aa_may_manage_policy(const struct cred *subj_cred,
++ struct aa_label *label, struct aa_ns *ns,
+ u32 mask);
+ bool aa_current_policy_view_capable(struct aa_ns *ns);
+ bool aa_current_policy_admin_capable(struct aa_ns *ns);
+diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h
+index 961d85d328ea9..ad2c0da8e64fc 100644
+--- a/security/apparmor/include/resource.h
++++ b/security/apparmor/include/resource.h
+@@ -33,7 +33,8 @@ struct aa_rlimit {
+ extern struct aa_sfs_entry aa_sfs_entry_rlimit[];
+
+ int aa_map_resource(int resource);
+-int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
++int aa_task_setrlimit(const struct cred *subj_cred, struct aa_label *label,
++ struct task_struct *task,
+ unsigned int resource, struct rlimit *new_rlim);
+
+ void __aa_transition_rlimits(struct aa_label *old, struct aa_label *new);
+diff --git a/security/apparmor/include/task.h b/security/apparmor/include/task.h
+index 13437d62c70f4..29ba55107b7d6 100644
+--- a/security/apparmor/include/task.h
++++ b/security/apparmor/include/task.h
+@@ -91,7 +91,8 @@ static inline void aa_clear_task_ctx_trans(struct aa_task_ctx *ctx)
+ "segv usr2 pipe alrm term stkflt chld cont stop stp ttin ttou urg " \
+ "xcpu xfsz vtalrm prof winch io pwr sys emt lost"
+
+-int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
++int aa_may_ptrace(const struct cred *tracer_cred, struct aa_label *tracer,
++ const struct cred *tracee_cred, struct aa_label *tracee,
+ u32 request);
+
+
+diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
+index 5acde746775f7..c0d0dbd7b4c4b 100644
+--- a/security/apparmor/ipc.c
++++ b/security/apparmor/ipc.c
+@@ -52,31 +52,33 @@ static const char *audit_signal_mask(u32 mask)
+ static void audit_signal_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+- if (aad(sa)->request & AA_SIGNAL_PERM_MASK) {
++ if (ad->request & AA_SIGNAL_PERM_MASK) {
+ audit_log_format(ab, " requested_mask=\"%s\"",
+- audit_signal_mask(aad(sa)->request));
+- if (aad(sa)->denied & AA_SIGNAL_PERM_MASK) {
++ audit_signal_mask(ad->request));
++ if (ad->denied & AA_SIGNAL_PERM_MASK) {
+ audit_log_format(ab, " denied_mask=\"%s\"",
+- audit_signal_mask(aad(sa)->denied));
++ audit_signal_mask(ad->denied));
+ }
+ }
+- if (aad(sa)->signal == SIGUNKNOWN)
++ if (ad->signal == SIGUNKNOWN)
+ audit_log_format(ab, "signal=unknown(%d)",
+- aad(sa)->unmappedsig);
+- else if (aad(sa)->signal < MAXMAPPED_SIGNAME)
+- audit_log_format(ab, " signal=%s", sig_names[aad(sa)->signal]);
++ ad->unmappedsig);
++ else if (ad->signal < MAXMAPPED_SIGNAME)
++ audit_log_format(ab, " signal=%s", sig_names[ad->signal]);
+ else
+ audit_log_format(ab, " signal=rtmin+%d",
+- aad(sa)->signal - SIGRT_BASE);
++ ad->signal - SIGRT_BASE);
+ audit_log_format(ab, " peer=");
+- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
++ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+ }
+
+-static int profile_signal_perm(struct aa_profile *profile,
++static int profile_signal_perm(const struct cred *cred,
++ struct aa_profile *profile,
+ struct aa_label *peer, u32 request,
+- struct common_audit_data *sa)
++ struct apparmor_audit_data *ad)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+ typeof(*rules), list);
+@@ -87,24 +89,29 @@ static int profile_signal_perm(struct aa_profile *profile,
+ !ANY_RULE_MEDIATES(&profile->rules, AA_CLASS_SIGNAL))
+ return 0;
+
+- aad(sa)->peer = peer;
++ ad->subj_cred = cred;
++ ad->peer = peer;
+ /* TODO: secondary cache check <profile, profile, perm> */
+ state = aa_dfa_next(rules->policy.dfa,
+ rules->policy.start[AA_CLASS_SIGNAL],
+- aad(sa)->signal);
++ ad->signal);
+ aa_label_match(profile, rules, peer, state, false, request, &perms);
+ aa_apply_modes_to_perms(profile, &perms);
+- return aa_check_perms(profile, &perms, request, sa, audit_signal_cb);
++ return aa_check_perms(profile, &perms, request, ad, audit_signal_cb);
+ }
+
+-int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig)
++int aa_may_signal(const struct cred *subj_cred, struct aa_label *sender,
++ const struct cred *target_cred, struct aa_label *target,
++ int sig)
+ {
+ struct aa_profile *profile;
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_SIGNAL, OP_SIGNAL);
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_SIGNAL, OP_SIGNAL);
+
+- aad(&sa)->signal = map_signal_num(sig);
+- aad(&sa)->unmappedsig = sig;
++ ad.signal = map_signal_num(sig);
++ ad.unmappedsig = sig;
+ return xcheck_labels(sender, target, profile,
+- profile_signal_perm(profile, target, MAY_WRITE, &sa),
+- profile_signal_perm(profile, sender, MAY_READ, &sa));
++ profile_signal_perm(subj_cred, profile, target,
++ MAY_WRITE, &ad),
++ profile_signal_perm(target_cred, profile, sender,
++ MAY_READ, &ad));
+ }
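
Signal delivery is mediated in both directions: the sender's label must allow
writing the signal to the target, and the target's label must allow receiving
(reading) it from the sender, each direction carrying its own subject cred for
the audit record. Ignoring the per-profile iteration that xcheck_labels()
performs, the check above behaves roughly like the sketch below, where
profile_of() is a hypothetical stand-in for walking each label's profiles:

    int e1, e2;

    e1 = profile_signal_perm(subj_cred, profile_of(sender), target,
                             MAY_WRITE, &ad); /* may sender signal target? */
    e2 = profile_signal_perm(target_cred, profile_of(target), sender,
                             MAY_READ, &ad);  /* may target receive it? */
    error = e1 ? e1 : e2;                     /* both run; any error denies */
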
+diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
+index a630c951bb3b8..c87bccafff446 100644
+--- a/security/apparmor/lib.c
++++ b/security/apparmor/lib.c
+@@ -27,7 +27,7 @@ struct aa_perms allperms = { .allow = ALL_PERMS_MASK,
+
+ /**
+ * aa_free_str_table - free entries str table
+- * @str: the string table to free (MAYBE NULL)
++ * @t: the string table to free (MAYBE NULL)
+ */
+ void aa_free_str_table(struct aa_str_table *t)
+ {
+@@ -85,6 +85,7 @@ char *aa_split_fqname(char *fqname, char **ns_name)
+ /**
+ * skipn_spaces - Removes leading whitespace from @str.
+ * @str: The string to be stripped.
++ * @n: maximum length of @str to parse; parsing stops at '\0' if found before @n
+ *
+ * Returns a pointer to the first non-whitespace character in @str.
+ * if all whitespace will return NULL
+@@ -143,10 +144,10 @@ const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
+ void aa_info_message(const char *str)
+ {
+ if (audit_enabled) {
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
+
+- aad(&sa)->info = str;
+- aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, NULL);
++ ad.info = str;
++ aa_audit_msg(AUDIT_APPARMOR_STATUS, &ad, NULL);
+ }
+ printk(KERN_INFO "AppArmor: %s\n", str);
+ }
+@@ -281,21 +282,22 @@ void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
+ static void aa_audit_perms_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+- if (aad(sa)->request) {
++ if (ad->request) {
+ audit_log_format(ab, " requested_mask=");
+- aa_audit_perm_mask(ab, aad(sa)->request, aa_file_perm_chrs,
++ aa_audit_perm_mask(ab, ad->request, aa_file_perm_chrs,
+ PERMS_CHRS_MASK, aa_file_perm_names,
+ PERMS_NAMES_MASK);
+ }
+- if (aad(sa)->denied) {
++ if (ad->denied) {
+ audit_log_format(ab, "denied_mask=");
+- aa_audit_perm_mask(ab, aad(sa)->denied, aa_file_perm_chrs,
++ aa_audit_perm_mask(ab, ad->denied, aa_file_perm_chrs,
+ PERMS_CHRS_MASK, aa_file_perm_names,
+ PERMS_NAMES_MASK);
+ }
+ audit_log_format(ab, " peer=");
+- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
++ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+ }
+
+@@ -349,21 +351,20 @@ void aa_profile_match_label(struct aa_profile *profile,
+ /* currently unused */
+ int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
+ u32 request, int type, u32 *deny,
+- struct common_audit_data *sa)
++ struct apparmor_audit_data *ad)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+ typeof(*rules), list);
+ struct aa_perms perms;
+
+- aad(sa)->label = &profile->label;
+- aad(sa)->peer = &target->label;
+- aad(sa)->request = request;
++ ad->peer = &target->label;
++ ad->request = request;
+
+ aa_profile_match_label(profile, rules, &target->label, type, request,
+ &perms);
+ aa_apply_modes_to_perms(profile, &perms);
+ *deny |= request & perms.deny;
+- return aa_check_perms(profile, &perms, request, sa, aa_audit_perms_cb);
++ return aa_check_perms(profile, &perms, request, ad, aa_audit_perms_cb);
+ }
+
+ /**
+@@ -371,8 +372,7 @@ int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
+ * @profile: profile being checked
+ * @perms: perms computed for the request
+ * @request: requested perms
+- * @deny: Returns: explicit deny set
+- * @sa: initialized audit structure (MAY BE NULL if not auditing)
++ * @ad: initialized audit structure (MAY BE NULL if not auditing)
+ * @cb: callback fn for type specific fields (MAY BE NULL)
+ *
+ * Returns: 0 if permission else error code
+@@ -385,7 +385,7 @@ int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
+ * with a positive value.
+ */
+ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
+- u32 request, struct common_audit_data *sa,
++ u32 request, struct apparmor_audit_data *ad,
+ void (*cb)(struct audit_buffer *, void *))
+ {
+ int type, error;
+@@ -394,7 +394,7 @@ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
+ if (likely(!denied)) {
+ /* mask off perms that are not being force audited */
+ request &= perms->audit;
+- if (!request || !sa)
++ if (!request || !ad)
+ return 0;
+
+ type = AUDIT_APPARMOR_AUDIT;
+@@ -413,16 +413,16 @@ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
+ error = -ENOENT;
+
+ denied &= ~perms->quiet;
+- if (!sa || !denied)
++ if (!ad || !denied)
+ return error;
+ }
+
+- if (sa) {
+- aad(sa)->label = &profile->label;
+- aad(sa)->request = request;
+- aad(sa)->denied = denied;
+- aad(sa)->error = error;
+- aa_audit_msg(type, sa, cb);
++ if (ad) {
++ ad->subj_label = &profile->label;
++ ad->request = request;
++ ad->denied = denied;
++ ad->error = error;
++ aa_audit_msg(type, ad, cb);
+ }
+
+ if (type == AUDIT_APPARMOR_ALLOWED)
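
Note the division of labor this change settles on: callers fill only the
request-specific audit fields (subj_cred, peer, signal, and so on), while
aa_check_perms() itself records subj_label, request, denied, and error before
emitting the message. A schematic mediation tail, mirroring
profile_signal_perm() earlier in this patch:

    /* @perms already computed for @profile and @request */
    ad->subj_cred = subj_cred;  /* request-specific fields set by caller */
    ad->peer = peer;
    aa_apply_modes_to_perms(profile, &perms);
    return aa_check_perms(profile, &perms, request, ad, audit_signal_cb);
    /* aa_check_perms() fills ad->subj_label/request/denied/error itself */
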
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index 108eccc5ada58..6fdab1b5ede5c 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -116,15 +116,17 @@ static int apparmor_ptrace_access_check(struct task_struct *child,
+ unsigned int mode)
+ {
+ struct aa_label *tracer, *tracee;
++ const struct cred *cred;
+ int error;
+
++ cred = get_task_cred(child);
++ tracee = cred_label(cred); /* ref count on cred */
+ tracer = __begin_current_label_crit_section();
+- tracee = aa_get_task_label(child);
+- error = aa_may_ptrace(tracer, tracee,
++ error = aa_may_ptrace(current_cred(), tracer, cred, tracee,
+ (mode & PTRACE_MODE_READ) ? AA_PTRACE_READ
+ : AA_PTRACE_TRACE);
+- aa_put_label(tracee);
+ __end_current_label_crit_section(tracer);
++ put_cred(cred);
+
+ return error;
+ }
+@@ -132,12 +134,15 @@ static int apparmor_ptrace_access_check(struct task_struct *child,
+ static int apparmor_ptrace_traceme(struct task_struct *parent)
+ {
+ struct aa_label *tracer, *tracee;
++ const struct cred *cred;
+ int error;
+
+ tracee = __begin_current_label_crit_section();
+- tracer = aa_get_task_label(parent);
+- error = aa_may_ptrace(tracer, tracee, AA_PTRACE_TRACE);
+- aa_put_label(tracer);
++ cred = get_task_cred(parent);
++ tracer = cred_label(cred); /* ref count on cred */
++ error = aa_may_ptrace(cred, tracer, current_cred(), tracee,
++ AA_PTRACE_TRACE);
++ put_cred(cred);
+ __end_current_label_crit_section(tracee);
+
+ return error;
+@@ -188,7 +193,7 @@ static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
+
+ label = aa_get_newest_cred_label(cred);
+ if (!unconfined(label))
+- error = aa_capable(label, cap, opts);
++ error = aa_capable(cred, label, cap, opts);
+ aa_put_label(label);
+
+ return error;
+@@ -211,7 +216,8 @@ static int common_perm(const char *op, const struct path *path, u32 mask,
+
+ label = __begin_current_label_crit_section();
+ if (!unconfined(label))
+- error = aa_path_perm(op, label, path, 0, mask, cond);
++ error = aa_path_perm(op, current_cred(), label, path, 0, mask,
++ cond);
+ __end_current_label_crit_section(label);
+
+ return error;
+@@ -357,7 +363,8 @@ static int apparmor_path_link(struct dentry *old_dentry, const struct path *new_
+
+ label = begin_current_label_crit_section();
+ if (!unconfined(label))
+- error = aa_path_link(label, old_dentry, new_dir, new_dentry);
++ error = aa_path_link(current_cred(), label, old_dentry, new_dir,
++ new_dentry);
+ end_current_label_crit_section(label);
+
+ return error;
+@@ -396,23 +403,27 @@ static int apparmor_path_rename(const struct path *old_dir, struct dentry *old_d
+ vfsuid = i_uid_into_vfsuid(idmap, d_backing_inode(old_dentry));
+ cond_exchange.uid = vfsuid_into_kuid(vfsuid);
+
+- error = aa_path_perm(OP_RENAME_SRC, label, &new_path, 0,
++ error = aa_path_perm(OP_RENAME_SRC, current_cred(),
++ label, &new_path, 0,
+ MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
+ AA_MAY_SETATTR | AA_MAY_DELETE,
+ &cond_exchange);
+ if (!error)
+- error = aa_path_perm(OP_RENAME_DEST, label, &old_path,
++ error = aa_path_perm(OP_RENAME_DEST, current_cred(),
++ label, &old_path,
+ 0, MAY_WRITE | AA_MAY_SETATTR |
+ AA_MAY_CREATE, &cond_exchange);
+ }
+
+ if (!error)
+- error = aa_path_perm(OP_RENAME_SRC, label, &old_path, 0,
++ error = aa_path_perm(OP_RENAME_SRC, current_cred(),
++ label, &old_path, 0,
+ MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
+ AA_MAY_SETATTR | AA_MAY_DELETE,
+ &cond);
+ if (!error)
+- error = aa_path_perm(OP_RENAME_DEST, label, &new_path,
++ error = aa_path_perm(OP_RENAME_DEST, current_cred(),
++ label, &new_path,
+ 0, MAY_WRITE | AA_MAY_SETATTR |
+ AA_MAY_CREATE, &cond);
+
+@@ -467,7 +478,8 @@ static int apparmor_file_open(struct file *file)
+ vfsuid = i_uid_into_vfsuid(idmap, inode);
+ cond.uid = vfsuid_into_kuid(vfsuid);
+
+- error = aa_path_perm(OP_OPEN, label, &file->f_path, 0,
++ error = aa_path_perm(OP_OPEN, file->f_cred,
++ label, &file->f_path, 0,
+ aa_map_file_to_perms(file), &cond);
+ /* todo cache full allowed permissions set and state */
+ fctx->allow = aa_map_file_to_perms(file);
+@@ -507,7 +519,7 @@ static int common_file_perm(const char *op, struct file *file, u32 mask,
+ return -EACCES;
+
+ label = __begin_current_label_crit_section();
+- error = aa_file_perm(op, label, file, mask, in_atomic);
++ error = aa_file_perm(op, current_cred(), label, file, mask, in_atomic);
+ __end_current_label_crit_section(label);
+
+ return error;
+@@ -585,23 +597,42 @@ static int apparmor_sb_mount(const char *dev_name, const struct path *path,
+ label = __begin_current_label_crit_section();
+ if (!unconfined(label)) {
+ if (flags & MS_REMOUNT)
+- error = aa_remount(label, path, flags, data);
++ error = aa_remount(current_cred(), label, path, flags,
++ data);
+ else if (flags & MS_BIND)
+- error = aa_bind_mount(label, path, dev_name, flags);
++ error = aa_bind_mount(current_cred(), label, path,
++ dev_name, flags);
+ else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE |
+ MS_UNBINDABLE))
+- error = aa_mount_change_type(label, path, flags);
++ error = aa_mount_change_type(current_cred(), label,
++ path, flags);
+ else if (flags & MS_MOVE)
+- error = aa_move_mount(label, path, dev_name);
++ error = aa_move_mount_old(current_cred(), label, path,
++ dev_name);
+ else
+- error = aa_new_mount(label, dev_name, path, type,
+- flags, data);
++ error = aa_new_mount(current_cred(), label, dev_name,
++ path, type, flags, data);
+ }
+ __end_current_label_crit_section(label);
+
+ return error;
+ }
+
++static int apparmor_move_mount(const struct path *from_path,
++ const struct path *to_path)
++{
++ struct aa_label *label;
++ int error = 0;
++
++ label = __begin_current_label_crit_section();
++ if (!unconfined(label))
++ error = aa_move_mount(current_cred(), label, from_path,
++ to_path);
++ __end_current_label_crit_section(label);
++
++ return error;
++}
++
+ static int apparmor_sb_umount(struct vfsmount *mnt, int flags)
+ {
+ struct aa_label *label;
+@@ -609,7 +640,7 @@ static int apparmor_sb_umount(struct vfsmount *mnt, int flags)
+
+ label = __begin_current_label_crit_section();
+ if (!unconfined(label))
+- error = aa_umount(label, mnt, flags);
++ error = aa_umount(current_cred(), label, mnt, flags);
+ __end_current_label_crit_section(label);
+
+ return error;
+@@ -623,7 +654,7 @@ static int apparmor_sb_pivotroot(const struct path *old_path,
+
+ label = aa_get_current_label();
+ if (!unconfined(label))
+- error = aa_pivotroot(label, old_path, new_path);
++ error = aa_pivotroot(current_cred(), label, old_path, new_path);
+ aa_put_label(label);
+
+ return error;
+@@ -662,7 +693,7 @@ static int apparmor_setprocattr(const char *name, void *value,
+ char *command, *largs = NULL, *args = value;
+ size_t arg_size;
+ int error;
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE,
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE,
+ OP_SETPROCATTR);
+
+ if (size == 0)
+@@ -722,11 +753,11 @@ out:
+ return error;
+
+ fail:
+- aad(&sa)->label = begin_current_label_crit_section();
+- aad(&sa)->info = name;
+- aad(&sa)->error = error = -EINVAL;
+- aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
+- end_current_label_crit_section(aad(&sa)->label);
++ ad.subj_label = begin_current_label_crit_section();
++ ad.info = name;
++ ad.error = error = -EINVAL;
++ aa_audit_msg(AUDIT_APPARMOR_DENIED, &ad, NULL);
++ end_current_label_crit_section(ad.subj_label);
+ goto out;
+ }
+
+@@ -785,7 +816,8 @@ static int apparmor_task_setrlimit(struct task_struct *task,
+ int error = 0;
+
+ if (!unconfined(label))
+- error = aa_task_setrlimit(label, task, resource, new_rlim);
++ error = aa_task_setrlimit(current_cred(), label, task,
++ resource, new_rlim);
+ __end_current_label_crit_section(label);
+
+ return error;
+@@ -794,26 +826,27 @@ static int apparmor_task_setrlimit(struct task_struct *task,
+ static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo *info,
+ int sig, const struct cred *cred)
+ {
++ const struct cred *tc;
+ struct aa_label *cl, *tl;
+ int error;
+
++ tc = get_task_cred(target);
++ tl = aa_get_newest_cred_label(tc);
+ if (cred) {
+ /*
+ * Dealing with USB IO specific behavior
+ */
+ cl = aa_get_newest_cred_label(cred);
+- tl = aa_get_task_label(target);
+- error = aa_may_signal(cl, tl, sig);
++ error = aa_may_signal(cred, cl, tc, tl, sig);
+ aa_put_label(cl);
+- aa_put_label(tl);
+ return error;
++ } else {
++ cl = __begin_current_label_crit_section();
++ error = aa_may_signal(current_cred(), cl, tc, tl, sig);
++ __end_current_label_crit_section(cl);
+ }
+-
+- cl = __begin_current_label_crit_section();
+- tl = aa_get_task_label(target);
+- error = aa_may_signal(cl, tl, sig);
+ aa_put_label(tl);
+- __end_current_label_crit_section(cl);
++ put_cred(tc);
+
+ return error;
+ }
+@@ -879,7 +912,8 @@ static int apparmor_socket_create(int family, int type, int protocol, int kern)
+ if (!(kern || unconfined(label)))
+ error = af_select(family,
+ create_perm(label, family, type, protocol),
+- aa_af_perm(label, OP_CREATE, AA_MAY_CREATE,
++ aa_af_perm(current_cred(), label,
++ OP_CREATE, AA_MAY_CREATE,
+ family, type, protocol));
+ end_current_label_crit_section(label);
+
+@@ -1221,6 +1255,7 @@ static struct security_hook_list apparmor_hooks[] __ro_after_init = {
+ LSM_HOOK_INIT(capget, apparmor_capget),
+ LSM_HOOK_INIT(capable, apparmor_capable),
+
++ LSM_HOOK_INIT(move_mount, apparmor_move_mount),
+ LSM_HOOK_INIT(sb_mount, apparmor_sb_mount),
+ LSM_HOOK_INIT(sb_umount, apparmor_sb_umount),
+ LSM_HOOK_INIT(sb_pivotroot, apparmor_sb_pivotroot),
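
The ptrace hooks above stop taking a label reference via aa_get_task_label()
and instead borrow the label through a cred reference: cred_label() returns a
pointer that stays valid exactly as long as the get_task_cred() reference is
held, which is what the "ref count on cred" comments mean. The lifetime rule
as a sketch:

    const struct cred *cred = get_task_cred(task); /* takes a cred ref */
    struct aa_label *label = cred_label(cred);     /* borrowed, pinned by cred */

    /* ... mediate using both cred and label ... */

    put_cred(cred); /* drops the ref; @label must not be used past here */
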
+diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
+index cdfa430ae2161..f2a114e540079 100644
+--- a/security/apparmor/mount.c
++++ b/security/apparmor/mount.c
+@@ -86,32 +86,34 @@ static void audit_mnt_flags(struct audit_buffer *ab, unsigned long flags)
+ static void audit_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+- if (aad(sa)->mnt.type) {
++ if (ad->mnt.type) {
+ audit_log_format(ab, " fstype=");
+- audit_log_untrustedstring(ab, aad(sa)->mnt.type);
++ audit_log_untrustedstring(ab, ad->mnt.type);
+ }
+- if (aad(sa)->mnt.src_name) {
++ if (ad->mnt.src_name) {
+ audit_log_format(ab, " srcname=");
+- audit_log_untrustedstring(ab, aad(sa)->mnt.src_name);
++ audit_log_untrustedstring(ab, ad->mnt.src_name);
+ }
+- if (aad(sa)->mnt.trans) {
++ if (ad->mnt.trans) {
+ audit_log_format(ab, " trans=");
+- audit_log_untrustedstring(ab, aad(sa)->mnt.trans);
++ audit_log_untrustedstring(ab, ad->mnt.trans);
+ }
+- if (aad(sa)->mnt.flags) {
++ if (ad->mnt.flags) {
+ audit_log_format(ab, " flags=\"");
+- audit_mnt_flags(ab, aad(sa)->mnt.flags);
++ audit_mnt_flags(ab, ad->mnt.flags);
+ audit_log_format(ab, "\"");
+ }
+- if (aad(sa)->mnt.data) {
++ if (ad->mnt.data) {
+ audit_log_format(ab, " options=");
+- audit_log_untrustedstring(ab, aad(sa)->mnt.data);
++ audit_log_untrustedstring(ab, ad->mnt.data);
+ }
+ }
+
+ /**
+ * audit_mount - handle the auditing of mount operations
++ * @subj_cred: cred of the subject
+ * @profile: the profile being enforced (NOT NULL)
+ * @op: operation being mediated (NOT NULL)
+ * @name: name of object being mediated (MAYBE NULL)
+@@ -127,14 +129,15 @@ static void audit_cb(struct audit_buffer *ab, void *va)
+ *
+ * Returns: %0 or error on failure
+ */
+-static int audit_mount(struct aa_profile *profile, const char *op,
++static int audit_mount(const struct cred *subj_cred,
++ struct aa_profile *profile, const char *op,
+ const char *name, const char *src_name,
+ const char *type, const char *trans,
+ unsigned long flags, const void *data, u32 request,
+ struct aa_perms *perms, const char *info, int error)
+ {
+ int audit_type = AUDIT_APPARMOR_AUTO;
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_MOUNT, op);
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_MOUNT, op);
+
+ if (likely(!error)) {
+ u32 mask = perms->audit;
+@@ -165,17 +168,18 @@ static int audit_mount(struct aa_profile *profile, const char *op,
+ return error;
+ }
+
+- aad(&sa)->name = name;
+- aad(&sa)->mnt.src_name = src_name;
+- aad(&sa)->mnt.type = type;
+- aad(&sa)->mnt.trans = trans;
+- aad(&sa)->mnt.flags = flags;
++ ad.subj_cred = subj_cred;
++ ad.name = name;
++ ad.mnt.src_name = src_name;
++ ad.mnt.type = type;
++ ad.mnt.trans = trans;
++ ad.mnt.flags = flags;
+ if (data && (perms->audit & AA_AUDIT_DATA))
+- aad(&sa)->mnt.data = data;
+- aad(&sa)->info = info;
+- aad(&sa)->error = error;
++ ad.mnt.data = data;
++ ad.info = info;
++ ad.error = error;
+
+- return aa_audit(audit_type, profile, &sa, audit_cb);
++ return aa_audit(audit_type, profile, &ad, audit_cb);
+ }
+
+ /**
+@@ -283,6 +287,7 @@ static int path_flags(struct aa_profile *profile, const struct path *path)
+
+ /**
+ * match_mnt_path_str - handle path matching for mount
++ * @subj_cred: cred of confined subject
+ * @profile: the confining profile
+ * @mntpath: for the mntpnt (NOT NULL)
+ * @buffer: buffer to be used to lookup mntpath
+@@ -295,7 +300,8 @@ static int path_flags(struct aa_profile *profile, const struct path *path)
+ *
+ * Returns: 0 on success else error
+ */
+-static int match_mnt_path_str(struct aa_profile *profile,
++static int match_mnt_path_str(const struct cred *subj_cred,
++ struct aa_profile *profile,
+ const struct path *mntpath, char *buffer,
+ const char *devname, const char *type,
+ unsigned long flags, void *data, bool binary,
+@@ -336,12 +342,14 @@ static int match_mnt_path_str(struct aa_profile *profile,
+ error = 0;
+
+ audit:
+- return audit_mount(profile, OP_MOUNT, mntpnt, devname, type, NULL,
++ return audit_mount(subj_cred, profile, OP_MOUNT, mntpnt, devname,
++ type, NULL,
+ flags, data, AA_MAY_MOUNT, &perms, info, error);
+ }
+
+ /**
+ * match_mnt - handle path matching for mount
++ * @subj_cred: cred of the subject
+ * @profile: the confining profile
+ * @path: for the mntpnt (NOT NULL)
+ * @buffer: buffer to be used to lookup mntpath
+@@ -354,7 +362,8 @@ audit:
+ *
+ * Returns: 0 on success else error
+ */
+-static int match_mnt(struct aa_profile *profile, const struct path *path,
++static int match_mnt(const struct cred *subj_cred,
++ struct aa_profile *profile, const struct path *path,
+ char *buffer, const struct path *devpath, char *devbuffer,
+ const char *type, unsigned long flags, void *data,
+ bool binary)
+@@ -378,11 +387,12 @@ static int match_mnt(struct aa_profile *profile, const struct path *path,
+ devname = ERR_PTR(error);
+ }
+
+- return match_mnt_path_str(profile, path, buffer, devname, type, flags,
+- data, binary, info);
++ return match_mnt_path_str(subj_cred, profile, path, buffer, devname,
++ type, flags, data, binary, info);
+ }
+
+-int aa_remount(struct aa_label *label, const struct path *path,
++int aa_remount(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
+ unsigned long flags, void *data)
+ {
+ struct aa_profile *profile;
+@@ -399,14 +409,16 @@ int aa_remount(struct aa_label *label, const struct path *path,
+ if (!buffer)
+ return -ENOMEM;
+ error = fn_for_each_confined(label, profile,
+- match_mnt(profile, path, buffer, NULL, NULL, NULL,
++ match_mnt(subj_cred, profile, path, buffer, NULL,
++ NULL, NULL,
+ flags, data, binary));
+ aa_put_buffer(buffer);
+
+ return error;
+ }
+
+-int aa_bind_mount(struct aa_label *label, const struct path *path,
++int aa_bind_mount(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
+ const char *dev_name, unsigned long flags)
+ {
+ struct aa_profile *profile;
+@@ -433,8 +445,8 @@ int aa_bind_mount(struct aa_label *label, const struct path *path,
+ goto out;
+
+ error = fn_for_each_confined(label, profile,
+- match_mnt(profile, path, buffer, &old_path, old_buffer,
+- NULL, flags, NULL, false));
++ match_mnt(subj_cred, profile, path, buffer, &old_path,
++ old_buffer, NULL, flags, NULL, false));
+ out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(old_buffer);
+@@ -443,7 +455,8 @@ out:
+ return error;
+ }
+
+-int aa_mount_change_type(struct aa_label *label, const struct path *path,
++int aa_mount_change_type(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
+ unsigned long flags)
+ {
+ struct aa_profile *profile;
+@@ -461,50 +474,63 @@ int aa_mount_change_type(struct aa_label *label, const struct path *path,
+ if (!buffer)
+ return -ENOMEM;
+ error = fn_for_each_confined(label, profile,
+- match_mnt(profile, path, buffer, NULL, NULL, NULL,
++ match_mnt(subj_cred, profile, path, buffer, NULL,
++ NULL, NULL,
+ flags, NULL, false));
+ aa_put_buffer(buffer);
+
+ return error;
+ }
+
+-int aa_move_mount(struct aa_label *label, const struct path *path,
+- const char *orig_name)
++int aa_move_mount(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *from_path,
++ const struct path *to_path)
+ {
+ struct aa_profile *profile;
+- char *buffer = NULL, *old_buffer = NULL;
+- struct path old_path;
++ char *to_buffer = NULL, *from_buffer = NULL;
+ int error;
+
+ AA_BUG(!label);
+- AA_BUG(!path);
++ AA_BUG(!from_path);
++ AA_BUG(!to_path);
++
++ to_buffer = aa_get_buffer(false);
++ from_buffer = aa_get_buffer(false);
++ error = -ENOMEM;
++ if (!to_buffer || !from_buffer)
++ goto out;
++ error = fn_for_each_confined(label, profile,
++ match_mnt(subj_cred, profile, to_path, to_buffer,
++ from_path, from_buffer,
++ NULL, MS_MOVE, NULL, false));
++out:
++ aa_put_buffer(to_buffer);
++ aa_put_buffer(from_buffer);
++
++ return error;
++}
++
++int aa_move_mount_old(const struct cred *subj_cred, struct aa_label *label,
++ const struct path *path, const char *orig_name)
++{
++ struct path old_path;
++ int error;
+
+ if (!orig_name || !*orig_name)
+ return -EINVAL;
+-
+ error = kern_path(orig_name, LOOKUP_FOLLOW, &old_path);
+ if (error)
+ return error;
+
+- buffer = aa_get_buffer(false);
+- old_buffer = aa_get_buffer(false);
+- error = -ENOMEM;
+- if (!buffer || !old_buffer)
+- goto out;
+- error = fn_for_each_confined(label, profile,
+- match_mnt(profile, path, buffer, &old_path, old_buffer,
+- NULL, MS_MOVE, NULL, false));
+-out:
+- aa_put_buffer(buffer);
+- aa_put_buffer(old_buffer);
++ error = aa_move_mount(subj_cred, label, &old_path, path);
+ path_put(&old_path);
+
+ return error;
+ }
+
+-int aa_new_mount(struct aa_label *label, const char *dev_name,
+- const struct path *path, const char *type, unsigned long flags,
+- void *data)
++int aa_new_mount(const struct cred *subj_cred, struct aa_label *label,
++ const char *dev_name, const struct path *path,
++ const char *type, unsigned long flags, void *data)
+ {
+ struct aa_profile *profile;
+ char *buffer = NULL, *dev_buffer = NULL;
+@@ -549,12 +575,14 @@ int aa_new_mount(struct aa_label *label, const char *dev_name,
+ goto out;
+ }
+ error = fn_for_each_confined(label, profile,
+- match_mnt(profile, path, buffer, dev_path, dev_buffer,
++ match_mnt(subj_cred, profile, path, buffer,
++ dev_path, dev_buffer,
+ type, flags, data, binary));
+ } else {
+ error = fn_for_each_confined(label, profile,
+- match_mnt_path_str(profile, path, buffer, dev_name,
+- type, flags, data, binary, NULL));
++ match_mnt_path_str(subj_cred, profile, path,
++ buffer, dev_name,
++ type, flags, data, binary, NULL));
+ }
+
+ out:
+@@ -566,7 +594,8 @@ out:
+ return error;
+ }
+
+-static int profile_umount(struct aa_profile *profile, const struct path *path,
++static int profile_umount(const struct cred *subj_cred,
++ struct aa_profile *profile, const struct path *path,
+ char *buffer)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+@@ -595,11 +624,13 @@ static int profile_umount(struct aa_profile *profile, const struct path *path,
+ error = -EACCES;
+
+ audit:
+- return audit_mount(profile, OP_UMOUNT, name, NULL, NULL, NULL, 0, NULL,
++ return audit_mount(subj_cred, profile, OP_UMOUNT, name, NULL, NULL,
++ NULL, 0, NULL,
+ AA_MAY_UMOUNT, &perms, info, error);
+ }
+
+-int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
++int aa_umount(const struct cred *subj_cred, struct aa_label *label,
++ struct vfsmount *mnt, int flags)
+ {
+ struct aa_profile *profile;
+ char *buffer = NULL;
+@@ -614,7 +645,7 @@ int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
+ return -ENOMEM;
+
+ error = fn_for_each_confined(label, profile,
+- profile_umount(profile, &path, buffer));
++ profile_umount(subj_cred, profile, &path, buffer));
+ aa_put_buffer(buffer);
+
+ return error;
+@@ -624,7 +655,8 @@ int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
+ *
+ * Returns: label for transition or ERR_PTR. Does not return NULL
+ */
+-static struct aa_label *build_pivotroot(struct aa_profile *profile,
++static struct aa_label *build_pivotroot(const struct cred *subj_cred,
++ struct aa_profile *profile,
+ const struct path *new_path,
+ char *new_buffer,
+ const struct path *old_path,
+@@ -669,7 +701,8 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile,
+ error = 0;
+
+ audit:
+- error = audit_mount(profile, OP_PIVOTROOT, new_name, old_name,
++ error = audit_mount(subj_cred, profile, OP_PIVOTROOT, new_name,
++ old_name,
+ NULL, trans_name, 0, NULL, AA_MAY_PIVOTROOT,
+ &perms, info, error);
+ if (error)
+@@ -678,7 +711,8 @@ audit:
+ return aa_get_newest_label(&profile->label);
+ }
+
+-int aa_pivotroot(struct aa_label *label, const struct path *old_path,
++int aa_pivotroot(const struct cred *subj_cred, struct aa_label *label,
++ const struct path *old_path,
+ const struct path *new_path)
+ {
+ struct aa_profile *profile;
+@@ -696,7 +730,8 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
+ if (!old_buffer || !new_buffer)
+ goto out;
+ target = fn_label_build(label, profile, GFP_KERNEL,
+- build_pivotroot(profile, new_path, new_buffer,
++ build_pivotroot(subj_cred, profile, new_path,
++ new_buffer,
+ old_path, old_buffer));
+ if (!target) {
+ info = "label build failed";
+@@ -722,7 +757,8 @@ out:
+ fail:
+ /* TODO: add back in auditing of new_name and old_name */
+ error = fn_for_each(label, profile,
+- audit_mount(profile, OP_PIVOTROOT, NULL /*new_name */,
++ audit_mount(subj_cred, profile, OP_PIVOTROOT,
++ NULL /*new_name */,
+ NULL /* old_name */,
+ NULL, NULL,
+ 0, NULL, AA_MAY_PIVOTROOT, &nullperms, info,
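
aa_move_mount() shows the buffer idiom used throughout this file: both
pathname buffers are taken up front, a single -ENOMEM covers either allocation
failing, and the out label unconditionally releases both, so aa_put_buffer()
has to tolerate NULL (the error path above relies on that). The skeleton, with
do_checks() as a hypothetical stand-in for the fn_for_each_confined() walk:

    char *to_buffer = aa_get_buffer(false);
    char *from_buffer = aa_get_buffer(false);
    int error = -ENOMEM;

    if (!to_buffer || !from_buffer)
        goto out;
    error = do_checks();
    out:
    aa_put_buffer(to_buffer);   /* NULL-safe */
    aa_put_buffer(from_buffer);
    return error;
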
+diff --git a/security/apparmor/net.c b/security/apparmor/net.c
+index 788be1609a865..704c171232ab4 100644
+--- a/security/apparmor/net.c
++++ b/security/apparmor/net.c
+@@ -71,6 +71,7 @@ static const char * const net_mask_names[] = {
+ void audit_net_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+ if (address_family_names[sa->u.net->family])
+ audit_log_format(ab, " family=\"%s\"",
+@@ -78,35 +79,36 @@ void audit_net_cb(struct audit_buffer *ab, void *va)
+ else
+ audit_log_format(ab, " family=\"unknown(%d)\"",
+ sa->u.net->family);
+- if (sock_type_names[aad(sa)->net.type])
++ if (sock_type_names[ad->net.type])
+ audit_log_format(ab, " sock_type=\"%s\"",
+- sock_type_names[aad(sa)->net.type]);
++ sock_type_names[ad->net.type]);
+ else
+ audit_log_format(ab, " sock_type=\"unknown(%d)\"",
+- aad(sa)->net.type);
+- audit_log_format(ab, " protocol=%d", aad(sa)->net.protocol);
++ ad->net.type);
++ audit_log_format(ab, " protocol=%d", ad->net.protocol);
+
+- if (aad(sa)->request & NET_PERMS_MASK) {
++ if (ad->request & NET_PERMS_MASK) {
+ audit_log_format(ab, " requested_mask=");
+- aa_audit_perm_mask(ab, aad(sa)->request, NULL, 0,
++ aa_audit_perm_mask(ab, ad->request, NULL, 0,
+ net_mask_names, NET_PERMS_MASK);
+
+- if (aad(sa)->denied & NET_PERMS_MASK) {
++ if (ad->denied & NET_PERMS_MASK) {
+ audit_log_format(ab, " denied_mask=");
+- aa_audit_perm_mask(ab, aad(sa)->denied, NULL, 0,
++ aa_audit_perm_mask(ab, ad->denied, NULL, 0,
+ net_mask_names, NET_PERMS_MASK);
+ }
+ }
+- if (aad(sa)->peer) {
++ if (ad->peer) {
+ audit_log_format(ab, " peer=");
+- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
++ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+ }
+ }
+
+ /* Generic af perm */
+-int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
+- u32 request, u16 family, int type)
++int aa_profile_af_perm(struct aa_profile *profile,
++ struct apparmor_audit_data *ad, u32 request, u16 family,
++ int type)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+ typeof(*rules), list);
+@@ -130,21 +132,23 @@ int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
+ perms = *aa_lookup_perms(&rules->policy, state);
+ aa_apply_modes_to_perms(profile, &perms);
+
+- return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
++ return aa_check_perms(profile, &perms, request, ad, audit_net_cb);
+ }
+
+-int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
+- int type, int protocol)
++int aa_af_perm(const struct cred *subj_cred, struct aa_label *label,
++ const char *op, u32 request, u16 family, int type, int protocol)
+ {
+ struct aa_profile *profile;
+- DEFINE_AUDIT_NET(sa, op, NULL, family, type, protocol);
++ DEFINE_AUDIT_NET(ad, op, NULL, family, type, protocol);
+
+ return fn_for_each_confined(label, profile,
+- aa_profile_af_perm(profile, &sa, request, family,
++ aa_profile_af_perm(profile, &ad, request, family,
+ type));
+ }
+
+-static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
++static int aa_label_sk_perm(const struct cred *subj_cred,
++ struct aa_label *label,
++ const char *op, u32 request,
+ struct sock *sk)
+ {
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+@@ -155,10 +159,11 @@ static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
+
+ if (ctx->label != kernel_t && !unconfined(label)) {
+ struct aa_profile *profile;
+- DEFINE_AUDIT_SK(sa, op, sk);
++ DEFINE_AUDIT_SK(ad, op, sk);
+
++ ad.subj_cred = subj_cred;
+ error = fn_for_each_confined(label, profile,
+- aa_profile_af_sk_perm(profile, &sa, request, sk));
++ aa_profile_af_sk_perm(profile, &ad, request, sk));
+ }
+
+ return error;
+@@ -174,21 +179,21 @@ int aa_sk_perm(const char *op, u32 request, struct sock *sk)
+
+ /* TODO: switch to begin_current_label ???? */
+ label = begin_current_label_crit_section();
+- error = aa_label_sk_perm(label, op, request, sk);
++ error = aa_label_sk_perm(current_cred(), label, op, request, sk);
+ end_current_label_crit_section(label);
+
+ return error;
+ }
+
+
+-int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
+- struct socket *sock)
++int aa_sock_file_perm(const struct cred *subj_cred, struct aa_label *label,
++ const char *op, u32 request, struct socket *sock)
+ {
+ AA_BUG(!label);
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+
+- return aa_label_sk_perm(label, op, request, sock->sk);
++ return aa_label_sk_perm(subj_cred, label, op, request, sock->sk);
+ }
+
+ #ifdef CONFIG_NETWORK_SECMARK
+@@ -214,7 +219,7 @@ static int apparmor_secmark_init(struct aa_secmark *secmark)
+ }
+
+ static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid,
+- struct common_audit_data *sa)
++ struct apparmor_audit_data *ad)
+ {
+ int i, ret;
+ struct aa_perms perms = { };
+@@ -245,17 +250,17 @@ static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid,
+
+ aa_apply_modes_to_perms(profile, &perms);
+
+- return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
++ return aa_check_perms(profile, &perms, request, ad, audit_net_cb);
+ }
+
+ int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
+ u32 secid, const struct sock *sk)
+ {
+ struct aa_profile *profile;
+- DEFINE_AUDIT_SK(sa, op, sk);
++ DEFINE_AUDIT_SK(ad, op, sk);
+
+ return fn_for_each_confined(label, profile,
+ aa_secmark_perm(profile, request, secid,
+- &sa));
++ &ad));
+ }
+ #endif
+diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
+index b38f7b2a5e1d5..8a07793ce1032 100644
+--- a/security/apparmor/policy.c
++++ b/security/apparmor/policy.c
+@@ -255,6 +255,7 @@ void aa_free_profile(struct aa_profile *profile)
+
+ aa_put_ns(profile->ns);
+ kfree_sensitive(profile->rename);
++ kfree_sensitive(profile->disconnected);
+
+ free_attachment(&profile->attach);
+
+@@ -285,6 +286,7 @@ void aa_free_profile(struct aa_profile *profile)
+ /**
+ * aa_alloc_profile - allocate, initialize and return a new profile
+ * @hname: name of the profile (NOT NULL)
++ * @proxy: proxy to use, or NULL to allocate a new one
+ * @gfp: allocation type
+ *
+ * Returns: refcount profile or NULL on failure
+@@ -721,16 +723,17 @@ static int replacement_allowed(struct aa_profile *profile, int noreplace,
+ static void audit_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+- if (aad(sa)->iface.ns) {
++ if (ad->iface.ns) {
+ audit_log_format(ab, " ns=");
+- audit_log_untrustedstring(ab, aad(sa)->iface.ns);
++ audit_log_untrustedstring(ab, ad->iface.ns);
+ }
+ }
+
+ /**
+ * audit_policy - Do auditing of policy changes
+- * @label: label to check if it can manage policy
++ * @subj_label: label to check if it can manage policy
+ * @op: policy operation being performed
+ * @ns_name: name of namespace being manipulated
+ * @name: name of profile being manipulated (NOT NULL)
+@@ -739,19 +742,19 @@ static void audit_cb(struct audit_buffer *ab, void *va)
+ *
+ * Returns: the error to be returned after audit is done
+ */
+-static int audit_policy(struct aa_label *label, const char *op,
++static int audit_policy(struct aa_label *subj_label, const char *op,
+ const char *ns_name, const char *name,
+ const char *info, int error)
+ {
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, op);
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, op);
+
+- aad(&sa)->iface.ns = ns_name;
+- aad(&sa)->name = name;
+- aad(&sa)->info = info;
+- aad(&sa)->error = error;
+- aad(&sa)->label = label;
++ ad.iface.ns = ns_name;
++ ad.name = name;
++ ad.info = info;
++ ad.error = error;
++ ad.subj_label = subj_label;
+
+- aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, audit_cb);
++ aa_audit_msg(AUDIT_APPARMOR_STATUS, &ad, audit_cb);
+
+ return error;
+ }
+@@ -759,31 +762,35 @@ static int audit_policy(struct aa_label *label, const char *op,
+ /* don't call out to other LSMs in the stack for apparmor policy admin
+ * permissions
+ */
+-static int policy_ns_capable(struct aa_label *label,
++static int policy_ns_capable(const struct cred *subj_cred,
++ struct aa_label *label,
+ struct user_namespace *userns, int cap)
+ {
+ int err;
+
+ /* check for MAC_ADMIN cap in cred */
+- err = cap_capable(current_cred(), userns, cap, CAP_OPT_NONE);
++ err = cap_capable(subj_cred, userns, cap, CAP_OPT_NONE);
+ if (!err)
+- err = aa_capable(label, cap, CAP_OPT_NONE);
++ err = aa_capable(subj_cred, label, cap, CAP_OPT_NONE);
+
+ return err;
+ }
+
+ /**
+ * aa_policy_view_capable - check if viewing policy in at @ns is allowed
+- * label: label that is trying to view policy in ns
+- * ns: namespace being viewed by @label (may be NULL if @label's ns)
++ * @subj_cred: cred of subject
++ * @label: label that is trying to view policy in ns
++ * @ns: namespace being viewed by @label (may be NULL if @label's ns)
++ *
+ * Returns: true if viewing policy is allowed
+ *
+ * If @ns is NULL then the namespace being viewed is assumed to be the
+ * tasks current namespace.
+ */
+-bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns)
++bool aa_policy_view_capable(const struct cred *subj_cred,
++ struct aa_label *label, struct aa_ns *ns)
+ {
+- struct user_namespace *user_ns = current_user_ns();
++ struct user_namespace *user_ns = subj_cred->user_ns;
+ struct aa_ns *view_ns = labels_view(label);
+ bool root_in_user_ns = uid_eq(current_euid(), make_kuid(user_ns, 0)) ||
+ in_egroup_p(make_kgid(user_ns, 0));
+@@ -800,15 +807,17 @@ bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns)
+ return response;
+ }
+
+-bool aa_policy_admin_capable(struct aa_label *label, struct aa_ns *ns)
++bool aa_policy_admin_capable(const struct cred *subj_cred,
++ struct aa_label *label, struct aa_ns *ns)
+ {
+- struct user_namespace *user_ns = current_user_ns();
+- bool capable = policy_ns_capable(label, user_ns, CAP_MAC_ADMIN) == 0;
++ struct user_namespace *user_ns = subj_cred->user_ns;
++ bool capable = policy_ns_capable(subj_cred, label, user_ns,
++ CAP_MAC_ADMIN) == 0;
+
+ AA_DEBUG("cap_mac_admin? %d\n", capable);
+ AA_DEBUG("policy locked? %d\n", aa_g_lock_policy);
+
+- return aa_policy_view_capable(label, ns) && capable &&
++ return aa_policy_view_capable(subj_cred, label, ns) && capable &&
+ !aa_g_lock_policy;
+ }
+
+@@ -818,7 +827,7 @@ bool aa_current_policy_view_capable(struct aa_ns *ns)
+ bool res;
+
+ label = __begin_current_label_crit_section();
+- res = aa_policy_view_capable(label, ns);
++ res = aa_policy_view_capable(current_cred(), label, ns);
+ __end_current_label_crit_section(label);
+
+ return res;
+@@ -830,7 +839,7 @@ bool aa_current_policy_admin_capable(struct aa_ns *ns)
+ bool res;
+
+ label = __begin_current_label_crit_section();
+- res = aa_policy_admin_capable(label, ns);
++ res = aa_policy_admin_capable(current_cred(), label, ns);
+ __end_current_label_crit_section(label);
+
+ return res;
+@@ -838,12 +847,15 @@ bool aa_current_policy_admin_capable(struct aa_ns *ns)
+
+ /**
+ * aa_may_manage_policy - can the current task manage policy
+ * @subj_cred: subject's cred
+ * @label: label to check if it can manage policy
++ * @ns: namespace being managed by @label (may be NULL if @label's ns)
+ * @mask: contains the policy manipulation operation being done
+ *
+ * Returns: 0 if the task is allowed to manipulate policy else error
+ */
+-int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns, u32 mask)
++int aa_may_manage_policy(const struct cred *subj_cred, struct aa_label *label,
++ struct aa_ns *ns, u32 mask)
+ {
+ const char *op;
+
+@@ -859,7 +871,7 @@ int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns, u32 mask)
+ return audit_policy(label, op, NULL, NULL, "policy_locked",
+ -EACCES);
+
+- if (!aa_policy_admin_capable(label, ns))
++ if (!aa_policy_admin_capable(subj_cred, label, ns))
+ return audit_policy(label, op, NULL, NULL, "not policy admin",
+ -EACCES);
+
+@@ -950,11 +962,11 @@ static void __replace_profile(struct aa_profile *old, struct aa_profile *new)
+
+ /**
+ * __lookup_replace - lookup replacement information for a profile
+- * @ns - namespace the lookup occurs in
+- * @hname - name of profile to lookup
+- * @noreplace - true if not replacing an existing profile
+- * @p - Returns: profile to be replaced
+- * @info - Returns: info string on why lookup failed
++ * @ns: namespace the lookup occurs in
++ * @hname: name of profile to lookup
++ * @noreplace: true if not replacing an existing profile
++ * @p: Returns - profile to be replaced
++ * @info: Returns - info string on why lookup failed
+ *
+ * Returns: profile to replace (no ref) on success else ptr error
+ */
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index 8b8846073e142..dbc83455d900e 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -34,17 +34,18 @@
+ static void audit_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+- if (aad(sa)->iface.ns) {
++ if (ad->iface.ns) {
+ audit_log_format(ab, " ns=");
+- audit_log_untrustedstring(ab, aad(sa)->iface.ns);
++ audit_log_untrustedstring(ab, ad->iface.ns);
+ }
+- if (aad(sa)->name) {
++ if (ad->name) {
+ audit_log_format(ab, " name=");
+- audit_log_untrustedstring(ab, aad(sa)->name);
++ audit_log_untrustedstring(ab, ad->name);
+ }
+- if (aad(sa)->iface.pos)
+- audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
++ if (ad->iface.pos)
++ audit_log_format(ab, " offset=%ld", ad->iface.pos);
+ }
+
+ /**
+@@ -63,18 +64,18 @@ static int audit_iface(struct aa_profile *new, const char *ns_name,
+ int error)
+ {
+ struct aa_profile *profile = labels_profile(aa_current_raw_label());
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
+ if (e)
+- aad(&sa)->iface.pos = e->pos - e->start;
+- aad(&sa)->iface.ns = ns_name;
++ ad.iface.pos = e->pos - e->start;
++ ad.iface.ns = ns_name;
+ if (new)
+- aad(&sa)->name = new->base.hname;
++ ad.name = new->base.hname;
+ else
+- aad(&sa)->name = name;
+- aad(&sa)->info = info;
+- aad(&sa)->error = error;
++ ad.name = name;
++ ad.info = info;
++ ad.error = error;
+
+- return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
++ return aa_audit(AUDIT_APPARMOR_STATUS, profile, &ad, audit_cb);
+ }
+
+ void __aa_loaddata_update(struct aa_loaddata *data, long revision)
+@@ -807,7 +808,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ const char *info = "failed to unpack profile";
+ size_t ns_len;
+ struct rhashtable_params params = { 0 };
+- char *key = NULL;
++ char *key = NULL, *disconnected = NULL;
+ struct aa_data *data;
+ int error = -EPROTO;
+ kernel_cap_t tmpcap;
+@@ -873,7 +874,8 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ }
+
+ /* disconnected attachment string is optional */
+- (void) aa_unpack_str(e, &profile->disconnected, "disconnected");
++ (void) aa_unpack_strdup(e, &disconnected, "disconnected");
++ profile->disconnected = disconnected;
+
+ /* per profile debug flags (complain, audit) */
+ if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
+diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
+index e859481648962..dcc94c3153d51 100644
+--- a/security/apparmor/resource.c
++++ b/security/apparmor/resource.c
+@@ -30,18 +30,20 @@ struct aa_sfs_entry aa_sfs_entry_rlimit[] = {
+ static void audit_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+ audit_log_format(ab, " rlimit=%s value=%lu",
+- rlim_names[aad(sa)->rlim.rlim], aad(sa)->rlim.max);
+- if (aad(sa)->peer) {
++ rlim_names[ad->rlim.rlim], ad->rlim.max);
++ if (ad->peer) {
+ audit_log_format(ab, " peer=");
+- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
++ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+ }
+ }
+
+ /**
+ * audit_resource - audit setting resource limit
++ * @subj_cred: cred setting the resource
+ * @profile: profile being enforced (NOT NULL)
+ * @resource: rlimit being audited
+ * @value: value being set
+@@ -49,22 +51,24 @@ static void audit_cb(struct audit_buffer *ab, void *va)
+ * @info: info being audited
+ * @error: error value
+ *
+- * Returns: 0 or sa->error else other error code on failure
++ * Returns: 0 or ad->error else other error code on failure
+ */
+-static int audit_resource(struct aa_profile *profile, unsigned int resource,
++static int audit_resource(const struct cred *subj_cred,
++ struct aa_profile *profile, unsigned int resource,
+ unsigned long value, struct aa_label *peer,
+ const char *info, int error)
+ {
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_RLIMITS,
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_RLIMITS,
+ OP_SETRLIMIT);
+
+- aad(&sa)->rlim.rlim = resource;
+- aad(&sa)->rlim.max = value;
+- aad(&sa)->peer = peer;
+- aad(&sa)->info = info;
+- aad(&sa)->error = error;
++ ad.subj_cred = subj_cred;
++ ad.rlim.rlim = resource;
++ ad.rlim.max = value;
++ ad.peer = peer;
++ ad.info = info;
++ ad.error = error;
+
+- return aa_audit(AUDIT_APPARMOR_AUTO, profile, &sa, audit_cb);
++ return aa_audit(AUDIT_APPARMOR_AUTO, profile, &ad, audit_cb);
+ }
+
+ /**
+@@ -81,7 +85,8 @@ int aa_map_resource(int resource)
+ return rlim_map[resource];
+ }
+
+-static int profile_setrlimit(struct aa_profile *profile, unsigned int resource,
++static int profile_setrlimit(const struct cred *subj_cred,
++ struct aa_profile *profile, unsigned int resource,
+ struct rlimit *new_rlim)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+@@ -91,22 +96,24 @@ static int profile_setrlimit(struct aa_profile *profile, unsigned int resource,
+ if (rules->rlimits.mask & (1 << resource) && new_rlim->rlim_max >
+ rules->rlimits.limits[resource].rlim_max)
+ e = -EACCES;
+- return audit_resource(profile, resource, new_rlim->rlim_max, NULL, NULL,
+- e);
++ return audit_resource(subj_cred, profile, resource, new_rlim->rlim_max,
++ NULL, NULL, e);
+ }
+
+ /**
+ * aa_task_setrlimit - test permission to set an rlimit
+- * @label - label confining the task (NOT NULL)
+- * @task - task the resource is being set on
+- * @resource - the resource being set
+- * @new_rlim - the new resource limit (NOT NULL)
++ * @subj_cred: cred setting the limit
++ * @label: label confining the task (NOT NULL)
++ * @task: task the resource is being set on
++ * @resource: the resource being set
++ * @new_rlim: the new resource limit (NOT NULL)
+ *
+ * Control raising the process's hard limit.
+ *
+ * Returns: 0 or error code if setting resource failed
+ */
+-int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
++int aa_task_setrlimit(const struct cred *subj_cred, struct aa_label *label,
++ struct task_struct *task,
+ unsigned int resource, struct rlimit *new_rlim)
+ {
+ struct aa_profile *profile;
+@@ -125,14 +132,15 @@ int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
+ */
+
+ if (label != peer &&
+- aa_capable(label, CAP_SYS_RESOURCE, CAP_OPT_NOAUDIT) != 0)
++ aa_capable(subj_cred, label, CAP_SYS_RESOURCE, CAP_OPT_NOAUDIT) != 0)
+ error = fn_for_each(label, profile,
+- audit_resource(profile, resource,
++ audit_resource(subj_cred, profile, resource,
+ new_rlim->rlim_max, peer,
+ "cap_sys_resource", -EACCES));
+ else
+ error = fn_for_each_confined(label, profile,
+- profile_setrlimit(profile, resource, new_rlim));
++ profile_setrlimit(subj_cred, profile, resource,
++ new_rlim));
+ aa_put_label(peer);
+
+ return error;
+diff --git a/security/apparmor/task.c b/security/apparmor/task.c
+index 84d16a29bfcbc..0d7af707cccdd 100644
+--- a/security/apparmor/task.c
++++ b/security/apparmor/task.c
+@@ -208,70 +208,75 @@ static const char *audit_ptrace_mask(u32 mask)
+ static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+- if (aad(sa)->request & AA_PTRACE_PERM_MASK) {
++ if (ad->request & AA_PTRACE_PERM_MASK) {
+ audit_log_format(ab, " requested_mask=\"%s\"",
+- audit_ptrace_mask(aad(sa)->request));
++ audit_ptrace_mask(ad->request));
+
+- if (aad(sa)->denied & AA_PTRACE_PERM_MASK) {
++ if (ad->denied & AA_PTRACE_PERM_MASK) {
+ audit_log_format(ab, " denied_mask=\"%s\"",
+- audit_ptrace_mask(aad(sa)->denied));
++ audit_ptrace_mask(ad->denied));
+ }
+ }
+ audit_log_format(ab, " peer=");
+- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
++ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+ }
+
+ /* assumes check for RULE_MEDIATES is already done */
+ /* TODO: conditionals */
+-static int profile_ptrace_perm(struct aa_profile *profile,
+- struct aa_label *peer, u32 request,
+- struct common_audit_data *sa)
++static int profile_ptrace_perm(const struct cred *cred,
++ struct aa_profile *profile,
++ struct aa_label *peer, u32 request,
++ struct apparmor_audit_data *ad)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+ typeof(*rules), list);
+ struct aa_perms perms = { };
+
+- aad(sa)->peer = peer;
++ ad->subj_cred = cred;
++ ad->peer = peer;
+ aa_profile_match_label(profile, rules, peer, AA_CLASS_PTRACE, request,
+ &perms);
+ aa_apply_modes_to_perms(profile, &perms);
+- return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
++ return aa_check_perms(profile, &perms, request, ad, audit_ptrace_cb);
+ }
+
+-static int profile_tracee_perm(struct aa_profile *tracee,
++static int profile_tracee_perm(const struct cred *cred,
++ struct aa_profile *tracee,
+ struct aa_label *tracer, u32 request,
+- struct common_audit_data *sa)
++ struct apparmor_audit_data *ad)
+ {
+ if (profile_unconfined(tracee) || unconfined(tracer) ||
+ !ANY_RULE_MEDIATES(&tracee->rules, AA_CLASS_PTRACE))
+ return 0;
+
+- return profile_ptrace_perm(tracee, tracer, request, sa);
++ return profile_ptrace_perm(cred, tracee, tracer, request, ad);
+ }
+
+-static int profile_tracer_perm(struct aa_profile *tracer,
++static int profile_tracer_perm(const struct cred *cred,
++ struct aa_profile *tracer,
+ struct aa_label *tracee, u32 request,
+- struct common_audit_data *sa)
++ struct apparmor_audit_data *ad)
+ {
+ if (profile_unconfined(tracer))
+ return 0;
+
+ if (ANY_RULE_MEDIATES(&tracer->rules, AA_CLASS_PTRACE))
+- return profile_ptrace_perm(tracer, tracee, request, sa);
++ return profile_ptrace_perm(cred, tracer, tracee, request, ad);
+
+ /* profile uses the old style capability check for ptrace */
+ if (&tracer->label == tracee)
+ return 0;
+
+- aad(sa)->label = &tracer->label;
+- aad(sa)->peer = tracee;
+- aad(sa)->request = 0;
+- aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE,
+- CAP_OPT_NONE);
++ ad->subj_label = &tracer->label;
++ ad->peer = tracee;
++ ad->request = 0;
++ ad->error = aa_capable(cred, &tracer->label, CAP_SYS_PTRACE,
++ CAP_OPT_NONE);
+
+- return aa_audit(AUDIT_APPARMOR_AUTO, tracer, sa, audit_ptrace_cb);
++ return aa_audit(AUDIT_APPARMOR_AUTO, tracer, ad, audit_ptrace_cb);
+ }
+
+ /**
+@@ -282,7 +287,8 @@ static int profile_tracer_perm(struct aa_profile *tracer,
+ *
+ * Returns: %0 else error code if permission denied or error
+ */
+-int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
++int aa_may_ptrace(const struct cred *tracer_cred, struct aa_label *tracer,
++ const struct cred *tracee_cred, struct aa_label *tracee,
+ u32 request)
+ {
+ struct aa_profile *profile;
+@@ -290,6 +296,8 @@ int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
+ DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_PTRACE, OP_PTRACE);
+
+ return xcheck_labels(tracer, tracee, profile,
+- profile_tracer_perm(profile, tracee, request, &sa),
+- profile_tracee_perm(profile, tracer, xrequest, &sa));
++ profile_tracer_perm(tracer_cred, profile, tracee,
++ request, &sa),
++ profile_tracee_perm(tracee_cred, profile, tracer,
++ xrequest, &sa));
+ }
+diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
+index 232191ee09e31..b6e074ac02273 100644
+--- a/security/integrity/Kconfig
++++ b/security/integrity/Kconfig
+@@ -68,8 +68,6 @@ config INTEGRITY_MACHINE_KEYRING
+ depends on INTEGRITY_ASYMMETRIC_KEYS
+ depends on SYSTEM_BLACKLIST_KEYRING
+ depends on LOAD_UEFI_KEYS || LOAD_PPC_KEYS
+- select INTEGRITY_CA_MACHINE_KEYRING if LOAD_PPC_KEYS
+- select INTEGRITY_CA_MACHINE_KEYRING_MAX if LOAD_PPC_KEYS
+ help
+ If set, provide a keyring to which Machine Owner Keys (MOK) may
+ be added. This keyring shall contain just MOK keys. Unlike keys
+diff --git a/security/integrity/iint.c b/security/integrity/iint.c
+index a462df827de2d..27ea19fb1f54c 100644
+--- a/security/integrity/iint.c
++++ b/security/integrity/iint.c
+@@ -66,9 +66,32 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
+ return iint;
+ }
+
+-static void iint_free(struct integrity_iint_cache *iint)
++#define IMA_MAX_NESTING (FILESYSTEM_MAX_STACK_DEPTH+1)
++
++/*
++ * It is not clear that IMA should be nested at all, but as long as it measures
++ * files both on overlayfs and on underlying fs, we need to annotate the iint
++ * mutex to avoid lockdep false positives related to IMA + overlayfs.
++ * See ovl_lockdep_annotate_inode_mutex_key() for more details.
++ */
++static inline void iint_lockdep_annotate(struct integrity_iint_cache *iint,
++ struct inode *inode)
++{
++#ifdef CONFIG_LOCKDEP
++ static struct lock_class_key iint_mutex_key[IMA_MAX_NESTING];
++
++ int depth = inode->i_sb->s_stack_depth;
++
++ if (WARN_ON_ONCE(depth < 0 || depth >= IMA_MAX_NESTING))
++ depth = 0;
++
++ lockdep_set_class(&iint->mutex, &iint_mutex_key[depth]);
++#endif
++}
++
++static void iint_init_always(struct integrity_iint_cache *iint,
++ struct inode *inode)
+ {
+- kfree(iint->ima_hash);
+ iint->ima_hash = NULL;
+ iint->version = 0;
+ iint->flags = 0UL;
+@@ -80,6 +103,14 @@ static void iint_free(struct integrity_iint_cache *iint)
+ iint->ima_creds_status = INTEGRITY_UNKNOWN;
+ iint->evm_status = INTEGRITY_UNKNOWN;
+ iint->measured_pcrs = 0;
++ mutex_init(&iint->mutex);
++ iint_lockdep_annotate(iint, inode);
++}
++
++static void iint_free(struct integrity_iint_cache *iint)
++{
++ kfree(iint->ima_hash);
++ mutex_destroy(&iint->mutex);
+ kmem_cache_free(iint_cache, iint);
+ }
+
+@@ -104,6 +135,8 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
+ if (!iint)
+ return NULL;
+
++ iint_init_always(iint, inode);
++
+ write_lock(&integrity_iint_lock);
+
+ p = &integrity_iint_tree.rb_node;
+@@ -153,25 +186,18 @@ void integrity_inode_free(struct inode *inode)
+ iint_free(iint);
+ }
+
+-static void init_once(void *foo)
++static void iint_init_once(void *foo)
+ {
+ struct integrity_iint_cache *iint = (struct integrity_iint_cache *) foo;
+
+ memset(iint, 0, sizeof(*iint));
+- iint->ima_file_status = INTEGRITY_UNKNOWN;
+- iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+- iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+- iint->ima_read_status = INTEGRITY_UNKNOWN;
+- iint->ima_creds_status = INTEGRITY_UNKNOWN;
+- iint->evm_status = INTEGRITY_UNKNOWN;
+- mutex_init(&iint->mutex);
+ }
+
+ static int __init integrity_iintcache_init(void)
+ {
+ iint_cache =
+ kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
+- 0, SLAB_PANIC, init_once);
++ 0, SLAB_PANIC, iint_init_once);
+ return 0;
+ }
+ DEFINE_LSM(integrity) = {
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index 452e80b541e54..597ea0c4d72f7 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -243,6 +243,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ {
+ const char *audit_cause = "failed";
+ struct inode *inode = file_inode(file);
++ struct inode *real_inode = d_real_inode(file_dentry(file));
+ const char *filename = file->f_path.dentry->d_name.name;
+ struct ima_max_digest_data hash;
+ struct kstat stat;
+@@ -302,6 +303,10 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ iint->ima_hash = tmpbuf;
+ memcpy(iint->ima_hash, &hash, length);
+ iint->version = i_version;
++ if (real_inode != inode) {
++ iint->real_ino = real_inode->i_ino;
++ iint->real_dev = real_inode->i_sb->s_dev;
++ }
+
+ /* Possibly temporary failure due to type of read (eg. O_DIRECT) */
+ if (!result)
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 365db0e43d7c2..cc1217ac2c6fa 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -25,6 +25,7 @@
+ #include <linux/xattr.h>
+ #include <linux/ima.h>
+ #include <linux/fs.h>
++#include <linux/iversion.h>
+
+ #include "ima.h"
+
+@@ -207,7 +208,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ u32 secid, char *buf, loff_t size, int mask,
+ enum ima_hooks func)
+ {
+- struct inode *inode = file_inode(file);
++ struct inode *backing_inode, *inode = file_inode(file);
+ struct integrity_iint_cache *iint = NULL;
+ struct ima_template_desc *template_desc = NULL;
+ char *pathbuf = NULL;
+@@ -284,6 +285,19 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ iint->measured_pcrs = 0;
+ }
+
++ /* Detect and re-evaluate changes made to the backing file. */
++ backing_inode = d_real_inode(file_dentry(file));
++ if (backing_inode != inode &&
++ (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) {
++ if (!IS_I_VERSION(backing_inode) ||
++ backing_inode->i_sb->s_dev != iint->real_dev ||
++ backing_inode->i_ino != iint->real_ino ||
++ !inode_eq_iversion(backing_inode, iint->version)) {
++ iint->flags &= ~IMA_DONE_MASK;
++ iint->measured_pcrs = 0;
++ }
++ }
++
+ /* Determine if already appraised/measured based on bitmask
+ * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
+ * IMA_AUDIT, IMA_AUDITED)
+diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
+index d7553c93f5c0d..9561db7cf6b42 100644
+--- a/security/integrity/integrity.h
++++ b/security/integrity/integrity.h
+@@ -164,6 +164,8 @@ struct integrity_iint_cache {
+ unsigned long flags;
+ unsigned long measured_pcrs;
+ unsigned long atomic_flags;
++ unsigned long real_ino;
++ dev_t real_dev;
+ enum integrity_status ima_file_status:4;
+ enum integrity_status ima_mmap_status:4;
+ enum integrity_status ima_bprm_status:4;
+diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c
+index 85fb5c22529a7..fee1ab2c734d3 100644
+--- a/security/keys/trusted-keys/trusted_core.c
++++ b/security/keys/trusted-keys/trusted_core.c
+@@ -358,17 +358,17 @@ static int __init init_trusted(void)
+ if (!get_random)
+ get_random = kernel_get_random;
+
+- static_call_update(trusted_key_seal,
+- trusted_key_sources[i].ops->seal);
+- static_call_update(trusted_key_unseal,
+- trusted_key_sources[i].ops->unseal);
+- static_call_update(trusted_key_get_random,
+- get_random);
+- trusted_key_exit = trusted_key_sources[i].ops->exit;
+- migratable = trusted_key_sources[i].ops->migratable;
+-
+ ret = trusted_key_sources[i].ops->init();
+- if (!ret)
++ if (!ret) {
++ static_call_update(trusted_key_seal, trusted_key_sources[i].ops->seal);
++ static_call_update(trusted_key_unseal, trusted_key_sources[i].ops->unseal);
++ static_call_update(trusted_key_get_random, get_random);
++
++ trusted_key_exit = trusted_key_sources[i].ops->exit;
++ migratable = trusted_key_sources[i].ops->migratable;
++ }
++
++ if (!ret || ret != -ENODEV)
+ break;
+ }
+
+diff --git a/security/keys/trusted-keys/trusted_tee.c b/security/keys/trusted-keys/trusted_tee.c
+index ac3e270ade69b..aa3d477de6db5 100644
+--- a/security/keys/trusted-keys/trusted_tee.c
++++ b/security/keys/trusted-keys/trusted_tee.c
+@@ -65,24 +65,16 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ int ret;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+- struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
++ struct tee_shm *reg_shm = NULL;
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+- reg_shm_in = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
+- p->key_len);
+- if (IS_ERR(reg_shm_in)) {
+- dev_err(pvt_data.dev, "key shm register failed\n");
+- return PTR_ERR(reg_shm_in);
+- }
+-
+- reg_shm_out = tee_shm_register_kernel_buf(pvt_data.ctx, p->blob,
+- sizeof(p->blob));
+- if (IS_ERR(reg_shm_out)) {
+- dev_err(pvt_data.dev, "blob shm register failed\n");
+- ret = PTR_ERR(reg_shm_out);
+- goto out;
++ reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
++ sizeof(p->key) + sizeof(p->blob));
++ if (IS_ERR(reg_shm)) {
++ dev_err(pvt_data.dev, "shm register failed\n");
++ return PTR_ERR(reg_shm);
+ }
+
+ inv_arg.func = TA_CMD_SEAL;
+@@ -90,13 +82,13 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+- param[0].u.memref.shm = reg_shm_in;
++ param[0].u.memref.shm = reg_shm;
+ param[0].u.memref.size = p->key_len;
+ param[0].u.memref.shm_offs = 0;
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+- param[1].u.memref.shm = reg_shm_out;
++ param[1].u.memref.shm = reg_shm;
+ param[1].u.memref.size = sizeof(p->blob);
+- param[1].u.memref.shm_offs = 0;
++ param[1].u.memref.shm_offs = sizeof(p->key);
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+@@ -107,11 +99,7 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ p->blob_len = param[1].u.memref.size;
+ }
+
+-out:
+- if (reg_shm_out)
+- tee_shm_free(reg_shm_out);
+- if (reg_shm_in)
+- tee_shm_free(reg_shm_in);
++ tee_shm_free(reg_shm);
+
+ return ret;
+ }
+@@ -124,24 +112,16 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ int ret;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+- struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
++ struct tee_shm *reg_shm = NULL;
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+- reg_shm_in = tee_shm_register_kernel_buf(pvt_data.ctx, p->blob,
+- p->blob_len);
+- if (IS_ERR(reg_shm_in)) {
+- dev_err(pvt_data.dev, "blob shm register failed\n");
+- return PTR_ERR(reg_shm_in);
+- }
+-
+- reg_shm_out = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
+- sizeof(p->key));
+- if (IS_ERR(reg_shm_out)) {
+- dev_err(pvt_data.dev, "key shm register failed\n");
+- ret = PTR_ERR(reg_shm_out);
+- goto out;
++ reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
++ sizeof(p->key) + sizeof(p->blob));
++ if (IS_ERR(reg_shm)) {
++ dev_err(pvt_data.dev, "shm register failed\n");
++ return PTR_ERR(reg_shm);
+ }
+
+ inv_arg.func = TA_CMD_UNSEAL;
+@@ -149,11 +129,11 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+- param[0].u.memref.shm = reg_shm_in;
++ param[0].u.memref.shm = reg_shm;
+ param[0].u.memref.size = p->blob_len;
+- param[0].u.memref.shm_offs = 0;
++ param[0].u.memref.shm_offs = sizeof(p->key);
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+- param[1].u.memref.shm = reg_shm_out;
++ param[1].u.memref.shm = reg_shm;
+ param[1].u.memref.size = sizeof(p->key);
+ param[1].u.memref.shm_offs = 0;
+
+@@ -166,11 +146,7 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ p->key_len = param[1].u.memref.size;
+ }
+
+-out:
+- if (reg_shm_out)
+- tee_shm_free(reg_shm_out);
+- if (reg_shm_in)
+- tee_shm_free(reg_shm_in);
++ tee_shm_free(reg_shm);
+
+ return ret;
+ }
+diff --git a/sound/core/info.c b/sound/core/info.c
+index 0b2f04dcb5897..e2f302e55bbb2 100644
+--- a/sound/core/info.c
++++ b/sound/core/info.c
+@@ -56,7 +56,7 @@ struct snd_info_private_data {
+ };
+
+ static int snd_info_version_init(void);
+-static void snd_info_disconnect(struct snd_info_entry *entry);
++static void snd_info_clear_entries(struct snd_info_entry *entry);
+
+ /*
+
+@@ -569,11 +569,16 @@ void snd_info_card_disconnect(struct snd_card *card)
+ {
+ if (!card)
+ return;
+- mutex_lock(&info_mutex);
++
+ proc_remove(card->proc_root_link);
+- card->proc_root_link = NULL;
+ if (card->proc_root)
+- snd_info_disconnect(card->proc_root);
++ proc_remove(card->proc_root->p);
++
++ mutex_lock(&info_mutex);
++ if (card->proc_root)
++ snd_info_clear_entries(card->proc_root);
++ card->proc_root_link = NULL;
++ card->proc_root = NULL;
+ mutex_unlock(&info_mutex);
+ }
+
+@@ -745,15 +750,14 @@ struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card,
+ }
+ EXPORT_SYMBOL(snd_info_create_card_entry);
+
+-static void snd_info_disconnect(struct snd_info_entry *entry)
++static void snd_info_clear_entries(struct snd_info_entry *entry)
+ {
+ struct snd_info_entry *p;
+
+ if (!entry->p)
+ return;
+ list_for_each_entry(p, &entry->children, list)
+- snd_info_disconnect(p);
+- proc_remove(entry->p);
++ snd_info_clear_entries(p);
+ entry->p = NULL;
+ }
+
+@@ -770,8 +774,9 @@ void snd_info_free_entry(struct snd_info_entry * entry)
+ if (!entry)
+ return;
+ if (entry->p) {
++ proc_remove(entry->p);
+ mutex_lock(&info_mutex);
+- snd_info_disconnect(entry);
++ snd_info_clear_entries(entry);
+ mutex_unlock(&info_mutex);
+ }
+
+diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
+index 2633a4bb1d85d..214a0680524b0 100644
+--- a/sound/hda/hdac_stream.c
++++ b/sound/hda/hdac_stream.c
+@@ -354,8 +354,10 @@ struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
+ struct hdac_stream *res = NULL;
+
+ /* make a non-zero unique key for the substream */
+- int key = (substream->pcm->device << 16) | (substream->number << 2) |
+- (substream->stream + 1);
++ int key = (substream->number << 2) | (substream->stream + 1);
++
++ if (substream->pcm)
++ key |= (substream->pcm->device << 16);
+
+ spin_lock_irq(&bus->reg_lock);
+ list_for_each_entry(azx_dev, &bus->stream_list, list) {
+diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
+index 24a948baf1bc0..756fa0aa69bba 100644
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -336,6 +336,12 @@ static const struct config_entry config_table[] = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
++ {
++ .ident = "Google firmware",
++ .matches = {
++ DMI_MATCH(DMI_BIOS_VERSION, "Google"),
++ }
++ },
+ {}
+ }
+ },
+diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
+index c6031f7440996..3c157b006a5a2 100644
+--- a/sound/pci/hda/cs35l41_hda.c
++++ b/sound/pci/hda/cs35l41_hda.c
+@@ -570,7 +570,7 @@ static void cs35l41_hda_play_done(struct device *dev)
+
+ dev_dbg(dev, "Play (Complete)\n");
+
+- cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 1, NULL,
++ cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 1,
+ cs35l41->firmware_running);
+ if (cs35l41->firmware_running) {
+ regmap_multi_reg_write(reg, cs35l41_hda_unmute_dsp,
+@@ -589,7 +589,7 @@ static void cs35l41_hda_pause_start(struct device *dev)
+ dev_dbg(dev, "Pause (Start)\n");
+
+ regmap_multi_reg_write(reg, cs35l41_hda_mute, ARRAY_SIZE(cs35l41_hda_mute));
+- cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 0, NULL,
++ cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 0,
+ cs35l41->firmware_running);
+ }
+
+@@ -1668,8 +1668,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
+ ret = component_add(cs35l41->dev, &cs35l41_hda_comp_ops);
+ if (ret) {
+ dev_err(cs35l41->dev, "Register component failed: %d\n", ret);
+- pm_runtime_disable(cs35l41->dev);
+- goto err;
++ goto err_pm;
+ }
+
+ dev_info(cs35l41->dev, "Cirrus Logic CS35L41 (%x), Revision: %02X\n", regid, reg_revid);
+@@ -1677,6 +1676,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
+ return 0;
+
+ err_pm:
++ pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ pm_runtime_disable(cs35l41->dev);
+ pm_runtime_put_noidle(cs35l41->dev);
+
+@@ -1695,6 +1695,7 @@ void cs35l41_hda_remove(struct device *dev)
+ struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(cs35l41->dev);
++ pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ pm_runtime_disable(cs35l41->dev);
+
+ if (cs35l41->halo_initialized)
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index ca765ac4765f4..75148485b7553 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2218,6 +2218,8 @@ static const struct snd_pci_quirk power_save_denylist[] = {
+ SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
+ /* https://bugs.launchpad.net/bugs/1821663 */
+ SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
++ /* KONTRON SinglePC may cause a stall at runtime resume */
++ SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0),
+ {}
+ };
+ #endif /* CONFIG_PM */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 9677c09cf7a98..758abe9dffd6d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1986,6 +1986,7 @@ enum {
+ ALC887_FIXUP_ASUS_AUDIO,
+ ALC887_FIXUP_ASUS_HMIC,
+ ALCS1200A_FIXUP_MIC_VREF,
++ ALC888VD_FIXUP_MIC_100VREF,
+ };
+
+ static void alc889_fixup_coef(struct hda_codec *codec,
+@@ -2539,6 +2540,13 @@ static const struct hda_fixup alc882_fixups[] = {
+ {}
+ }
+ },
++ [ALC888VD_FIXUP_MIC_100VREF] = {
++ .type = HDA_FIXUP_PINCTLS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x18, PIN_VREF100 }, /* headset mic */
++ {}
++ }
++ },
+ };
+
+ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+@@ -2608,6 +2616,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
+
+ SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
++ SND_PCI_QUIRK(0x10ec, 0x12d8, "iBase Elo Touch", ALC888VD_FIXUP_MIC_100VREF),
+ SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+@@ -3255,6 +3264,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
+ case 0x10ec0230:
+ case 0x10ec0236:
+ case 0x10ec0256:
++ case 0x10ec0257:
+ case 0x19e58326:
+ alc_write_coef_idx(codec, 0x48, 0x0);
+ alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
+@@ -3284,6 +3294,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
+ case 0x10ec0230:
+ case 0x10ec0236:
+ case 0x10ec0256:
++ case 0x10ec0257:
+ case 0x19e58326:
+ alc_write_coef_idx(codec, 0x48, 0xd011);
+ alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
+@@ -6495,6 +6506,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
+ case 0x10ec0236:
+ case 0x10ec0255:
+ case 0x10ec0256:
++ case 0x10ec0257:
+ case 0x19e58326:
+ alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
+ alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
+@@ -7262,8 +7274,10 @@ enum {
+ ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+ ALC299_FIXUP_PREDATOR_SPK,
+ ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
++ ALC289_FIXUP_DELL_SPK1,
+ ALC289_FIXUP_DELL_SPK2,
+ ALC289_FIXUP_DUAL_SPK,
++ ALC289_FIXUP_RTK_AMP_DUAL_SPK,
+ ALC294_FIXUP_SPK2_TO_DAC1,
+ ALC294_FIXUP_ASUS_DUAL_SPK,
+ ALC285_FIXUP_THINKPAD_X1_GEN7,
+@@ -7363,6 +7377,8 @@ enum {
+ ALC287_FIXUP_THINKPAD_I2S_SPK,
+ ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
+ ALC2XX_FIXUP_HEADSET_MIC,
++ ALC289_FIXUP_DELL_CS35L41_SPI_2,
++ ALC294_FIXUP_CS35L41_I2C_2,
+ };
+
+ /* A special fixup for Lenovo C940 and Yoga Duet 7;
+@@ -8589,6 +8605,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+ },
++ [ALC289_FIXUP_DELL_SPK1] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x14, 0x90170140 },
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE
++ },
+ [ALC289_FIXUP_DELL_SPK2] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -8604,6 +8629,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC289_FIXUP_DELL_SPK2
+ },
++ [ALC289_FIXUP_RTK_AMP_DUAL_SPK] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc285_fixup_speaker2_to_dac1,
++ .chained = true,
++ .chain_id = ALC289_FIXUP_DELL_SPK1
++ },
+ [ALC294_FIXUP_SPK2_TO_DAC1] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_speaker2_to_dac1,
+@@ -9471,6 +9502,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_headset_mic,
+ },
++ [ALC289_FIXUP_DELL_CS35L41_SPI_2] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = cs35l41_fixup_spi_two,
++ .chained = true,
++ .chain_id = ALC289_FIXUP_DUAL_SPK
++ },
++ [ALC294_FIXUP_CS35L41_I2C_2] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = cs35l41_fixup_i2c_two,
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -9581,13 +9622,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0c1c, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1028, 0x0c1d, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
+- SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
+- SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
+- SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC245_FIXUP_CS35L41_SPI_2),
+- SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
+- SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
+- SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
+- SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cc0, "Dell Oasis 13", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
++ SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cc5, "Dell Oasis 14", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -9720,6 +9763,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
++ SND_PCI_QUIRK(0x103c, 0x890e, "HP 255 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x8919, "HP Pavilion Aero Laptop 13-be0xxx", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x896d, "HP ZBook Firefly 16 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -9755,6 +9799,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8b2f, "HP 255 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b43, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b44, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -9788,12 +9833,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
+ SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK),
+ SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+@@ -9832,12 +9881,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+ SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
++ SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
+ SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x1b93, "ASUS G614JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1043, 0x1c03, "ASUS UM3406HA", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++ SND_PCI_QUIRK(0x1043, 0x1c33, "ASUS UX5304MA", ALC245_FIXUP_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
+ SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+@@ -9848,6 +9902,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+@@ -10707,22 +10762,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x12, 0x90a60130},
+ {0x17, 0x90170110},
+ {0x21, 0x03211020}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+- {0x14, 0x90170110},
+- {0x21, 0x04211020}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+- {0x14, 0x90170110},
+- {0x21, 0x04211030}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+- ALC295_STANDARD_PINS,
+- {0x17, 0x21014020},
+- {0x18, 0x21a19030}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+- ALC295_STANDARD_PINS,
+- {0x17, 0x21014040},
+- {0x18, 0x21a19050}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+- ALC295_STANDARD_PINS),
+ SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC298_STANDARD_PINS,
+ {0x17, 0x90170110}),
+@@ -10766,6 +10805,9 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
+ SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+ {0x19, 0x40000000},
+ {0x1b, 0x40000000}),
++ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
++ {0x19, 0x40000000},
++ {0x1b, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x19, 0x40000000},
+ {0x1a, 0x40000000}),
+diff --git a/sound/soc/codecs/cs35l41-lib.c b/sound/soc/codecs/cs35l41-lib.c
+index 4ec306cd2f476..2ec5fdc875b13 100644
+--- a/sound/soc/codecs/cs35l41-lib.c
++++ b/sound/soc/codecs/cs35l41-lib.c
+@@ -1192,8 +1192,28 @@ bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type)
+ }
+ EXPORT_SYMBOL_GPL(cs35l41_safe_reset);
+
++/*
++ * Enabling the CS35L41_SHD_BOOST_ACTV and CS35L41_SHD_BOOST_PASS shared boosts
++ * does also require a call to cs35l41_mdsync_up(), but not before getting the
++ * PLL Lock signal.
++ *
++ * PLL Lock seems to be triggered soon after snd_pcm_start() is executed and
++ * SNDRV_PCM_TRIGGER_START command is processed, which happens (long) after the
++ * SND_SOC_DAPM_PRE_PMU event handler is invoked as part of snd_pcm_prepare().
++ *
++ * This event handler is where cs35l41_global_enable() is normally called from,
++ * but waiting for PLL Lock here will time out. Increasing the wait duration
++ * will not help, as the only consequence of it would be to add an unnecessary
++ * delay in the invocation of snd_pcm_start().
++ *
++ * Trying to move the wait into the SNDRV_PCM_TRIGGER_START callback is not a
++ * solution either, as the trigger is executed in an IRQ-off atomic context.
++ *
++ * The current approach is to invoke cs35l41_mdsync_up() right after receiving
++ * the PLL Lock interrupt, in the IRQ handler.
++ */
+ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type,
+- int enable, struct completion *pll_lock, bool firmware_running)
++ int enable, bool firmware_running)
+ {
+ int ret;
+ unsigned int gpio1_func, pad_control, pwr_ctrl1, pwr_ctrl3, int_status, pup_pdn_mask;
+@@ -1203,11 +1223,6 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4
+ {CS35L41_GPIO_PAD_CONTROL, 0},
+ {CS35L41_PWR_CTRL1, 0, 3000},
+ };
+- struct reg_sequence cs35l41_mdsync_up_seq[] = {
+- {CS35L41_PWR_CTRL3, 0},
+- {CS35L41_PWR_CTRL1, 0x00000000, 3000},
+- {CS35L41_PWR_CTRL1, 0x00000001, 3000},
+- };
+
+ pup_pdn_mask = enable ? CS35L41_PUP_DONE_MASK : CS35L41_PDN_DONE_MASK;
+
+@@ -1241,24 +1256,12 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4
+ cs35l41_mdsync_down_seq[0].def = pwr_ctrl3;
+ cs35l41_mdsync_down_seq[1].def = pad_control;
+ cs35l41_mdsync_down_seq[2].def = pwr_ctrl1;
++
+ ret = regmap_multi_reg_write(regmap, cs35l41_mdsync_down_seq,
+ ARRAY_SIZE(cs35l41_mdsync_down_seq));
+- if (!enable)
+- break;
+-
+- if (!pll_lock)
+- return -EINVAL;
+-
+- ret = wait_for_completion_timeout(pll_lock, msecs_to_jiffies(1000));
+- if (ret == 0) {
+- ret = -ETIMEDOUT;
+- } else {
+- regmap_read(regmap, CS35L41_PWR_CTRL3, &pwr_ctrl3);
+- pwr_ctrl3 |= CS35L41_SYNC_EN_MASK;
+- cs35l41_mdsync_up_seq[0].def = pwr_ctrl3;
+- ret = regmap_multi_reg_write(regmap, cs35l41_mdsync_up_seq,
+- ARRAY_SIZE(cs35l41_mdsync_up_seq));
+- }
++ /* Activation to be completed later via cs35l41_mdsync_up() */
++ if (ret || enable)
++ return ret;
+
+ ret = regmap_read_poll_timeout(regmap, CS35L41_IRQ1_STATUS1,
+ int_status, int_status & pup_pdn_mask,
+@@ -1266,7 +1269,7 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4
+ if (ret)
+ dev_err(dev, "Enable(%d) failed: %d\n", enable, ret);
+
+- // Clear PUP/PDN status
++ /* Clear PUP/PDN status */
+ regmap_write(regmap, CS35L41_IRQ1_STATUS1, pup_pdn_mask);
+ break;
+ case CS35L41_INT_BOOST:
+@@ -1348,6 +1351,17 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4
+ }
+ EXPORT_SYMBOL_GPL(cs35l41_global_enable);
+
++/*
++ * To be called after receiving the IRQ Lock interrupt, in order to complete
++ * any shared boost activation initiated by cs35l41_global_enable().
++ */
++int cs35l41_mdsync_up(struct regmap *regmap)
++{
++ return regmap_update_bits(regmap, CS35L41_PWR_CTRL3,
++ CS35L41_SYNC_EN_MASK, CS35L41_SYNC_EN_MASK);
++}
++EXPORT_SYMBOL_GPL(cs35l41_mdsync_up);
++
+ int cs35l41_gpio_config(struct regmap *regmap, struct cs35l41_hw_cfg *hw_cfg)
+ {
+ struct cs35l41_gpio_cfg *gpio1 = &hw_cfg->gpio1;
+diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
+index 722b69a6de26c..5456e6bfa242f 100644
+--- a/sound/soc/codecs/cs35l41.c
++++ b/sound/soc/codecs/cs35l41.c
+@@ -386,10 +386,18 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+ struct cs35l41_private *cs35l41 = data;
+ unsigned int status[4] = { 0, 0, 0, 0 };
+ unsigned int masks[4] = { 0, 0, 0, 0 };
+- int ret = IRQ_NONE;
+ unsigned int i;
++ int ret;
+
+- pm_runtime_get_sync(cs35l41->dev);
++ ret = pm_runtime_resume_and_get(cs35l41->dev);
++ if (ret < 0) {
++ dev_err(cs35l41->dev,
++ "pm_runtime_resume_and_get failed in %s: %d\n",
++ __func__, ret);
++ return IRQ_NONE;
++ }
++
++ ret = IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(status); i++) {
+ regmap_read(cs35l41->regmap,
+@@ -459,7 +467,19 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+
+ if (status[2] & CS35L41_PLL_LOCK) {
+ regmap_write(cs35l41->regmap, CS35L41_IRQ1_STATUS3, CS35L41_PLL_LOCK);
+- complete(&cs35l41->pll_lock);
++
++ if (cs35l41->hw_cfg.bst_type == CS35L41_SHD_BOOST_ACTV ||
++ cs35l41->hw_cfg.bst_type == CS35L41_SHD_BOOST_PASS) {
++ ret = cs35l41_mdsync_up(cs35l41->regmap);
++ if (ret)
++ dev_err(cs35l41->dev, "MDSYNC-up failed: %d\n", ret);
++ else
++ dev_dbg(cs35l41->dev, "MDSYNC-up done\n");
++
++ dev_dbg(cs35l41->dev, "PUP-done status: %d\n",
++ !!(status[0] & CS35L41_PUP_DONE_MASK));
++ }
++
+ ret = IRQ_HANDLED;
+ }
+
+@@ -500,11 +520,11 @@ static int cs35l41_main_amp_event(struct snd_soc_dapm_widget *w,
+ ARRAY_SIZE(cs35l41_pup_patch));
+
+ ret = cs35l41_global_enable(cs35l41->dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type,
+- 1, &cs35l41->pll_lock, cs35l41->dsp.cs_dsp.running);
++ 1, cs35l41->dsp.cs_dsp.running);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ ret = cs35l41_global_enable(cs35l41->dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type,
+- 0, &cs35l41->pll_lock, cs35l41->dsp.cs_dsp.running);
++ 0, cs35l41->dsp.cs_dsp.running);
+
+ regmap_multi_reg_write_bypassed(cs35l41->regmap,
+ cs35l41_pdn_patch,
+@@ -802,10 +822,6 @@ static const struct snd_pcm_hw_constraint_list cs35l41_constraints = {
+ static int cs35l41_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+ {
+- struct cs35l41_private *cs35l41 = snd_soc_component_get_drvdata(dai->component);
+-
+- reinit_completion(&cs35l41->pll_lock);
+-
+ if (substream->runtime)
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+@@ -1295,8 +1311,6 @@ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *
+ if (ret < 0)
+ goto err;
+
+- init_completion(&cs35l41->pll_lock);
+-
+ pm_runtime_set_autosuspend_delay(cs35l41->dev, 3000);
+ pm_runtime_use_autosuspend(cs35l41->dev);
+ pm_runtime_mark_last_busy(cs35l41->dev);
+@@ -1320,6 +1334,7 @@ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *
+ return 0;
+
+ err_pm:
++ pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ pm_runtime_disable(cs35l41->dev);
+ pm_runtime_put_noidle(cs35l41->dev);
+
+@@ -1336,6 +1351,7 @@ EXPORT_SYMBOL_GPL(cs35l41_probe);
+ void cs35l41_remove(struct cs35l41_private *cs35l41)
+ {
+ pm_runtime_get_sync(cs35l41->dev);
++ pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ pm_runtime_disable(cs35l41->dev);
+
+ regmap_write(cs35l41->regmap, CS35L41_IRQ1_MASK1, 0xFFFFFFFF);
+diff --git a/sound/soc/codecs/cs35l41.h b/sound/soc/codecs/cs35l41.h
+index 34d967d4372b2..c85cbc1dd333b 100644
+--- a/sound/soc/codecs/cs35l41.h
++++ b/sound/soc/codecs/cs35l41.h
+@@ -33,7 +33,6 @@ struct cs35l41_private {
+ int irq;
+ /* GPIO for /RST */
+ struct gpio_desc *reset_gpio;
+- struct completion pll_lock;
+ };
+
+ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *hw_cfg);
+diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c
+index f9059780b7a7b..32d4ab2cd6724 100644
+--- a/sound/soc/codecs/cs35l56.c
++++ b/sound/soc/codecs/cs35l56.c
+@@ -772,9 +772,20 @@ static int cs35l56_component_probe(struct snd_soc_component *component)
+ {
+ struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
+ struct dentry *debugfs_root = component->debugfs_root;
++ unsigned short vendor, device;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cs35l56_tx_input_texts) != ARRAY_SIZE(cs35l56_tx_input_values));
+
++ if (!cs35l56->dsp.system_name &&
++ (snd_soc_card_get_pci_ssid(component->card, &vendor, &device) == 0)) {
++ cs35l56->dsp.system_name = devm_kasprintf(cs35l56->base.dev,
++ GFP_KERNEL,
++ "%04x%04x",
++ vendor, device);
++ if (!cs35l56->dsp.system_name)
++ return -ENOMEM;
++ }
++
+ if (!wait_for_completion_timeout(&cs35l56->init_completion,
+ msecs_to_jiffies(5000))) {
+ dev_err(cs35l56->base.dev, "%s: init_completion timed out\n", __func__);
+diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
+index 09eef6042aad6..20da1eaa4f1c7 100644
+--- a/sound/soc/codecs/hdmi-codec.c
++++ b/sound/soc/codecs/hdmi-codec.c
+@@ -877,18 +877,13 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
+ void *data)
+ {
+ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+- int ret = -ENOTSUPP;
+
+ if (hcp->hcd.ops->hook_plugged_cb) {
+ hcp->jack = jack;
+- ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
+- hcp->hcd.data,
+- plugged_cb,
+- component->dev);
+- if (ret)
+- hcp->jack = NULL;
++ return 0;
+ }
+- return ret;
++
++ return -ENOTSUPP;
+ }
+
+ static int hdmi_dai_spdif_probe(struct snd_soc_dai *dai)
+@@ -982,6 +977,21 @@ static int hdmi_of_xlate_dai_id(struct snd_soc_component *component,
+ return ret;
+ }
+
++static int hdmi_probe(struct snd_soc_component *component)
++{
++ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
++ int ret = 0;
++
++ if (hcp->hcd.ops->hook_plugged_cb) {
++ ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
++ hcp->hcd.data,
++ plugged_cb,
++ component->dev);
++ }
++
++ return ret;
++}
++
+ static void hdmi_remove(struct snd_soc_component *component)
+ {
+ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+@@ -992,6 +1002,7 @@ static void hdmi_remove(struct snd_soc_component *component)
+ }
+
+ static const struct snd_soc_component_driver hdmi_driver = {
++ .probe = hdmi_probe,
+ .remove = hdmi_remove,
+ .dapm_widgets = hdmi_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
+diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
+index fff4a8b862a73..7e21cec3c2fb9 100644
+--- a/sound/soc/codecs/lpass-wsa-macro.c
++++ b/sound/soc/codecs/lpass-wsa-macro.c
+@@ -1685,6 +1685,9 @@ static int wsa_macro_spk_boost_event(struct snd_soc_dapm_widget *w,
+ boost_path_cfg1 = CDC_WSA_RX1_RX_PATH_CFG1;
+ reg = CDC_WSA_RX1_RX_PATH_CTL;
+ reg_mix = CDC_WSA_RX1_RX_PATH_MIX_CTL;
++ } else {
++ dev_warn(component->dev, "Incorrect widget name in the driver\n");
++ return -EINVAL;
+ }
+
+ switch (event) {
+diff --git a/sound/soc/codecs/rt712-sdca.c b/sound/soc/codecs/rt712-sdca.c
+index 7077ff6ba1f4b..6954fbe7ec5f3 100644
+--- a/sound/soc/codecs/rt712-sdca.c
++++ b/sound/soc/codecs/rt712-sdca.c
+@@ -963,13 +963,6 @@ static int rt712_sdca_probe(struct snd_soc_component *component)
+ rt712_sdca_parse_dt(rt712, &rt712->slave->dev);
+ rt712->component = component;
+
+- if (!rt712->first_hw_init)
+- return 0;
+-
+- ret = pm_runtime_resume(component->dev);
+- if (ret < 0 && ret != -EACCES)
+- return ret;
+-
+ /* add SPK route */
+ if (rt712->hw_id != RT712_DEV_ID_713) {
+ snd_soc_add_component_controls(component,
+@@ -980,6 +973,13 @@ static int rt712_sdca_probe(struct snd_soc_component *component)
+ rt712_sdca_spk_dapm_routes, ARRAY_SIZE(rt712_sdca_spk_dapm_routes));
+ }
+
++ if (!rt712->first_hw_init)
++ return 0;
++
++ ret = pm_runtime_resume(component->dev);
++ if (ret < 0 && ret != -EACCES)
++ return ret;
++
+ return 0;
+ }
+
+diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
+index 197fae23762f5..cb83c569e18d6 100644
+--- a/sound/soc/codecs/wsa883x.c
++++ b/sound/soc/codecs/wsa883x.c
+@@ -1203,9 +1203,6 @@ static int wsa883x_spkr_event(struct snd_soc_dapm_widget *w,
+ break;
+ }
+
+- snd_soc_component_write_field(component, WSA883X_DRE_CTL_1,
+- WSA883X_DRE_GAIN_EN_MASK,
+- WSA883X_DRE_GAIN_FROM_CSR);
+ if (wsa883x->port_enable[WSA883X_PORT_COMP])
+ snd_soc_component_write_field(component, WSA883X_DRE_CTL_0,
+ WSA883X_DRE_OFFSET_MASK,
+@@ -1218,9 +1215,6 @@ static int wsa883x_spkr_event(struct snd_soc_dapm_widget *w,
+ snd_soc_component_write_field(component, WSA883X_PDM_WD_CTL,
+ WSA883X_PDM_EN_MASK,
+ WSA883X_PDM_ENABLE);
+- snd_soc_component_write_field(component, WSA883X_PA_FSM_CTL,
+- WSA883X_GLOBAL_PA_EN_MASK,
+- WSA883X_GLOBAL_PA_ENABLE);
+
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+@@ -1346,6 +1340,7 @@ static const struct snd_soc_dai_ops wsa883x_dai_ops = {
+ .hw_free = wsa883x_hw_free,
+ .mute_stream = wsa883x_digital_mute,
+ .set_stream = wsa883x_set_sdw_stream,
++ .mute_unmute_on_trigger = true,
+ };
+
+ static struct snd_soc_dai_driver wsa883x_dais[] = {
+diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
+index bab7d34cf585b..5f181b89838ac 100644
+--- a/sound/soc/fsl/fsl-asoc-card.c
++++ b/sound/soc/fsl/fsl-asoc-card.c
+@@ -41,6 +41,7 @@
+
+ /**
+ * struct codec_priv - CODEC private data
++ * @mclk: Main clock of the CODEC
+ * @mclk_freq: Clock rate of MCLK
+ * @free_freq: Clock rate of MCLK for hw_free()
+ * @mclk_id: MCLK (or main clock) id for set_sysclk()
+diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c
+index ba62995c909ac..ec53bda46a467 100644
+--- a/sound/soc/fsl/fsl_easrc.c
++++ b/sound/soc/fsl/fsl_easrc.c
+@@ -1966,17 +1966,21 @@ static int fsl_easrc_probe(struct platform_device *pdev)
+ &fsl_easrc_dai, 1);
+ if (ret) {
+ dev_err(dev, "failed to register ASoC DAI\n");
+- return ret;
++ goto err_pm_disable;
+ }
+
+ ret = devm_snd_soc_register_component(dev, &fsl_asrc_component,
+ NULL, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register ASoC platform\n");
+- return ret;
++ goto err_pm_disable;
+ }
+
+ return 0;
++
++err_pm_disable:
++ pm_runtime_disable(&pdev->dev);
++ return ret;
+ }
+
+ static void fsl_easrc_remove(struct platform_device *pdev)
+diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
+index 9014978100207..3f7ccae3f6b1a 100644
+--- a/sound/soc/fsl/mpc5200_dma.c
++++ b/sound/soc/fsl/mpc5200_dma.c
+@@ -100,6 +100,9 @@ static irqreturn_t psc_dma_bcom_irq(int irq, void *_psc_dma_stream)
+
+ /**
+ * psc_dma_trigger: start and stop the DMA transfer.
++ * @component: triggered component
++ * @substream: triggered substream
++ * @cmd: triggered command
+ *
+ * This function is called by ALSA to start, stop, pause, and resume the DMA
+ * transfer of data.
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 842649501e303..24e966a2ac2be 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -1374,7 +1374,7 @@ static int create_sdw_dailink(struct snd_soc_card *card, int *link_index,
+ continue;
+
+ /* j reset after loop, adr_index only applies to first link */
+- for (; j < adr_link_next->num_adr; j++) {
++ for (; j < adr_link_next->num_adr && codec_dlc_index < codec_num; j++) {
+ const struct snd_soc_acpi_endpoint *endpoints;
+
+ endpoints = adr_link_next->adr_d[j].endpoints;
+@@ -1934,6 +1934,12 @@ static int mc_probe(struct platform_device *pdev)
+ for (i = 0; i < ARRAY_SIZE(codec_info_list); i++)
+ codec_info_list[i].amp_num = 0;
+
++ if (mach->mach_params.subsystem_id_set) {
++ snd_soc_card_set_pci_ssid(card,
++ mach->mach_params.subsystem_vendor,
++ mach->mach_params.subsystem_device);
++ }
++
+ ret = sof_card_dai_links_create(card);
+ if (ret < 0)
+ return ret;
+diff --git a/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c b/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
+index 623e3bebb8884..4360b9f5ff2c7 100644
+--- a/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
++++ b/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
+@@ -58,6 +58,11 @@ static const struct snd_soc_dapm_route rt712_sdca_map[] = {
+ { "rt712 MIC2", NULL, "Headset Mic" },
+ };
+
++static const struct snd_soc_dapm_route rt713_sdca_map[] = {
++ { "Headphone", NULL, "rt713 HP" },
++ { "rt713 MIC2", NULL, "Headset Mic" },
++};
++
+ static const struct snd_kcontrol_new rt_sdca_jack_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+@@ -109,6 +114,9 @@ static int rt_sdca_jack_rtd_init(struct snd_soc_pcm_runtime *rtd)
+ } else if (strstr(component->name_prefix, "rt712")) {
+ ret = snd_soc_dapm_add_routes(&card->dapm, rt712_sdca_map,
+ ARRAY_SIZE(rt712_sdca_map));
++ } else if (strstr(component->name_prefix, "rt713")) {
++ ret = snd_soc_dapm_add_routes(&card->dapm, rt713_sdca_map,
++ ARRAY_SIZE(rt713_sdca_map));
+ } else {
+ dev_err(card->dev, "%s is not supported\n", component->name_prefix);
+ return -EINVAL;
+diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+index cdcbf04b8832f..5e2ec60e2954b 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+@@ -75,6 +75,39 @@ static struct snd_soc_acpi_mach *cht_ess8316_quirk(void *arg)
+ return arg;
+ }
+
++/*
++ * The Lenovo Yoga Tab 3 Pro YT3-X90, which ships with an Android factory OS,
++ * has a buggy DSDT with the codec not being listed at all.
++ */
++static const struct dmi_system_id lenovo_yoga_tab3_x90[] = {
++ {
++ /* Lenovo Yoga Tab 3 Pro YT3-X90, codec missing from DSDT */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
++ },
++ },
++ { }
++};
++
++static struct snd_soc_acpi_mach cht_lenovo_yoga_tab3_x90_mach = {
++ .id = "10WM5102",
++ .drv_name = "bytcr_wm5102",
++ .fw_filename = "intel/fw_sst_22a8.bin",
++ .board = "bytcr_wm5102",
++ .sof_tplg_filename = "sof-cht-wm5102.tplg",
++};
++
++static struct snd_soc_acpi_mach *lenovo_yt3_x90_quirk(void *arg)
++{
++ if (dmi_check_system(lenovo_yoga_tab3_x90))
++ return &cht_lenovo_yoga_tab3_x90_mach;
++
++ /* Skip wildcard match snd_soc_acpi_intel_cherrytrail_machines[] entry */
++ return NULL;
++}
++
+ static const struct snd_soc_acpi_codecs rt5640_comp_ids = {
+ .num_codecs = 2,
+ .codecs = { "10EC5640", "10EC3276" },
+@@ -175,6 +208,16 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
+ .drv_name = "sof_pcm512x",
+ .sof_tplg_filename = "sof-cht-src-50khz-pcm512x.tplg",
+ },
++ /*
++ * Special case for the Lenovo Yoga Tab 3 Pro YT3-X90 where the DSDT
++ * misses the codec. Match on the SST id instead, lenovo_yt3_x90_quirk()
++ * will return a YT3 specific mach or NULL when called on other hw,
++ * skipping this entry.
++ */
++ {
++ .id = "808622A8",
++ .machine_quirk = lenovo_yt3_x90_quirk,
++ },
+
+ #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
+ /*
+diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
+index 57ea815d3f041..b776c58dcf47a 100644
+--- a/sound/soc/intel/skylake/skl-sst-utils.c
++++ b/sound/soc/intel/skylake/skl-sst-utils.c
+@@ -299,6 +299,7 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
+ module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
+ if (!module->instance_id) {
+ ret = -ENOMEM;
++ kfree(module);
+ goto free_uuid_list;
+ }
+
+diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+index 9c11016f032c2..9777ba89e956c 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
++++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+@@ -1179,7 +1179,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
+ playback_codec = of_get_child_by_name(pdev->dev.of_node, "playback-codecs");
+ if (!playback_codec) {
+ ret = -EINVAL;
+- dev_err_probe(&pdev->dev, ret, "Property 'speaker-codecs' missing or invalid\n");
++ dev_err_probe(&pdev->dev, ret, "Property 'playback-codecs' missing or invalid\n");
+ goto err_playback_codec;
+ }
+
+@@ -1193,7 +1193,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
+ for_each_card_prelinks(card, i, dai_link) {
+ ret = mt8186_mt6366_card_set_be_link(card, dai_link, playback_codec, "I2S3");
+ if (ret) {
+- dev_err_probe(&pdev->dev, ret, "%s set speaker_codec fail\n",
++ dev_err_probe(&pdev->dev, ret, "%s set playback_codec fail\n",
+ dai_link->name);
+ goto err_probe;
+ }
+diff --git a/sound/soc/mediatek/mt8188/mt8188-mt6359.c b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
+index 9017f48b6272b..f7e22abb75846 100644
+--- a/sound/soc/mediatek/mt8188/mt8188-mt6359.c
++++ b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
+@@ -246,6 +246,11 @@ static const struct snd_soc_dapm_widget mt8188_mt6359_widgets[] = {
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SINK("HDMI"),
+ SND_SOC_DAPM_SINK("DP"),
++
++ /* dynamic pinctrl */
++ SND_SOC_DAPM_PINCTRL("ETDM_SPK_PIN", "aud_etdm_spk_on", "aud_etdm_spk_off"),
++ SND_SOC_DAPM_PINCTRL("ETDM_HP_PIN", "aud_etdm_hp_on", "aud_etdm_hp_off"),
++ SND_SOC_DAPM_PINCTRL("MTKAIF_PIN", "aud_mtkaif_on", "aud_mtkaif_off"),
+ };
+
+ static const struct snd_kcontrol_new mt8188_mt6359_controls[] = {
+@@ -267,6 +272,7 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
+ snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+ struct snd_soc_component *cmpnt_codec =
+ asoc_rtd_to_codec(rtd, 0)->component;
++ struct snd_soc_dapm_widget *pin_w = NULL, *w;
+ struct mtk_base_afe *afe;
+ struct mt8188_afe_private *afe_priv;
+ struct mtkaif_param *param;
+@@ -306,6 +312,18 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
+ return 0;
+ }
+
++ for_each_card_widgets(rtd->card, w) {
++ if (!strcmp(w->name, "MTKAIF_PIN")) {
++ pin_w = w;
++ break;
++ }
++ }
++
++ if (pin_w)
++ dapm_pinctrl_event(pin_w, NULL, SND_SOC_DAPM_PRE_PMU);
++ else
++ dev_dbg(afe->dev, "%s(), no pinmux widget, please check if default on\n", __func__);
++
+ pm_runtime_get_sync(afe->dev);
+ mt6359_mtkaif_calibration_enable(cmpnt_codec);
+
+@@ -403,6 +421,9 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
+ for (i = 0; i < MT8188_MTKAIF_MISO_NUM; i++)
+ param->mtkaif_phase_cycle[i] = mtkaif_phase_cycle[i];
+
++ if (pin_w)
++ dapm_pinctrl_event(pin_w, NULL, SND_SOC_DAPM_POST_PMD);
++
+ dev_dbg(afe->dev, "%s(), end, calibration ok %d\n",
+ __func__, param->mtkaif_calibration_ok);
+
+diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
+index 3f33f0630ad8a..9a828e55c4f9e 100644
+--- a/sound/soc/soc-dai.c
++++ b/sound/soc/soc-dai.c
+@@ -658,6 +658,10 @@ int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream,
+ ret = soc_dai_trigger(dai, substream, cmd);
+ if (ret < 0)
+ break;
++
++ if (dai->driver->ops && dai->driver->ops->mute_unmute_on_trigger)
++ snd_soc_dai_digital_mute(dai, 0, substream->stream);
++
+ soc_dai_mark_push(dai, substream, trigger);
+ }
+ break;
+@@ -668,6 +672,9 @@ int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream,
+ if (rollback && !soc_dai_mark_match(dai, substream, trigger))
+ continue;
+
++ if (dai->driver->ops && dai->driver->ops->mute_unmute_on_trigger)
++ snd_soc_dai_digital_mute(dai, 1, substream->stream);
++
+ r = soc_dai_trigger(dai, substream, cmd);
+ if (r < 0)
+ ret = r; /* use last ret */
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 312e555798315..85e3bbf7e5f0e 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -3670,7 +3670,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
+ dapm_pinctrl_event(w, NULL, SND_SOC_DAPM_POST_PMD);
+ break;
+ case snd_soc_dapm_clock_supply:
+- w->clk = devm_clk_get(dapm->dev, w->name);
++ w->clk = devm_clk_get(dapm->dev, widget->name);
+ if (IS_ERR(w->clk)) {
+ ret = PTR_ERR(w->clk);
+ goto request_failed;
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 54704250c0a2c..511446a30c057 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -698,14 +698,12 @@ static int soc_pcm_clean(struct snd_soc_pcm_runtime *rtd,
+
+ if (!rollback) {
+ snd_soc_runtime_deactivate(rtd, substream->stream);
+- /* clear the corresponding DAIs parameters when going to be inactive */
+- for_each_rtd_dais(rtd, i, dai) {
+- if (snd_soc_dai_active(dai) == 0)
+- soc_pcm_set_dai_params(dai, NULL);
+
+- if (snd_soc_dai_stream_active(dai, substream->stream) == 0)
+- snd_soc_dai_digital_mute(dai, 1, substream->stream);
+- }
++ /* Make sure DAI parameters cleared if the DAI becomes inactive */
++ for_each_rtd_dais(rtd, i, dai)
++ if (snd_soc_dai_active(dai) == 0 &&
++ (dai->rate || dai->channels || dai->sample_bits))
++ soc_pcm_set_dai_params(dai, NULL);
+ }
+
+ for_each_rtd_dais(rtd, i, dai)
+@@ -898,8 +896,10 @@ static int __soc_pcm_prepare(struct snd_soc_pcm_runtime *rtd,
+ snd_soc_dapm_stream_event(rtd, substream->stream,
+ SND_SOC_DAPM_STREAM_START);
+
+- for_each_rtd_dais(rtd, i, dai)
+- snd_soc_dai_digital_mute(dai, 0, substream->stream);
++ for_each_rtd_dais(rtd, i, dai) {
++ if (dai->driver->ops && !dai->driver->ops->mute_unmute_on_trigger)
++ snd_soc_dai_digital_mute(dai, 0, substream->stream);
++ }
+
+ out:
+ return soc_pcm_ret(rtd, ret);
+@@ -936,6 +936,17 @@ static int soc_pcm_hw_clean(struct snd_soc_pcm_runtime *rtd,
+
+ snd_soc_dpcm_mutex_assert_held(rtd);
+
++ /* clear the corresponding DAIs parameters when going to be inactive */
++ for_each_rtd_dais(rtd, i, dai) {
++ if (snd_soc_dai_active(dai) == 1)
++ soc_pcm_set_dai_params(dai, NULL);
++
++ if (snd_soc_dai_stream_active(dai, substream->stream) == 1) {
++ if (dai->driver->ops && !dai->driver->ops->mute_unmute_on_trigger)
++ snd_soc_dai_digital_mute(dai, 1, substream->stream);
++ }
++ }
++
+ /* run the stream event */
+ snd_soc_dapm_stream_stop(rtd, substream->stream);
+
+diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
+index 2d1616b81485c..0938b259f7034 100644
+--- a/sound/soc/sof/core.c
++++ b/sound/soc/sof/core.c
+@@ -459,9 +459,10 @@ int snd_sof_device_remove(struct device *dev)
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ int ret;
++ bool aborted = false;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+- cancel_work_sync(&sdev->probe_work);
++ aborted = cancel_work_sync(&sdev->probe_work);
+
+ /*
+ * Unregister any registered client device first before IPC and debugfs
+@@ -487,6 +488,9 @@ int snd_sof_device_remove(struct device *dev)
+ snd_sof_free_debug(sdev);
+ snd_sof_remove(sdev);
+ sof_ops_free(sdev);
++ } else if (aborted) {
++ /* probe_work never ran */
++ sof_ops_free(sdev);
+ }
+
+ /* release firmware */
+diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
+index 7cb63e6b24dc9..c9c1d2ec7af25 100644
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -895,7 +895,8 @@ static int sof_ipc4_widget_setup_comp_process(struct snd_sof_widget *swidget)
+ if (process->init_config == SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG_WITH_EXT) {
+ struct sof_ipc4_base_module_cfg_ext *base_cfg_ext;
+ u32 ext_size = struct_size(base_cfg_ext, pin_formats,
+- swidget->num_input_pins + swidget->num_output_pins);
++ size_add(swidget->num_input_pins,
++ swidget->num_output_pins));
+
+ base_cfg_ext = kzalloc(ext_size, GFP_KERNEL);
+ if (!base_cfg_ext) {
+diff --git a/sound/soc/sof/ipc4.c b/sound/soc/sof/ipc4.c
+index ab6eddd91bb77..1b09496733fb8 100644
+--- a/sound/soc/sof/ipc4.c
++++ b/sound/soc/sof/ipc4.c
+@@ -614,6 +614,9 @@ static void sof_ipc4_rx_msg(struct snd_sof_dev *sdev)
+ case SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS:
+ sof_ipc4_mtrace_update_pos(sdev, SOF_IPC4_LOG_CORE_GET(ipc4_msg->primary));
+ break;
++ case SOF_IPC4_NOTIFY_EXCEPTION_CAUGHT:
++ snd_sof_dsp_panic(sdev, 0, true);
++ break;
+ default:
+ dev_dbg(sdev->dev, "Unhandled DSP message: %#x|%#x\n",
+ ipc4_msg->primary, ipc4_msg->extension);
+diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
+index e5405f854a910..563fe6f7789f7 100644
+--- a/sound/soc/sof/sof-audio.c
++++ b/sound/soc/sof/sof-audio.c
+@@ -1032,6 +1032,13 @@ int sof_machine_check(struct snd_sof_dev *sdev)
+ mach = snd_sof_machine_select(sdev);
+ if (mach) {
+ sof_pdata->machine = mach;
++
++ if (sof_pdata->subsystem_id_set) {
++ mach->mach_params.subsystem_vendor = sof_pdata->subsystem_vendor;
++ mach->mach_params.subsystem_device = sof_pdata->subsystem_device;
++ mach->mach_params.subsystem_id_set = true;
++ }
++
+ snd_sof_set_mach_params(mach, sdev);
+ return 0;
+ }
+diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
+index f5ece43d0ec24..69a2352f2e1a0 100644
+--- a/sound/soc/sof/sof-pci-dev.c
++++ b/sound/soc/sof/sof-pci-dev.c
+@@ -145,6 +145,13 @@ static const struct dmi_system_id community_key_platforms[] = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google"),
+ }
+ },
++ {
++ .ident = "Google firmware",
++ .callback = chromebook_use_community_key,
++ .matches = {
++ DMI_MATCH(DMI_BIOS_VERSION, "Google"),
++ }
++ },
+ {},
+ };
+
+@@ -214,6 +221,14 @@ int sof_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ return ret;
+
+ sof_pdata->name = pci_name(pci);
++
++ /* PCI defines a vendor ID of 0xFFFF as invalid. */
++ if (pci->subsystem_vendor != 0xFFFF) {
++ sof_pdata->subsystem_vendor = pci->subsystem_vendor;
++ sof_pdata->subsystem_device = pci->subsystem_device;
++ sof_pdata->subsystem_id_set = true;
++ }
++
+ sof_pdata->desc = desc;
+ sof_pdata->dev = dev;
+
+diff --git a/sound/soc/ti/ams-delta.c b/sound/soc/ti/ams-delta.c
+index 666057d50ea0d..dd3f59bb72faf 100644
+--- a/sound/soc/ti/ams-delta.c
++++ b/sound/soc/ti/ams-delta.c
+@@ -303,7 +303,7 @@ static int cx81801_open(struct tty_struct *tty)
+ static void cx81801_close(struct tty_struct *tty)
+ {
+ struct snd_soc_component *component = tty->disc_data;
+- struct snd_soc_dapm_context *dapm = &component->card->dapm;
++ struct snd_soc_dapm_context *dapm;
+
+ del_timer_sync(&cx81801_timer);
+
+@@ -315,6 +315,8 @@ static void cx81801_close(struct tty_struct *tty)
+
+ v253_ops.close(tty);
+
++ dapm = &component->card->dapm;
++
+ /* Revert back to default audio input/output constellation */
+ snd_soc_dapm_mutex_lock(dapm);
+
+diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c
+index fdabed5133e83..b399d86f22777 100644
+--- a/sound/soc/ti/omap-mcbsp.c
++++ b/sound/soc/ti/omap-mcbsp.c
+@@ -74,14 +74,16 @@ static int omap2_mcbsp_set_clks_src(struct omap_mcbsp *mcbsp, u8 fck_src_id)
+ return 0;
+ }
+
+- pm_runtime_put_sync(mcbsp->dev);
++ if (mcbsp->active)
++ pm_runtime_put_sync(mcbsp->dev);
+
+ r = clk_set_parent(mcbsp->fclk, fck_src);
+ if (r)
+ dev_err(mcbsp->dev, "CLKS: could not clk_set_parent() to %s\n",
+ src);
+
+- pm_runtime_get_sync(mcbsp->dev);
++ if (mcbsp->active)
++ pm_runtime_get_sync(mcbsp->dev);
+
+ clk_put(fck_src);
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 4e64842245e19..ab2b938502ebe 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2220,6 +2220,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x2ab6, /* T+A devices */
+ QUIRK_FLAG_DSD_RAW),
++ VENDOR_FLG(0x2afd, /* McIntosh Laboratory, Inc. */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x2d87, /* Cayin device */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x3336, /* HEM devices */
+diff --git a/tools/arch/parisc/include/uapi/asm/errno.h b/tools/arch/parisc/include/uapi/asm/errno.h
+index 87245c584784e..8d94739d75c67 100644
+--- a/tools/arch/parisc/include/uapi/asm/errno.h
++++ b/tools/arch/parisc/include/uapi/asm/errno.h
+@@ -75,7 +75,6 @@
+
+ /* We now return you to your regularly scheduled HPUX. */
+
+-#define ENOSYM 215 /* symbol does not exist in executable */
+ #define ENOTSOCK 216 /* Socket operation on non-socket */
+ #define EDESTADDRREQ 217 /* Destination address required */
+ #define EMSGSIZE 218 /* Message too long */
+@@ -101,7 +100,6 @@
+ #define ETIMEDOUT 238 /* Connection timed out */
+ #define ECONNREFUSED 239 /* Connection refused */
+ #define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
+-#define EREMOTERELEASE 240 /* Remote peer released connection */
+ #define EHOSTDOWN 241 /* Host is down */
+ #define EHOSTUNREACH 242 /* No route to host */
+
+diff --git a/tools/crypto/ccp/dbc.c b/tools/crypto/ccp/dbc.c
+index 37e813175642f..a807df0f05974 100644
+--- a/tools/crypto/ccp/dbc.c
++++ b/tools/crypto/ccp/dbc.c
+@@ -8,6 +8,7 @@
+ */
+
+ #include <assert.h>
++#include <errno.h>
+ #include <string.h>
+ #include <sys/ioctl.h>
+
+@@ -22,16 +23,14 @@ int get_nonce(int fd, void *nonce_out, void *signature)
+ struct dbc_user_nonce tmp = {
+ .auth_needed = !!signature,
+ };
+- int ret;
+
+ assert(nonce_out);
+
+ if (signature)
+ memcpy(tmp.signature, signature, sizeof(tmp.signature));
+
+- ret = ioctl(fd, DBCIOCNONCE, &tmp);
+- if (ret)
+- return ret;
++ if (ioctl(fd, DBCIOCNONCE, &tmp))
++ return errno;
+ memcpy(nonce_out, tmp.nonce, sizeof(tmp.nonce));
+
+ return 0;
+@@ -47,7 +46,9 @@ int set_uid(int fd, __u8 *uid, __u8 *signature)
+ memcpy(tmp.uid, uid, sizeof(tmp.uid));
+ memcpy(tmp.signature, signature, sizeof(tmp.signature));
+
+- return ioctl(fd, DBCIOCUID, &tmp);
++ if (ioctl(fd, DBCIOCUID, &tmp))
++ return errno;
++ return 0;
+ }
+
+ int process_param(int fd, int msg_index, __u8 *signature, int *data)
+@@ -63,10 +64,10 @@ int process_param(int fd, int msg_index, __u8 *signature, int *data)
+
+ memcpy(tmp.signature, signature, sizeof(tmp.signature));
+
+- ret = ioctl(fd, DBCIOCPARAM, &tmp);
+- if (ret)
+- return ret;
++ if (ioctl(fd, DBCIOCPARAM, &tmp))
++ return errno;
+
+ *data = tmp.param;
++ memcpy(signature, tmp.signature, sizeof(tmp.signature));
+ return 0;
+ }
+diff --git a/tools/crypto/ccp/dbc.py b/tools/crypto/ccp/dbc.py
+index 3f6a825ffc9e4..2b91415b19407 100644
+--- a/tools/crypto/ccp/dbc.py
++++ b/tools/crypto/ccp/dbc.py
+@@ -27,8 +27,7 @@ lib = ctypes.CDLL("./dbc_library.so", mode=ctypes.RTLD_GLOBAL)
+
+
+ def handle_error(code):
+- val = code * -1
+- raise OSError(val, os.strerror(val))
++ raise OSError(code, os.strerror(code))
+
+
+ def get_nonce(device, signature):
+@@ -58,7 +57,8 @@ def process_param(device, message, signature, data=None):
+ if type(message) != tuple:
+ raise ValueError("Expected message tuple")
+ arg = ctypes.c_int(data if data else 0)
+- ret = lib.process_param(device.fileno(), message[0], signature, ctypes.pointer(arg))
++ sig = ctypes.create_string_buffer(signature, len(signature))
++ ret = lib.process_param(device.fileno(), message[0], ctypes.pointer(sig), ctypes.pointer(arg))
+ if ret:
+ handle_error(ret)
+- return arg, signature
++ return arg.value, sig.value
+diff --git a/tools/crypto/ccp/test_dbc.py b/tools/crypto/ccp/test_dbc.py
+index 998bb3e3cd040..79de3638a01ab 100755
+--- a/tools/crypto/ccp/test_dbc.py
++++ b/tools/crypto/ccp/test_dbc.py
+@@ -4,6 +4,12 @@ import unittest
+ import os
+ import time
+ import glob
++import fcntl
++try:
++ import ioctl_opt as ioctl
++except ImportError:
++ ioctl = None
++ pass
+ from dbc import *
+
+ # Artificial delay between set commands
+@@ -27,8 +33,8 @@ def system_is_secured() -> bool:
+ class DynamicBoostControlTest(unittest.TestCase):
+ def __init__(self, data) -> None:
+ self.d = None
+- self.signature = "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
+- self.uid = "1111111111111111"
++ self.signature = b"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
++ self.uid = b"1111111111111111"
+ super().__init__(data)
+
+ def setUp(self) -> None:
+@@ -64,13 +70,16 @@ class TestInvalidIoctls(DynamicBoostControlTest):
+ def setUp(self) -> None:
+ if not os.path.exists(DEVICE_NODE):
+ self.skipTest("system is unsupported")
++ if not ioctl:
++ self.skipTest("unable to test IOCTLs without ioctl_opt")
++
+ return super().setUp()
+
+ def test_invalid_nonce_ioctl(self) -> None:
+ """tries to call get_nonce ioctl with invalid data structures"""
+
+ # 0x1 (get nonce), and invalid data
+- INVALID1 = IOWR(ord("D"), 0x01, invalid_param)
++ INVALID1 = ioctl.IOWR(ord("D"), 0x01, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID1, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+@@ -79,7 +88,7 @@ class TestInvalidIoctls(DynamicBoostControlTest):
+ """tries to call set_uid ioctl with invalid data structures"""
+
+ # 0x2 (set uid), and invalid data
+- INVALID2 = IOW(ord("D"), 0x02, invalid_param)
++ INVALID2 = ioctl.IOW(ord("D"), 0x02, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID2, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+@@ -88,7 +97,7 @@ class TestInvalidIoctls(DynamicBoostControlTest):
+ """tries to call set_uid ioctl with invalid data structures"""
+
+ # 0x2 as RW (set uid), and invalid data
+- INVALID3 = IOWR(ord("D"), 0x02, invalid_param)
++ INVALID3 = ioctl.IOWR(ord("D"), 0x02, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID3, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+@@ -96,7 +105,7 @@ class TestInvalidIoctls(DynamicBoostControlTest):
+ def test_invalid_param_ioctl(self) -> None:
+ """tries to call param ioctl with invalid data structures"""
+ # 0x3 (param), and invalid data
+- INVALID4 = IOWR(ord("D"), 0x03, invalid_param)
++ INVALID4 = ioctl.IOWR(ord("D"), 0x03, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID4, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+@@ -104,7 +113,7 @@ class TestInvalidIoctls(DynamicBoostControlTest):
+ def test_invalid_call_ioctl(self) -> None:
+ """tries to call the DBC ioctl with invalid data structures"""
+ # 0x4, and invalid data
+- INVALID5 = IOWR(ord("D"), 0x04, invalid_param)
++ INVALID5 = ioctl.IOWR(ord("D"), 0x04, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID5, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+@@ -183,12 +192,12 @@ class TestUnFusedSystem(DynamicBoostControlTest):
+ # SOC power
+ soc_power_max = process_param(self.d, PARAM_GET_SOC_PWR_MAX, self.signature)
+ soc_power_min = process_param(self.d, PARAM_GET_SOC_PWR_MIN, self.signature)
+- self.assertGreater(soc_power_max.parameter, soc_power_min.parameter)
++ self.assertGreater(soc_power_max[0], soc_power_min[0])
+
+ # fmax
+ fmax_max = process_param(self.d, PARAM_GET_FMAX_MAX, self.signature)
+ fmax_min = process_param(self.d, PARAM_GET_FMAX_MIN, self.signature)
+- self.assertGreater(fmax_max.parameter, fmax_min.parameter)
++ self.assertGreater(fmax_max[0], fmax_min[0])
+
+ # cap values
+ keys = {
+@@ -199,7 +208,7 @@ class TestUnFusedSystem(DynamicBoostControlTest):
+ }
+ for k in keys:
+ result = process_param(self.d, keys[k], self.signature)
+- self.assertGreater(result.parameter, 0)
++ self.assertGreater(result[0], 0)
+
+ def test_get_invalid_param(self) -> None:
+ """fetch an invalid parameter"""
+@@ -217,17 +226,17 @@ class TestUnFusedSystem(DynamicBoostControlTest):
+ original = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)
+
+ # set the fmax
+- target = original.parameter - 100
++ target = original[0] - 100
+ process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, target)
+ time.sleep(SET_DELAY)
+ new = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)
+- self.assertEqual(new.parameter, target)
++ self.assertEqual(new[0], target)
+
+ # revert back to current
+- process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, original.parameter)
++ process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, original[0])
+ time.sleep(SET_DELAY)
+ cur = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)
+- self.assertEqual(cur.parameter, original.parameter)
++ self.assertEqual(cur[0], original[0])
+
+ def test_set_power_cap(self) -> None:
+ """get/set power cap limit"""
+@@ -235,17 +244,17 @@ class TestUnFusedSystem(DynamicBoostControlTest):
+ original = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)
+
+ # set the fmax
+- target = original.parameter - 10
++ target = original[0] - 10
+ process_param(self.d, PARAM_SET_PWR_CAP, self.signature, target)
+ time.sleep(SET_DELAY)
+ new = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)
+- self.assertEqual(new.parameter, target)
++ self.assertEqual(new[0], target)
+
+ # revert back to current
+- process_param(self.d, PARAM_SET_PWR_CAP, self.signature, original.parameter)
++ process_param(self.d, PARAM_SET_PWR_CAP, self.signature, original[0])
+ time.sleep(SET_DELAY)
+ cur = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)
+- self.assertEqual(cur.parameter, original.parameter)
++ self.assertEqual(cur[0], original[0])
+
+ def test_set_3d_graphics_mode(self) -> None:
+ """set/get 3d graphics mode"""
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index 264eeb9c46a9f..318e2dad27e04 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -1421,7 +1421,7 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
+ if (error)
+ goto setval_error;
+
+- if (new_val->addr_family == ADDR_FAMILY_IPV6) {
++ if (new_val->addr_family & ADDR_FAMILY_IPV6) {
+ error = fprintf(nmfile, "\n[ipv6]\n");
+ if (error < 0)
+ goto setval_error;
+@@ -1455,14 +1455,18 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
+ if (error < 0)
+ goto setval_error;
+
+- error = fprintf(nmfile, "gateway=%s\n", (char *)new_val->gate_way);
+- if (error < 0)
+- goto setval_error;
+-
+- error = fprintf(nmfile, "dns=%s\n", (char *)new_val->dns_addr);
+- if (error < 0)
+- goto setval_error;
++ /* we do not want ipv4 addresses in ipv6 section and vice versa */
++ if (is_ipv6 != is_ipv4((char *)new_val->gate_way)) {
++ error = fprintf(nmfile, "gateway=%s\n", (char *)new_val->gate_way);
++ if (error < 0)
++ goto setval_error;
++ }
+
++ if (is_ipv6 != is_ipv4((char *)new_val->dns_addr)) {
++ error = fprintf(nmfile, "dns=%s\n", (char *)new_val->dns_addr);
++ if (error < 0)
++ goto setval_error;
++ }
+ fclose(nmfile);
+ fclose(ifcfg_file);
+
+diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh
+index ae5a7a8249a20..440a91b35823b 100755
+--- a/tools/hv/hv_set_ifconfig.sh
++++ b/tools/hv/hv_set_ifconfig.sh
+@@ -53,7 +53,7 @@
+ # or "manual" if no boot-time protocol should be used)
+ #
+ # address1=ipaddr1/plen
+-# address=ipaddr2/plen
++# address2=ipaddr2/plen
+ #
+ # gateway=gateway1;gateway2
+ #
+@@ -61,7 +61,7 @@
+ #
+ # [ipv6]
+ # address1=ipaddr1/plen
+-# address2=ipaddr1/plen
++# address2=ipaddr2/plen
+ #
+ # gateway=gateway1;gateway2
+ #
+diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
+index 44bbf80f0cfdd..0d0a7a19d6f95 100644
+--- a/tools/iio/iio_generic_buffer.c
++++ b/tools/iio/iio_generic_buffer.c
+@@ -54,9 +54,12 @@ enum autochan {
+ static unsigned int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
+ {
+ unsigned int bytes = 0;
+- int i = 0;
++ int i = 0, max = 0;
++ unsigned int misalignment;
+
+ while (i < num_channels) {
++ if (channels[i].bytes > max)
++ max = channels[i].bytes;
+ if (bytes % channels[i].bytes == 0)
+ channels[i].location = bytes;
+ else
+@@ -66,6 +69,14 @@ static unsigned int size_from_channelarray(struct iio_channel_info *channels, in
+ bytes = channels[i].location + channels[i].bytes;
+ i++;
+ }
++ /*
++ * We want the data in the next sample to also be properly aligned, so
++ * we'll add padding at the end if needed. Adding padding only
++ * works for channel data whose size is 2^n bytes.
++ */
++ misalignment = bytes % max;
++ if (misalignment)
++ bytes += max - misalignment;
+
+ return bytes;
+ }
+diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
+index 3c36aeade991e..370ed14b1ae09 100644
+--- a/tools/include/uapi/linux/prctl.h
++++ b/tools/include/uapi/linux/prctl.h
+@@ -283,7 +283,8 @@ struct prctl_mm_map {
+
+ /* Memory deny write / execute */
+ #define PR_SET_MDWE 65
+-# define PR_MDWE_REFUSE_EXEC_GAIN 1
++# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
++# define PR_MDWE_NO_INHERIT (1UL << 1)
+
+ #define PR_GET_MDWE 66
+
+diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
+index 3803479dbe106..1c13f8e88833b 100644
+--- a/tools/lib/bpf/bpf_tracing.h
++++ b/tools/lib/bpf/bpf_tracing.h
+@@ -362,8 +362,6 @@ struct pt_regs___arm64 {
+ #define __PT_PARM7_REG a6
+ #define __PT_PARM8_REG a7
+
+-/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
+-#define PT_REGS_SYSCALL_REGS(ctx) ctx
+ #define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+ #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+ #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+diff --git a/tools/lib/perf/include/internal/rc_check.h b/tools/lib/perf/include/internal/rc_check.h
+index d5d771ccdc7b4..e88a6d8a0b0f9 100644
+--- a/tools/lib/perf/include/internal/rc_check.h
++++ b/tools/lib/perf/include/internal/rc_check.h
+@@ -9,8 +9,12 @@
+ * Enable reference count checking implicitly with leak checking, which is
+ * integrated into address sanitizer.
+ */
+-#if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
++#if defined(__SANITIZE_ADDRESS__) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+ #define REFCNT_CHECKING 1
++#elif defined(__has_feature)
++#if __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
++#define REFCNT_CHECKING 1
++#endif
+ #endif
+
+ /*
+diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
+index c54f7235c5d94..f40febdd6e36a 100644
+--- a/tools/objtool/objtool.c
++++ b/tools/objtool/objtool.c
+@@ -146,7 +146,5 @@ int main(int argc, const char **argv)
+ exec_cmd_init("objtool", UNUSED, UNUSED, UNUSED);
+ pager_init(UNUSED);
+
+- objtool_run(argc, argv);
+-
+- return 0;
++ return objtool_run(argc, argv);
+ }
+diff --git a/tools/perf/Documentation/perf-kwork.txt b/tools/perf/Documentation/perf-kwork.txt
+index 3c36324712b6e..482d6c52e2edf 100644
+--- a/tools/perf/Documentation/perf-kwork.txt
++++ b/tools/perf/Documentation/perf-kwork.txt
+@@ -8,7 +8,7 @@ perf-kwork - Tool to trace/measure kernel work properties (latencies)
+ SYNOPSIS
+ --------
+ [verse]
+-'perf kwork' {record}
++'perf kwork' {record|report|latency|timehist}
+
+ DESCRIPTION
+ -----------
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index 37af6df7b978d..86569f230e60d 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -69,6 +69,10 @@ include ../scripts/utilities.mak
+ # Define NO_LIBDW_DWARF_UNWIND if you do not want libdw support
+ # for dwarf backtrace post unwind.
+ #
++# Define NO_LIBTRACEEVENT=1 if you don't want libtraceevent to be linked;
++# this will remove multiple features and tools, such as 'perf trace',
++# that need it to read tracefs event format files, etc.
++#
+ # Define NO_PERF_READ_VDSO32 if you do not want to build perf-read-vdso32
+ # for reading the 32-bit compatibility VDSO in 64-bit mode
+ #
+diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
+index 14bf7a8429e76..de2fbb7c56c32 100644
+--- a/tools/perf/builtin-kwork.c
++++ b/tools/perf/builtin-kwork.c
+@@ -406,12 +406,14 @@ static int work_push_atom(struct perf_kwork *kwork,
+
+ work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
+ if (work == NULL) {
+- free(atom);
++ atom_free(atom);
+ return -1;
+ }
+
+- if (!profile_event_match(kwork, work, sample))
++ if (!profile_event_match(kwork, work, sample)) {
++ atom_free(atom);
+ return 0;
++ }
+
+ if (dst_type < KWORK_TRACE_MAX) {
+ dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
+@@ -1692,9 +1694,10 @@ int cmd_kwork(int argc, const char **argv)
+ static struct perf_kwork kwork = {
+ .class_list = LIST_HEAD_INIT(kwork.class_list),
+ .tool = {
+- .mmap = perf_event__process_mmap,
+- .mmap2 = perf_event__process_mmap2,
+- .sample = perf_kwork__process_tracepoint_sample,
++ .mmap = perf_event__process_mmap,
++ .mmap2 = perf_event__process_mmap2,
++ .sample = perf_kwork__process_tracepoint_sample,
++ .ordered_events = true,
+ },
+ .atom_page_list = LIST_HEAD_INIT(kwork.atom_page_list),
+ .sort_list = LIST_HEAD_INIT(kwork.sort_list),
+diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
+index b141f21342740..0b4b4445c5207 100644
+--- a/tools/perf/builtin-lock.c
++++ b/tools/perf/builtin-lock.c
+@@ -524,6 +524,7 @@ bool match_callstack_filter(struct machine *machine, u64 *callstack)
+ struct map *kmap;
+ struct symbol *sym;
+ u64 ip;
++ const char *arch = perf_env__arch(machine->env);
+
+ if (list_empty(&callstack_filters))
+ return true;
+@@ -531,7 +532,21 @@ bool match_callstack_filter(struct machine *machine, u64 *callstack)
+ for (int i = 0; i < max_stack_depth; i++) {
+ struct callstack_filter *filter;
+
+- if (!callstack || !callstack[i])
++ /*
++ * In powerpc, the callchain saved by the kernel always includes
++ * the first three entries as the NIP (next instruction pointer),
++ * LR (link register), and the contents of the LR save area in the
++ * second stack frame. In certain scenarios it's possible to have
++ * invalid kernel instruction addresses in either LR or the second
++ * stack frame's LR. In that case, the kernel will store that
++ * address as zero.
++ *
++ * The check below will continue to look into the callstack,
++ * in case the first or second callstack index entry has a zero
++ * address for powerpc.
++ */
++ if (!callstack || (!callstack[i] && (strcmp(arch, "powerpc") ||
++ (i != 1 && i != 2))))
+ break;
+
+ ip = callstack[i];
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 07b48f6df48eb..a3af805a1d572 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -1622,7 +1622,7 @@ static int perf_stat_init_aggr_mode(void)
+ * taking the highest cpu number to be the size of
+ * the aggregation translate cpumap.
+ */
+- if (evsel_list->core.user_requested_cpus)
++ if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus))
+ nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
+ else
+ nr = 0;
+diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
+index 1e7e8901a4450..e2848a9d48487 100644
+--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
++++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
+@@ -1,362 +1,384 @@
+ [
+ {
++ "MetricName": "branch_miss_pred_rate",
+ "MetricExpr": "BR_MIS_PRED / BR_PRED",
+ "BriefDescription": "Branch predictor misprediction rate. May not count branches that are never resolved because they are in the misprediction shadow of an earlier branch",
+- "MetricGroup": "Branch Prediction",
+- "MetricName": "Misprediction"
++ "MetricGroup": "branch",
++ "ScaleUnit": "100%"
+ },
+ {
+- "MetricExpr": "BR_MIS_PRED_RETIRED / BR_RETIRED",
+- "BriefDescription": "Branch predictor misprediction rate",
+- "MetricGroup": "Branch Prediction",
+- "MetricName": "Misprediction (retired)"
+- },
+- {
+- "MetricExpr": "BUS_ACCESS / ( BUS_CYCLES * 1)",
++ "MetricName": "bus_utilization",
++ "MetricExpr": "((BUS_ACCESS / (BUS_CYCLES * 1)) * 100)",
+ "BriefDescription": "Core-to-uncore bus utilization",
+ "MetricGroup": "Bus",
+- "MetricName": "Bus utilization"
++ "ScaleUnit": "1percent of bus cycles"
+ },
+ {
+- "MetricExpr": "L1D_CACHE_REFILL / L1D_CACHE",
+- "BriefDescription": "L1D cache miss rate",
+- "MetricGroup": "Cache",
+- "MetricName": "L1D cache miss"
++ "MetricName": "l1d_cache_miss_ratio",
++ "MetricExpr": "(L1D_CACHE_REFILL / L1D_CACHE)",
++ "BriefDescription": "This metric measures the ratio of level 1 data cache accesses missed to the total number of level 1 data cache accesses. This gives an indication of the effectiveness of the level 1 data cache.",
++ "MetricGroup": "Miss_Ratio;L1D_Cache_Effectiveness",
++ "ScaleUnit": "1per cache access"
++ },
++ {
++ "MetricName": "l1i_cache_miss_ratio",
++ "MetricExpr": "(L1I_CACHE_REFILL / L1I_CACHE)",
++ "BriefDescription": "This metric measures the ratio of level 1 instruction cache accesses missed to the total number of level 1 instruction cache accesses. This gives an indication of the effectiveness of the level 1 instruction cache.",
++ "MetricGroup": "Miss_Ratio;L1I_Cache_Effectiveness",
++ "ScaleUnit": "1per cache access"
+ },
+ {
++ "MetricName": "l1d_cache_read_miss",
+ "MetricExpr": "L1D_CACHE_LMISS_RD / L1D_CACHE_RD",
+ "BriefDescription": "L1D cache read miss rate",
+ "MetricGroup": "Cache",
+- "MetricName": "L1D cache read miss"
++ "ScaleUnit": "1per cache read access"
+ },
+ {
+- "MetricExpr": "L1I_CACHE_REFILL / L1I_CACHE",
+- "BriefDescription": "L1I cache miss rate",
+- "MetricGroup": "Cache",
+- "MetricName": "L1I cache miss"
+- },
+- {
+- "MetricExpr": "L2D_CACHE_REFILL / L2D_CACHE",
+- "BriefDescription": "L2 cache miss rate",
+- "MetricGroup": "Cache",
+- "MetricName": "L2 cache miss"
++ "MetricName": "l2_cache_miss_ratio",
++ "MetricExpr": "(L2D_CACHE_REFILL / L2D_CACHE)",
++ "BriefDescription": "This metric measures the ratio of level 2 cache accesses missed to the total number of level 2 cache accesses. This gives an indication of the effectiveness of the level 2 cache, which is a unified cache that stores both data and instruction. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
++ "MetricGroup": "Miss_Ratio;L2_Cache_Effectiveness",
++ "ScaleUnit": "1per cache access"
+ },
+ {
++ "MetricName": "l1i_cache_read_miss_rate",
+ "MetricExpr": "L1I_CACHE_LMISS / L1I_CACHE",
+ "BriefDescription": "L1I cache read miss rate",
+ "MetricGroup": "Cache",
+- "MetricName": "L1I cache read miss"
++ "ScaleUnit": "1per cache access"
+ },
+ {
++ "MetricName": "l2d_cache_read_miss_rate",
+ "MetricExpr": "L2D_CACHE_LMISS_RD / L2D_CACHE_RD",
+ "BriefDescription": "L2 cache read miss rate",
+ "MetricGroup": "Cache",
+- "MetricName": "L2 cache read miss"
++ "ScaleUnit": "1per cache read access"
+ },
+ {
+- "MetricExpr": "(L1D_CACHE_LMISS_RD * 1000) / INST_RETIRED",
++ "MetricName": "l1d_cache_miss_mpki",
++ "MetricExpr": "(L1D_CACHE_LMISS_RD * 1e3) / INST_RETIRED",
+ "BriefDescription": "Misses per thousand instructions (data)",
+ "MetricGroup": "Cache",
+- "MetricName": "MPKI data"
++ "ScaleUnit": "1MPKI"
+ },
+ {
+- "MetricExpr": "(L1I_CACHE_LMISS * 1000) / INST_RETIRED",
++ "MetricName": "l1i_cache_miss_mpki",
++ "MetricExpr": "(L1I_CACHE_LMISS * 1e3) / INST_RETIRED",
+ "BriefDescription": "Misses per thousand instructions (instruction)",
+ "MetricGroup": "Cache",
+- "MetricName": "MPKI instruction"
++ "ScaleUnit": "1MPKI"
+ },
+ {
+- "MetricExpr": "ASE_SPEC / OP_SPEC",
+- "BriefDescription": "Proportion of advanced SIMD data processing operations (excluding DP_SPEC/LD_SPEC) operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "ASE mix"
++ "MetricName": "simd_percentage",
++ "MetricExpr": "((ASE_SPEC / INST_SPEC) * 100)",
++ "BriefDescription": "This metric measures advanced SIMD operations as a percentage of total operations speculatively executed.",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "CRYPTO_SPEC / OP_SPEC",
+- "BriefDescription": "Proportion of crypto data processing operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "Crypto mix"
++ "MetricName": "crypto_percentage",
++ "MetricExpr": "((CRYPTO_SPEC / INST_SPEC) * 100)",
++ "BriefDescription": "This metric measures crypto operations as a percentage of operations speculatively executed.",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "VFP_SPEC / (duration_time *1000000000)",
++ "MetricName": "gflops",
++ "MetricExpr": "VFP_SPEC / (duration_time * 1e9)",
+ "BriefDescription": "Giga-floating point operations per second",
+- "MetricGroup": "Instruction",
+- "MetricName": "GFLOPS_ISSUED"
++ "MetricGroup": "InstructionMix"
+ },
+ {
+- "MetricExpr": "DP_SPEC / OP_SPEC",
+- "BriefDescription": "Proportion of integer data processing operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "Integer mix"
++ "MetricName": "integer_dp_percentage",
++ "MetricExpr": "((DP_SPEC / INST_SPEC) * 100)",
++ "BriefDescription": "This metric measures scalar integer operations as a percentage of operations speculatively executed.",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "INST_RETIRED / CPU_CYCLES",
+- "BriefDescription": "Instructions per cycle",
+- "MetricGroup": "Instruction",
+- "MetricName": "IPC"
++ "MetricName": "ipc",
++ "MetricExpr": "(INST_RETIRED / CPU_CYCLES)",
++ "BriefDescription": "This metric measures the number of instructions retired per cycle.",
++ "MetricGroup": "General",
++ "ScaleUnit": "1per cycle"
+ },
+ {
+- "MetricExpr": "LD_SPEC / OP_SPEC",
+- "BriefDescription": "Proportion of load operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "Load mix"
++ "MetricName": "load_percentage",
++ "MetricExpr": "((LD_SPEC / INST_SPEC) * 100)",
++ "BriefDescription": "This metric measures load operations as a percentage of operations speculatively executed.",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "LDST_SPEC/ OP_SPEC",
+- "BriefDescription": "Proportion of load & store operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "Load-store mix"
++ "MetricName": "load_store_spec_rate",
++ "MetricExpr": "((LDST_SPEC / INST_SPEC) * 100)",
++ "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speculatively executed",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "INST_RETIRED / (duration_time * 1000000)",
++ "MetricName": "retired_mips",
++ "MetricExpr": "INST_RETIRED / (duration_time * 1e6)",
+ "BriefDescription": "Millions of instructions per second",
+- "MetricGroup": "Instruction",
+- "MetricName": "MIPS_RETIRED"
++ "MetricGroup": "InstructionMix"
+ },
+ {
+- "MetricExpr": "INST_SPEC / (duration_time * 1000000)",
++ "MetricName": "spec_utilization_mips",
++ "MetricExpr": "INST_SPEC / (duration_time * 1e6)",
+ "BriefDescription": "Millions of instructions per second",
+- "MetricGroup": "Instruction",
+- "MetricName": "MIPS_UTILIZATION"
+- },
+- {
+- "MetricExpr": "PC_WRITE_SPEC / OP_SPEC",
+- "BriefDescription": "Proportion of software change of PC operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "PC write mix"
++ "MetricGroup": "PEutilization"
+ },
+ {
+- "MetricExpr": "ST_SPEC / OP_SPEC",
+- "BriefDescription": "Proportion of store operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "Store mix"
++ "MetricName": "pc_write_spec_rate",
++ "MetricExpr": "((PC_WRITE_SPEC / INST_SPEC) * 100)",
++ "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speculatively executed",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "VFP_SPEC / OP_SPEC",
+- "BriefDescription": "Proportion of FP operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "VFP mix"
++ "MetricName": "store_percentage",
++ "MetricExpr": "((ST_SPEC / INST_SPEC) * 100)",
++ "BriefDescription": "This metric measures store operations as a percentage of operations speculatively executed.",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "1 - (OP_RETIRED/ (CPU_CYCLES * 4))",
+- "BriefDescription": "Proportion of slots lost",
+- "MetricGroup": "Speculation / TDA",
+- "MetricName": "CPU lost"
++ "MetricName": "scalar_fp_percentage",
++ "MetricExpr": "((VFP_SPEC / INST_SPEC) * 100)",
++ "BriefDescription": "This metric measures scalar floating point operations as a percentage of operations speculatively executed.",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "OP_RETIRED/ (CPU_CYCLES * 4)",
+- "BriefDescription": "Proportion of slots retiring",
+- "MetricGroup": "Speculation / TDA",
+- "MetricName": "CPU utilization"
++ "MetricName": "retired_rate",
++ "MetricExpr": "OP_RETIRED / OP_SPEC",
++ "BriefDescription": "Of all the micro-operations issued, what percentage are retired (committed)",
++ "MetricGroup": "General",
++ "ScaleUnit": "100%"
+ },
+ {
+- "MetricExpr": "OP_RETIRED - OP_SPEC",
+- "BriefDescription": "Operations lost due to misspeculation",
+- "MetricGroup": "Speculation / TDA",
+- "MetricName": "Operations lost"
++ "MetricName": "wasted",
++ "MetricExpr": "1 - (OP_RETIRED / (CPU_CYCLES * #slots))",
++ "BriefDescription": "Of all the micro-operations issued, what proportion are lost",
++ "MetricGroup": "General",
++ "ScaleUnit": "100%"
+ },
+ {
+- "MetricExpr": "1 - (OP_RETIRED / OP_SPEC)",
+- "BriefDescription": "Proportion of operations lost",
+- "MetricGroup": "Speculation / TDA",
+- "MetricName": "Operations lost (ratio)"
++ "MetricName": "wasted_rate",
++ "MetricExpr": "1 - OP_RETIRED / OP_SPEC",
++ "BriefDescription": "Of all the micro-operations issued, what percentage are not retired (committed)",
++ "MetricGroup": "General",
++ "ScaleUnit": "100%"
+ },
+ {
+- "MetricExpr": "OP_RETIRED / OP_SPEC",
+- "BriefDescription": "Proportion of operations retired",
+- "MetricGroup": "Speculation / TDA",
+- "MetricName": "Operations retired"
+- },
+- {
+- "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES",
++ "MetricName": "stall_backend_cache_rate",
++ "MetricExpr": "((STALL_BACKEND_CACHE / CPU_CYCLES) * 100)",
+ "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and cache miss",
+ "MetricGroup": "Stall",
+- "MetricName": "Stall backend cache cycles"
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES",
++ "MetricName": "stall_backend_resource_rate",
++ "MetricExpr": "((STALL_BACKEND_RESOURCE / CPU_CYCLES) * 100)",
+ "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and resource full",
+ "MetricGroup": "Stall",
+- "MetricName": "Stall backend resource cycles"
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES",
++ "MetricName": "stall_backend_tlb_rate",
++ "MetricExpr": "((STALL_BACKEND_TLB / CPU_CYCLES) * 100)",
+ "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and TLB miss",
+ "MetricGroup": "Stall",
+- "MetricName": "Stall backend tlb cycles"
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
++ "MetricName": "stall_frontend_cache_rate",
++ "MetricExpr": "((STALL_FRONTEND_CACHE / CPU_CYCLES) * 100)",
+ "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and cache miss",
+ "MetricGroup": "Stall",
+- "MetricName": "Stall frontend cache cycles"
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES",
++ "MetricName": "stall_frontend_tlb_rate",
++ "MetricExpr": "((STALL_FRONTEND_TLB / CPU_CYCLES) * 100)",
+ "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and TLB miss",
+ "MetricGroup": "Stall",
+- "MetricName": "Stall frontend tlb cycles"
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "DTLB_WALK / L1D_TLB",
+- "BriefDescription": "D-side walk per d-side translation request",
+- "MetricGroup": "TLB",
+- "MetricName": "DTLB walks"
++ "MetricName": "dtlb_walk_ratio",
++ "MetricExpr": "(DTLB_WALK / L1D_TLB)",
++ "BriefDescription": "This metric measures the ratio of data TLB Walks to the total number of data TLB accesses. This gives an indication of the effectiveness of the data TLB accesses.",
++ "MetricGroup": "Miss_Ratio;DTLB_Effectiveness",
++ "ScaleUnit": "1per TLB access"
+ },
+ {
+- "MetricExpr": "ITLB_WALK / L1I_TLB",
+- "BriefDescription": "I-side walk per i-side translation request",
+- "MetricGroup": "TLB",
+- "MetricName": "ITLB walks"
++ "MetricName": "itlb_walk_ratio",
++ "MetricExpr": "(ITLB_WALK / L1I_TLB)",
++ "BriefDescription": "This metric measures the ratio of instruction TLB Walks to the total number of instruction TLB accesses. This gives an indication of the effectiveness of the instruction TLB accesses.",
++ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness",
++ "ScaleUnit": "1per TLB access"
+ },
+ {
+- "MetricExpr": "STALL_SLOT_BACKEND / (CPU_CYCLES * 4)",
+- "BriefDescription": "Fraction of slots backend bound",
+- "MetricGroup": "TopDownL1",
+- "MetricName": "backend"
++ "ArchStdEvent": "backend_bound"
+ },
+ {
+- "MetricExpr": "1 - (retiring + lost + backend)",
+- "BriefDescription": "Fraction of slots frontend bound",
+- "MetricGroup": "TopDownL1",
+- "MetricName": "frontend"
++ "ArchStdEvent": "frontend_bound",
++ "MetricExpr": "100 - (retired_fraction + slots_lost_misspeculation_fraction + backend_bound)"
+ },
+ {
+- "MetricExpr": "((OP_SPEC - OP_RETIRED) / (CPU_CYCLES * 4))",
++ "MetricName": "slots_lost_misspeculation_fraction",
++ "MetricExpr": "100 * ((OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots))",
+ "BriefDescription": "Fraction of slots lost due to misspeculation",
+- "MetricGroup": "TopDownL1",
+- "MetricName": "lost"
++ "MetricGroup": "Default;TopdownL1",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "(OP_RETIRED / (CPU_CYCLES * 4))",
++ "MetricName": "retired_fraction",
++ "MetricExpr": "100 * (OP_RETIRED / (CPU_CYCLES * #slots))",
+ "BriefDescription": "Fraction of slots retiring, useful work",
+- "MetricGroup": "TopDownL1",
+- "MetricName": "retiring"
++ "MetricGroup": "Default;TopdownL1",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "backend - backend_memory",
++ "MetricName": "backend_core",
++ "MetricExpr": "(backend_bound / 100) - backend_memory",
+ "BriefDescription": "Fraction of slots the CPU was stalled due to backend non-memory subsystem issues",
+- "MetricGroup": "TopDownL2",
+- "MetricName": "backend_core"
++ "MetricGroup": "TopdownL2",
++ "ScaleUnit": "100%"
+ },
+ {
+- "MetricExpr": "(STALL_BACKEND_TLB + STALL_BACKEND_CACHE + STALL_BACKEND_MEM) / CPU_CYCLES ",
++ "MetricName": "backend_memory",
++ "MetricExpr": "(STALL_BACKEND_TLB + STALL_BACKEND_CACHE) / CPU_CYCLES",
+ "BriefDescription": "Fraction of slots the CPU was stalled due to backend memory subsystem issues (cache/tlb miss)",
+- "MetricGroup": "TopDownL2",
+- "MetricName": "backend_memory"
++ "MetricGroup": "TopdownL2",
++ "ScaleUnit": "100%"
+ },
+ {
+- "MetricExpr": " (BR_MIS_PRED_RETIRED / GPC_FLUSH) * lost",
++ "MetricName": "branch_mispredict",
++ "MetricExpr": "(BR_MIS_PRED_RETIRED / GPC_FLUSH) * slots_lost_misspeculation_fraction",
+ "BriefDescription": "Fraction of slots lost due to branch misprediction",
+- "MetricGroup": "TopDownL2",
+- "MetricName": "branch_mispredict"
++ "MetricGroup": "TopdownL2",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "frontend - frontend_latency",
++ "MetricName": "frontend_bandwidth",
++ "MetricExpr": "frontend_bound - frontend_latency",
+ "BriefDescription": "Fraction of slots the CPU did not dispatch at full bandwidth - able to dispatch partial slots only (1, 2, or 3 uops)",
+- "MetricGroup": "TopDownL2",
+- "MetricName": "frontend_bandwidth"
++ "MetricGroup": "TopdownL2",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "(STALL_FRONTEND - ((STALL_SLOT_FRONTEND - (frontend * CPU_CYCLES * 4)) / 4)) / CPU_CYCLES",
++ "MetricName": "frontend_latency",
++ "MetricExpr": "((STALL_FRONTEND - ((STALL_SLOT_FRONTEND - ((frontend_bound / 100) * CPU_CYCLES * #slots)) / #slots)) / CPU_CYCLES) * 100",
+ "BriefDescription": "Fraction of slots the CPU was stalled due to frontend latency issues (cache/tlb miss); nothing to dispatch",
+- "MetricGroup": "TopDownL2",
+- "MetricName": "frontend_latency"
++ "MetricGroup": "TopdownL2",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "lost - branch_mispredict",
++ "MetricName": "other_miss_pred",
++ "MetricExpr": "slots_lost_misspeculation_fraction - branch_mispredict",
+ "BriefDescription": "Fraction of slots lost due to other/non-branch misprediction misspeculation",
+- "MetricGroup": "TopDownL2",
+- "MetricName": "other_clears"
++ "MetricGroup": "TopdownL2",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "(IXU_NUM_UOPS_ISSUED + FSU_ISSUED) / (CPU_CYCLES * 6)",
++ "MetricName": "pipe_utilization",
++ "MetricExpr": "100 * ((IXU_NUM_UOPS_ISSUED + FSU_ISSUED) / (CPU_CYCLES * 6))",
+ "BriefDescription": "Fraction of execute slots utilized",
+- "MetricGroup": "TopDownL2",
+- "MetricName": "pipe_utilization"
++ "MetricGroup": "TopdownL2",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "STALL_BACKEND_MEM / CPU_CYCLES",
++ "MetricName": "d_cache_l2_miss_rate",
++ "MetricExpr": "((STALL_BACKEND_MEM / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to data L2 cache miss",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "d_cache_l2_miss"
++ "MetricGroup": "TopdownL3",
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES",
++ "MetricName": "d_cache_miss_rate",
++ "MetricExpr": "((STALL_BACKEND_CACHE / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to data cache miss",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "d_cache_miss"
++ "MetricGroup": "TopdownL3",
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES",
++ "MetricName": "d_tlb_miss_rate",
++ "MetricExpr": "((STALL_BACKEND_TLB / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to data TLB miss",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "d_tlb_miss"
++ "MetricGroup": "TopdownL3",
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "FSU_ISSUED / (CPU_CYCLES * 2)",
++ "MetricName": "fsu_pipe_utilization",
++ "MetricExpr": "((FSU_ISSUED / (CPU_CYCLES * 2)) * 100)",
+ "BriefDescription": "Fraction of FSU execute slots utilized",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "fsu_pipe_utilization"
++ "MetricGroup": "TopdownL3",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
++ "MetricName": "i_cache_miss_rate",
++ "MetricExpr": "((STALL_FRONTEND_CACHE / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction cache miss",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "i_cache_miss"
++ "MetricGroup": "TopdownL3",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": " STALL_FRONTEND_TLB / CPU_CYCLES ",
++ "MetricName": "i_tlb_miss_rate",
++ "MetricExpr": "((STALL_FRONTEND_TLB / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction TLB miss",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "i_tlb_miss"
++ "MetricGroup": "TopdownL3",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "IXU_NUM_UOPS_ISSUED / (CPU_CYCLES / 4)",
++ "MetricName": "ixu_pipe_utilization",
++ "MetricExpr": "((IXU_NUM_UOPS_ISSUED / (CPU_CYCLES * #slots)) * 100)",
+ "BriefDescription": "Fraction of IXU execute slots utilized",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "ixu_pipe_utilization"
++ "MetricGroup": "TopdownL3",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "IDR_STALL_FLUSH / CPU_CYCLES",
++ "MetricName": "stall_recovery_rate",
++ "MetricExpr": "((IDR_STALL_FLUSH / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to flush recovery",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "recovery"
+- },
+- {
+- "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES",
+- "BriefDescription": "Fraction of cycles the CPU was stalled due to core resource shortage",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "resource"
++ "MetricGroup": "TopdownL3",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "IDR_STALL_FSU_SCHED / CPU_CYCLES ",
++ "MetricName": "stall_fsu_sched_rate",
++ "MetricExpr": "((IDR_STALL_FSU_SCHED / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and FSU was full",
+- "MetricGroup": "TopDownL4",
+- "MetricName": "stall_fsu_sched"
++ "MetricGroup": "TopdownL4",
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "IDR_STALL_IXU_SCHED / CPU_CYCLES ",
++ "MetricName": "stall_ixu_sched_rate",
++ "MetricExpr": "((IDR_STALL_IXU_SCHED / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and IXU was full",
+- "MetricGroup": "TopDownL4",
+- "MetricName": "stall_ixu_sched"
++ "MetricGroup": "TopdownL4",
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "IDR_STALL_LOB_ID / CPU_CYCLES ",
++ "MetricName": "stall_lob_id_rate",
++ "MetricExpr": "((IDR_STALL_LOB_ID / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and LOB was full",
+- "MetricGroup": "TopDownL4",
+- "MetricName": "stall_lob_id"
++ "MetricGroup": "TopdownL4",
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "IDR_STALL_ROB_ID / CPU_CYCLES",
++ "MetricName": "stall_rob_id_rate",
++ "MetricExpr": "((IDR_STALL_ROB_ID / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and ROB was full",
+- "MetricGroup": "TopDownL4",
+- "MetricName": "stall_rob_id"
++ "MetricGroup": "TopdownL4",
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "IDR_STALL_SOB_ID / CPU_CYCLES ",
++ "MetricName": "stall_sob_id_rate",
++ "MetricExpr": "((IDR_STALL_SOB_ID / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and SOB was full",
+- "MetricGroup": "TopDownL4",
+- "MetricName": "stall_sob_id"
++ "MetricGroup": "TopdownL4",
++ "ScaleUnit": "1percent of cycles"
+ }
+ ]
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+index c606ae03cd27d..0e0253d0e7577 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+@@ -195,7 +195,7 @@
+ "BriefDescription": "Threshold counter exceeded a value of 128."
+ },
+ {
+- "EventCode": "0x400FA",
++ "EventCode": "0x500FA",
+ "EventName": "PM_RUN_INST_CMPL",
+ "BriefDescription": "PowerPC instruction completed while the run latch is set."
+ }
+diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
+index 8fc62b8f667d8..e1f55fcfa0d02 100644
+--- a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
++++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
+@@ -48,6 +48,12 @@
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
++ {
++ "BriefDescription": "Uncore frequency per die [GHZ]",
++ "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9",
++ "MetricGroup": "SoC",
++ "MetricName": "UNCORE_FREQ"
++ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+@@ -652,7 +658,7 @@
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+- "MetricExpr": "64 * (arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@) / 1e6 / duration_time / 1e3",
++ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_system_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+@@ -690,6 +696,12 @@
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_system_smt_2t_utilization"
+ },
++ {
++ "BriefDescription": "Socket actual clocks when any core is active on that socket",
++ "MetricExpr": "cbox_0@event\\=0x0@",
++ "MetricGroup": "SoC",
++ "MetricName": "tma_info_system_socket_clks"
++ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC",
+diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
+index 01f70b8e705a8..21f4d9ba023d9 100644
+--- a/tools/perf/util/bpf_off_cpu.c
++++ b/tools/perf/util/bpf_off_cpu.c
+@@ -98,7 +98,7 @@ static void off_cpu_finish(void *arg __maybe_unused)
+ /* v5.18 kernel added prev_state arg, so it needs to check the signature */
+ static void check_sched_switch_args(void)
+ {
+- const struct btf *btf = bpf_object__btf(skel->obj);
++ const struct btf *btf = btf__load_vmlinux_btf();
+ const struct btf_type *t1, *t2, *t3;
+ u32 type_id;
+
+@@ -116,7 +116,8 @@ static void check_sched_switch_args(void)
+ return;
+
+ t3 = btf__type_by_id(btf, t2->type);
+- if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
++ /* btf_trace func proto has one more argument for the context */
++ if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
+ /* new format: pass prev_state as 4th arg */
+ skel->rodata->has_prev_state = true;
+ }
+diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+index 939ec769bf4a5..52c270330ae0d 100644
+--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
++++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+@@ -153,7 +153,7 @@ static inline
+ unsigned int augmented_arg__read_str(struct augmented_arg *augmented_arg, const void *arg, unsigned int arg_len)
+ {
+ unsigned int augmented_len = sizeof(*augmented_arg);
+- int string_len = bpf_probe_read_str(&augmented_arg->value, arg_len, arg);
++ int string_len = bpf_probe_read_user_str(&augmented_arg->value, arg_len, arg);
+
+ augmented_arg->size = augmented_arg->err = 0;
+ /*
+@@ -203,7 +203,7 @@ int sys_enter_connect(struct syscall_enter_args *args)
+ _Static_assert(is_power_of_2(sizeof(augmented_args->saddr)), "sizeof(augmented_args->saddr) needs to be a power of two");
+ socklen &= sizeof(augmented_args->saddr) - 1;
+
+- bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
++ bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg);
+
+ return augmented__output(args, augmented_args, len + socklen);
+ }
+@@ -221,7 +221,7 @@ int sys_enter_sendto(struct syscall_enter_args *args)
+
+ socklen &= sizeof(augmented_args->saddr) - 1;
+
+- bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
++ bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg);
+
+ return augmented__output(args, augmented_args, len + socklen);
+ }
+@@ -311,7 +311,7 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args)
+ if (augmented_args == NULL)
+ goto failure;
+
+- if (bpf_probe_read(&augmented_args->__data, sizeof(*attr), attr) < 0)
++ if (bpf_probe_read_user(&augmented_args->__data, sizeof(*attr), attr) < 0)
+ goto failure;
+
+ attr_read = (const struct perf_event_attr_size *)augmented_args->__data;
+@@ -325,7 +325,7 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args)
+ goto failure;
+
+ // Now that we read attr->size and tested it against the size limits, read it completely
+- if (bpf_probe_read(&augmented_args->__data, size, attr) < 0)
++ if (bpf_probe_read_user(&augmented_args->__data, size, attr) < 0)
+ goto failure;
+
+ return augmented__output(args, augmented_args, len + size);
+@@ -347,7 +347,7 @@ int sys_enter_clock_nanosleep(struct syscall_enter_args *args)
+ if (size > sizeof(augmented_args->__data))
+ goto failure;
+
+- bpf_probe_read(&augmented_args->__data, size, rqtp_arg);
++ bpf_probe_read_user(&augmented_args->__data, size, rqtp_arg);
+
+ return augmented__output(args, augmented_args, len + size);
+ failure:
+@@ -385,7 +385,7 @@ int sys_enter(struct syscall_enter_args *args)
+ if (augmented_args == NULL)
+ return 1;
+
+- bpf_probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
++ bpf_probe_read_kernel(&augmented_args->args, sizeof(augmented_args->args), args);
+
+ /*
+ * Jump to syscall specific augmenter, even if the default one,
+@@ -406,7 +406,7 @@ int sys_exit(struct syscall_exit_args *args)
+ if (pid_filter__has(&pids_filtered, getpid()))
+ return 0;
+
+- bpf_probe_read(&exit_args, sizeof(exit_args), args);
++ bpf_probe_read_kernel(&exit_args, sizeof(exit_args), args);
+ /*
+ * Jump to syscall specific return augmenter, even if the default one,
+ * "!raw_syscalls:unaugmented" that will just return 1 to return the
+diff --git a/tools/perf/util/bpf_skel/vmlinux/.gitignore b/tools/perf/util/bpf_skel/vmlinux/.gitignore
+new file mode 100644
+index 0000000000000..49502c04183a2
+--- /dev/null
++++ b/tools/perf/util/bpf_skel/vmlinux/.gitignore
+@@ -0,0 +1 @@
++!vmlinux.h
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index 7ef43f72098e0..c779b9f2e6220 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -251,6 +251,9 @@ static struct evsel *evlist__dummy_event(struct evlist *evlist)
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_DUMMY,
+ .size = sizeof(attr), /* to capture ABI version */
++ /* Avoid frequency mode for dummy events to avoid associated timers. */
++ .freq = 0,
++ .sample_period = 1,
+ };
+
+ return evsel__new_idx(&attr, evlist->core.nr_entries);
+@@ -277,8 +280,6 @@ struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
+ evsel->core.attr.exclude_kernel = 1;
+ evsel->core.attr.exclude_guest = 1;
+ evsel->core.attr.exclude_hv = 1;
+- evsel->core.attr.freq = 0;
+- evsel->core.attr.sample_period = 1;
+ evsel->core.system_wide = system_wide;
+ evsel->no_aux_samples = true;
+ evsel->name = strdup("dummy:u");
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index 3dc8a4968beb9..ac8c0ef48a7f3 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -2676,8 +2676,6 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+
+ /* If we have branch cycles always annotate them. */
+ if (bs && bs->nr && entries[0].flags.cycles) {
+- int i;
+-
+ bi = sample__resolve_bstack(sample, al);
+ if (bi) {
+ struct addr_map_symbol *prev = NULL;
+@@ -2692,7 +2690,7 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+ * Note that perf stores branches reversed from
+ * program order!
+ */
+- for (i = bs->nr - 1; i >= 0; i--) {
++ for (int i = bs->nr - 1; i >= 0; i--) {
+ addr_map_symbol__account_cycles(&bi[i].from,
+ nonany_branch_mode ? NULL : prev,
+ bi[i].flags.cycles);
+@@ -2701,6 +2699,12 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+ if (total_cycles)
+ *total_cycles += bi[i].flags.cycles;
+ }
++ for (unsigned int i = 0; i < bs->nr; i++) {
++ map__put(bi[i].to.ms.map);
++ maps__put(bi[i].to.ms.maps);
++ map__put(bi[i].from.ms.map);
++ maps__put(bi[i].from.ms.maps);
++ }
+ free(bi);
+ }
+ }
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index dbf0bc71a63be..f38893e0b0369 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -1512,9 +1512,11 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
+ } else if (ptq->state->flags & INTEL_PT_ASYNC) {
+ if (!ptq->state->to_ip)
+ ptq->flags = PERF_IP_FLAG_BRANCH |
++ PERF_IP_FLAG_ASYNC |
+ PERF_IP_FLAG_TRACE_END;
+ else if (ptq->state->from_nr && !ptq->state->to_nr)
+ ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
++ PERF_IP_FLAG_ASYNC |
+ PERF_IP_FLAG_VMEXIT;
+ else
+ ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 88f31b3a63acb..e6a8d758f6fe4 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -2624,16 +2624,18 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
+ save_lbr_cursor_node(thread, cursor, i);
+ }
+
+- /* Add LBR ip from first entries.to */
+- ip = entries[0].to;
+- flags = &entries[0].flags;
+- *branch_from = entries[0].from;
+- err = add_callchain_ip(thread, cursor, parent,
+- root_al, &cpumode, ip,
+- true, flags, NULL,
+- *branch_from);
+- if (err)
+- return err;
++ if (lbr_nr > 0) {
++ /* Add LBR ip from first entries.to */
++ ip = entries[0].to;
++ flags = &entries[0].flags;
++ *branch_from = entries[0].from;
++ err = add_callchain_ip(thread, cursor, parent,
++ root_al, &cpumode, ip,
++ true, flags, NULL,
++ *branch_from);
++ if (err)
++ return err;
++ }
+
+ return 0;
+ }
+diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
+index 39ffe8ceb3809..954b235e12e51 100644
+--- a/tools/perf/util/mem-events.c
++++ b/tools/perf/util/mem-events.c
+@@ -185,7 +185,6 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
+ {
+ int i = *argv_nr, k = 0;
+ struct perf_mem_event *e;
+- struct perf_pmu *pmu;
+
+ for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
+ e = perf_mem_events__ptr(j);
+@@ -202,6 +201,8 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
+ rec_argv[i++] = "-e";
+ rec_argv[i++] = perf_mem_events__name(j, NULL);
+ } else {
++ struct perf_pmu *pmu = NULL;
++
+ if (!e->supported) {
+ perf_mem_events__print_unsupport_hybrid(e, j);
+ return -1;
+diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
+index 21bfe7e0d9444..c3a86ef4b7cf3 100644
+--- a/tools/perf/util/parse-events.y
++++ b/tools/perf/util/parse-events.y
+@@ -79,7 +79,7 @@ static void free_list_evsel(struct list_head* list_evsel)
+ %type <str> PE_MODIFIER_BP
+ %type <str> PE_EVENT_NAME
+ %type <str> PE_DRV_CFG_TERM
+-%type <str> name_or_raw name_or_legacy
++%type <str> name_or_raw
+ %destructor { free ($$); } <str>
+ %type <term> event_term
+ %destructor { parse_events_term__delete ($$); } <term>
+@@ -104,6 +104,7 @@ static void free_list_evsel(struct list_head* list_evsel)
+ %type <list_evsel> groups
+ %destructor { free_list_evsel ($$); } <list_evsel>
+ %type <tracepoint_name> tracepoint_name
++%destructor { free ($$.sys); free ($$.event); } <tracepoint_name>
+ %type <hardware_term> PE_TERM_HW
+ %destructor { free ($$.str); } <hardware_term>
+
+@@ -679,8 +680,6 @@ event_term
+
+ name_or_raw: PE_RAW | PE_NAME | PE_LEGACY_CACHE
+
+-name_or_legacy: PE_NAME | PE_LEGACY_CACHE
+-
+ event_term:
+ PE_RAW
+ {
+@@ -695,7 +694,7 @@ PE_RAW
+ $$ = term;
+ }
+ |
+-name_or_raw '=' name_or_legacy
++name_or_raw '=' name_or_raw
+ {
+ struct parse_events_term *term;
+ int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $3, &@1, &@3);
+@@ -775,7 +774,7 @@ PE_TERM_HW
+ $$ = term;
+ }
+ |
+-PE_TERM '=' name_or_legacy
++PE_TERM '=' name_or_raw
+ {
+ struct parse_events_term *term;
+ int err = parse_events_term__str(&term, (enum parse_events__term_type)$1,
+diff --git a/tools/power/cpupower/man/cpupower-powercap-info.1 b/tools/power/cpupower/man/cpupower-powercap-info.1
+index df3087000efb8..145d6f06fa72d 100644
+--- a/tools/power/cpupower/man/cpupower-powercap-info.1
++++ b/tools/power/cpupower/man/cpupower-powercap-info.1
+@@ -17,7 +17,7 @@ settings of all cores, see cpupower(1) how to choose specific cores.
+ .SH "DOCUMENTATION"
+
+ kernel sources:
+-Documentation/power/powercap/powercap.txt
++Documentation/power/powercap/powercap.rst
+
+
+ .SH "SEE ALSO"
+diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py
+index 4a356a7067855..40ad221e88811 100755
+--- a/tools/power/pm-graph/sleepgraph.py
++++ b/tools/power/pm-graph/sleepgraph.py
+@@ -4151,7 +4151,7 @@ def parseKernelLog(data):
+ elif(re.match('Enabling non-boot CPUs .*', msg)):
+ # start of first cpu resume
+ cpu_start = ktime
+- elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)) \
++ elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg) \
+ or re.match('psci: CPU(?P<cpu>[0-9]*) killed.*', msg)):
+ # end of a cpu suspend, start of the next
+ m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 9a10512e34078..785de89077de0 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -2180,7 +2180,7 @@ retry:
+ if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
+ if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
+ return -7;
+- } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
++ } else if (do_knl_cstates && soft_c1_residency_display(BIC_CPU_c6)) {
+ if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
+ return -7;
+ }
+@@ -5790,6 +5790,7 @@ void process_cpuid()
+ rapl_probe(family, model);
+ perf_limit_reasons_probe(family, model);
+ automatic_cstate_conversion_probe(family, model);
++ prewake_cstate_probe(family, model);
+
+ check_tcc_offset(model_orig);
+
+diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
+index fb6ab9cef84f7..b885462999022 100644
+--- a/tools/testing/cxl/test/cxl.c
++++ b/tools/testing/cxl/test/cxl.c
+@@ -831,7 +831,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
+ cxld->interleave_ways = 2;
+ else
+ cxld->interleave_ways = 1;
+- cxld->interleave_granularity = 256;
++ cxld->interleave_granularity = 4096;
+ cxld->hpa_range = (struct range) {
+ .start = base,
+ .end = base + size - 1,
+diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
+index 464fc39ed2776..68118c37f0b56 100644
+--- a/tools/testing/cxl/test/mem.c
++++ b/tools/testing/cxl/test/mem.c
+@@ -1450,11 +1450,11 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
+ mdata->mes.mds = mds;
+ cxl_mock_add_event_logs(&mdata->mes);
+
+- cxlmd = devm_cxl_add_memdev(cxlds);
++ cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
+ if (IS_ERR(cxlmd))
+ return PTR_ERR(cxlmd);
+
+- rc = cxl_memdev_setup_fw_upload(mds);
++ rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
+ if (rc)
+ return rc;
+
+diff --git a/tools/testing/selftests/arm64/fp/za-fork.c b/tools/testing/selftests/arm64/fp/za-fork.c
+index b86cb1049497f..587b946482226 100644
+--- a/tools/testing/selftests/arm64/fp/za-fork.c
++++ b/tools/testing/selftests/arm64/fp/za-fork.c
+@@ -85,7 +85,7 @@ int main(int argc, char **argv)
+ */
+ ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0);
+ if (ret >= 0) {
+- ksft_test_result(fork_test(), "fork_test");
++ ksft_test_result(fork_test(), "fork_test\n");
+
+ } else {
+ ksft_print_msg("SME not supported\n");
+diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
+index 18cf7b17463d9..98dde091d2825 100644
+--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
++++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
+@@ -94,14 +94,8 @@ static struct {
+ { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
+ { "incorrect_head_off1", "bpf_list_head not found at offset=25" },
+ { "incorrect_head_off2", "bpf_list_head not found at offset=1" },
+- { "pop_front_off",
+- "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) "
+- "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"
+- "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
+- { "pop_back_off",
+- "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) "
+- "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"
+- "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
++ { "pop_front_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
++ { "pop_back_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
+ };
+
+ static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg)
+diff --git a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
+index c7636e18b1ebd..aa9f67eb1c95b 100644
+--- a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
++++ b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
+@@ -61,6 +61,11 @@ void test_module_fentry_shadow(void)
+ int link_fd[2] = {};
+ __s32 btf_id[2] = {};
+
++ if (!env.has_testmod) {
++ test__skip();
++ return;
++ }
++
+ LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
+ .expected_attach_type = BPF_TRACE_FENTRY,
+ );
+diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+index 58fe2c586ed76..09c189761926c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
++++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+@@ -271,11 +271,11 @@ static void test_tailcall_count(const char *which)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+@@ -352,11 +352,11 @@ static void test_tailcall_4(void)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
+@@ -442,11 +442,11 @@ static void test_tailcall_5(void)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
+@@ -631,11 +631,11 @@ static void test_tailcall_bpf2bpf_2(void)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+@@ -805,11 +805,11 @@ static void test_tailcall_bpf2bpf_4(bool noise)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ i = 0;
+ val.noise = noise;
+@@ -872,7 +872,7 @@ static void test_tailcall_bpf2bpf_6(void)
+ ASSERT_EQ(topts.retval, 0, "tailcall retval");
+
+ data_fd = bpf_map__fd(obj->maps.bss);
+- if (!ASSERT_GE(map_fd, 0, "bss map fd"))
++ if (!ASSERT_GE(data_fd, 0, "bss map fd"))
+ goto out;
+
+ i = 0;
+diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
+index 38a57a2e70dbe..799fff4995d87 100644
+--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
++++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
+@@ -99,6 +99,9 @@
+ #elif defined(__TARGET_ARCH_arm64)
+ #define SYSCALL_WRAPPER 1
+ #define SYS_PREFIX "__arm64_"
++#elif defined(__TARGET_ARCH_riscv)
++#define SYSCALL_WRAPPER 1
++#define SYS_PREFIX "__riscv_"
+ #else
+ #define SYSCALL_WRAPPER 0
+ #define SYS_PREFIX "__se_"
+diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c
+index f4c63daba2297..6438982b928bd 100644
+--- a/tools/testing/selftests/bpf/progs/linked_list_fail.c
++++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c
+@@ -591,7 +591,9 @@ int pop_ptr_off(void *(*op)(void *head))
+ n = op(&p->head);
+ bpf_spin_unlock(&p->lock);
+
+- bpf_this_cpu_ptr(n);
++ if (!n)
++ return 0;
++ bpf_spin_lock((void *)n);
+ return 0;
+ }
+
+diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c
+index 5bc86af80a9ad..71735dbf33d4f 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_loops1.c
++++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c
+@@ -75,9 +75,10 @@ l0_%=: r0 += 1; \
+ " ::: __clobber_all);
+ }
+
+-SEC("tracepoint")
++SEC("socket")
+ __description("bounded loop, start in the middle")
+-__failure __msg("back-edge")
++__success
++__failure_unpriv __msg_unpriv("back-edge")
+ __naked void loop_start_in_the_middle(void)
+ {
+ asm volatile (" \
+@@ -136,7 +137,9 @@ l0_%=: exit; \
+
+ SEC("tracepoint")
+ __description("bounded recursion")
+-__failure __msg("back-edge")
++__failure
++/* verifier limitation in detecting max stack depth */
++__msg("the call stack of 8 frames is too deep !")
+ __naked void bounded_recursion(void)
+ {
+ asm volatile (" \
+diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
+index 77bd492c60248..2f9f6f250f171 100644
+--- a/tools/testing/selftests/bpf/test_progs.h
++++ b/tools/testing/selftests/bpf/test_progs.h
+@@ -417,6 +417,8 @@ int get_bpf_max_tramp_links(void);
+ #define SYS_NANOSLEEP_KPROBE_NAME "__s390x_sys_nanosleep"
+ #elif defined(__aarch64__)
+ #define SYS_NANOSLEEP_KPROBE_NAME "__arm64_sys_nanosleep"
++#elif defined(__riscv)
++#define SYS_NANOSLEEP_KPROBE_NAME "__riscv_sys_nanosleep"
+ #else
+ #define SYS_NANOSLEEP_KPROBE_NAME "sys_nanosleep"
+ #endif
+diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
+index 1bdf2b43e49ea..3d5cd51071f04 100644
+--- a/tools/testing/selftests/bpf/verifier/calls.c
++++ b/tools/testing/selftests/bpf/verifier/calls.c
+@@ -442,7 +442,7 @@
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+- .errstr = "back-edge from insn 0 to 0",
++ .errstr = "the call stack of 9 frames is too deep",
+ .result = REJECT,
+ },
+ {
+@@ -799,7 +799,7 @@
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+- .errstr = "back-edge",
++ .errstr = "the call stack of 9 frames is too deep",
+ .result = REJECT,
+ },
+ {
+@@ -811,7 +811,7 @@
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+- .errstr = "back-edge",
++ .errstr = "the call stack of 9 frames is too deep",
+ .result = REJECT,
+ },
+ {
+diff --git a/tools/testing/selftests/bpf/verifier/ld_imm64.c b/tools/testing/selftests/bpf/verifier/ld_imm64.c
+index f9297900cea6d..78f19c255f20b 100644
+--- a/tools/testing/selftests/bpf/verifier/ld_imm64.c
++++ b/tools/testing/selftests/bpf/verifier/ld_imm64.c
+@@ -9,8 +9,8 @@
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+- .errstr = "invalid BPF_LD_IMM insn",
+- .errstr_unpriv = "R1 pointer comparison",
++ .errstr = "jump into the middle of ldimm64 insn 1",
++ .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
+ .result = REJECT,
+ },
+ {
+@@ -23,8 +23,8 @@
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+- .errstr = "invalid BPF_LD_IMM insn",
+- .errstr_unpriv = "R1 pointer comparison",
++ .errstr = "jump into the middle of ldimm64 insn 1",
++ .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
+ .result = REJECT,
+ },
+ {
+diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c
+index e60cf4da8fb07..1c61e3c022cb8 100644
+--- a/tools/testing/selftests/clone3/clone3.c
++++ b/tools/testing/selftests/clone3/clone3.c
+@@ -196,7 +196,12 @@ int main(int argc, char *argv[])
+ CLONE3_ARGS_NO_TEST);
+
+ /* Do a clone3() in a new time namespace */
+- test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
++ if (access("/proc/self/ns/time", F_OK) == 0) {
++ test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
++ } else {
++ ksft_print_msg("Time namespaces are not supported\n");
++ ksft_test_result_skip("Skipping clone3() with CLONE_NEWTIME\n");
++ }
+
+ /* Do a clone3() with exit signal (SIGCHLD) in flags */
+ test_clone3(SIGCHLD, 0, -EINVAL, CLONE3_ARGS_NO_TEST);
+diff --git a/tools/testing/selftests/efivarfs/create-read.c b/tools/testing/selftests/efivarfs/create-read.c
+index 9674a19396a32..7bc7af4eb2c17 100644
+--- a/tools/testing/selftests/efivarfs/create-read.c
++++ b/tools/testing/selftests/efivarfs/create-read.c
+@@ -32,8 +32,10 @@ int main(int argc, char **argv)
+ rc = read(fd, buf, sizeof(buf));
+ if (rc != 0) {
+ fprintf(stderr, "Reading a new var should return EOF\n");
++ close(fd);
+ return EXIT_FAILURE;
+ }
+
++ close(fd);
+ return EXIT_SUCCESS;
+ }
+diff --git a/tools/testing/selftests/lkdtm/config b/tools/testing/selftests/lkdtm/config
+index 5d52f64dfb430..7afe05e8c4d79 100644
+--- a/tools/testing/selftests/lkdtm/config
++++ b/tools/testing/selftests/lkdtm/config
+@@ -9,7 +9,6 @@ CONFIG_INIT_ON_FREE_DEFAULT_ON=y
+ CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+ CONFIG_UBSAN=y
+ CONFIG_UBSAN_BOUNDS=y
+-CONFIG_UBSAN_TRAP=y
+ CONFIG_STACKPROTECTOR_STRONG=y
+ CONFIG_SLUB_DEBUG=y
+ CONFIG_SLUB_DEBUG_ON=y
+diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
+index 607b8d7e3ea34..2f3a1b96da6e3 100644
+--- a/tools/testing/selftests/lkdtm/tests.txt
++++ b/tools/testing/selftests/lkdtm/tests.txt
+@@ -7,7 +7,7 @@ EXCEPTION
+ #EXHAUST_STACK Corrupts memory on failure
+ #CORRUPT_STACK Crashes entire system on success
+ #CORRUPT_STACK_STRONG Crashes entire system on success
+-ARRAY_BOUNDS
++ARRAY_BOUNDS call trace:|UBSAN: array-index-out-of-bounds
+ CORRUPT_LIST_ADD list_add corruption
+ CORRUPT_LIST_DEL list_del corruption
+ STACK_GUARD_PAGE_LEADING
+diff --git a/tools/testing/selftests/mm/mdwe_test.c b/tools/testing/selftests/mm/mdwe_test.c
+index bc91bef5d254e..0c5e469ae38fa 100644
+--- a/tools/testing/selftests/mm/mdwe_test.c
++++ b/tools/testing/selftests/mm/mdwe_test.c
+@@ -168,13 +168,10 @@ TEST_F(mdwe, mmap_FIXED)
+ self->p = mmap(NULL, self->size, PROT_READ, self->flags, 0, 0);
+ ASSERT_NE(self->p, MAP_FAILED);
+
+- p = mmap(self->p + self->size, self->size, PROT_READ | PROT_EXEC,
++ /* MAP_FIXED unmaps the existing page before mapping which is allowed */
++ p = mmap(self->p, self->size, PROT_READ | PROT_EXEC,
+ self->flags | MAP_FIXED, 0, 0);
+- if (variant->enabled) {
+- EXPECT_EQ(p, MAP_FAILED);
+- } else {
+- EXPECT_EQ(p, self->p);
+- }
++ EXPECT_EQ(p, self->p);
+ }
+
+ TEST_F(mdwe, arm64_BTI)
+diff --git a/tools/testing/selftests/net/af_unix/diag_uid.c b/tools/testing/selftests/net/af_unix/diag_uid.c
+index 5b88f7129fea4..79a3dd75590e8 100644
+--- a/tools/testing/selftests/net/af_unix/diag_uid.c
++++ b/tools/testing/selftests/net/af_unix/diag_uid.c
+@@ -148,7 +148,6 @@ void receive_response(struct __test_metadata *_metadata,
+ .msg_iov = &iov,
+ .msg_iovlen = 1
+ };
+- struct unix_diag_req *udr;
+ struct nlmsghdr *nlh;
+ int ret;
+
+diff --git a/tools/testing/selftests/net/cmsg_sender.c b/tools/testing/selftests/net/cmsg_sender.c
+index 24b21b15ed3fb..6ff3e732f449f 100644
+--- a/tools/testing/selftests/net/cmsg_sender.c
++++ b/tools/testing/selftests/net/cmsg_sender.c
+@@ -416,9 +416,9 @@ int main(int argc, char *argv[])
+ {
+ struct addrinfo hints, *ai;
+ struct iovec iov[1];
++ unsigned char *buf;
+ struct msghdr msg;
+ char cbuf[1024];
+- char *buf;
+ int err;
+ int fd;
+
+diff --git a/tools/testing/selftests/net/ipsec.c b/tools/testing/selftests/net/ipsec.c
+index 9a8229abfa026..be4a30a0d02ae 100644
+--- a/tools/testing/selftests/net/ipsec.c
++++ b/tools/testing/selftests/net/ipsec.c
+@@ -2263,7 +2263,7 @@ static int check_results(void)
+
+ int main(int argc, char **argv)
+ {
+- unsigned int nr_process = 1;
++ long nr_process = 1;
+ int route_sock = -1, ret = KSFT_SKIP;
+ int test_desc_fd[2];
+ uint32_t route_seq;
+@@ -2284,7 +2284,7 @@ int main(int argc, char **argv)
+ exit_usage(argv);
+ }
+
+- if (nr_process > MAX_PROCESSES || !nr_process) {
++ if (nr_process > MAX_PROCESSES || nr_process < 1) {
+ printk("nr_process should be between [1; %u]",
+ MAX_PROCESSES);
+ exit_usage(argv);
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+index c7f9ebeebc2c5..d2043ec3bf6d6 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -18,6 +18,7 @@
+
+ #include <sys/ioctl.h>
+ #include <sys/poll.h>
++#include <sys/random.h>
+ #include <sys/sendfile.h>
+ #include <sys/stat.h>
+ #include <sys/socket.h>
+@@ -1125,15 +1126,11 @@ again:
+
+ static void init_rng(void)
+ {
+- int fd = open("/dev/urandom", O_RDONLY);
+ unsigned int foo;
+
+- if (fd > 0) {
+- int ret = read(fd, &foo, sizeof(foo));
+-
+- if (ret < 0)
+- srand(fd + foo);
+- close(fd);
++ if (getrandom(&foo, sizeof(foo), 0) == -1) {
++ perror("getrandom");
++ exit(1);
+ }
+
+ srand(foo);
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_inq.c b/tools/testing/selftests/net/mptcp/mptcp_inq.c
+index 8672d898f8cda..218aac4673212 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_inq.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_inq.c
+@@ -18,6 +18,7 @@
+ #include <time.h>
+
+ #include <sys/ioctl.h>
++#include <sys/random.h>
+ #include <sys/socket.h>
+ #include <sys/types.h>
+ #include <sys/wait.h>
+@@ -519,15 +520,11 @@ static int client(int unixfd)
+
+ static void init_rng(void)
+ {
+- int fd = open("/dev/urandom", O_RDONLY);
+ unsigned int foo;
+
+- if (fd > 0) {
+- int ret = read(fd, &foo, sizeof(foo));
+-
+- if (ret < 0)
+- srand(fd + foo);
+- close(fd);
++ if (getrandom(&foo, sizeof(foo), 0) == -1) {
++ perror("getrandom");
++ exit(1);
+ }
+
+ srand(foo);
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index dc895b7b94e19..8eec7d2c1fc69 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3237,7 +3237,7 @@ fastclose_tests()
+ if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
+ test_linkfail=1024 fastclose=server \
+ run_tests $ns1 $ns2 10.0.1.1
+- chk_join_nr 0 0 0
++ chk_join_nr 0 0 0 0 0 0 1
+ chk_fclose_nr 1 1 invert
+ chk_rst_nr 1 1
+ fi
+@@ -3289,6 +3289,7 @@ userspace_pm_rm_sf_addr_ns1()
+ local addr=$1
+ local id=$2
+ local tk sp da dp
++ local cnt_addr cnt_sf
+
+ tk=$(grep "type:1," "$evts_ns1" |
+ sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
+@@ -3298,11 +3299,13 @@ userspace_pm_rm_sf_addr_ns1()
+ sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
+ dp=$(grep "type:10" "$evts_ns1" |
+ sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q')
++ cnt_addr=$(rm_addr_count ${ns1})
++ cnt_sf=$(rm_sf_count ${ns1})
+ ip netns exec $ns1 ./pm_nl_ctl rem token $tk id $id
+ ip netns exec $ns1 ./pm_nl_ctl dsf lip "::ffff:$addr" \
+ lport $sp rip $da rport $dp token $tk
+- wait_rm_addr $ns1 1
+- wait_rm_sf $ns1 1
++ wait_rm_addr $ns1 "${cnt_addr}"
++ wait_rm_sf $ns1 "${cnt_sf}"
+ }
+
+ userspace_pm_add_sf()
+@@ -3324,17 +3327,20 @@ userspace_pm_rm_sf_addr_ns2()
+ local addr=$1
+ local id=$2
+ local tk da dp sp
++ local cnt_addr cnt_sf
+
+ tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
+ da=$(sed -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evts_ns2")
+ dp=$(sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
+ sp=$(grep "type:10" "$evts_ns2" |
+ sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
++ cnt_addr=$(rm_addr_count ${ns2})
++ cnt_sf=$(rm_sf_count ${ns2})
+ ip netns exec $ns2 ./pm_nl_ctl rem token $tk id $id
+ ip netns exec $ns2 ./pm_nl_ctl dsf lip $addr lport $sp \
+ rip $da rport $dp token $tk
+- wait_rm_addr $ns2 1
+- wait_rm_sf $ns2 1
++ wait_rm_addr $ns2 "${cnt_addr}"
++ wait_rm_sf $ns2 "${cnt_sf}"
+ }
+
+ userspace_tests()
+@@ -3417,7 +3423,7 @@ userspace_tests()
+ continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ set_userspace_pm $ns1
+ pm_nl_set_limits $ns2 1 1
+- speed=10 \
++ speed=5 \
+ run_tests $ns1 $ns2 10.0.1.1 &
+ local tests_pid=$!
+ wait_mpj $ns1
+@@ -3438,7 +3444,7 @@ userspace_tests()
+ continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ set_userspace_pm $ns2
+ pm_nl_set_limits $ns1 0 1
+- speed=10 \
++ speed=5 \
+ run_tests $ns1 $ns2 10.0.1.1 &
+ local tests_pid=$!
+ wait_mpj $ns2
+diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
+index f838dd370f6af..b3b2dc5a630cf 100755
+--- a/tools/testing/selftests/net/pmtu.sh
++++ b/tools/testing/selftests/net/pmtu.sh
+@@ -2048,7 +2048,7 @@ run_test() {
+ case $ret in
+ 0)
+ all_skipped=false
+- [ $exitcode=$ksft_skip ] && exitcode=0
++ [ $exitcode -eq $ksft_skip ] && exitcode=0
+ ;;
+ $ksft_skip)
+ [ $all_skipped = true ] && exitcode=$ksft_skip
+diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
+index ef90aca4cc96a..bced422b78f72 100644
+--- a/tools/testing/selftests/netfilter/Makefile
++++ b/tools/testing/selftests/netfilter/Makefile
+@@ -7,7 +7,7 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
+ nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
+ ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
+ conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh \
+- conntrack_sctp_collision.sh
++ conntrack_sctp_collision.sh xt_string.sh
+
+ HOSTPKG_CONFIG := pkg-config
+
+diff --git a/tools/testing/selftests/netfilter/xt_string.sh b/tools/testing/selftests/netfilter/xt_string.sh
+new file mode 100755
+index 0000000000000..1802653a47287
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/xt_string.sh
+@@ -0,0 +1,128 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++
++# return code to signal skipped test
++ksft_skip=4
++rc=0
++
++if ! iptables --version >/dev/null 2>&1; then
++ echo "SKIP: Test needs iptables"
++ exit $ksft_skip
++fi
++if ! ip -V >/dev/null 2>&1; then
++ echo "SKIP: Test needs iproute2"
++ exit $ksft_skip
++fi
++if ! nc -h >/dev/null 2>&1; then
++ echo "SKIP: Test needs netcat"
++ exit $ksft_skip
++fi
++
++pattern="foo bar baz"
++patlen=11
++hdrlen=$((20 + 8)) # IPv4 + UDP
++ns="ns-$(mktemp -u XXXXXXXX)"
++trap 'ip netns del $ns' EXIT
++ip netns add "$ns"
++ip -net "$ns" link add d0 type dummy
++ip -net "$ns" link set d0 up
++ip -net "$ns" addr add 10.1.2.1/24 dev d0
++
++#ip netns exec "$ns" tcpdump -npXi d0 &
++#tcpdump_pid=$!
++#trap 'kill $tcpdump_pid; ip netns del $ns' EXIT
++
++add_rule() { # (alg, from, to)
++ ip netns exec "$ns" \
++ iptables -A OUTPUT -o d0 -m string \
++ --string "$pattern" --algo $1 --from $2 --to $3
++}
++showrules() { # ()
++ ip netns exec "$ns" iptables -v -S OUTPUT | grep '^-A'
++}
++zerorules() {
++ ip netns exec "$ns" iptables -Z OUTPUT
++}
++countrule() { # (pattern)
++ showrules | grep -c -- "$*"
++}
++send() { # (offset)
++ ( for ((i = 0; i < $1 - $hdrlen; i++)); do
++ printf " "
++ done
++ printf "$pattern"
++ ) | ip netns exec "$ns" nc -w 1 -u 10.1.2.2 27374
++}
++
++add_rule bm 1000 1500
++add_rule bm 1400 1600
++add_rule kmp 1000 1500
++add_rule kmp 1400 1600
++
++zerorules
++send 0
++send $((1000 - $patlen))
++if [ $(countrule -c 0 0) -ne 4 ]; then
++ echo "FAIL: rules match data before --from"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send 1000
++send $((1400 - $patlen))
++if [ $(countrule -c 2) -ne 2 ]; then
++ echo "FAIL: only two rules should match at low offset"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send $((1500 - $patlen))
++if [ $(countrule -c 1) -ne 4 ]; then
++ echo "FAIL: all rules should match at end of packet"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send 1495
++if [ $(countrule -c 1) -ne 1 ]; then
++ echo "FAIL: only kmp with proper --to should match pattern spanning fragments"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send 1500
++if [ $(countrule -c 1) -ne 2 ]; then
++ echo "FAIL: two rules should match pattern at start of second fragment"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send $((1600 - $patlen))
++if [ $(countrule -c 1) -ne 2 ]; then
++ echo "FAIL: two rules should match pattern at end of largest --to"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send $((1600 - $patlen + 1))
++if [ $(countrule -c 1) -ne 0 ]; then
++ echo "FAIL: no rules should match pattern extending largest --to"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send 1600
++if [ $(countrule -c 1) -ne 0 ]; then
++ echo "FAIL: no rule should match pattern past largest --to"
++ showrules
++ ((rc--))
++fi
++
++exit $rc
+diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+index 4e86f927880c3..01cc37bf611c3 100644
+--- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
++++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+@@ -62,7 +62,7 @@ static void error_report(struct error *err, const char *test_name)
+ break;
+
+ case PIDFD_PASS:
+- ksft_test_result_pass("%s test: Passed\n");
++ ksft_test_result_pass("%s test: Passed\n", test_name);
+ break;
+
+ default:
+diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
+index 00a07e7c571cd..c081ae91313aa 100644
+--- a/tools/testing/selftests/pidfd/pidfd_test.c
++++ b/tools/testing/selftests/pidfd/pidfd_test.c
+@@ -381,13 +381,13 @@ static int test_pidfd_send_signal_syscall_support(void)
+
+ static void *test_pidfd_poll_exec_thread(void *priv)
+ {
+- ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
++ ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
+ getpid(), syscall(SYS_gettid));
+ ksft_print_msg("Child Thread: doing exec of sleep\n");
+
+ execl("/bin/sleep", "sleep", str(CHILD_THREAD_MIN_WAIT), (char *)NULL);
+
+- ksft_print_msg("Child Thread: DONE. pid %d tid %d\n",
++ ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n",
+ getpid(), syscall(SYS_gettid));
+ return NULL;
+ }
+@@ -427,7 +427,7 @@ static int child_poll_exec_test(void *args)
+ {
+ pthread_t t1;
+
+- ksft_print_msg("Child (pidfd): starting. pid %d tid %d\n", getpid(),
++ ksft_print_msg("Child (pidfd): starting. pid %d tid %ld\n", getpid(),
+ syscall(SYS_gettid));
+ pthread_create(&t1, NULL, test_pidfd_poll_exec_thread, NULL);
+ /*
+@@ -480,10 +480,10 @@ static void test_pidfd_poll_exec(int use_waitpid)
+
+ static void *test_pidfd_poll_leader_exit_thread(void *priv)
+ {
+- ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
++ ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
+ getpid(), syscall(SYS_gettid));
+ sleep(CHILD_THREAD_MIN_WAIT);
+- ksft_print_msg("Child Thread: DONE. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
++ ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
+ return NULL;
+ }
+
+@@ -492,7 +492,7 @@ static int child_poll_leader_exit_test(void *args)
+ {
+ pthread_t t1, t2;
+
+- ksft_print_msg("Child: starting. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
++ ksft_print_msg("Child: starting. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
+ pthread_create(&t1, NULL, test_pidfd_poll_leader_exit_thread, NULL);
+ pthread_create(&t2, NULL, test_pidfd_poll_leader_exit_thread, NULL);
+
+diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
+index 5073dbc961258..2deac2031de9e 100644
+--- a/tools/testing/selftests/resctrl/Makefile
++++ b/tools/testing/selftests/resctrl/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+
+-CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
++CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2 -D_GNU_SOURCE
+ CFLAGS += $(KHDR_INCLUDES)
+
+ TEST_GEN_PROGS := resctrl_tests
+diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
+index d3cbb829ff6a7..a0318bd3a63d8 100644
+--- a/tools/testing/selftests/resctrl/cache.c
++++ b/tools/testing/selftests/resctrl/cache.c
+@@ -205,10 +205,11 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
+ * cache_val: execute benchmark and measure LLC occupancy resctrl
+ * and perf cache miss for the benchmark
+ * @param: parameters passed to cache_val()
++ * @span: buffer size for the benchmark
+ *
+ * Return: 0 on success. non-zero on failure.
+ */
+-int cat_val(struct resctrl_val_param *param)
++int cat_val(struct resctrl_val_param *param, size_t span)
+ {
+ int memflush = 1, operation = 0, ret = 0;
+ char *resctrl_val = param->resctrl_val;
+@@ -245,7 +246,7 @@ int cat_val(struct resctrl_val_param *param)
+ if (ret)
+ break;
+
+- if (run_fill_buf(param->span, memflush, operation, true)) {
++ if (run_fill_buf(span, memflush, operation, true)) {
+ fprintf(stderr, "Error-running fill buffer\n");
+ ret = -1;
+ goto pe_close;
+diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
+index 3848dfb46aba4..224ba8544d8af 100644
+--- a/tools/testing/selftests/resctrl/cat_test.c
++++ b/tools/testing/selftests/resctrl/cat_test.c
+@@ -41,7 +41,7 @@ static int cat_setup(struct resctrl_val_param *p)
+ return ret;
+ }
+
+-static int check_results(struct resctrl_val_param *param)
++static int check_results(struct resctrl_val_param *param, size_t span)
+ {
+ char *token_array[8], temp[512];
+ unsigned long sum_llc_perf_miss = 0;
+@@ -76,7 +76,7 @@ static int check_results(struct resctrl_val_param *param)
+ fclose(fp);
+ no_of_bits = count_bits(param->mask);
+
+- return show_cache_info(sum_llc_perf_miss, no_of_bits, param->span / 64,
++ return show_cache_info(sum_llc_perf_miss, no_of_bits, span / 64,
+ MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
+ get_vendor() == ARCH_INTEL, false);
+ }
+@@ -96,6 +96,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+ char cbm_mask[256];
+ int count_of_bits;
+ char pipe_message;
++ size_t span;
+
+ /* Get default cbm mask for L3/L2 cache */
+ ret = get_cbm_mask(cache_type, cbm_mask);
+@@ -140,7 +141,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+ /* Set param values for parent thread which will be allocated bitmask
+ * with (max_bits - n) bits
+ */
+- param.span = cache_size * (count_of_bits - n) / count_of_bits;
++ span = cache_size * (count_of_bits - n) / count_of_bits;
+ strcpy(param.ctrlgrp, "c2");
+ strcpy(param.mongrp, "m2");
+ strcpy(param.filename, RESULT_FILE_NAME2);
+@@ -162,23 +163,17 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+ param.mask = l_mask_1;
+ strcpy(param.ctrlgrp, "c1");
+ strcpy(param.mongrp, "m1");
+- param.span = cache_size * n / count_of_bits;
++ span = cache_size * n / count_of_bits;
+ strcpy(param.filename, RESULT_FILE_NAME1);
+ param.num_of_runs = 0;
+ param.cpu_no = sibling_cpu_no;
+- } else {
+- ret = signal_handler_register();
+- if (ret) {
+- kill(bm_pid, SIGKILL);
+- goto out;
+- }
+ }
+
+ remove(param.filename);
+
+- ret = cat_val(&param);
++ ret = cat_val(&param, span);
+ if (ret == 0)
+- ret = check_results(&param);
++ ret = check_results(&param, span);
+
+ if (bm_pid == 0) {
+ /* Tell parent that child is ready */
+@@ -208,10 +203,8 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+ }
+ close(pipefd[0]);
+ kill(bm_pid, SIGKILL);
+- signal_handler_unregister();
+ }
+
+-out:
+ cat_test_cleanup();
+
+ return ret;
+diff --git a/tools/testing/selftests/resctrl/cmt_test.c b/tools/testing/selftests/resctrl/cmt_test.c
+index cb2197647c6cd..50bdbce9fba95 100644
+--- a/tools/testing/selftests/resctrl/cmt_test.c
++++ b/tools/testing/selftests/resctrl/cmt_test.c
+@@ -27,7 +27,7 @@ static int cmt_setup(struct resctrl_val_param *p)
+ return 0;
+ }
+
+-static int check_results(struct resctrl_val_param *param, int no_of_bits)
++static int check_results(struct resctrl_val_param *param, size_t span, int no_of_bits)
+ {
+ char *token_array[8], temp[512];
+ unsigned long sum_llc_occu_resc = 0;
+@@ -58,7 +58,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
+ }
+ fclose(fp);
+
+- return show_cache_info(sum_llc_occu_resc, no_of_bits, param->span,
++ return show_cache_info(sum_llc_occu_resc, no_of_bits, span,
+ MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
+ true, true);
+ }
+@@ -68,16 +68,17 @@ void cmt_test_cleanup(void)
+ remove(RESULT_FILE_NAME);
+ }
+
+-int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
++int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd)
+ {
++ const char * const *cmd = benchmark_cmd;
++ const char *new_cmd[BENCHMARK_ARGS];
+ unsigned long cache_size = 0;
+ unsigned long long_mask;
++ char *span_str = NULL;
+ char cbm_mask[256];
+ int count_of_bits;
+- int ret;
+-
+- if (!validate_resctrl_feature_request(CMT_STR))
+- return -1;
++ size_t span;
++ int ret, i;
+
+ ret = get_cbm_mask("L3", cbm_mask);
+ if (ret)
+@@ -105,24 +106,36 @@ int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
+ .cpu_no = cpu_no,
+ .filename = RESULT_FILE_NAME,
+ .mask = ~(long_mask << n) & long_mask,
+- .span = cache_size * n / count_of_bits,
+ .num_of_runs = 0,
+ .setup = cmt_setup,
+ };
+
+- if (strcmp(benchmark_cmd[0], "fill_buf") == 0)
+- sprintf(benchmark_cmd[1], "%zu", param.span);
++ span = cache_size * n / count_of_bits;
++
++ if (strcmp(cmd[0], "fill_buf") == 0) {
++ /* Duplicate the command to be able to replace span in it */
++ for (i = 0; benchmark_cmd[i]; i++)
++ new_cmd[i] = benchmark_cmd[i];
++ new_cmd[i] = NULL;
++
++ ret = asprintf(&span_str, "%zu", span);
++ if (ret < 0)
++ return -1;
++ new_cmd[1] = span_str;
++ cmd = new_cmd;
++ }
+
+ remove(RESULT_FILE_NAME);
+
+- ret = resctrl_val(benchmark_cmd, &param);
++ ret = resctrl_val(cmd, &param);
+ if (ret)
+ goto out;
+
+- ret = check_results(&param, n);
++ ret = check_results(&param, span, n);
+
+ out:
+ cmt_test_cleanup();
++ free(span_str);
+
+ return ret;
+ }
+diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
+index 4d2f145804b83..d3bf4368341ec 100644
+--- a/tools/testing/selftests/resctrl/mba_test.c
++++ b/tools/testing/selftests/resctrl/mba_test.c
+@@ -12,7 +12,7 @@
+
+ #define RESULT_FILE_NAME "result_mba"
+ #define NUM_OF_RUNS 5
+-#define MAX_DIFF_PERCENT 5
++#define MAX_DIFF_PERCENT 8
+ #define ALLOCATION_MAX 100
+ #define ALLOCATION_MIN 10
+ #define ALLOCATION_STEP 10
+@@ -141,7 +141,7 @@ void mba_test_cleanup(void)
+ remove(RESULT_FILE_NAME);
+ }
+
+-int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
++int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd)
+ {
+ struct resctrl_val_param param = {
+ .resctrl_val = MBA_STR,
+@@ -149,7 +149,7 @@ int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
+ .mongrp = "m1",
+ .cpu_no = cpu_no,
+ .filename = RESULT_FILE_NAME,
+- .bw_report = bw_report,
++ .bw_report = "reads",
+ .setup = mba_setup
+ };
+ int ret;
+diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
+index c7de6f5977f69..d3c0d30c676a7 100644
+--- a/tools/testing/selftests/resctrl/mbm_test.c
++++ b/tools/testing/selftests/resctrl/mbm_test.c
+@@ -11,7 +11,7 @@
+ #include "resctrl.h"
+
+ #define RESULT_FILE_NAME "result_mbm"
+-#define MAX_DIFF_PERCENT 5
++#define MAX_DIFF_PERCENT 8
+ #define NUM_OF_RUNS 5
+
+ static int
+@@ -109,16 +109,15 @@ void mbm_test_cleanup(void)
+ remove(RESULT_FILE_NAME);
+ }
+
+-int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd)
++int mbm_bw_change(int cpu_no, const char * const *benchmark_cmd)
+ {
+ struct resctrl_val_param param = {
+ .resctrl_val = MBM_STR,
+ .ctrlgrp = "c1",
+ .mongrp = "m1",
+- .span = span,
+ .cpu_no = cpu_no,
+ .filename = RESULT_FILE_NAME,
+- .bw_report = bw_report,
++ .bw_report = "reads",
+ .setup = mbm_setup
+ };
+ int ret;
+@@ -129,7 +128,7 @@ int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd
+ if (ret)
+ goto out;
+
+- ret = check_results(span);
++ ret = check_results(DEFAULT_SPAN);
+
+ out:
+ mbm_test_cleanup();
+diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
+index 838d1a438f335..8578a8b4e1459 100644
+--- a/tools/testing/selftests/resctrl/resctrl.h
++++ b/tools/testing/selftests/resctrl/resctrl.h
+@@ -1,5 +1,4 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-#define _GNU_SOURCE
+ #ifndef RESCTRL_H
+ #define RESCTRL_H
+ #include <stdio.h>
+@@ -28,16 +27,16 @@
+ #define RESCTRL_PATH "/sys/fs/resctrl"
+ #define PHYS_ID_PATH "/sys/devices/system/cpu/cpu"
+ #define INFO_PATH "/sys/fs/resctrl/info"
+-#define L3_PATH "/sys/fs/resctrl/info/L3"
+-#define MB_PATH "/sys/fs/resctrl/info/MB"
+-#define L3_MON_PATH "/sys/fs/resctrl/info/L3_MON"
+-#define L3_MON_FEATURES_PATH "/sys/fs/resctrl/info/L3_MON/mon_features"
+
+ #define ARCH_INTEL 1
+ #define ARCH_AMD 2
+
+ #define END_OF_TESTS 1
+
++#define BENCHMARK_ARGS 64
++
++#define DEFAULT_SPAN (250 * MB)
++
+ #define PARENT_EXIT(err_msg) \
+ do { \
+ perror(err_msg); \
+@@ -52,7 +51,6 @@
+ * @ctrlgrp: Name of the control monitor group (con_mon grp)
+ * @mongrp: Name of the monitor group (mon grp)
+ * @cpu_no: CPU number to which the benchmark would be binded
+- * @span: Memory bytes accessed in each benchmark iteration
+ * @filename: Name of file to which the o/p should be written
+ * @bw_report: Bandwidth report type (reads vs writes)
+ * @setup: Call back function to setup test environment
+@@ -62,7 +60,6 @@ struct resctrl_val_param {
+ char ctrlgrp[64];
+ char mongrp[64];
+ int cpu_no;
+- size_t span;
+ char filename[64];
+ char *bw_report;
+ unsigned long mask;
+@@ -86,7 +83,7 @@ int get_resource_id(int cpu_no, int *resource_id);
+ int mount_resctrlfs(void);
+ int umount_resctrlfs(void);
+ int validate_bw_report_request(char *bw_report);
+-bool validate_resctrl_feature_request(const char *resctrl_val);
++bool validate_resctrl_feature_request(const char *resource, const char *feature);
+ char *fgrep(FILE *inf, const char *str);
+ int taskset_benchmark(pid_t bm_pid, int cpu_no);
+ void run_benchmark(int signum, siginfo_t *info, void *ucontext);
+@@ -97,21 +94,21 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
+ int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
+ int group_fd, unsigned long flags);
+ int run_fill_buf(size_t span, int memflush, int op, bool once);
+-int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param);
+-int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd);
++int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *param);
++int mbm_bw_change(int cpu_no, const char * const *benchmark_cmd);
+ void tests_cleanup(void);
+ void mbm_test_cleanup(void);
+-int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
++int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd);
+ void mba_test_cleanup(void);
+ int get_cbm_mask(char *cache_type, char *cbm_mask);
+ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
+ void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
+ int signal_handler_register(void);
+ void signal_handler_unregister(void);
+-int cat_val(struct resctrl_val_param *param);
++int cat_val(struct resctrl_val_param *param, size_t span);
+ void cat_test_cleanup(void);
+ int cat_perf_miss_val(int cpu_no, int no_of_bits, char *cache_type);
+-int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd);
++int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd);
+ unsigned int count_bits(unsigned long n);
+ void cmt_test_cleanup(void);
+ int get_core_sibling(int cpu_no);
+diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
+index d511daeb6851e..31373b69e675d 100644
+--- a/tools/testing/selftests/resctrl/resctrl_tests.c
++++ b/tools/testing/selftests/resctrl/resctrl_tests.c
+@@ -10,9 +10,6 @@
+ */
+ #include "resctrl.h"
+
+-#define BENCHMARK_ARGS 64
+-#define BENCHMARK_ARG_SIZE 64
+-
+ static int detect_vendor(void)
+ {
+ FILE *inf = fopen("/proc/cpuinfo", "r");
+@@ -70,72 +67,98 @@ void tests_cleanup(void)
+ cat_test_cleanup();
+ }
+
+-static void run_mbm_test(char **benchmark_cmd, size_t span,
+- int cpu_no, char *bw_report)
++static int test_prepare(void)
+ {
+ int res;
+
+- ksft_print_msg("Starting MBM BW change ...\n");
++ res = signal_handler_register();
++ if (res) {
++ ksft_print_msg("Failed to register signal handler\n");
++ return res;
++ }
+
+ res = mount_resctrlfs();
+ if (res) {
+- ksft_exit_fail_msg("Failed to mount resctrl FS\n");
++ signal_handler_unregister();
++ ksft_print_msg("Failed to mount resctrl FS\n");
++ return res;
++ }
++ return 0;
++}
++
++static void test_cleanup(void)
++{
++ umount_resctrlfs();
++ signal_handler_unregister();
++}
++
++static void run_mbm_test(const char * const *benchmark_cmd, int cpu_no)
++{
++ int res;
++
++ ksft_print_msg("Starting MBM BW change ...\n");
++
++ if (test_prepare()) {
++ ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
+ return;
+ }
+
+- if (!validate_resctrl_feature_request(MBM_STR) || (get_vendor() != ARCH_INTEL)) {
++ if (!validate_resctrl_feature_request("L3_MON", "mbm_total_bytes") ||
++ !validate_resctrl_feature_request("L3_MON", "mbm_local_bytes") ||
++ (get_vendor() != ARCH_INTEL)) {
+ ksft_test_result_skip("Hardware does not support MBM or MBM is disabled\n");
+- goto umount;
++ goto cleanup;
+ }
+
+- res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
++ res = mbm_bw_change(cpu_no, benchmark_cmd);
+ ksft_test_result(!res, "MBM: bw change\n");
+ if ((get_vendor() == ARCH_INTEL) && res)
+ ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
+
+-umount:
+- umount_resctrlfs();
++cleanup:
++ test_cleanup();
+ }
+
+-static void run_mba_test(char **benchmark_cmd, int cpu_no, char *bw_report)
++static void run_mba_test(const char * const *benchmark_cmd, int cpu_no)
+ {
+ int res;
+
+ ksft_print_msg("Starting MBA Schemata change ...\n");
+
+- res = mount_resctrlfs();
+- if (res) {
+- ksft_exit_fail_msg("Failed to mount resctrl FS\n");
++ if (test_prepare()) {
++ ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
+ return;
+ }
+
+- if (!validate_resctrl_feature_request(MBA_STR) || (get_vendor() != ARCH_INTEL)) {
++ if (!validate_resctrl_feature_request("MB", NULL) ||
++ !validate_resctrl_feature_request("L3_MON", "mbm_local_bytes") ||
++ (get_vendor() != ARCH_INTEL)) {
+ ksft_test_result_skip("Hardware does not support MBA or MBA is disabled\n");
+- goto umount;
++ goto cleanup;
+ }
+
+- res = mba_schemata_change(cpu_no, bw_report, benchmark_cmd);
++ res = mba_schemata_change(cpu_no, benchmark_cmd);
+ ksft_test_result(!res, "MBA: schemata change\n");
+
+-umount:
+- umount_resctrlfs();
++cleanup:
++ test_cleanup();
+ }
+
+-static void run_cmt_test(char **benchmark_cmd, int cpu_no)
++static void run_cmt_test(const char * const *benchmark_cmd, int cpu_no)
+ {
+ int res;
+
+ ksft_print_msg("Starting CMT test ...\n");
+
+- res = mount_resctrlfs();
+- if (res) {
+- ksft_exit_fail_msg("Failed to mount resctrl FS\n");
++ if (test_prepare()) {
++ ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
+ return;
+ }
+
+- if (!validate_resctrl_feature_request(CMT_STR)) {
++ if (!validate_resctrl_feature_request("L3_MON", "llc_occupancy") ||
++ !validate_resctrl_feature_request("L3", NULL)) {
+ ksft_test_result_skip("Hardware does not support CMT or CMT is disabled\n");
+- goto umount;
++ goto cleanup;
+ }
+
+ res = cmt_resctrl_val(cpu_no, 5, benchmark_cmd);
+@@ -143,8 +166,8 @@ static void run_cmt_test(char **benchmark_cmd, int cpu_no)
+ if ((get_vendor() == ARCH_INTEL) && res)
+ ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
+
+-umount:
+- umount_resctrlfs();
++cleanup:
++ test_cleanup();
+ }
+
+ static void run_cat_test(int cpu_no, int no_of_bits)
+@@ -153,33 +176,32 @@ static void run_cat_test(int cpu_no, int no_of_bits)
+
+ ksft_print_msg("Starting CAT test ...\n");
+
+- res = mount_resctrlfs();
+- if (res) {
+- ksft_exit_fail_msg("Failed to mount resctrl FS\n");
++ if (test_prepare()) {
++ ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
+ return;
+ }
+
+- if (!validate_resctrl_feature_request(CAT_STR)) {
++ if (!validate_resctrl_feature_request("L3", NULL)) {
+ ksft_test_result_skip("Hardware does not support CAT or CAT is disabled\n");
+- goto umount;
++ goto cleanup;
+ }
+
+ res = cat_perf_miss_val(cpu_no, no_of_bits, "L3");
+ ksft_test_result(!res, "CAT: test\n");
+
+-umount:
+- umount_resctrlfs();
++cleanup:
++ test_cleanup();
+ }
+
+ int main(int argc, char **argv)
+ {
+ bool has_ben = false, mbm_test = true, mba_test = true, cmt_test = true;
+- char *benchmark_cmd[BENCHMARK_ARGS], bw_report[64], bm_type[64];
+- char benchmark_cmd_area[BENCHMARK_ARGS][BENCHMARK_ARG_SIZE];
+ int c, cpu_no = 1, argc_new = argc, i, no_of_bits = 0;
++ const char *benchmark_cmd[BENCHMARK_ARGS];
+ int ben_ind, ben_count, tests = 0;
+- size_t span = 250 * MB;
++ char *span_str = NULL;
+ bool cat_test = true;
++ int ret;
+
+ for (i = 0; i < argc; i++) {
+ if (strcmp(argv[i], "-b") == 0) {
+@@ -255,28 +277,26 @@ int main(int argc, char **argv)
+ return ksft_exit_skip("Not running as root. Skipping...\n");
+
+ if (has_ben) {
++ if (argc - ben_ind >= BENCHMARK_ARGS)
++ ksft_exit_fail_msg("Too long benchmark command.\n");
++
+ /* Extract benchmark command from command line. */
+- for (i = ben_ind; i < argc; i++) {
+- benchmark_cmd[i - ben_ind] = benchmark_cmd_area[i];
+- sprintf(benchmark_cmd[i - ben_ind], "%s", argv[i]);
+- }
++ for (i = 0; i < argc - ben_ind; i++)
++ benchmark_cmd[i] = argv[i + ben_ind];
+ benchmark_cmd[ben_count] = NULL;
+ } else {
+ /* If no benchmark is given by "-b" argument, use fill_buf. */
+- for (i = 0; i < 5; i++)
+- benchmark_cmd[i] = benchmark_cmd_area[i];
+-
+- strcpy(benchmark_cmd[0], "fill_buf");
+- sprintf(benchmark_cmd[1], "%zu", span);
+- strcpy(benchmark_cmd[2], "1");
+- strcpy(benchmark_cmd[3], "0");
+- strcpy(benchmark_cmd[4], "false");
++ benchmark_cmd[0] = "fill_buf";
++ ret = asprintf(&span_str, "%u", DEFAULT_SPAN);
++ if (ret < 0)
++ ksft_exit_fail_msg("Out of memory!\n");
++ benchmark_cmd[1] = span_str;
++ benchmark_cmd[2] = "1";
++ benchmark_cmd[3] = "0";
++ benchmark_cmd[4] = "false";
+ benchmark_cmd[5] = NULL;
+ }
+
+- sprintf(bw_report, "reads");
+- sprintf(bm_type, "fill_buf");
+-
+ if (!check_resctrlfs_support())
+ return ksft_exit_skip("resctrl FS does not exist. Enable X86_CPU_RESCTRL config option.\n");
+
+@@ -288,10 +308,10 @@ int main(int argc, char **argv)
+ ksft_set_plan(tests ? : 4);
+
+ if (mbm_test)
+- run_mbm_test(benchmark_cmd, span, cpu_no, bw_report);
++ run_mbm_test(benchmark_cmd, cpu_no);
+
+ if (mba_test)
+- run_mba_test(benchmark_cmd, cpu_no, bw_report);
++ run_mba_test(benchmark_cmd, cpu_no);
+
+ if (cmt_test)
+ run_cmt_test(benchmark_cmd, cpu_no);
+@@ -299,5 +319,6 @@ int main(int argc, char **argv)
+ if (cat_test)
+ run_cat_test(cpu_no, no_of_bits);
+
++ free(span_str);
+ ksft_finished();
+ }
+diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
+index f0f6c5f6e98b9..b8ca6fa40b3bf 100644
+--- a/tools/testing/selftests/resctrl/resctrl_val.c
++++ b/tools/testing/selftests/resctrl/resctrl_val.c
+@@ -468,7 +468,9 @@ pid_t bm_pid, ppid;
+
+ void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
+ {
+- kill(bm_pid, SIGKILL);
++ /* Only kill child after bm_pid is set after fork() */
++ if (bm_pid)
++ kill(bm_pid, SIGKILL);
+ umount_resctrlfs();
+ tests_cleanup();
+ ksft_print_msg("Ending\n\n");
+@@ -482,9 +484,11 @@ void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
+ */
+ int signal_handler_register(void)
+ {
+- struct sigaction sigact;
++ struct sigaction sigact = {};
+ int ret = 0;
+
++ bm_pid = 0;
++
+ sigact.sa_sigaction = ctrlc_handler;
+ sigemptyset(&sigact.sa_mask);
+ sigact.sa_flags = SA_SIGINFO;
+@@ -504,7 +508,7 @@ int signal_handler_register(void)
+ */
+ void signal_handler_unregister(void)
+ {
+- struct sigaction sigact;
++ struct sigaction sigact = {};
+
+ sigact.sa_handler = SIG_DFL;
+ sigemptyset(&sigact.sa_mask);
+@@ -629,7 +633,7 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
+ *
+ * Return: 0 on success. non-zero on failure.
+ */
+-int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
++int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *param)
+ {
+ char *resctrl_val = param->resctrl_val;
+ unsigned long bw_resc_start = 0;
+@@ -706,28 +710,30 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+
+ ksft_print_msg("Benchmark PID: %d\n", bm_pid);
+
+- ret = signal_handler_register();
+- if (ret)
+- goto out;
+-
+- value.sival_ptr = benchmark_cmd;
++ /*
++ * The cast removes constness but nothing mutates benchmark_cmd within
++ * the context of this process. At the receiving process, it becomes
++ * argv, which is mutable, on exec() but that's after fork() so it
++ * doesn't matter for the process running the tests.
++ */
++ value.sival_ptr = (void *)benchmark_cmd;
+
+ /* Taskset benchmark to specified cpu */
+ ret = taskset_benchmark(bm_pid, param->cpu_no);
+ if (ret)
+- goto unregister;
++ goto out;
+
+ /* Write benchmark to specified control&monitoring grp in resctrl FS */
+ ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
+ resctrl_val);
+ if (ret)
+- goto unregister;
++ goto out;
+
+ if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+ !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+ ret = initialize_mem_bw_imc();
+ if (ret)
+- goto unregister;
++ goto out;
+
+ initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
+ param->cpu_no, resctrl_val);
+@@ -742,7 +748,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ sizeof(pipe_message)) {
+ perror("# failed reading message from child process");
+ close(pipefd[0]);
+- goto unregister;
++ goto out;
+ }
+ }
+ close(pipefd[0]);
+@@ -751,7 +757,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ if (sigqueue(bm_pid, SIGUSR1, value) == -1) {
+ perror("# sigqueue SIGUSR1 to child");
+ ret = errno;
+- goto unregister;
++ goto out;
+ }
+
+ /* Give benchmark enough time to fully run */
+@@ -780,8 +786,6 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ }
+ }
+
+-unregister:
+- signal_handler_unregister();
+ out:
+ kill(bm_pid, SIGKILL);
+
+diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
+index bd36ee2066020..3a8111362d262 100644
+--- a/tools/testing/selftests/resctrl/resctrlfs.c
++++ b/tools/testing/selftests/resctrl/resctrlfs.c
+@@ -8,6 +8,8 @@
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
++#include <limits.h>
++
+ #include "resctrl.h"
+
+ static int find_resctrl_mount(char *buffer)
+@@ -604,63 +606,46 @@ char *fgrep(FILE *inf, const char *str)
+
+ /*
+ * validate_resctrl_feature_request - Check if requested feature is valid.
+- * @resctrl_val: Requested feature
++ * @resource: Required resource (e.g., MB, L3, L2, L3_MON, etc.)
++ * @feature: Required monitor feature (in mon_features file). Can only be
++ * set for L3_MON. Must be NULL for all other resources.
+ *
+- * Return: True if the feature is supported, else false. False is also
+- * returned if resctrl FS is not mounted.
++ * Return: True if the resource/feature is supported, else false. False is
++ * also returned if resctrl FS is not mounted.
+ */
+-bool validate_resctrl_feature_request(const char *resctrl_val)
++bool validate_resctrl_feature_request(const char *resource, const char *feature)
+ {
++ char res_path[PATH_MAX];
+ struct stat statbuf;
+- bool found = false;
+ char *res;
+ FILE *inf;
+ int ret;
+
+- if (!resctrl_val)
++ if (!resource)
+ return false;
+
+ ret = find_resctrl_mount(NULL);
+ if (ret)
+ return false;
+
+- if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+- if (!stat(L3_PATH, &statbuf))
+- return true;
+- } else if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+- if (!stat(MB_PATH, &statbuf))
+- return true;
+- } else if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+- !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+- if (!stat(L3_MON_PATH, &statbuf)) {
+- inf = fopen(L3_MON_FEATURES_PATH, "r");
+- if (!inf)
+- return false;
+-
+- if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+- res = fgrep(inf, "llc_occupancy");
+- if (res) {
+- found = true;
+- free(res);
+- }
+- }
+-
+- if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
+- res = fgrep(inf, "mbm_total_bytes");
+- if (res) {
+- free(res);
+- res = fgrep(inf, "mbm_local_bytes");
+- if (res) {
+- found = true;
+- free(res);
+- }
+- }
+- }
+- fclose(inf);
+- }
+- }
++ snprintf(res_path, sizeof(res_path), "%s/%s", INFO_PATH, resource);
++
++ if (stat(res_path, &statbuf))
++ return false;
++
++ if (!feature)
++ return true;
++
++ snprintf(res_path, sizeof(res_path), "%s/%s/mon_features", INFO_PATH, resource);
++ inf = fopen(res_path, "r");
++ if (!inf)
++ return false;
++
++ res = fgrep(inf, feature);
++ free(res);
++ fclose(inf);
+
+- return found;
++ return !!res;
+ }
+
+ int filter_dmesg(void)
+diff --git a/tools/testing/selftests/x86/lam.c b/tools/testing/selftests/x86/lam.c
+index eb0e46905bf9d..8f9b06d9ce039 100644
+--- a/tools/testing/selftests/x86/lam.c
++++ b/tools/testing/selftests/x86/lam.c
+@@ -573,7 +573,7 @@ int do_uring(unsigned long lam)
+ char path[PATH_MAX] = {0};
+
+ /* get current process path */
+- if (readlink("/proc/self/exe", path, PATH_MAX) <= 0)
++ if (readlink("/proc/self/exe", path, PATH_MAX - 1) <= 0)
+ return 1;
+
+ int file_fd = open(path, O_RDONLY);
+@@ -680,14 +680,14 @@ static int handle_execve(struct testcases *test)
+ perror("Fork failed.");
+ ret = 1;
+ } else if (pid == 0) {
+- char path[PATH_MAX];
++ char path[PATH_MAX] = {0};
+
+ /* Set LAM mode in parent process */
+ if (set_lam(lam) != 0)
+ return 1;
+
+ /* Get current binary's path and the binary was run by execve */
+- if (readlink("/proc/self/exe", path, PATH_MAX) <= 0)
++ if (readlink("/proc/self/exe", path, PATH_MAX - 1) <= 0)
+ exit(-1);
+
+ /* run binary to get LAM mode and return to parent process */
+diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
+index 90718c2fd4ea9..5dc7767039f6f 100644
+--- a/tools/testing/vsock/vsock_test.c
++++ b/tools/testing/vsock/vsock_test.c
+@@ -392,11 +392,12 @@ static void test_stream_msg_peek_server(const struct test_opts *opts)
+ }
+
+ #define SOCK_BUF_SIZE (2 * 1024 * 1024)
+-#define MAX_MSG_SIZE (32 * 1024)
++#define MAX_MSG_PAGES 4
+
+ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
+ {
+ unsigned long curr_hash;
++ size_t max_msg_size;
+ int page_size;
+ int msg_count;
+ int fd;
+@@ -412,7 +413,8 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
+
+ curr_hash = 0;
+ page_size = getpagesize();
+- msg_count = SOCK_BUF_SIZE / MAX_MSG_SIZE;
++ max_msg_size = MAX_MSG_PAGES * page_size;
++ msg_count = SOCK_BUF_SIZE / max_msg_size;
+
+ for (int i = 0; i < msg_count; i++) {
+ ssize_t send_size;
+@@ -423,7 +425,7 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
+ /* Use "small" buffers and "big" buffers. */
+ if (i & 1)
+ buf_size = page_size +
+- (rand() % (MAX_MSG_SIZE - page_size));
++ (rand() % (max_msg_size - page_size));
+ else
+ buf_size = 1 + (rand() % page_size);
+
+@@ -479,7 +481,6 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
+ unsigned long remote_hash;
+ unsigned long curr_hash;
+ int fd;
+- char buf[MAX_MSG_SIZE];
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+
+@@ -507,8 +508,13 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
+ control_writeln("SRVREADY");
+ /* Wait, until peer sends whole data. */
+ control_expectln("SENDDONE");
+- iov.iov_base = buf;
+- iov.iov_len = sizeof(buf);
++ iov.iov_len = MAX_MSG_PAGES * getpagesize();
++ iov.iov_base = malloc(iov.iov_len);
++ if (!iov.iov_base) {
++ perror("malloc");
++ exit(EXIT_FAILURE);
++ }
++
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+@@ -533,6 +539,7 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
+ curr_hash += hash_djb2(msg.msg_iov[0].iov_base, recv_size);
+ }
+
++ free(iov.iov_base);
+ close(fd);
+ remote_hash = control_readulong();
+
+diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c
+index 623a38908ed5b..c769d7b3842c0 100644
+--- a/tools/tracing/rtla/src/utils.c
++++ b/tools/tracing/rtla/src/utils.c
+@@ -538,7 +538,7 @@ static const int find_mount(const char *fs, char *mp, int sizeof_mp)
+ {
+ char mount_point[MAX_PATH];
+ char type[100];
+- int found;
++ int found = 0;
+ FILE *fp;
+
+ fp = fopen("/proc/mounts", "r");