author     A. Wilcox <AWilcox@Wilcox-Tech.com>   2023-01-11 23:24:55 -0600
committer  A. Wilcox <AWilcox@Wilcox-Tech.com>   2023-01-11 23:24:55 -0600
commit     43f441f061afc988c04506a939d0f571b0e73fdb (patch)
tree       37b59d1c379d0967fe32ba0e433d65aa603749c7 /system
parent     fc4df5a4b79d327309147c6201fcb9eab1d8623a (diff)
download   packages-awilfox/aarch64-exp.tar.gz
           packages-awilfox/aarch64-exp.tar.bz2
           packages-awilfox/aarch64-exp.tar.xz
           packages-awilfox/aarch64-exp.zip
system/easy-kernel: aarch64 experiment (awilfox/aarch64-exp)

* Update to 6.1.3-mc0. This is UNOFFICIAL.
* Support M1 and Qualcomm SoCs.
Diffstat (limited to 'system')
-rw-r--r--  system/easy-kernel/0100-linux-6.1.3.patch                | 49142
-rw-r--r--  system/easy-kernel/0120-XATTR_USER_PREFIX.patch          |    33
-rw-r--r--  system/easy-kernel/0122-link-security-restrictions.patch |    29
-rw-r--r--  system/easy-kernel/0200-x86-compile.patch                |     4
-rw-r--r--  system/easy-kernel/0502-gcc9-kcflags.patch               |   188
-rw-r--r--  system/easy-kernel/1000-version.patch                    |    10
-rw-r--r--  system/easy-kernel/APKBUILD                              |    24
-rw-r--r--  system/easy-kernel/config-aarch64                        |  1774
8 files changed, 50562 insertions, 642 deletions
diff --git a/system/easy-kernel/0100-linux-6.1.3.patch b/system/easy-kernel/0100-linux-6.1.3.patch
new file mode 100644
index 000000000..c85462c45
--- /dev/null
+++ b/system/easy-kernel/0100-linux-6.1.3.patch
@@ -0,0 +1,49142 @@
+diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd
+index 8e2c2c405db22..3becc9a82bdf6 100644
+--- a/Documentation/ABI/stable/sysfs-driver-dma-idxd
++++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd
+@@ -22,6 +22,7 @@ Date: Oct 25, 2019
+ KernelVersion: 5.6.0
+ Contact: dmaengine@vger.kernel.org
+ Description: The largest number of work descriptors in a batch.
++ It's not visible when the device does not support batch.
+
+ What: /sys/bus/dsa/devices/dsa<m>/max_work_queues_size
+ Date: Oct 25, 2019
+@@ -49,6 +50,8 @@ Description: The total number of read buffers supported by this device.
+ The read buffers represent resources within the DSA
+ implementation, and these resources are allocated by engines to
+ support operations. See DSA spec v1.2 9.2.4 Total Read Buffers.
++ It's not visible when the device does not support Read Buffer
++ allocation control.
+
+ What: /sys/bus/dsa/devices/dsa<m>/max_transfer_size
+ Date: Oct 25, 2019
+@@ -122,6 +125,8 @@ Contact: dmaengine@vger.kernel.org
+ Description: The maximum number of read buffers that may be in use at
+ one time by operations that access low bandwidth memory in the
+ device. See DSA spec v1.2 9.2.8 GENCFG on Global Read Buffer Limit.
++ It's not visible when the device does not support Read Buffer
++ allocation control.
+
+ What: /sys/bus/dsa/devices/dsa<m>/cmd_status
+ Date: Aug 28, 2020
+@@ -205,6 +210,7 @@ KernelVersion: 5.10.0
+ Contact: dmaengine@vger.kernel.org
+ Description: The max batch size for this workqueue. Cannot exceed device
+ max batch size. Configurable parameter.
++ It's not visible when the device does not support batch.
+
+ What: /sys/bus/dsa/devices/wq<m>.<n>/ats_disable
+ Date: Nov 13, 2020
+@@ -250,6 +256,8 @@ KernelVersion: 5.17.0
+ Contact: dmaengine@vger.kernel.org
+ Description: Enable the use of global read buffer limit for the group. See DSA
+ spec v1.2 9.2.18 GRPCFG Use Global Read Buffer Limit.
++ It's not visible when the device does not support Read Buffer
++ allocation control.
+
+ What: /sys/bus/dsa/devices/group<m>.<n>/read_buffers_allowed
+ Date: Dec 10, 2021
+@@ -258,6 +266,8 @@ Contact: dmaengine@vger.kernel.org
+ Description: Indicates max number of read buffers that may be in use at one time
+ by all engines in the group. See DSA spec v1.2 9.2.18 GRPCFG Read
+ Buffers Allowed.
++ It's not visible when the device does not support Read Buffer
++ allocation control.
+
+ What: /sys/bus/dsa/devices/group<m>.<n>/read_buffers_reserved
+ Date: Dec 10, 2021
+@@ -266,6 +276,8 @@ Contact: dmaengine@vger.kernel.org
+ Description: Indicates the number of Read Buffers reserved for the use of
+ engines in the group. See DSA spec v1.2 9.2.18 GRPCFG Read Buffers
+ Reserved.
++ It's not visible when the device does not support Read Buffer
++ allocation control.
+
+ What: /sys/bus/dsa/devices/group<m>.<n>/desc_progress_limit
+ Date: Sept 14, 2022
+diff --git a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor
+index d76cd3946434d..e9ef69aef20b1 100644
+--- a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor
++++ b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor
+@@ -5,6 +5,9 @@ Contact: linux-mtd@lists.infradead.org
+ Description: (RO) The JEDEC ID of the SPI NOR flash as reported by the
+ flash device.
+
++ The attribute is not present if the flash doesn't support
++ the "Read JEDEC ID" command (9Fh). This is the case for
++ non-JEDEC compliant flashes.
+
+ What: /sys/bus/spi/devices/.../spi-nor/manufacturer
+ Date: April 2021
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index 98d1b198b2b4c..c2c64c1b706ff 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1314,6 +1314,29 @@ watchdog work to be queued by the watchdog timer function, otherwise the NMI
+ watchdog — if enabled — can detect a hard lockup condition.
+
+
++split_lock_mitigate (x86 only)
++==============================
++
++On x86, each "split lock" imposes a system-wide performance penalty. On larger
++systems, large numbers of split locks from unprivileged users can result in
++denials of service to well-behaved and potentially more important users.
++
++The kernel mitigates these bad users by detecting split locks and imposing
++penalties: forcing them to wait and only allowing one core to execute split
++locks at a time.
++
++These mitigations can make those bad applications unbearably slow. Setting
++split_lock_mitigate=0 may restore some application performance, but will also
++increase system exposure to denial of service attacks from split lock users.
++
++= ===================================================================
++0 Disable the mitigation mode - just warns the split lock on kernel log
++ and exposes the system to denials of service from the split lockers.
++1 Enable the mitigation mode (this is the default) - penalizes the split
++ lockers with intentional performance degradation.
++= ===================================================================
++
++
+ stack_erasing
+ =============
+
+diff --git a/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml b/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
+index 02e605fac408d..9ddba7f2e7aa6 100644
+--- a/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
++++ b/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
+@@ -473,9 +473,6 @@ patternProperties:
+ Specifies whether the event is to be interpreted as a key (1)
+ or a switch (5).
+
+- required:
+- - linux,code
+-
+ additionalProperties: false
+
+ dependencies:
+@@ -501,7 +498,7 @@ patternProperties:
+
+ azoteq,slider-size:
+ $ref: /schemas/types.yaml#/definitions/uint32
+- minimum: 0
++ minimum: 1
+ maximum: 65535
+ description:
+ Specifies the slider's one-dimensional resolution, equal to the
+@@ -575,9 +572,9 @@ patternProperties:
+ linux,code: true
+
+ azoteq,gesture-max-ms:
+- multipleOf: 4
++ multipleOf: 16
+ minimum: 0
+- maximum: 1020
++ maximum: 4080
+ description:
+ Specifies the length of time (in ms) within which a tap, swipe
+ or flick gesture must be completed in order to be acknowledged
+@@ -585,9 +582,9 @@ patternProperties:
+ gesture applies to all remaining swipe or flick gestures.
+
+ azoteq,gesture-min-ms:
+- multipleOf: 4
++ multipleOf: 16
+ minimum: 0
+- maximum: 124
++ maximum: 496
+ description:
+ Specifies the length of time (in ms) for which a tap gesture must
+ be held in order to be acknowledged by the device.
+@@ -620,9 +617,6 @@ patternProperties:
+ GPIO, they must all be of the same type (proximity, touch or
+ slider gesture).
+
+- required:
+- - linux,code
+-
+ additionalProperties: false
+
+ required:
+@@ -693,6 +687,7 @@ allOf:
+ properties:
+ azoteq,slider-size:
+ multipleOf: 16
++ minimum: 16
+ maximum: 4080
+
+ azoteq,top-speed:
+@@ -935,14 +930,14 @@ examples:
+
+ event-tap {
+ linux,code = <KEY_PLAYPAUSE>;
+- azoteq,gesture-max-ms = <600>;
+- azoteq,gesture-min-ms = <24>;
++ azoteq,gesture-max-ms = <400>;
++ azoteq,gesture-min-ms = <32>;
+ };
+
+ event-flick-pos {
+ linux,code = <KEY_NEXTSONG>;
+- azoteq,gesture-max-ms = <600>;
+- azoteq,gesture-dist = <816>;
++ azoteq,gesture-max-ms = <800>;
++ azoteq,gesture-dist = <800>;
+ };
+
+ event-flick-neg {
+diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
+index 6a3e3ede1ede7..777f2da52f1ed 100644
+--- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
++++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
+@@ -98,6 +98,10 @@ properties:
+ type: object
+ $ref: /schemas/regulator/qcom,spmi-regulator.yaml#
+
++ pwm:
++ type: object
++ $ref: /schemas/leds/leds-qcom-lpg.yaml#
++
+ patternProperties:
+ "^adc@[0-9a-f]+$":
+ type: object
+@@ -123,10 +127,6 @@ patternProperties:
+ type: object
+ $ref: /schemas/power/reset/qcom,pon.yaml#
+
+- "pwm@[0-9a-f]+$":
+- type: object
+- $ref: /schemas/leds/leds-qcom-lpg.yaml#
+-
+ "^rtc@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/rtc/qcom-pm8xxx-rtc.yaml#
+diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
+index 376e739bcad40..49b4f7a32e71e 100644
+--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
++++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
+@@ -14,9 +14,6 @@ description: |+
+ This PCIe host controller is based on the Synopsys DesignWare PCIe IP
+ and thus inherits all the common properties defined in snps,dw-pcie.yaml.
+
+-allOf:
+- - $ref: /schemas/pci/snps,dw-pcie.yaml#
+-
+ properties:
+ compatible:
+ enum:
+@@ -61,7 +58,7 @@ properties:
+ - const: pcie
+ - const: pcie_bus
+ - const: pcie_phy
+- - const: pcie_inbound_axi for imx6sx-pcie, pcie_aux for imx8mq-pcie
++ - enum: [ pcie_inbound_axi, pcie_aux ]
+
+ num-lanes:
+ const: 1
+@@ -175,6 +172,47 @@ required:
+ - clocks
+ - clock-names
+
++allOf:
++ - $ref: /schemas/pci/snps,dw-pcie.yaml#
++ - if:
++ properties:
++ compatible:
++ contains:
++ const: fsl,imx6sx-pcie
++ then:
++ properties:
++ clock-names:
++ items:
++ - {}
++ - {}
++ - {}
++ - const: pcie_inbound_axi
++ - if:
++ properties:
++ compatible:
++ contains:
++ const: fsl,imx8mq-pcie
++ then:
++ properties:
++ clock-names:
++ items:
++ - {}
++ - {}
++ - {}
++ - const: pcie_aux
++ - if:
++ properties:
++ compatible:
++ not:
++ contains:
++ enum:
++ - fsl,imx6sx-pcie
++ - fsl,imx8mq-pcie
++ then:
++ properties:
++ clock-names:
++ maxItems: 3
++
+ unevaluatedProperties: false
+
+ examples:
+diff --git a/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml b/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml
+index 48ed227fc5b9e..53da2edd7c9ab 100644
+--- a/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml
++++ b/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml
+@@ -36,7 +36,7 @@ properties:
+ - const: mpu
+
+ interrupts:
+- maxItems: 1
++ maxItems: 2
+
+ clocks:
+ items:
+@@ -94,8 +94,9 @@ examples:
+ #interrupt-cells = <1>;
+ ranges = <0x81000000 0 0x40000000 0 0x40000000 0 0x00010000>,
+ <0x82000000 0 0x50000000 0 0x50000000 0 0x20000000>;
+- interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+- interrupt-names = "intr";
++ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "msi", "intr";
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map =
+ <0 0 0 1 &gic GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH
+diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml
+index 89b8f3dd67a19..3342847dcb19a 100644
+--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml
++++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml
+@@ -87,6 +87,8 @@ patternProperties:
+ "wifi_led" "led" 1, 2
+ "i2c" "i2c" 3, 4
+ "uart1_0" "uart" 7, 8, 9, 10
++ "uart1_rx_tx" "uart" 42, 43
++ "uart1_cts_rts" "uart" 44, 45
+ "pcie_clk" "pcie" 9
+ "pcie_wake" "pcie" 10
+ "spi1_0" "spi" 11, 12, 13, 14
+@@ -98,9 +100,11 @@ patternProperties:
+ "emmc_45" "emmc" 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32
+ "spi1_1" "spi" 23, 24, 25, 26
+- "uart1_2" "uart" 29, 30, 31, 32
++ "uart1_2_rx_tx" "uart" 29, 30
++ "uart1_2_cts_rts" "uart" 31, 32
+ "uart1_1" "uart" 23, 24, 25, 26
+- "uart2_0" "uart" 29, 30, 31, 32
++ "uart2_0_rx_tx" "uart" 29, 30
++ "uart2_0_cts_rts" "uart" 31, 32
+ "spi0" "spi" 33, 34, 35, 36
+ "spi0_wp_hold" "spi" 37, 38
+ "uart1_3_rx_tx" "uart" 35, 36
+@@ -157,7 +161,7 @@ patternProperties:
+ then:
+ properties:
+ groups:
+- enum: [emmc, emmc_rst]
++ enum: [emmc_45, emmc_51]
+ - if:
+ properties:
+ function:
+@@ -221,8 +225,12 @@ patternProperties:
+ then:
+ properties:
+ groups:
+- enum: [uart1_0, uart1_1, uart1_2, uart1_3_rx_tx,
+- uart1_3_cts_rts, uart2_0, uart2_1, uart0, uart1, uart2]
++ items:
++ enum: [uart1_0, uart1_rx_tx, uart1_cts_rts, uart1_1,
++ uart1_2_rx_tx, uart1_2_cts_rts, uart1_3_rx_tx,
++ uart1_3_cts_rts, uart2_0_rx_tx, uart2_0_cts_rts,
++ uart2_1, uart0, uart1, uart2]
++ maxItems: 2
+ - if:
+ properties:
+ function:
+@@ -356,6 +364,27 @@ examples:
+ interrupt-parent = <&gic>;
+ #interrupt-cells = <2>;
+
++ pcie_pins: pcie-pins {
++ mux {
++ function = "pcie";
++ groups = "pcie_clk", "pcie_wake", "pcie_pereset";
++ };
++ };
++
++ pwm_pins: pwm-pins {
++ mux {
++ function = "pwm";
++ groups = "pwm0", "pwm1_0";
++ };
++ };
++
++ spi0_pins: spi0-pins {
++ mux {
++ function = "spi";
++ groups = "spi0", "spi0_wp_hold";
++ };
++ };
++
+ uart1_pins: uart1-pins {
+ mux {
+ function = "uart";
+@@ -363,6 +392,13 @@ examples:
+ };
+ };
+
++ uart1_3_pins: uart1-3-pins {
++ mux {
++ function = "uart";
++ groups = "uart1_3_rx_tx", "uart1_3_cts_rts";
++ };
++ };
++
+ uart2_pins: uart2-pins {
+ mux {
+ function = "uart";
+diff --git a/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml b/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml
+index a7fae1772a81b..cd8e9a8907f84 100644
+--- a/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml
++++ b/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml
+@@ -30,7 +30,9 @@ properties:
+ maxItems: 1
+
+ "#pwm-cells":
+- const: 2
++ enum: [2, 3]
++ description:
++ The only flag supported by the controller is PWM_POLARITY_INVERTED.
+
+ microchip,sync-update-mask:
+ description: |
+diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
+index 5d6ea66a863fe..1f75feec3dec6 100644
+--- a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
++++ b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
+@@ -109,7 +109,7 @@ audio-codec@1{
+ reg = <1 0>;
+ interrupts = <&msmgpio 54 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "intr2"
+- reset-gpios = <&msmgpio 64 0>;
++ reset-gpios = <&msmgpio 64 GPIO_ACTIVE_LOW>;
+ slim-ifc-dev = <&wc9335_ifd>;
+ clock-names = "mclk", "native";
+ clocks = <&rpmcc RPM_SMD_DIV_CLK1>,
+diff --git a/Documentation/devicetree/bindings/sound/rt5682.txt b/Documentation/devicetree/bindings/sound/rt5682.txt
+index c5f2b8febceec..6b87db68337c2 100644
+--- a/Documentation/devicetree/bindings/sound/rt5682.txt
++++ b/Documentation/devicetree/bindings/sound/rt5682.txt
+@@ -46,7 +46,7 @@ Optional properties:
+
+ - realtek,dmic-clk-driving-high : Set the high driving of the DMIC clock out.
+
+-- #sound-dai-cells: Should be set to '<0>'.
++- #sound-dai-cells: Should be set to '<1>'.
+
+ Pins on the device (for linking into audio routes) for RT5682:
+
+diff --git a/Documentation/driver-api/spi.rst b/Documentation/driver-api/spi.rst
+index f64cb666498aa..f28887045049d 100644
+--- a/Documentation/driver-api/spi.rst
++++ b/Documentation/driver-api/spi.rst
+@@ -25,8 +25,8 @@ hardware, which may be as simple as a set of GPIO pins or as complex as
+ a pair of FIFOs connected to dual DMA engines on the other side of the
+ SPI shift register (maximizing throughput). Such drivers bridge between
+ whatever bus they sit on (often the platform bus) and SPI, and expose
+-the SPI side of their device as a :c:type:`struct spi_master
+-<spi_master>`. SPI devices are children of that master,
++the SPI side of their device as a :c:type:`struct spi_controller
++<spi_controller>`. SPI devices are children of that master,
+ represented as a :c:type:`struct spi_device <spi_device>` and
+ manufactured from :c:type:`struct spi_board_info
+ <spi_board_info>` descriptors which are usually provided by
+diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst
+index 17779a2772e51..5f6454b9dbd4d 100644
+--- a/Documentation/fault-injection/fault-injection.rst
++++ b/Documentation/fault-injection/fault-injection.rst
+@@ -83,9 +83,7 @@ configuration of fault-injection capabilities.
+ - /sys/kernel/debug/fail*/times:
+
+ specifies how many times failures may happen at most. A value of -1
+- means "no limit". Note, though, that this file only accepts unsigned
+- values. So, if you want to specify -1, you better use 'printf' instead
+- of 'echo', e.g.: $ printf %#x -1 > times
++ means "no limit".
+
+ - /sys/kernel/debug/fail*/space:
+
+@@ -284,7 +282,7 @@ Application Examples
+ echo Y > /sys/kernel/debug/$FAILTYPE/task-filter
+ echo 10 > /sys/kernel/debug/$FAILTYPE/probability
+ echo 100 > /sys/kernel/debug/$FAILTYPE/interval
+- printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times
++ echo -1 > /sys/kernel/debug/$FAILTYPE/times
+ echo 0 > /sys/kernel/debug/$FAILTYPE/space
+ echo 2 > /sys/kernel/debug/$FAILTYPE/verbose
+ echo Y > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait
+@@ -338,7 +336,7 @@ Application Examples
+ echo N > /sys/kernel/debug/$FAILTYPE/task-filter
+ echo 10 > /sys/kernel/debug/$FAILTYPE/probability
+ echo 100 > /sys/kernel/debug/$FAILTYPE/interval
+- printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times
++ echo -1 > /sys/kernel/debug/$FAILTYPE/times
+ echo 0 > /sys/kernel/debug/$FAILTYPE/space
+ echo 2 > /sys/kernel/debug/$FAILTYPE/verbose
+ echo Y > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait
+@@ -369,7 +367,7 @@ Application Examples
+ echo N > /sys/kernel/debug/$FAILTYPE/task-filter
+ echo 100 > /sys/kernel/debug/$FAILTYPE/probability
+ echo 0 > /sys/kernel/debug/$FAILTYPE/interval
+- printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times
++ echo -1 > /sys/kernel/debug/$FAILTYPE/times
+ echo 0 > /sys/kernel/debug/$FAILTYPE/space
+ echo 1 > /sys/kernel/debug/$FAILTYPE/verbose
+
+diff --git a/Documentation/security/keys/trusted-encrypted.rst b/Documentation/security/keys/trusted-encrypted.rst
+index 0bfb4c3397489..9bc9db8ec6517 100644
+--- a/Documentation/security/keys/trusted-encrypted.rst
++++ b/Documentation/security/keys/trusted-encrypted.rst
+@@ -350,7 +350,8 @@ Load an encrypted key "evm" from saved blob::
+
+ Instantiate an encrypted key "evm" using user-provided decrypted data::
+
+- $ keyctl add encrypted evm "new default user:kmk 32 `cat evm_decrypted_data.blob`" @u
++ $ evmkey=$(dd if=/dev/urandom bs=1 count=32 | xxd -c32 -p)
++ $ keyctl add encrypted evm "new default user:kmk 32 $evmkey" @u
+ 794890253
+
+ $ keyctl print 794890253
+diff --git a/Documentation/trace/kprobes.rst b/Documentation/trace/kprobes.rst
+index 48cf778a24680..fc7ce76eab655 100644
+--- a/Documentation/trace/kprobes.rst
++++ b/Documentation/trace/kprobes.rst
+@@ -131,8 +131,7 @@ For example, if the function is non-recursive and is called with a
+ spinlock held, maxactive = 1 should be enough. If the function is
+ non-recursive and can never relinquish the CPU (e.g., via a semaphore
+ or preemption), NR_CPUS should be enough. If maxactive <= 0, it is
+-set to a default value. If CONFIG_PREEMPT is enabled, the default
+-is max(10, 2*NR_CPUS). Otherwise, the default is NR_CPUS.
++set to a default value: max(10, 2*NR_CPUS).
+
+ It's not a disaster if you set maxactive too low; you'll just miss
+ some probes. In the kretprobe struct, the nmissed field is set to
+diff --git a/Makefile b/Makefile
+index 997b677222920..a69d14983a489 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 0
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 8f138e580d1ae..81599f5c17b0f 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -635,7 +635,7 @@ config ARCH_SUPPORTS_SHADOW_CALL_STACK
+ config SHADOW_CALL_STACK
+ bool "Shadow Call Stack"
+ depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
+- depends on DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
++ depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
+ help
+ This option enables the compiler's Shadow Call Stack, which
+ uses a shadow stack to protect function return addresses from
+diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
+index fdc485d7787a6..084c27cb0c707 100644
+--- a/arch/alpha/include/asm/thread_info.h
++++ b/arch/alpha/include/asm/thread_info.h
+@@ -75,7 +75,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
+
+ /* Work to do on interrupt/exception return. */
+ #define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+- _TIF_NOTIFY_RESUME)
++ _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL)
+
+ /* Work to do on any return to userspace. */
+ #define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \
+diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
+index e227f3a29a43c..c41a5a9c3b9f2 100644
+--- a/arch/alpha/kernel/entry.S
++++ b/arch/alpha/kernel/entry.S
+@@ -469,8 +469,10 @@ entSys:
+ #ifdef CONFIG_AUDITSYSCALL
+ lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ and $3, $6, $3
+-#endif
+ bne $3, strace
++#else
++ blbs $3, strace /* check for SYSCALL_TRACE in disguise */
++#endif
+ beq $4, 1f
+ ldq $27, 0($5)
+ 1: jsr $26, ($27), sys_ni_syscall
+diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
+index 9dc928859ad33..2013a5ccecd31 100644
+--- a/arch/arm/boot/dts/armada-370.dtsi
++++ b/arch/arm/boot/dts/armada-370.dtsi
+@@ -84,7 +84,7 @@
+
+ pcie2: pcie@2,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x80000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
+index 929deaf312a55..c310ef26d1cce 100644
+--- a/arch/arm/boot/dts/armada-375.dtsi
++++ b/arch/arm/boot/dts/armada-375.dtsi
+@@ -592,7 +592,7 @@
+
+ pcie1: pcie@2,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi
+index ce1dddb2269b0..e94f22b0e9b5e 100644
+--- a/arch/arm/boot/dts/armada-380.dtsi
++++ b/arch/arm/boot/dts/armada-380.dtsi
+@@ -89,7 +89,7 @@
+ /* x1 port */
+ pcie@2,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -118,7 +118,7 @@
+ /* x1 port */
+ pcie@3,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>;
+ reg = <0x1800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts
+index 72ac807cae259..0c1f238e4c306 100644
+--- a/arch/arm/boot/dts/armada-385-turris-omnia.dts
++++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts
+@@ -23,6 +23,12 @@
+ stdout-path = &uart0;
+ };
+
++ aliases {
++ ethernet0 = &eth0;
++ ethernet1 = &eth1;
++ ethernet2 = &eth2;
++ };
++
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x40000000>; /* 1024 MB */
+@@ -483,7 +489,17 @@
+ };
+ };
+
+- /* port 6 is connected to eth0 */
++ ports@6 {
++ reg = <6>;
++ label = "cpu";
++ ethernet = <&eth0>;
++ phy-mode = "rgmii-id";
++
++ fixed-link {
++ speed = <1000>;
++ full-duplex;
++ };
++ };
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi
+index 83392b92dae28..be8d607c59b21 100644
+--- a/arch/arm/boot/dts/armada-385.dtsi
++++ b/arch/arm/boot/dts/armada-385.dtsi
+@@ -93,7 +93,7 @@
+ /* x1 port */
+ pcie2: pcie@2,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -121,7 +121,7 @@
+ /* x1 port */
+ pcie3: pcie@3,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>;
+ reg = <0x1800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -152,7 +152,7 @@
+ */
+ pcie4: pcie@4,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
++ assigned-addresses = <0x82002000 0 0x48000 0 0x2000>;
+ reg = <0x2000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
+index 923b035a3ab38..9d1cac49c022f 100644
+--- a/arch/arm/boot/dts/armada-39x.dtsi
++++ b/arch/arm/boot/dts/armada-39x.dtsi
+@@ -463,7 +463,7 @@
+ /* x1 port */
+ pcie@2,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -492,7 +492,7 @@
+ /* x1 port */
+ pcie@3,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>;
+ reg = <0x1800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -524,7 +524,7 @@
+ */
+ pcie@4,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
++ assigned-addresses = <0x82002000 0 0x48000 0 0x2000>;
+ reg = <0x2000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+index bf9360f41e0a6..5ea9d509cd308 100644
+--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
++++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+@@ -107,7 +107,7 @@
+
+ pcie2: pcie@2,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -135,7 +135,7 @@
+
+ pcie3: pcie@3,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
++ assigned-addresses = <0x82001800 0 0x48000 0 0x2000>;
+ reg = <0x1800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -163,7 +163,7 @@
+
+ pcie4: pcie@4,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>;
++ assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>;
+ reg = <0x2000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -191,7 +191,7 @@
+
+ pcie5: pcie@5,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
++ assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
+ reg = <0x2800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+index 0714af52e6075..6c6fbb9faf5ac 100644
+--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
++++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+@@ -122,7 +122,7 @@
+
+ pcie2: pcie@2,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -150,7 +150,7 @@
+
+ pcie3: pcie@3,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
++ assigned-addresses = <0x82001800 0 0x48000 0 0x2000>;
+ reg = <0x1800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -178,7 +178,7 @@
+
+ pcie4: pcie@4,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>;
++ assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>;
+ reg = <0x2000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -206,7 +206,7 @@
+
+ pcie5: pcie@5,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
++ assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
+ reg = <0x2800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -234,7 +234,7 @@
+
+ pcie6: pcie@6,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x84000 0 0x2000>;
++ assigned-addresses = <0x82003000 0 0x84000 0 0x2000>;
+ reg = <0x3000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -262,7 +262,7 @@
+
+ pcie7: pcie@7,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x88000 0 0x2000>;
++ assigned-addresses = <0x82003800 0 0x88000 0 0x2000>;
+ reg = <0x3800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -290,7 +290,7 @@
+
+ pcie8: pcie@8,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>;
++ assigned-addresses = <0x82004000 0 0x8c000 0 0x2000>;
+ reg = <0x4000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -318,7 +318,7 @@
+
+ pcie9: pcie@9,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
++ assigned-addresses = <0x82004800 0 0x42000 0 0x2000>;
+ reg = <0x4800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
+index a6a2bc3b855c2..fcc890e3ad735 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
+@@ -162,16 +162,9 @@
+ #size-cells = <1>;
+ ranges;
+
+- /* LPC FW cycle bridge region requires natural alignment */
+- flash_memory: region@b8000000 {
+- no-map;
+- reg = <0xb8000000 0x04000000>; /* 64M */
+- };
+-
+- /* 48MB region from the end of flash to start of vga memory */
+- ramoops@bc000000 {
++ ramoops@b3e00000 {
+ compatible = "ramoops";
+- reg = <0xbc000000 0x200000>; /* 16 * (4 * 0x8000) */
++ reg = <0xb3e00000 0x200000>; /* 16 * (4 * 0x8000) */
+ record-size = <0x8000>;
+ console-size = <0x8000>;
+ ftrace-size = <0x8000>;
+@@ -179,6 +172,12 @@
+ max-reason = <3>; /* KMSG_DUMP_EMERG */
+ };
+
++ /* LPC FW cycle bridge region requires natural alignment */
++ flash_memory: region@b4000000 {
++ no-map;
++ reg = <0xb4000000 0x04000000>; /* 64M */
++ };
++
+ /* VGA region is dictated by hardware strapping */
+ vga_memory: region@bf000000 {
+ no-map;
+diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+index bf59a9962379d..4879da4cdbd25 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+@@ -95,14 +95,9 @@
+ #size-cells = <1>;
+ ranges;
+
+- flash_memory: region@b8000000 {
+- no-map;
+- reg = <0xb8000000 0x04000000>; /* 64M */
+- };
+-
+- ramoops@bc000000 {
++ ramoops@b3e00000 {
+ compatible = "ramoops";
+- reg = <0xbc000000 0x200000>; /* 16 * (4 * 0x8000) */
++ reg = <0xb3e00000 0x200000>; /* 16 * (4 * 0x8000) */
+ record-size = <0x8000>;
+ console-size = <0x8000>;
+ ftrace-size = <0x8000>;
+@@ -110,6 +105,13 @@
+ max-reason = <3>; /* KMSG_DUMP_EMERG */
+ };
+
++ /* LPC FW cycle bridge region requires natural alignment */
++ flash_memory: region@b4000000 {
++ no-map;
++ reg = <0xb4000000 0x04000000>; /* 64M */
++ };
++
++ /* VGA region is dictated by hardware strapping */
+ vga_memory: region@bf000000 {
+ no-map;
+ compatible = "shared-dma-pool";
+diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
+index 00a36fba2fd23..9aee3cfd3e981 100644
+--- a/arch/arm/boot/dts/dove.dtsi
++++ b/arch/arm/boot/dts/dove.dtsi
+@@ -139,7 +139,7 @@
+ pcie1: pcie@2 {
+ device_type = "pci";
+ status = "disabled";
+- assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x80000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ clocks = <&gate_clk 5>;
+ marvell,pcie-port = <1>;
+diff --git a/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts
+index d10669fcd527d..9e9eba8bad5e4 100644
+--- a/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts
++++ b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts
+@@ -366,7 +366,7 @@
+ spi-max-frequency = <20000000>;
+ spi-rx-bus-width = <2>;
+ label = "bmc";
+- partitions@80000000 {
++ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+diff --git a/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts b/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts
+index 491606c4f044d..2a394cc15284c 100644
+--- a/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts
++++ b/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts
+@@ -142,7 +142,7 @@
+ reg = <0>;
+ spi-rx-bus-width = <2>;
+
+- partitions@80000000 {
++ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+diff --git a/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts b/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts
+index a0c2d76526258..f7b38bee039bc 100644
+--- a/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts
++++ b/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts
+@@ -388,7 +388,7 @@
+ spi-max-frequency = <5000000>;
+ spi-rx-bus-width = <2>;
+ label = "bmc";
+- partitions@80000000 {
++ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+@@ -422,7 +422,7 @@
+ reg = <1>;
+ spi-max-frequency = <5000000>;
+ spi-rx-bus-width = <2>;
+- partitions@88000000 {
++ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+@@ -447,7 +447,7 @@
+ reg = <0>;
+ spi-max-frequency = <5000000>;
+ spi-rx-bus-width = <2>;
+- partitions@A0000000 {
++ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+diff --git a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts
+index 3dad32834e5ea..f53d45fa1de87 100644
+--- a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts
++++ b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts
+@@ -74,7 +74,7 @@
+ spi-rx-bus-width = <2>;
+ reg = <0>;
+ spi-max-frequency = <5000000>;
+- partitions@80000000 {
++ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+@@ -135,7 +135,7 @@
+ spi-rx-bus-width = <2>;
+ reg = <0>;
+ spi-max-frequency = <5000000>;
+- partitions@A0000000 {
++ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+diff --git a/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts b/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts
+index 132e702281fc5..87359ab05db3e 100644
+--- a/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts
++++ b/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts
+@@ -107,7 +107,7 @@
+ reg = <0>;
+ spi-rx-bus-width = <2>;
+
+- partitions@80000000 {
++ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+@@ -146,7 +146,7 @@
+ reg = <1>;
+ npcm,fiu-rx-bus-width = <2>;
+
+- partitions@88000000 {
++ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+@@ -173,7 +173,7 @@
+ reg = <0>;
+ spi-rx-bus-width = <2>;
+
+- partitions@A0000000 {
++ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index 942aa2278355d..a39b940d58532 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -1615,7 +1615,7 @@
+ };
+
+ etb@1a01000 {
+- compatible = "coresight-etb10", "arm,primecell";
++ compatible = "arm,coresight-etb10", "arm,primecell";
+ reg = <0x1a01000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
+index fd41243a0b2c0..9d5a04a46b14e 100644
+--- a/arch/arm/boot/dts/spear600.dtsi
++++ b/arch/arm/boot/dts/spear600.dtsi
+@@ -47,7 +47,7 @@
+ compatible = "arm,pl110", "arm,primecell";
+ reg = <0xfc200000 0x1000>;
+ interrupt-parent = <&vic1>;
+- interrupts = <12>;
++ interrupts = <13>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts
+index 2e3c9fbb4eb36..275167f26fd9d 100644
+--- a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts
++++ b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts
+@@ -13,7 +13,6 @@
+ /dts-v1/;
+
+ #include "stm32mp157.dtsi"
+-#include "stm32mp15xc.dtsi"
+ #include "stm32mp15xx-dhcor-som.dtsi"
+ #include "stm32mp15xx-dhcor-avenger96.dtsi"
+
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+index 90933077d66de..b6957cbdeff5f 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+@@ -100,7 +100,7 @@
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+- gpios = <&gpioz 3 GPIO_ACTIVE_HIGH>;
++ gpio = <&gpioz 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+ };
+diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
+index 41b2e8abc9e69..708816caf859c 100644
+--- a/arch/arm/mach-mmp/time.c
++++ b/arch/arm/mach-mmp/time.c
+@@ -43,18 +43,21 @@
+ static void __iomem *mmp_timer_base = TIMERS_VIRT_BASE;
+
+ /*
+- * FIXME: the timer needs some delay to stablize the counter capture
++ * Read the timer through the CVWR register. Delay is required after requesting
++ * a read. The CR register cannot be directly read due to metastability issues
++ * documented in the PXA168 software manual.
+ */
+ static inline uint32_t timer_read(void)
+ {
+- int delay = 100;
++ uint32_t val;
++ int delay = 3;
+
+ __raw_writel(1, mmp_timer_base + TMR_CVWR(1));
+
+ while (delay--)
+- cpu_relax();
++ val = __raw_readl(mmp_timer_base + TMR_CVWR(1));
+
+- return __raw_readl(mmp_timer_base + TMR_CVWR(1));
++ return val;
+ }
+
+ static u64 notrace mmp_read_sched_clock(void)
+diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi
+index 51a63b29d4045..a4d195e9eb8c8 100644
+--- a/arch/arm64/boot/dts/apple/t8103.dtsi
++++ b/arch/arm64/boot/dts/apple/t8103.dtsi
+@@ -412,7 +412,7 @@
+ resets = <&ps_ans2>;
+ };
+
+- pcie0_dart_0: dart@681008000 {
++ pcie0_dart_0: iommu@681008000 {
+ compatible = "apple,t8103-dart";
+ reg = <0x6 0x81008000 0x0 0x4000>;
+ #iommu-cells = <1>;
+@@ -421,7 +421,7 @@
+ power-domains = <&ps_apcie_gp>;
+ };
+
+- pcie0_dart_1: dart@682008000 {
++ pcie0_dart_1: iommu@682008000 {
+ compatible = "apple,t8103-dart";
+ reg = <0x6 0x82008000 0x0 0x4000>;
+ #iommu-cells = <1>;
+@@ -430,7 +430,7 @@
+ power-domains = <&ps_apcie_gp>;
+ };
+
+- pcie0_dart_2: dart@683008000 {
++ pcie0_dart_2: iommu@683008000 {
+ compatible = "apple,t8103-dart";
+ reg = <0x6 0x83008000 0x0 0x4000>;
+ #iommu-cells = <1>;
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+index ada164d423f3d..200f97e1c4c9c 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+@@ -125,9 +125,12 @@
+ /delete-property/ mrvl,i2c-fast-mode;
+ status = "okay";
+
++ /* MCP7940MT-I/MNY RTC */
+ rtc@6f {
+ compatible = "microchip,mcp7940x";
+ reg = <0x6f>;
++ interrupt-parent = <&gpiosb>;
++ interrupts = <5 0>; /* GPIO2_5 */
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+index 9b1af9c801308..d31a194124c91 100644
+--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+@@ -26,14 +26,14 @@
+ stdout-path = "serial0:921600n8";
+ };
+
+- cpus_fixed_vproc0: fixedregulator@0 {
++ cpus_fixed_vproc0: regulator-vproc-buck0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vproc_buck0";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+- cpus_fixed_vproc1: fixedregulator@1 {
++ cpus_fixed_vproc1: regulator-vproc-buck1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vproc_buck1";
+ regulator-min-microvolt = <1000000>;
+@@ -50,7 +50,7 @@
+ id-gpio = <&pio 14 GPIO_ACTIVE_HIGH>;
+ };
+
+- usb_p0_vbus: regulator@2 {
++ usb_p0_vbus: regulator-usb-p0-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "p0_vbus";
+ regulator-min-microvolt = <5000000>;
+@@ -59,7 +59,7 @@
+ enable-active-high;
+ };
+
+- usb_p1_vbus: regulator@3 {
++ usb_p1_vbus: regulator-usb-p1-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "p1_vbus";
+ regulator-min-microvolt = <5000000>;
+@@ -68,7 +68,7 @@
+ enable-active-high;
+ };
+
+- usb_p2_vbus: regulator@4 {
++ usb_p2_vbus: regulator-usb-p2-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "p2_vbus";
+ regulator-min-microvolt = <5000000>;
+@@ -77,7 +77,7 @@
+ enable-active-high;
+ };
+
+- usb_p3_vbus: regulator@5 {
++ usb_p3_vbus: regulator-usb-p3-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "p3_vbus";
+ regulator-min-microvolt = <5000000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+index e6d7453e56e0e..1ac0b2cf3d406 100644
+--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+@@ -160,70 +160,70 @@
+ #clock-cells = <0>;
+ };
+
+- clk26m: oscillator@0 {
++ clk26m: oscillator-26m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ clock-output-names = "clk26m";
+ };
+
+- clk32k: oscillator@1 {
++ clk32k: oscillator-32k {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "clk32k";
+ };
+
+- clkfpc: oscillator@2 {
++ clkfpc: oscillator-50m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ clock-output-names = "clkfpc";
+ };
+
+- clkaud_ext_i_0: oscillator@3 {
++ clkaud_ext_i_0: oscillator-aud0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <6500000>;
+ clock-output-names = "clkaud_ext_i_0";
+ };
+
+- clkaud_ext_i_1: oscillator@4 {
++ clkaud_ext_i_1: oscillator-aud1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <196608000>;
+ clock-output-names = "clkaud_ext_i_1";
+ };
+
+- clkaud_ext_i_2: oscillator@5 {
++ clkaud_ext_i_2: oscillator-aud2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <180633600>;
+ clock-output-names = "clkaud_ext_i_2";
+ };
+
+- clki2si0_mck_i: oscillator@6 {
++ clki2si0_mck_i: oscillator-i2s0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <30000000>;
+ clock-output-names = "clki2si0_mck_i";
+ };
+
+- clki2si1_mck_i: oscillator@7 {
++ clki2si1_mck_i: oscillator-i2s1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <30000000>;
+ clock-output-names = "clki2si1_mck_i";
+ };
+
+- clki2si2_mck_i: oscillator@8 {
++ clki2si2_mck_i: oscillator-i2s2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <30000000>;
+ clock-output-names = "clki2si2_mck_i";
+ };
+
+- clktdmin_mclk_i: oscillator@9 {
++ clktdmin_mclk_i: oscillator-mclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <30000000>;
+@@ -266,7 +266,7 @@
+ reg = <0 0x10005000 0 0x1000>;
+ };
+
+- pio: pinctrl@10005000 {
++ pio: pinctrl@1000b000 {
+ compatible = "mediatek,mt2712-pinctrl";
+ reg = <0 0x1000b000 0 0x1000>;
+ mediatek,pctl-regmap = <&syscfg_pctl_a>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt6779.dtsi b/arch/arm64/boot/dts/mediatek/mt6779.dtsi
+index 9bdf5145966c5..dde9ce137b4f1 100644
+--- a/arch/arm64/boot/dts/mediatek/mt6779.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt6779.dtsi
+@@ -88,14 +88,14 @@
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW 0>;
+ };
+
+- clk26m: oscillator@0 {
++ clk26m: oscillator-26m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ clock-output-names = "clk26m";
+ };
+
+- clk32k: oscillator@1 {
++ clk32k: oscillator-32k {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+@@ -117,7 +117,7 @@
+ compatible = "simple-bus";
+ ranges;
+
+- gic: interrupt-controller@0c000000 {
++ gic: interrupt-controller@c000000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <4>;
+ interrupt-parent = <&gic>;
+@@ -138,7 +138,7 @@
+
+ };
+
+- sysirq: intpol-controller@0c53a650 {
++ sysirq: intpol-controller@c53a650 {
+ compatible = "mediatek,mt6779-sysirq",
+ "mediatek,mt6577-sysirq";
+ interrupt-controller;
+diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
+index 15616231022a2..c3677d77e0a45 100644
+--- a/arch/arm64/boot/dts/mediatek/mt6797.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
+@@ -95,7 +95,7 @@
+ };
+ };
+
+- clk26m: oscillator@0 {
++ clk26m: oscillator-26m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+index 72e0d9722e07a..35e01fa2d314b 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+@@ -14,7 +14,7 @@
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+- clk40m: oscillator@0 {
++ clk40m: oscillator-40m {
+ compatible = "fixed-clock";
+ clock-frequency = <40000000>;
+ #clock-cells = <0>;
+@@ -112,6 +112,12 @@
+ #clock-cells = <1>;
+ };
+
++ wed_pcie: wed-pcie@10003000 {
++ compatible = "mediatek,mt7986-wed-pcie",
++ "syscon";
++ reg = <0 0x10003000 0 0x10>;
++ };
++
+ topckgen: topckgen@1001b000 {
+ compatible = "mediatek,mt7986-topckgen", "syscon";
+ reg = <0 0x1001B000 0 0x1000>;
+@@ -168,7 +174,7 @@
+ #clock-cells = <1>;
+ };
+
+- trng: trng@1020f000 {
++ trng: rng@1020f000 {
+ compatible = "mediatek,mt7986-rng",
+ "mediatek,mt7623-rng";
+ reg = <0 0x1020f000 0 0x100>;
+@@ -228,12 +234,6 @@
+ #reset-cells = <1>;
+ };
+
+- wed_pcie: wed-pcie@10003000 {
+- compatible = "mediatek,mt7986-wed-pcie",
+- "syscon";
+- reg = <0 0x10003000 0 0x10>;
+- };
+-
+ wed0: wed@15010000 {
+ compatible = "mediatek,mt7986-wed",
+ "syscon";
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index a70b669c49baa..402136bfd5350 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -1678,7 +1678,7 @@
+ <GIC_SPI 278 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "job", "mmu", "gpu";
+
+- clocks = <&topckgen CLK_TOP_MFGPLL_CK>;
++ clocks = <&mfgcfg CLK_MFG_BG3D>;
+
+ power-domains =
+ <&spm MT8183_POWER_DOMAIN_MFG_CORE0>,
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index 905d1a90b406c..0b85b5874a4f9 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -36,7 +36,7 @@
+ enable-method = "psci";
+ performance-domains = <&performance 0>;
+ clock-frequency = <1701000000>;
+- capacity-dmips-mhz = <578>;
++ capacity-dmips-mhz = <308>;
+ cpu-idle-states = <&cpu_off_l &cluster_off_l>;
+ next-level-cache = <&l2_0>;
+ #cooling-cells = <2>;
+@@ -49,7 +49,7 @@
+ enable-method = "psci";
+ performance-domains = <&performance 0>;
+ clock-frequency = <1701000000>;
+- capacity-dmips-mhz = <578>;
++ capacity-dmips-mhz = <308>;
+ cpu-idle-states = <&cpu_off_l &cluster_off_l>;
+ next-level-cache = <&l2_0>;
+ #cooling-cells = <2>;
+@@ -62,7 +62,7 @@
+ enable-method = "psci";
+ performance-domains = <&performance 0>;
+ clock-frequency = <1701000000>;
+- capacity-dmips-mhz = <578>;
++ capacity-dmips-mhz = <308>;
+ cpu-idle-states = <&cpu_off_l &cluster_off_l>;
+ next-level-cache = <&l2_0>;
+ #cooling-cells = <2>;
+@@ -75,7 +75,7 @@
+ enable-method = "psci";
+ performance-domains = <&performance 0>;
+ clock-frequency = <1701000000>;
+- capacity-dmips-mhz = <578>;
++ capacity-dmips-mhz = <308>;
+ cpu-idle-states = <&cpu_off_l &cluster_off_l>;
+ next-level-cache = <&l2_0>;
+ #cooling-cells = <2>;
+diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
+index 8ee1529683a34..ec8dfb3d1c6d6 100644
+--- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
++++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
+@@ -17,7 +17,7 @@
+ };
+
+ firmware {
+- optee: optee@4fd00000 {
++ optee: optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+@@ -209,7 +209,7 @@
+ };
+ };
+
+- i2c0_pins_a: i2c0@0 {
++ i2c0_pins_a: i2c0 {
+ pins1 {
+ pinmux = <MT8516_PIN_58_SDA0__FUNC_SDA0_0>,
+ <MT8516_PIN_59_SCL0__FUNC_SCL0_0>;
+@@ -217,7 +217,7 @@
+ };
+ };
+
+- i2c2_pins_a: i2c2@0 {
++ i2c2_pins_a: i2c2 {
+ pins1 {
+ pinmux = <MT8516_PIN_60_SDA2__FUNC_SDA2_0>,
+ <MT8516_PIN_61_SCL2__FUNC_SCL2_0>;
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+index 0170bfa8a4679..dfe2cf2f4b218 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+@@ -1965,7 +1965,7 @@
+
+ bus-range = <0x0 0xff>;
+
+- ranges = <0x43000000 0x35 0x40000000 0x35 0x40000000 0x2 0xe8000000>, /* prefetchable memory (11904 MB) */
++ ranges = <0x43000000 0x35 0x40000000 0x35 0x40000000 0x2 0xc0000000>, /* prefetchable memory (11264 MB) */
+ <0x02000000 0x0 0x40000000 0x38 0x28000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */
+ <0x01000000 0x0 0x2c100000 0x00 0x2c100000 0x0 0x00100000>; /* downstream I/O (1 MB) */
+
+@@ -2178,7 +2178,7 @@
+ bus-range = <0x0 0xff>;
+
+ ranges = <0x43000000 0x21 0x00000000 0x21 0x00000000 0x0 0x28000000>, /* prefetchable memory (640 MB) */
+- <0x02000000 0x0 0x40000000 0x21 0xe8000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */
++ <0x02000000 0x0 0x40000000 0x21 0x28000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */
+ <0x01000000 0x0 0x34100000 0x00 0x34100000 0x0 0x00100000>; /* downstream I/O (1 MB) */
+
+ interconnects = <&mc TEGRA234_MEMORY_CLIENT_PCIE3R &emc>,
+@@ -2336,7 +2336,7 @@
+
+ bus-range = <0x0 0xff>;
+
+- ranges = <0x43000000 0x27 0x40000000 0x27 0x40000000 0x3 0xe8000000>, /* prefetchable memory (16000 MB) */
++ ranges = <0x43000000 0x28 0x00000000 0x28 0x00000000 0x3 0x28000000>, /* prefetchable memory (12928 MB) */
+ <0x02000000 0x0 0x40000000 0x2b 0x28000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */
+ <0x01000000 0x0 0x3a100000 0x00 0x3a100000 0x0 0x00100000>; /* downstream I/O (1 MB) */
+
+@@ -2442,7 +2442,7 @@
+
+ bus-range = <0x0 0xff>;
+
+- ranges = <0x43000000 0x2e 0x40000000 0x2e 0x40000000 0x3 0xe8000000>, /* prefetchable memory (16000 MB) */
++ ranges = <0x43000000 0x30 0x00000000 0x30 0x00000000 0x2 0x28000000>, /* prefetchable memory (8832 MB) */
+ <0x02000000 0x0 0x40000000 0x32 0x28000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */
+ <0x01000000 0x0 0x3e100000 0x00 0x3e100000 0x0 0x00100000>; /* downstream I/O (1 MB) */
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts
+index 1ba2eca33c7b6..6a716c83e5f1d 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts
++++ b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts
+@@ -37,6 +37,8 @@
+
+ &blsp1_spi1 {
+ cs-select = <0>;
++ pinctrl-0 = <&spi_0_pins>;
++ pinctrl-names = "default";
+ status = "okay";
+
+ flash@0 {
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index a831064700ee8..9743cb270639d 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -1345,7 +1345,7 @@
+ };
+
+ mpss: remoteproc@4080000 {
+- compatible = "qcom,msm8916-mss-pil", "qcom,q6v5-pil";
++ compatible = "qcom,msm8916-mss-pil";
+ reg = <0x04080000 0x100>,
+ <0x04020000 0x040>;
+
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index aba7176443919..1107befc3b091 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -144,82 +144,92 @@
+ /* Nominal fmax for now */
+ opp-307200000 {
+ opp-hz = /bits/ 64 <307200000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-422400000 {
+ opp-hz = /bits/ 64 <422400000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-480000000 {
+ opp-hz = /bits/ 64 <480000000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-556800000 {
+ opp-hz = /bits/ 64 <556800000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-652800000 {
+ opp-hz = /bits/ 64 <652800000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-729600000 {
+ opp-hz = /bits/ 64 <729600000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-844800000 {
+ opp-hz = /bits/ 64 <844800000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-960000000 {
+ opp-hz = /bits/ 64 <960000000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1036800000 {
+ opp-hz = /bits/ 64 <1036800000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1113600000 {
+ opp-hz = /bits/ 64 <1113600000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1190400000 {
+ opp-hz = /bits/ 64 <1190400000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1228800000 {
+ opp-hz = /bits/ 64 <1228800000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1324800000 {
+ opp-hz = /bits/ 64 <1324800000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x5>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1363200000 {
++ opp-hz = /bits/ 64 <1363200000>;
++ opp-supported-hw = <0x2>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1401600000 {
+ opp-hz = /bits/ 64 <1401600000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x5>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1478400000 {
+ opp-hz = /bits/ 64 <1478400000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x1>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1497600000 {
++ opp-hz = /bits/ 64 <1497600000>;
++ opp-supported-hw = <0x04>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1593600000 {
+ opp-hz = /bits/ 64 <1593600000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x1>;
+ clock-latency-ns = <200000>;
+ };
+ };
+@@ -232,127 +242,137 @@
+ /* Nominal fmax for now */
+ opp-307200000 {
+ opp-hz = /bits/ 64 <307200000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-403200000 {
+ opp-hz = /bits/ 64 <403200000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-480000000 {
+ opp-hz = /bits/ 64 <480000000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-556800000 {
+ opp-hz = /bits/ 64 <556800000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-652800000 {
+ opp-hz = /bits/ 64 <652800000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-729600000 {
+ opp-hz = /bits/ 64 <729600000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-806400000 {
+ opp-hz = /bits/ 64 <806400000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-883200000 {
+ opp-hz = /bits/ 64 <883200000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-940800000 {
+ opp-hz = /bits/ 64 <940800000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1036800000 {
+ opp-hz = /bits/ 64 <1036800000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1113600000 {
+ opp-hz = /bits/ 64 <1113600000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1190400000 {
+ opp-hz = /bits/ 64 <1190400000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1248000000 {
+ opp-hz = /bits/ 64 <1248000000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1324800000 {
+ opp-hz = /bits/ 64 <1324800000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1401600000 {
+ opp-hz = /bits/ 64 <1401600000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1478400000 {
+ opp-hz = /bits/ 64 <1478400000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1555200000 {
+ opp-hz = /bits/ 64 <1555200000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1632000000 {
+ opp-hz = /bits/ 64 <1632000000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1708800000 {
+ opp-hz = /bits/ 64 <1708800000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1785600000 {
+ opp-hz = /bits/ 64 <1785600000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x7>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1804800000 {
++ opp-hz = /bits/ 64 <1804800000>;
++ opp-supported-hw = <0x6>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1824000000 {
+ opp-hz = /bits/ 64 <1824000000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x1>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1900800000 {
++ opp-hz = /bits/ 64 <1900800000>;
++ opp-supported-hw = <0x4>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1920000000 {
+ opp-hz = /bits/ 64 <1920000000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x1>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1996800000 {
+ opp-hz = /bits/ 64 <1996800000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x1>;
+ clock-latency-ns = <200000>;
+ };
+ opp-2073600000 {
+ opp-hz = /bits/ 64 <2073600000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x1>;
+ clock-latency-ns = <200000>;
+ };
+ opp-2150400000 {
+ opp-hz = /bits/ 64 <2150400000>;
+- opp-supported-hw = <0x77>;
++ opp-supported-hw = <0x1>;
+ clock-latency-ns = <200000>;
+ };
+ };
+@@ -1213,17 +1233,17 @@
+ compatible = "operating-points-v2";
+
+ /*
+- * 624Mhz and 560Mhz are only available on speed
+- * bin (1 << 0). All the rest are available on
+- * all bins of the hardware
++ * 624Mhz is only available on speed bins 0 and 3.
++ * 560Mhz is only available on speed bins 0, 2 and 3.
++ * All the rest are available on all bins of the hardware.
+ */
+ opp-624000000 {
+ opp-hz = /bits/ 64 <624000000>;
+- opp-supported-hw = <0x01>;
++ opp-supported-hw = <0x09>;
+ };
+ opp-560000000 {
+ opp-hz = /bits/ 64 <560000000>;
+- opp-supported-hw = <0x01>;
++ opp-supported-hw = <0x0d>;
+ };
+ opp-510000000 {
+ opp-hz = /bits/ 64 <510000000>;
+@@ -3342,7 +3362,7 @@
+ interrupt-names = "intr1", "intr2";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+- reset-gpios = <&tlmm 64 GPIO_ACTIVE_HIGH>;
++ reset-gpios = <&tlmm 64 GPIO_ACTIVE_LOW>;
+
+ slim-ifc-dev = <&tasha_ifd>;
+
+diff --git a/arch/arm64/boot/dts/qcom/msm8996pro.dtsi b/arch/arm64/boot/dts/qcom/msm8996pro.dtsi
+new file mode 100644
+index 0000000000000..63e1b4ec7a360
+--- /dev/null
++++ b/arch/arm64/boot/dts/qcom/msm8996pro.dtsi
+@@ -0,0 +1,266 @@
++// SPDX-License-Identifier: BSD-3-Clause
++/*
++ * Copyright (c) 2022, Linaro Limited
++ */
++
++#include "msm8996.dtsi"
++
++/ {
++ /delete-node/ opp-table-cluster0;
++ /delete-node/ opp-table-cluster1;
++
++ /*
++ * On MSM8996 Pro the cpufreq driver shifts speed bins into the high
++ * nibble of supported hw, so speed bin 0 becomes 0x10, speed bin 1
++ * becomes 0x20, speed 2 becomes 0x40.
++ */
++
++ cluster0_opp: opp-table-cluster0 {
++ compatible = "operating-points-v2-kryo-cpu";
++ nvmem-cells = <&speedbin_efuse>;
++ opp-shared;
++
++ opp-307200000 {
++ opp-hz = /bits/ 64 <307200000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-384000000 {
++ opp-hz = /bits/ 64 <384000000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-460800000 {
++ opp-hz = /bits/ 64 <460800000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-537600000 {
++ opp-hz = /bits/ 64 <537600000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-614400000 {
++ opp-hz = /bits/ 64 <614400000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-691200000 {
++ opp-hz = /bits/ 64 <691200000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-768000000 {
++ opp-hz = /bits/ 64 <768000000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-844800000 {
++ opp-hz = /bits/ 64 <844800000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-902400000 {
++ opp-hz = /bits/ 64 <902400000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-979200000 {
++ opp-hz = /bits/ 64 <979200000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1056000000 {
++ opp-hz = /bits/ 64 <1056000000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1132800000 {
++ opp-hz = /bits/ 64 <1132800000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1209600000 {
++ opp-hz = /bits/ 64 <1209600000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1286400000 {
++ opp-hz = /bits/ 64 <1286400000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1363200000 {
++ opp-hz = /bits/ 64 <1363200000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1440000000 {
++ opp-hz = /bits/ 64 <1440000000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1516800000 {
++ opp-hz = /bits/ 64 <1516800000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1593600000 {
++ opp-hz = /bits/ 64 <1593600000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1996800000 {
++ opp-hz = /bits/ 64 <1996800000>;
++ opp-supported-hw = <0x20>;
++ clock-latency-ns = <200000>;
++ };
++ opp-2188800000 {
++ opp-hz = /bits/ 64 <2188800000>;
++ opp-supported-hw = <0x10>;
++ clock-latency-ns = <200000>;
++ };
++ };
++
++ cluster1_opp: opp-table-cluster1 {
++ compatible = "operating-points-v2-kryo-cpu";
++ nvmem-cells = <&speedbin_efuse>;
++ opp-shared;
++
++ opp-307200000 {
++ opp-hz = /bits/ 64 <307200000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-384000000 {
++ opp-hz = /bits/ 64 <384000000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-460800000 {
++ opp-hz = /bits/ 64 <460800000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-537600000 {
++ opp-hz = /bits/ 64 <537600000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-614400000 {
++ opp-hz = /bits/ 64 <614400000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-691200000 {
++ opp-hz = /bits/ 64 <691200000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-748800000 {
++ opp-hz = /bits/ 64 <748800000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-825600000 {
++ opp-hz = /bits/ 64 <825600000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-902400000 {
++ opp-hz = /bits/ 64 <902400000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-979200000 {
++ opp-hz = /bits/ 64 <979200000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1056000000 {
++ opp-hz = /bits/ 64 <1056000000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1132800000 {
++ opp-hz = /bits/ 64 <1132800000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1209600000 {
++ opp-hz = /bits/ 64 <1209600000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1286400000 {
++ opp-hz = /bits/ 64 <1286400000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1363200000 {
++ opp-hz = /bits/ 64 <1363200000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1440000000 {
++ opp-hz = /bits/ 64 <1440000000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1516800000 {
++ opp-hz = /bits/ 64 <1516800000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1593600000 {
++ opp-hz = /bits/ 64 <1593600000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1670400000 {
++ opp-hz = /bits/ 64 <1670400000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1747200000 {
++ opp-hz = /bits/ 64 <1747200000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1824000000 {
++ opp-hz = /bits/ 64 <1824000000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1900800000 {
++ opp-hz = /bits/ 64 <1900800000>;
++ opp-supported-hw = <0x70>;
++ clock-latency-ns = <200000>;
++ };
++ opp-1977600000 {
++ opp-hz = /bits/ 64 <1977600000>;
++ opp-supported-hw = <0x30>;
++ clock-latency-ns = <200000>;
++ };
++ opp-2054400000 {
++ opp-hz = /bits/ 64 <2054400000>;
++ opp-supported-hw = <0x30>;
++ clock-latency-ns = <200000>;
++ };
++ opp-2150400000 {
++ opp-hz = /bits/ 64 <2150400000>;
++ opp-supported-hw = <0x30>;
++ clock-latency-ns = <200000>;
++ };
++ opp-2246400000 {
++ opp-hz = /bits/ 64 <2246400000>;
++ opp-supported-hw = <0x10>;
++ clock-latency-ns = <200000>;
++ };
++ opp-2342400000 {
++ opp-hz = /bits/ 64 <2342400000>;
++ opp-supported-hw = <0x10>;
++ clock-latency-ns = <200000>;
++ };
++ };
++};
+diff --git a/arch/arm64/boot/dts/qcom/pm6350.dtsi b/arch/arm64/boot/dts/qcom/pm6350.dtsi
+index ecf9b99191828..68245d78d2b93 100644
+--- a/arch/arm64/boot/dts/qcom/pm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm6350.dtsi
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2021, Luca Weiss <luca@z3ntu.xyz>
+ */
+
++#include <dt-bindings/input/input.h>
+ #include <dt-bindings/spmi/spmi.h>
+
+ &spmi_bus {
+diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi
+index e1622b16c08bd..02a69ac0149b2 100644
+--- a/arch/arm64/boot/dts/qcom/pm660.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm660.dtsi
+@@ -163,7 +163,7 @@
+ qcom,pre-scaling = <1 3>;
+ };
+
+- vcoin: vcoin@83 {
++ vcoin: vcoin@85 {
+ reg = <ADC5_VCOIN>;
+ qcom,decimation = <1024>;
+ qcom,pre-scaling = <1 3>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
+index 1bd6c7dcd9e91..bfab67f4a7c9c 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
+@@ -194,6 +194,12 @@ ap_ts_pen_1v8: &i2c4 {
+ pins = "gpio49", "gpio50", "gpio51", "gpio52";
+ function = "mi2s_1";
+ };
++
++ pinconf {
++ pins = "gpio49", "gpio50", "gpio51", "gpio52";
++ drive-strength = <2>;
++ bias-pull-down;
++ };
+ };
+
+ &ts_reset_l {
+diff --git a/arch/arm64/boot/dts/qcom/sc7280-idp.dts b/arch/arm64/boot/dts/qcom/sc7280-idp.dts
+index 7559164cdda08..e2e37a0292ad6 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280-idp.dts
++++ b/arch/arm64/boot/dts/qcom/sc7280-idp.dts
+@@ -10,7 +10,6 @@
+ #include <dt-bindings/iio/qcom,spmi-adc7-pmr735a.h>
+ #include "sc7280-idp.dtsi"
+ #include "pmr735a.dtsi"
+-#include "sc7280-herobrine-lte-sku.dtsi"
+
+ / {
+ model = "Qualcomm Technologies, Inc. sc7280 IDP SKU1 platform";
+diff --git a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
+index cd432a2856a7b..ca50f0ba9b815 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
+@@ -13,6 +13,7 @@
+ #include "pmk8350.dtsi"
+
+ #include "sc7280-chrome-common.dtsi"
++#include "sc7280-herobrine-lte-sku.dtsi"
+
+ / {
+ aliases {
+@@ -34,7 +35,7 @@
+ pinctrl-0 = <&wcd_reset_n>;
+ pinctrl-1 = <&wcd_reset_n_sleep>;
+
+- reset-gpios = <&tlmm 83 GPIO_ACTIVE_HIGH>;
++ reset-gpios = <&tlmm 83 GPIO_ACTIVE_LOW>;
+
+ qcom,rx-device = <&wcd_rx>;
+ qcom,tx-device = <&wcd_tx>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi b/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi
+index 4b8c676b0bb19..f7665b3799233 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi
+@@ -37,7 +37,7 @@
+ pinctrl-0 = <&wcd_reset_n>, <&us_euro_hs_sel>;
+ pinctrl-1 = <&wcd_reset_n_sleep>, <&us_euro_hs_sel>;
+
+- reset-gpios = <&tlmm 83 GPIO_ACTIVE_HIGH>;
++ reset-gpios = <&tlmm 83 GPIO_ACTIVE_LOW>;
+ us-euro-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>;
+
+ qcom,rx-device = <&wcd_rx>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+index b51b85f583e5d..e119060ac56cb 100644
+--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+@@ -779,7 +779,7 @@
+ pins = "gpio17", "gpio18", "gpio19";
+ function = "gpio";
+ drive-strength = <2>;
+- bias-no-pull;
++ bias-disable;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+index b5eb8f7eca1d5..b5f11fbcc3004 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+@@ -1436,7 +1436,7 @@ ap_ts_i2c: &i2c14 {
+ config {
+ pins = "gpio126";
+ function = "gpio";
+- bias-no-pull;
++ bias-disable;
+ drive-strength = <2>;
+ output-low;
+ };
+@@ -1446,7 +1446,7 @@ ap_ts_i2c: &i2c14 {
+ config {
+ pins = "gpio126";
+ function = "gpio";
+- bias-no-pull;
++ bias-disable;
+ drive-strength = <2>;
+ output-high;
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+index afc17e4d403fc..f982594896796 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+@@ -628,7 +628,7 @@
+ };
+
+ wcd_intr_default: wcd-intr-default {
+- pins = "goui54";
++ pins = "gpio54";
+ function = "gpio";
+ input-enable;
+ bias-pull-down;
+diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+index 1fe3fa3ad8770..7818fb6c5a10a 100644
+--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+@@ -458,7 +458,7 @@
+ sdhc_1: mmc@4744000 {
+ compatible = "qcom,sm6125-sdhci", "qcom,sdhci-msm-v5";
+ reg = <0x04744000 0x1000>, <0x04745000 0x1000>;
+- reg-names = "hc", "core";
++ reg-names = "hc", "cqhci";
+
+ interrupts = <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index c39de7d3ace0b..7be5fc8dec671 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -485,6 +485,7 @@
+ interrupts = <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 644 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
++ iommus = <&apps_smmu 0x60 0x0>;
+
+ clocks = <&gcc GCC_SDCC1_AHB_CLK>,
+ <&gcc GCC_SDCC1_APPS_CLK>,
+@@ -1063,6 +1064,7 @@
+ interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
++ iommus = <&apps_smmu 0x560 0x0>;
+
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>,
+@@ -1148,15 +1150,11 @@
+ dp_phy: dp-phy@88ea200 {
+ reg = <0 0x088ea200 0 0x200>,
+ <0 0x088ea400 0 0x200>,
+- <0 0x088eac00 0 0x400>,
++ <0 0x088eaa00 0 0x200>,
+ <0 0x088ea600 0 0x200>,
+- <0 0x088ea800 0 0x200>,
+- <0 0x088eaa00 0 0x100>;
++ <0 0x088ea800 0 0x200>;
+ #phy-cells = <0>;
+ #clock-cells = <1>;
+- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+- clock-names = "pipe0";
+- clock-output-names = "usb3_phy_pipe_clk_src";
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index cef8c4f4f0ff2..4a527a64772b4 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -2032,11 +2032,11 @@
+ status = "disabled";
+
+ ufs_mem_phy_lanes: phy@1d87400 {
+- reg = <0 0x01d87400 0 0x108>,
+- <0 0x01d87600 0 0x1e0>,
+- <0 0x01d87c00 0 0x1dc>,
+- <0 0x01d87800 0 0x108>,
+- <0 0x01d87a00 0 0x1e0>;
++ reg = <0 0x01d87400 0 0x16c>,
++ <0 0x01d87600 0 0x200>,
++ <0 0x01d87c00 0 0x200>,
++ <0 0x01d87800 0 0x16c>,
++ <0 0x01d87a00 0 0x200>;
+ #phy-cells = <0>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+index a102aa5efa326..a05fe468e0b41 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+@@ -635,7 +635,7 @@
+ wcd938x: codec {
+ compatible = "qcom,wcd9380-codec";
+ #sound-dai-cells = <1>;
+- reset-gpios = <&tlmm 32 GPIO_ACTIVE_HIGH>;
++ reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>;
+ vdd-buck-supply = <&vreg_s4a_1p8>;
+ vdd-rxtx-supply = <&vreg_s4a_1p8>;
+ vdd-io-supply = <&vreg_s4a_1p8>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+index 5428aab3058dd..e4769dcfaad7b 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+@@ -619,7 +619,7 @@
+ pins = "gpio39";
+ function = "gpio";
+ drive-strength = <2>;
+- bias-disabled;
++ bias-disable;
+ input-enable;
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index e276eed1f8e2c..29e352a577311 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -2180,11 +2180,11 @@
+ status = "disabled";
+
+ ufs_mem_phy_lanes: phy@1d87400 {
+- reg = <0 0x01d87400 0 0x108>,
+- <0 0x01d87600 0 0x1e0>,
+- <0 0x01d87c00 0 0x1dc>,
+- <0 0x01d87800 0 0x108>,
+- <0 0x01d87a00 0 0x1e0>;
++ reg = <0 0x01d87400 0 0x16c>,
++ <0 0x01d87600 0 0x200>,
++ <0 0x01d87c00 0 0x200>,
++ <0 0x01d87800 0 0x16c>,
++ <0 0x01d87a00 0 0x200>;
+ #phy-cells = <0>;
+ };
+ };
+@@ -2455,7 +2455,7 @@
+ pins = "gpio7";
+ function = "dmic1_data";
+ drive-strength = <2>;
+- pull-down;
++ bias-pull-down;
+ input-enable;
+ };
+ };
+@@ -2892,15 +2892,11 @@
+ dp_phy: dp-phy@88ea200 {
+ reg = <0 0x088ea200 0 0x200>,
+ <0 0x088ea400 0 0x200>,
+- <0 0x088eac00 0 0x400>,
++ <0 0x088eaa00 0 0x200>,
+ <0 0x088ea600 0 0x200>,
+- <0 0x088ea800 0 0x200>,
+- <0 0x088eaa00 0 0x100>;
++ <0 0x088ea800 0 0x200>;
+ #phy-cells = <0>;
+ #clock-cells = <1>;
+- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+- clock-names = "pipe0";
+- clock-output-names = "usb3_phy_pipe_clk_src";
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index a86d9ea93b9d4..a6270d97a3192 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -2142,11 +2142,11 @@
+ status = "disabled";
+
+ ufs_mem_phy_lanes: phy@1d87400 {
+- reg = <0 0x01d87400 0 0x108>,
+- <0 0x01d87600 0 0x1e0>,
+- <0 0x01d87c00 0 0x1dc>,
+- <0 0x01d87800 0 0x108>,
+- <0 0x01d87a00 0 0x1e0>;
++ reg = <0 0x01d87400 0 0x188>,
++ <0 0x01d87600 0 0x200>,
++ <0 0x01d87c00 0 0x200>,
++ <0 0x01d87800 0 0x188>,
++ <0 0x01d87a00 0 0x200>;
+ #phy-cells = <0>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara-pdx223.dts b/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara-pdx223.dts
+index d68765eb6d4f9..6351050bc87f2 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara-pdx223.dts
++++ b/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara-pdx223.dts
+@@ -556,8 +556,6 @@
+ pinctrl-1 = <&sdc2_sleep_state &sdc2_card_det_n>;
+ vmmc-supply = <&pm8350c_l9>;
+ vqmmc-supply = <&pm8350c_l6>;
+- /* Forbid SDR104/SDR50 - broken hw! */
+- sdhci-caps-mask = <0x3 0x0>;
+ no-sdio;
+ no-mmc;
+ status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index d32f08df743d8..32a37c878a34c 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -3161,11 +3161,11 @@
+ status = "disabled";
+
+ ufs_mem_phy_lanes: phy@1d87400 {
+- reg = <0 0x01d87400 0 0x108>,
+- <0 0x01d87600 0 0x1e0>,
+- <0 0x01d87c00 0 0x1dc>,
+- <0 0x01d87800 0 0x108>,
+- <0 0x01d87a00 0 0x1e0>;
++ reg = <0 0x01d87400 0 0x188>,
++ <0 0x01d87600 0 0x200>,
++ <0 0x01d87c00 0 0x200>,
++ <0 0x01d87800 0 0x188>,
++ <0 0x01d87a00 0 0x200>;
+ #phy-cells = <0>;
+ };
+ };
+@@ -3192,6 +3192,9 @@
+ bus-width = <4>;
+ dma-coherent;
+
++ /* Forbid SDR104/SDR50 - broken hw! */
++ sdhci-caps-mask = <0x3 0x0>;
++
+ status = "disabled";
+
+ sdhc2_opp_table: opp-table {
+diff --git a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
+index c2f152bcf10ec..4092c0016035e 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
+@@ -577,7 +577,7 @@
+ reg = <0 0xe6540000 0 0x60>;
+ interrupts = <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 514>,
+- <&cpg CPG_CORE R8A779F0_CLK_S0D3>,
++ <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x31>, <&dmac0 0x30>,
+@@ -594,7 +594,7 @@
+ reg = <0 0xe6550000 0 0x60>;
+ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 515>,
+- <&cpg CPG_CORE R8A779F0_CLK_S0D3>,
++ <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x33>, <&dmac0 0x32>,
+@@ -611,7 +611,7 @@
+ reg = <0 0xe6560000 0 0x60>;
+ interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 516>,
+- <&cpg CPG_CORE R8A779F0_CLK_S0D3>,
++ <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x35>, <&dmac0 0x34>,
+@@ -628,7 +628,7 @@
+ reg = <0 0xe66a0000 0 0x60>;
+ interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 517>,
+- <&cpg CPG_CORE R8A779F0_CLK_S0D3>,
++ <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x37>, <&dmac0 0x36>,
+@@ -657,7 +657,7 @@
+ reg = <0 0xe6e60000 0 64>;
+ interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 702>,
+- <&cpg CPG_CORE R8A779F0_CLK_S0D3_PER>,
++ <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x51>, <&dmac0 0x50>,
+@@ -674,7 +674,7 @@
+ reg = <0 0xe6e68000 0 64>;
+ interrupts = <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 703>,
+- <&cpg CPG_CORE R8A779F0_CLK_S0D3_PER>,
++ <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x53>, <&dmac0 0x52>,
+@@ -691,7 +691,7 @@
+ reg = <0 0xe6c50000 0 64>;
+ interrupts = <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 704>,
+- <&cpg CPG_CORE R8A779F0_CLK_S0D3_PER>,
++ <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x57>, <&dmac0 0x56>,
+@@ -708,7 +708,7 @@
+ reg = <0 0xe6c40000 0 64>;
+ interrupts = <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 705>,
+- <&cpg CPG_CORE R8A779F0_CLK_S0D3_PER>,
++ <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x59>, <&dmac0 0x58>,
+diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+index d70f0600ae5a9..d58b18802cb01 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+@@ -326,7 +326,7 @@
+ reg = <0 0xe6540000 0 96>;
+ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 514>,
+- <&cpg CPG_CORE R8A779G0_CLK_S0D3_PER>,
++ <&cpg CPG_CORE R8A779G0_CLK_SASYNCPERD1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
+diff --git a/arch/arm64/boot/dts/renesas/r9a09g011.dtsi b/arch/arm64/boot/dts/renesas/r9a09g011.dtsi
+index fb1a97202c387..ebaa8cdd747d2 100644
+--- a/arch/arm64/boot/dts/renesas/r9a09g011.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a09g011.dtsi
+@@ -48,7 +48,7 @@
+ #size-cells = <2>;
+ ranges;
+
+- gic: interrupt-controller@82000000 {
++ gic: interrupt-controller@82010000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+@@ -126,7 +126,7 @@
+ i2c0: i2c@a4030000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- compatible = "renesas,i2c-r9a09g011", "renesas,rzv2m-i2c";
++ compatible = "renesas,r9a09g011-i2c", "renesas,rzv2m-i2c";
+ reg = <0 0xa4030000 0 0x80>;
+ interrupts = <GIC_SPI 232 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 236 IRQ_TYPE_EDGE_RISING>;
+@@ -140,7 +140,7 @@
+ i2c2: i2c@a4030100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- compatible = "renesas,i2c-r9a09g011", "renesas,rzv2m-i2c";
++ compatible = "renesas,r9a09g011-i2c", "renesas,rzv2m-i2c";
+ reg = <0 0xa4030100 0 0x80>;
+ interrupts = <GIC_SPI 234 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 238 IRQ_TYPE_EDGE_RISING>;
+diff --git a/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi b/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi
+index d0abb9aa0e9ed..e3852c9463528 100644
+--- a/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi
++++ b/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi
+@@ -55,14 +55,14 @@
+ samsung,pins = "gpf5-0";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_NONE>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV2>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ ufs_refclk_out: ufs-refclk-out-pins {
+ samsung,pins = "gpf5-1";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_NONE>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV2>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+ };
+
+@@ -239,105 +239,105 @@
+ samsung,pins = "gpb6-1";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV2>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ pwm1_out: pwm1-out-pins {
+ samsung,pins = "gpb6-5";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV2>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ hs_i2c0_bus: hs-i2c0-bus-pins {
+ samsung,pins = "gpb0-0", "gpb0-1";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ hs_i2c1_bus: hs-i2c1-bus-pins {
+ samsung,pins = "gpb0-2", "gpb0-3";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ hs_i2c2_bus: hs-i2c2-bus-pins {
+ samsung,pins = "gpb0-4", "gpb0-5";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ hs_i2c3_bus: hs-i2c3-bus-pins {
+ samsung,pins = "gpb0-6", "gpb0-7";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ hs_i2c4_bus: hs-i2c4-bus-pins {
+ samsung,pins = "gpb1-0", "gpb1-1";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ hs_i2c5_bus: hs-i2c5-bus-pins {
+ samsung,pins = "gpb1-2", "gpb1-3";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ hs_i2c6_bus: hs-i2c6-bus-pins {
+ samsung,pins = "gpb1-4", "gpb1-5";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ hs_i2c7_bus: hs-i2c7-bus-pins {
+ samsung,pins = "gpb1-6", "gpb1-7";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ uart0_data: uart0-data-pins {
+ samsung,pins = "gpb7-0", "gpb7-1";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_NONE>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ uart1_data: uart1-data-pins {
+ samsung,pins = "gpb7-4", "gpb7-5";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_NONE>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ spi0_bus: spi0-bus-pins {
+ samsung,pins = "gpb4-0", "gpb4-2", "gpb4-3";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ spi1_bus: spi1-bus-pins {
+ samsung,pins = "gpb4-4", "gpb4-6", "gpb4-7";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+
+ spi2_bus: spi2-bus-pins {
+ samsung,pins = "gpb5-0", "gpb5-2", "gpb5-3";
+ samsung,pin-function = <FSD_PIN_FUNC_2>;
+ samsung,pin-pud = <FSD_PIN_PULL_UP>;
+- samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++ samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/tesla/fsd-pinctrl.h b/arch/arm64/boot/dts/tesla/fsd-pinctrl.h
+index 6ffbda3624930..c397d02208a08 100644
+--- a/arch/arm64/boot/dts/tesla/fsd-pinctrl.h
++++ b/arch/arm64/boot/dts/tesla/fsd-pinctrl.h
+@@ -16,9 +16,9 @@
+ #define FSD_PIN_PULL_UP 3
+
+ #define FSD_PIN_DRV_LV1 0
+-#define FSD_PIN_DRV_LV2 2
+-#define FSD_PIN_DRV_LV3 1
+-#define FSD_PIN_DRV_LV4 3
++#define FSD_PIN_DRV_LV2 1
++#define FSD_PIN_DRV_LV4 2
++#define FSD_PIN_DRV_LV6 3
+
+ #define FSD_PIN_FUNC_INPUT 0
+ #define FSD_PIN_FUNC_OUTPUT 1
+diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+index 4005a73cfea99..ebb1c5ce7aece 100644
+--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+@@ -120,7 +120,6 @@
+ dmas = <&main_udmap 0xc001>, <&main_udmap 0x4002>,
+ <&main_udmap 0x4003>;
+ dma-names = "tx", "rx1", "rx2";
+- dma-coherent;
+
+ rng: rng@4e10000 {
+ compatible = "inside-secure,safexcel-eip76";
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+index e5be78a58682d..d3fb86b2ea939 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+@@ -386,7 +386,6 @@
+ dmas = <&mcu_udmap 0xf501>, <&mcu_udmap 0x7502>,
+ <&mcu_udmap 0x7503>;
+ dma-names = "tx", "rx1", "rx2";
+- dma-coherent;
+
+ rng: rng@40910000 {
+ compatible = "inside-secure,safexcel-eip76";
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+index 917c9dc99efaa..603ddda5127fa 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+@@ -337,7 +337,6 @@
+ dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>,
+ <&main_udmap 0x4001>;
+ dma-names = "tx", "rx1", "rx2";
+- dma-coherent;
+
+ rng: rng@4e10000 {
+ compatible = "inside-secure,safexcel-eip76";
+diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
+index 34e7d577ae13b..c89f28235812a 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
+@@ -60,7 +60,7 @@
+ #interrupt-cells = <1>;
+ ti,sci = <&sms>;
+ ti,sci-dev-id = <148>;
+- ti,interrupt-ranges = <8 360 56>;
++ ti,interrupt-ranges = <8 392 56>;
+ };
+
+ main_pmx0: pinctrl@11c000 {
+diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
+index 4d1bfabd1313a..f0644851602cd 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
+@@ -65,7 +65,7 @@
+ #interrupt-cells = <1>;
+ ti,sci = <&sms>;
+ ti,sci-dev-id = <125>;
+- ti,interrupt-ranges = <16 928 16>;
++ ti,interrupt-ranges = <16 960 16>;
+ };
+
+ mcu_conf: syscon@40f00000 {
+diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
+index 8bd80508a710d..4b121dc0cfba2 100644
+--- a/arch/arm64/crypto/Kconfig
++++ b/arch/arm64/crypto/Kconfig
+@@ -96,6 +96,17 @@ config CRYPTO_SHA3_ARM64
+ Architecture: arm64 using:
+ - ARMv8.2 Crypto Extensions
+
++config CRYPTO_SM3_NEON
++ tristate "Hash functions: SM3 (NEON)"
++ depends on KERNEL_MODE_NEON
++ select CRYPTO_HASH
++ select CRYPTO_SM3
++ help
++ SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012)
++
++ Architecture: arm64 using:
++ - NEON (Advanced SIMD) extensions
++
+ config CRYPTO_SM3_ARM64_CE
+ tristate "Hash functions: SM3 (ARMv8.2 Crypto Extensions)"
+ depends on KERNEL_MODE_NEON
+diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
+index 24bb0c4610de2..087f1625e7751 100644
+--- a/arch/arm64/crypto/Makefile
++++ b/arch/arm64/crypto/Makefile
+@@ -17,6 +17,9 @@ sha512-ce-y := sha512-ce-glue.o sha512-ce-core.o
+ obj-$(CONFIG_CRYPTO_SHA3_ARM64) += sha3-ce.o
+ sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o
+
++obj-$(CONFIG_CRYPTO_SM3_NEON) += sm3-neon.o
++sm3-neon-y := sm3-neon-glue.o sm3-neon-core.o
++
+ obj-$(CONFIG_CRYPTO_SM3_ARM64_CE) += sm3-ce.o
+ sm3-ce-y := sm3-ce-glue.o sm3-ce-core.o
+
+diff --git a/arch/arm64/crypto/sm3-neon-core.S b/arch/arm64/crypto/sm3-neon-core.S
+new file mode 100644
+index 0000000000000..4357e0e51be38
+--- /dev/null
++++ b/arch/arm64/crypto/sm3-neon-core.S
+@@ -0,0 +1,601 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * sm3-neon-core.S - SM3 secure hash using NEON instructions
++ *
++ * Linux/arm64 port of the libgcrypt SM3 implementation for AArch64
++ *
++ * Copyright (C) 2021 Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ * Copyright (c) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
++ */
++
++#include <linux/linkage.h>
++#include <linux/cfi_types.h>
++#include <asm/assembler.h>
++
++/* Context structure */
++
++#define state_h0 0
++#define state_h1 4
++#define state_h2 8
++#define state_h3 12
++#define state_h4 16
++#define state_h5 20
++#define state_h6 24
++#define state_h7 28
++
++/* Stack structure */
++
++#define STACK_W_SIZE (32 * 2 * 3)
++
++#define STACK_W (0)
++#define STACK_SIZE (STACK_W + STACK_W_SIZE)
++
++/* Register macros */
++
++#define RSTATE x0
++#define RDATA x1
++#define RNBLKS x2
++#define RKPTR x28
++#define RFRAME x29
++
++#define ra w3
++#define rb w4
++#define rc w5
++#define rd w6
++#define re w7
++#define rf w8
++#define rg w9
++#define rh w10
++
++#define t0 w11
++#define t1 w12
++#define t2 w13
++#define t3 w14
++#define t4 w15
++#define t5 w16
++#define t6 w17
++
++#define k_even w19
++#define k_odd w20
++
++#define addr0 x21
++#define addr1 x22
++
++#define s0 w23
++#define s1 w24
++#define s2 w25
++#define s3 w26
++
++#define W0 v0
++#define W1 v1
++#define W2 v2
++#define W3 v3
++#define W4 v4
++#define W5 v5
++
++#define XTMP0 v6
++#define XTMP1 v7
++#define XTMP2 v16
++#define XTMP3 v17
++#define XTMP4 v18
++#define XTMP5 v19
++#define XTMP6 v20
++
++/* Helper macros. */
++
++#define _(...) /*_*/
++
++#define clear_vec(x) \
++ movi x.8h, #0;
++
++#define rolw(o, a, n) \
++ ror o, a, #(32 - n);
++
++/* Round function macros. */
++
++#define GG1_1(x, y, z, o, t) \
++ eor o, x, y;
++#define GG1_2(x, y, z, o, t) \
++ eor o, o, z;
++#define GG1_3(x, y, z, o, t)
++
++#define FF1_1(x, y, z, o, t) GG1_1(x, y, z, o, t)
++#define FF1_2(x, y, z, o, t)
++#define FF1_3(x, y, z, o, t) GG1_2(x, y, z, o, t)
++
++#define GG2_1(x, y, z, o, t) \
++ bic o, z, x;
++#define GG2_2(x, y, z, o, t) \
++ and t, y, x;
++#define GG2_3(x, y, z, o, t) \
++ eor o, o, t;
++
++#define FF2_1(x, y, z, o, t) \
++ eor o, x, y;
++#define FF2_2(x, y, z, o, t) \
++ and t, x, y; \
++ and o, o, z;
++#define FF2_3(x, y, z, o, t) \
++ eor o, o, t;
++
++#define R(i, a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \
++ K_LOAD(round); \
++ ldr t5, [sp, #(wtype##_W1_ADDR(round, widx))]; \
++ rolw(t0, a, 12); /* rol(a, 12) => t0 */ \
++ IOP(1, iop_param); \
++ FF##i##_1(a, b, c, t1, t2); \
++ ldr t6, [sp, #(wtype##_W1W2_ADDR(round, widx))]; \
++ add k, k, e; \
++ IOP(2, iop_param); \
++ GG##i##_1(e, f, g, t3, t4); \
++ FF##i##_2(a, b, c, t1, t2); \
++ IOP(3, iop_param); \
++ add k, k, t0; \
++ add h, h, t5; \
++ add d, d, t6; /* w1w2 + d => d */ \
++ IOP(4, iop_param); \
++ rolw(k, k, 7); /* rol (t0 + e + t), 7) => k */ \
++ GG##i##_2(e, f, g, t3, t4); \
++ add h, h, k; /* h + w1 + k => h */ \
++ IOP(5, iop_param); \
++ FF##i##_3(a, b, c, t1, t2); \
++ eor t0, t0, k; /* k ^ t0 => t0 */ \
++ GG##i##_3(e, f, g, t3, t4); \
++ add d, d, t1; /* FF(a,b,c) + d => d */ \
++ IOP(6, iop_param); \
++ add t3, t3, h; /* GG(e,f,g) + h => t3 */ \
++ rolw(b, b, 9); /* rol(b, 9) => b */ \
++ eor h, t3, t3, ror #(32-9); \
++ IOP(7, iop_param); \
++ add d, d, t0; /* t0 + d => d */ \
++ rolw(f, f, 19); /* rol(f, 19) => f */ \
++ IOP(8, iop_param); \
++ eor h, h, t3, ror #(32-17); /* P0(t3) => h */
++
++#define R1(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \
++ R(1, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param)
++
++#define R2(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \
++ R(2, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param)
++
++#define KL(round) \
++ ldp k_even, k_odd, [RKPTR, #(4*(round))];
++
++/* Input expansion macros. */
++
++/* Byte-swapped input address. */
++#define IW_W_ADDR(round, widx, offs) \
++ (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4))
++
++/* Expanded input address. */
++#define XW_W_ADDR(round, widx, offs) \
++ (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4))
++
++/* Rounds 1-12, byte-swapped input block addresses. */
++#define IW_W1_ADDR(round, widx) IW_W_ADDR(round, widx, 32)
++#define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 48)
++
++/* Rounds 1-12, expanded input block addresses. */
++#define XW_W1_ADDR(round, widx) XW_W_ADDR(round, widx, 0)
++#define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 16)
++
++/* Input block loading.
++ * Interleaving within round function needed for in-order CPUs. */
++#define LOAD_W_VEC_1_1() \
++ add addr0, sp, #IW_W1_ADDR(0, 0);
++#define LOAD_W_VEC_1_2() \
++ add addr1, sp, #IW_W1_ADDR(4, 0);
++#define LOAD_W_VEC_1_3() \
++ ld1 {W0.16b}, [RDATA], #16;
++#define LOAD_W_VEC_1_4() \
++ ld1 {W1.16b}, [RDATA], #16;
++#define LOAD_W_VEC_1_5() \
++ ld1 {W2.16b}, [RDATA], #16;
++#define LOAD_W_VEC_1_6() \
++ ld1 {W3.16b}, [RDATA], #16;
++#define LOAD_W_VEC_1_7() \
++ rev32 XTMP0.16b, W0.16b;
++#define LOAD_W_VEC_1_8() \
++ rev32 XTMP1.16b, W1.16b;
++#define LOAD_W_VEC_2_1() \
++ rev32 XTMP2.16b, W2.16b;
++#define LOAD_W_VEC_2_2() \
++ rev32 XTMP3.16b, W3.16b;
++#define LOAD_W_VEC_2_3() \
++ eor XTMP4.16b, XTMP1.16b, XTMP0.16b;
++#define LOAD_W_VEC_2_4() \
++ eor XTMP5.16b, XTMP2.16b, XTMP1.16b;
++#define LOAD_W_VEC_2_5() \
++ st1 {XTMP0.16b}, [addr0], #16;
++#define LOAD_W_VEC_2_6() \
++ st1 {XTMP4.16b}, [addr0]; \
++ add addr0, sp, #IW_W1_ADDR(8, 0);
++#define LOAD_W_VEC_2_7() \
++ eor XTMP6.16b, XTMP3.16b, XTMP2.16b;
++#define LOAD_W_VEC_2_8() \
++ ext W0.16b, XTMP0.16b, XTMP0.16b, #8; /* W0: xx, w0, xx, xx */
++#define LOAD_W_VEC_3_1() \
++ mov W2.16b, XTMP1.16b; /* W2: xx, w6, w5, w4 */
++#define LOAD_W_VEC_3_2() \
++ st1 {XTMP1.16b}, [addr1], #16;
++#define LOAD_W_VEC_3_3() \
++ st1 {XTMP5.16b}, [addr1]; \
++ ext W1.16b, XTMP0.16b, XTMP0.16b, #4; /* W1: xx, w3, w2, w1 */
++#define LOAD_W_VEC_3_4() \
++ ext W3.16b, XTMP1.16b, XTMP2.16b, #12; /* W3: xx, w9, w8, w7 */
++#define LOAD_W_VEC_3_5() \
++ ext W4.16b, XTMP2.16b, XTMP3.16b, #8; /* W4: xx, w12, w11, w10 */
++#define LOAD_W_VEC_3_6() \
++ st1 {XTMP2.16b}, [addr0], #16;
++#define LOAD_W_VEC_3_7() \
++ st1 {XTMP6.16b}, [addr0];
++#define LOAD_W_VEC_3_8() \
++ ext W5.16b, XTMP3.16b, XTMP3.16b, #4; /* W5: xx, w15, w14, w13 */
++
++#define LOAD_W_VEC_1(iop_num, ...) \
++ LOAD_W_VEC_1_##iop_num()
++#define LOAD_W_VEC_2(iop_num, ...) \
++ LOAD_W_VEC_2_##iop_num()
++#define LOAD_W_VEC_3(iop_num, ...) \
++ LOAD_W_VEC_3_##iop_num()
++
++/* Message scheduling. Note: 3 words per vector register.
++ * Interleaving within round function needed for in-order CPUs. */
++#define SCHED_W_1_1(round, w0, w1, w2, w3, w4, w5) \
++ /* Load (w[i - 16]) => XTMP0 */ \
++ /* Load (w[i - 13]) => XTMP5 */ \
++ ext XTMP0.16b, w0.16b, w0.16b, #12; /* XTMP0: w0, xx, xx, xx */
++#define SCHED_W_1_2(round, w0, w1, w2, w3, w4, w5) \
++ ext XTMP5.16b, w1.16b, w1.16b, #12;
++#define SCHED_W_1_3(round, w0, w1, w2, w3, w4, w5) \
++ ext XTMP0.16b, XTMP0.16b, w1.16b, #12; /* XTMP0: xx, w2, w1, w0 */
++#define SCHED_W_1_4(round, w0, w1, w2, w3, w4, w5) \
++ ext XTMP5.16b, XTMP5.16b, w2.16b, #12;
++#define SCHED_W_1_5(round, w0, w1, w2, w3, w4, w5) \
++ /* w[i - 9] == w3 */ \
++ /* W3 ^ XTMP0 => XTMP0 */ \
++ eor XTMP0.16b, XTMP0.16b, w3.16b;
++#define SCHED_W_1_6(round, w0, w1, w2, w3, w4, w5) \
++ /* w[i - 3] == w5 */ \
++ /* rol(XMM5, 15) ^ XTMP0 => XTMP0 */ \
++ /* rol(XTMP5, 7) => XTMP1 */ \
++ add addr0, sp, #XW_W1_ADDR((round), 0); \
++ shl XTMP2.4s, w5.4s, #15;
++#define SCHED_W_1_7(round, w0, w1, w2, w3, w4, w5) \
++ shl XTMP1.4s, XTMP5.4s, #7;
++#define SCHED_W_1_8(round, w0, w1, w2, w3, w4, w5) \
++ sri XTMP2.4s, w5.4s, #(32-15);
++#define SCHED_W_2_1(round, w0, w1, w2, w3, w4, w5) \
++ sri XTMP1.4s, XTMP5.4s, #(32-7);
++#define SCHED_W_2_2(round, w0, w1, w2, w3, w4, w5) \
++ eor XTMP0.16b, XTMP0.16b, XTMP2.16b;
++#define SCHED_W_2_3(round, w0, w1, w2, w3, w4, w5) \
++ /* w[i - 6] == W4 */ \
++ /* W4 ^ XTMP1 => XTMP1 */ \
++ eor XTMP1.16b, XTMP1.16b, w4.16b;
++#define SCHED_W_2_4(round, w0, w1, w2, w3, w4, w5) \
++ /* P1(XTMP0) ^ XTMP1 => W0 */ \
++ shl XTMP3.4s, XTMP0.4s, #15;
++#define SCHED_W_2_5(round, w0, w1, w2, w3, w4, w5) \
++ shl XTMP4.4s, XTMP0.4s, #23;
++#define SCHED_W_2_6(round, w0, w1, w2, w3, w4, w5) \
++ eor w0.16b, XTMP1.16b, XTMP0.16b;
++#define SCHED_W_2_7(round, w0, w1, w2, w3, w4, w5) \
++ sri XTMP3.4s, XTMP0.4s, #(32-15);
++#define SCHED_W_2_8(round, w0, w1, w2, w3, w4, w5) \
++ sri XTMP4.4s, XTMP0.4s, #(32-23);
++#define SCHED_W_3_1(round, w0, w1, w2, w3, w4, w5) \
++ eor w0.16b, w0.16b, XTMP3.16b;
++#define SCHED_W_3_2(round, w0, w1, w2, w3, w4, w5) \
++ /* Load (w[i - 3]) => XTMP2 */ \
++ ext XTMP2.16b, w4.16b, w4.16b, #12;
++#define SCHED_W_3_3(round, w0, w1, w2, w3, w4, w5) \
++ eor w0.16b, w0.16b, XTMP4.16b;
++#define SCHED_W_3_4(round, w0, w1, w2, w3, w4, w5) \
++ ext XTMP2.16b, XTMP2.16b, w5.16b, #12;
++#define SCHED_W_3_5(round, w0, w1, w2, w3, w4, w5) \
++ /* W1 ^ W2 => XTMP3 */ \
++ eor XTMP3.16b, XTMP2.16b, w0.16b;
++#define SCHED_W_3_6(round, w0, w1, w2, w3, w4, w5)
++#define SCHED_W_3_7(round, w0, w1, w2, w3, w4, w5) \
++ st1 {XTMP2.16b-XTMP3.16b}, [addr0];
++#define SCHED_W_3_8(round, w0, w1, w2, w3, w4, w5)
++
++#define SCHED_W_W0W1W2W3W4W5_1(iop_num, round) \
++ SCHED_W_1_##iop_num(round, W0, W1, W2, W3, W4, W5)
++#define SCHED_W_W0W1W2W3W4W5_2(iop_num, round) \
++ SCHED_W_2_##iop_num(round, W0, W1, W2, W3, W4, W5)
++#define SCHED_W_W0W1W2W3W4W5_3(iop_num, round) \
++ SCHED_W_3_##iop_num(round, W0, W1, W2, W3, W4, W5)
++
++#define SCHED_W_W1W2W3W4W5W0_1(iop_num, round) \
++ SCHED_W_1_##iop_num(round, W1, W2, W3, W4, W5, W0)
++#define SCHED_W_W1W2W3W4W5W0_2(iop_num, round) \
++ SCHED_W_2_##iop_num(round, W1, W2, W3, W4, W5, W0)
++#define SCHED_W_W1W2W3W4W5W0_3(iop_num, round) \
++ SCHED_W_3_##iop_num(round, W1, W2, W3, W4, W5, W0)
++
++#define SCHED_W_W2W3W4W5W0W1_1(iop_num, round) \
++ SCHED_W_1_##iop_num(round, W2, W3, W4, W5, W0, W1)
++#define SCHED_W_W2W3W4W5W0W1_2(iop_num, round) \
++ SCHED_W_2_##iop_num(round, W2, W3, W4, W5, W0, W1)
++#define SCHED_W_W2W3W4W5W0W1_3(iop_num, round) \
++ SCHED_W_3_##iop_num(round, W2, W3, W4, W5, W0, W1)
++
++#define SCHED_W_W3W4W5W0W1W2_1(iop_num, round) \
++ SCHED_W_1_##iop_num(round, W3, W4, W5, W0, W1, W2)
++#define SCHED_W_W3W4W5W0W1W2_2(iop_num, round) \
++ SCHED_W_2_##iop_num(round, W3, W4, W5, W0, W1, W2)
++#define SCHED_W_W3W4W5W0W1W2_3(iop_num, round) \
++ SCHED_W_3_##iop_num(round, W3, W4, W5, W0, W1, W2)
++
++#define SCHED_W_W4W5W0W1W2W3_1(iop_num, round) \
++ SCHED_W_1_##iop_num(round, W4, W5, W0, W1, W2, W3)
++#define SCHED_W_W4W5W0W1W2W3_2(iop_num, round) \
++ SCHED_W_2_##iop_num(round, W4, W5, W0, W1, W2, W3)
++#define SCHED_W_W4W5W0W1W2W3_3(iop_num, round) \
++ SCHED_W_3_##iop_num(round, W4, W5, W0, W1, W2, W3)
++
++#define SCHED_W_W5W0W1W2W3W4_1(iop_num, round) \
++ SCHED_W_1_##iop_num(round, W5, W0, W1, W2, W3, W4)
++#define SCHED_W_W5W0W1W2W3W4_2(iop_num, round) \
++ SCHED_W_2_##iop_num(round, W5, W0, W1, W2, W3, W4)
++#define SCHED_W_W5W0W1W2W3W4_3(iop_num, round) \
++ SCHED_W_3_##iop_num(round, W5, W0, W1, W2, W3, W4)
++
++
++ /*
++ * Transform blocks*64 bytes (blocks*16 32-bit words) at 'src'.
++ *
++ * void sm3_neon_transform(struct sm3_state *sst, u8 const *src,
++ * int blocks)
++ */
++ .text
++.align 3
++SYM_TYPED_FUNC_START(sm3_neon_transform)
++ ldp ra, rb, [RSTATE, #0]
++ ldp rc, rd, [RSTATE, #8]
++ ldp re, rf, [RSTATE, #16]
++ ldp rg, rh, [RSTATE, #24]
++
++ stp x28, x29, [sp, #-16]!
++ stp x19, x20, [sp, #-16]!
++ stp x21, x22, [sp, #-16]!
++ stp x23, x24, [sp, #-16]!
++ stp x25, x26, [sp, #-16]!
++ mov RFRAME, sp
++
++ sub addr0, sp, #STACK_SIZE
++ adr_l RKPTR, .LKtable
++ and sp, addr0, #(~63)
++
++ /* Preload first block. */
++ LOAD_W_VEC_1(1, 0)
++ LOAD_W_VEC_1(2, 0)
++ LOAD_W_VEC_1(3, 0)
++ LOAD_W_VEC_1(4, 0)
++ LOAD_W_VEC_1(5, 0)
++ LOAD_W_VEC_1(6, 0)
++ LOAD_W_VEC_1(7, 0)
++ LOAD_W_VEC_1(8, 0)
++ LOAD_W_VEC_2(1, 0)
++ LOAD_W_VEC_2(2, 0)
++ LOAD_W_VEC_2(3, 0)
++ LOAD_W_VEC_2(4, 0)
++ LOAD_W_VEC_2(5, 0)
++ LOAD_W_VEC_2(6, 0)
++ LOAD_W_VEC_2(7, 0)
++ LOAD_W_VEC_2(8, 0)
++ LOAD_W_VEC_3(1, 0)
++ LOAD_W_VEC_3(2, 0)
++ LOAD_W_VEC_3(3, 0)
++ LOAD_W_VEC_3(4, 0)
++ LOAD_W_VEC_3(5, 0)
++ LOAD_W_VEC_3(6, 0)
++ LOAD_W_VEC_3(7, 0)
++ LOAD_W_VEC_3(8, 0)
++
++.balign 16
++.Loop:
++ /* Transform 0-3 */
++ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 0, 0, IW, _, 0)
++ R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 1, 1, IW, _, 0)
++ R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 2, 2, IW, _, 0)
++ R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 3, 3, IW, _, 0)
++
++ /* Transform 4-7 + Precalc 12-14 */
++ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 4, 0, IW, _, 0)
++ R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 5, 1, IW, _, 0)
++ R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 6, 2, IW, SCHED_W_W0W1W2W3W4W5_1, 12)
++ R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 7, 3, IW, SCHED_W_W0W1W2W3W4W5_2, 12)
++
++ /* Transform 8-11 + Precalc 12-17 */
++ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 8, 0, IW, SCHED_W_W0W1W2W3W4W5_3, 12)
++ R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 9, 1, IW, SCHED_W_W1W2W3W4W5W0_1, 15)
++ R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 10, 2, IW, SCHED_W_W1W2W3W4W5W0_2, 15)
++ R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 11, 3, IW, SCHED_W_W1W2W3W4W5W0_3, 15)
++
++ /* Transform 12-14 + Precalc 18-20 */
++ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 12, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 18)
++ R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 13, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 18)
++ R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 14, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 18)
++
++ /* Transform 15-17 + Precalc 21-23 */
++ R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 15, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 21)
++ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 16, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 21)
++ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 17, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 21)
++
++ /* Transform 18-20 + Precalc 24-26 */
++ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 18, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 24)
++ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 19, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 24)
++ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 20, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 24)
++
++ /* Transform 21-23 + Precalc 27-29 */
++ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 21, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 27)
++ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 22, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 27)
++ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 23, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 27)
++
++ /* Transform 24-26 + Precalc 30-32 */
++ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 24, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 30)
++ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 25, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 30)
++ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 26, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 30)
++
++ /* Transform 27-29 + Precalc 33-35 */
++ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 27, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 33)
++ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 28, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 33)
++ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 29, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 33)
++
++ /* Transform 30-32 + Precalc 36-38 */
++ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 30, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 36)
++ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 31, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 36)
++ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 32, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 36)
++
++ /* Transform 33-35 + Precalc 39-41 */
++ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 33, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 39)
++ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 34, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 39)
++ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 35, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 39)
++
++ /* Transform 36-38 + Precalc 42-44 */
++ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 36, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 42)
++ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 37, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 42)
++ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 38, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 42)
++
++ /* Transform 39-41 + Precalc 45-47 */
++ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 39, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 45)
++ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 40, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 45)
++ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 41, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 45)
++
++ /* Transform 42-44 + Precalc 48-50 */
++ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 42, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 48)
++ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 43, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 48)
++ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 44, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 48)
++
++ /* Transform 45-47 + Precalc 51-53 */
++ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 45, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 51)
++ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 46, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 51)
++ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 47, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 51)
++
++ /* Transform 48-50 + Precalc 54-56 */
++ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 48, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 54)
++ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 49, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 54)
++ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 50, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 54)
++
++ /* Transform 51-53 + Precalc 57-59 */
++ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 51, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 57)
++ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 52, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 57)
++ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 53, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 57)
++
++ /* Transform 54-56 + Precalc 60-62 */
++ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 54, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 60)
++ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 55, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 60)
++ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 56, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 60)
++
++ /* Transform 57-59 + Precalc 63 */
++ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 57, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 63)
++ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 58, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 63)
++ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 59, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 63)
++
++ /* Transform 60 */
++ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 60, 0, XW, _, _)
++ subs RNBLKS, RNBLKS, #1
++ b.eq .Lend
++
++ /* Transform 61-63 + Preload next block */
++ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 61, 1, XW, LOAD_W_VEC_1, _)
++ ldp s0, s1, [RSTATE, #0]
++ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, LOAD_W_VEC_2, _)
++ ldp s2, s3, [RSTATE, #8]
++ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 63, 0, XW, LOAD_W_VEC_3, _)
++
++ /* Update the chaining variables. */
++ eor ra, ra, s0
++ eor rb, rb, s1
++ ldp s0, s1, [RSTATE, #16]
++ eor rc, rc, s2
++ ldp k_even, k_odd, [RSTATE, #24]
++ eor rd, rd, s3
++ eor re, re, s0
++ stp ra, rb, [RSTATE, #0]
++ eor rf, rf, s1
++ stp rc, rd, [RSTATE, #8]
++ eor rg, rg, k_even
++ stp re, rf, [RSTATE, #16]
++ eor rh, rh, k_odd
++ stp rg, rh, [RSTATE, #24]
++ b .Loop
++
++.Lend:
++ /* Transform 61-63 */
++ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 61, 1, XW, _, _)
++ ldp s0, s1, [RSTATE, #0]
++ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, _, _)
++ ldp s2, s3, [RSTATE, #8]
++ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 63, 0, XW, _, _)
++
++ /* Update the chaining variables. */
++ eor ra, ra, s0
++ clear_vec(W0)
++ eor rb, rb, s1
++ clear_vec(W1)
++ ldp s0, s1, [RSTATE, #16]
++ clear_vec(W2)
++ eor rc, rc, s2
++ clear_vec(W3)
++ ldp k_even, k_odd, [RSTATE, #24]
++ clear_vec(W4)
++ eor rd, rd, s3
++ clear_vec(W5)
++ eor re, re, s0
++ clear_vec(XTMP0)
++ stp ra, rb, [RSTATE, #0]
++ clear_vec(XTMP1)
++ eor rf, rf, s1
++ clear_vec(XTMP2)
++ stp rc, rd, [RSTATE, #8]
++ clear_vec(XTMP3)
++ eor rg, rg, k_even
++ clear_vec(XTMP4)
++ stp re, rf, [RSTATE, #16]
++ clear_vec(XTMP5)
++ eor rh, rh, k_odd
++ clear_vec(XTMP6)
++ stp rg, rh, [RSTATE, #24]
++
++ /* Clear message expansion area */
++ add addr0, sp, #STACK_W
++ st1 {W0.16b-W3.16b}, [addr0], #64
++ st1 {W0.16b-W3.16b}, [addr0], #64
++ st1 {W0.16b-W3.16b}, [addr0]
++
++ mov sp, RFRAME
++
++ ldp x25, x26, [sp], #16
++ ldp x23, x24, [sp], #16
++ ldp x21, x22, [sp], #16
++ ldp x19, x20, [sp], #16
++ ldp x28, x29, [sp], #16
++
++ ret
++SYM_FUNC_END(sm3_neon_transform)
++
++
++ .section ".rodata", "a"
++
++ .align 4
++.LKtable:
++ .long 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb
++ .long 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc
++ .long 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce
++ .long 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6
++ .long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c
++ .long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce
++ .long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec
++ .long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
++ .long 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53
++ .long 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d
++ .long 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4
++ .long 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43
++ .long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c
++ .long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce
++ .long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec
++ .long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
+diff --git a/arch/arm64/crypto/sm3-neon-glue.c b/arch/arm64/crypto/sm3-neon-glue.c
+new file mode 100644
+index 0000000000000..7182ee683f14a
+--- /dev/null
++++ b/arch/arm64/crypto/sm3-neon-glue.c
+@@ -0,0 +1,103 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * sm3-neon-glue.c - SM3 secure hash using NEON instructions
++ *
++ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
++ */
++
++#include <asm/neon.h>
++#include <asm/simd.h>
++#include <asm/unaligned.h>
++#include <crypto/internal/hash.h>
++#include <crypto/internal/simd.h>
++#include <crypto/sm3.h>
++#include <crypto/sm3_base.h>
++#include <linux/cpufeature.h>
++#include <linux/crypto.h>
++#include <linux/module.h>
++
++
++asmlinkage void sm3_neon_transform(struct sm3_state *sst, u8 const *src,
++ int blocks);
++
++static int sm3_neon_update(struct shash_desc *desc, const u8 *data,
++ unsigned int len)
++{
++ if (!crypto_simd_usable()) {
++ sm3_update(shash_desc_ctx(desc), data, len);
++ return 0;
++ }
++
++ kernel_neon_begin();
++ sm3_base_do_update(desc, data, len, sm3_neon_transform);
++ kernel_neon_end();
++
++ return 0;
++}
++
++static int sm3_neon_final(struct shash_desc *desc, u8 *out)
++{
++ if (!crypto_simd_usable()) {
++ sm3_final(shash_desc_ctx(desc), out);
++ return 0;
++ }
++
++ kernel_neon_begin();
++ sm3_base_do_finalize(desc, sm3_neon_transform);
++ kernel_neon_end();
++
++ return sm3_base_finish(desc, out);
++}
++
++static int sm3_neon_finup(struct shash_desc *desc, const u8 *data,
++ unsigned int len, u8 *out)
++{
++ if (!crypto_simd_usable()) {
++ struct sm3_state *sctx = shash_desc_ctx(desc);
++
++ if (len)
++ sm3_update(sctx, data, len);
++ sm3_final(sctx, out);
++ return 0;
++ }
++
++ kernel_neon_begin();
++ if (len)
++ sm3_base_do_update(desc, data, len, sm3_neon_transform);
++ sm3_base_do_finalize(desc, sm3_neon_transform);
++ kernel_neon_end();
++
++ return sm3_base_finish(desc, out);
++}
++
++static struct shash_alg sm3_alg = {
++ .digestsize = SM3_DIGEST_SIZE,
++ .init = sm3_base_init,
++ .update = sm3_neon_update,
++ .final = sm3_neon_final,
++ .finup = sm3_neon_finup,
++ .descsize = sizeof(struct sm3_state),
++ .base.cra_name = "sm3",
++ .base.cra_driver_name = "sm3-neon",
++ .base.cra_blocksize = SM3_BLOCK_SIZE,
++ .base.cra_module = THIS_MODULE,
++ .base.cra_priority = 200,
++};
++
++static int __init sm3_neon_init(void)
++{
++ return crypto_register_shash(&sm3_alg);
++}
++
++static void __exit sm3_neon_fini(void)
++{
++ crypto_unregister_shash(&sm3_alg);
++}
++
++module_init(sm3_neon_init);
++module_exit(sm3_neon_fini);
++
++MODULE_DESCRIPTION("SM3 secure hash using NEON instructions");
++MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
++MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
++MODULE_LICENSE("GPL v2");
+diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
+index 445aa3af3b762..400f8956328b9 100644
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -308,13 +308,13 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
+ }
+ #endif
+
+-static inline bool is_ttbr0_addr(unsigned long addr)
++static __always_inline bool is_ttbr0_addr(unsigned long addr)
+ {
+ /* entry assembly clears tags for TTBR0 addrs */
+ return addr < TASK_SIZE;
+ }
+
+-static inline bool is_ttbr1_addr(unsigned long addr)
++static __always_inline bool is_ttbr1_addr(unsigned long addr)
+ {
+ /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+ return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 5b391490e045b..74f76514a48d0 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -353,6 +353,11 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned long esr)
+ return false;
+ }
+
++static bool is_translation_fault(unsigned long esr)
++{
++ return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
++}
++
+ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
+ struct pt_regs *regs)
+ {
+@@ -385,7 +390,8 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
+ } else if (addr < PAGE_SIZE) {
+ msg = "NULL pointer dereference";
+ } else {
+- if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
++ if (is_translation_fault(esr) &&
++ kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
+ return;
+
+ msg = "paging request";
+diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
+index 6e6756e8fa0a9..86a6e25908664 100644
+--- a/arch/mips/bcm63xx/clk.c
++++ b/arch/mips/bcm63xx/clk.c
+@@ -361,6 +361,8 @@ static struct clk clk_periph = {
+ */
+ int clk_enable(struct clk *clk)
+ {
++ if (!clk)
++ return 0;
+ mutex_lock(&clocks_mutex);
+ clk_enable_unlocked(clk);
+ mutex_unlock(&clocks_mutex);
+diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
+index 37c46720c719a..f38c39572a9e8 100644
+--- a/arch/mips/boot/dts/ingenic/ci20.dts
++++ b/arch/mips/boot/dts/ingenic/ci20.dts
+@@ -438,7 +438,7 @@
+ ingenic,nemc-tAW = <50>;
+ ingenic,nemc-tSTRV = <100>;
+
+- reset-gpios = <&gpf 12 GPIO_ACTIVE_HIGH>;
++ reset-gpios = <&gpf 12 GPIO_ACTIVE_LOW>;
+ vcc-supply = <&eth0_power>;
+
+ interrupt-parent = <&gpe>;
+diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+index d09d0769f5496..0fd9ac76eb742 100644
+--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
++++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+@@ -211,7 +211,7 @@ union cvmx_helper_link_info __cvmx_helper_board_link_get(int ipd_port)
+ {
+ union cvmx_helper_link_info result;
+
+- WARN(!octeon_is_simulation(),
++ WARN_ONCE(!octeon_is_simulation(),
+ "Using deprecated link status - please update your DT");
+
+ /* Unless we fix it later, all links are defaulted to down */
+diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
+index 6f49fd9be1f3c..9abfc4bf9bd83 100644
+--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
++++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
+@@ -1096,7 +1096,7 @@ union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port)
+ if (index == 0)
+ result = __cvmx_helper_rgmii_link_get(ipd_port);
+ else {
+- WARN(1, "Using deprecated link status - please update your DT");
++ WARN_ONCE(1, "Using deprecated link status - please update your DT");
+ result.s.full_duplex = 1;
+ result.s.link_up = 1;
+ result.s.speed = 1000;
+diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h
+index 6bbf082dd149e..79d5bb0e06d63 100644
+--- a/arch/mips/include/asm/mach-ralink/mt7621.h
++++ b/arch/mips/include/asm/mach-ralink/mt7621.h
+@@ -7,10 +7,12 @@
+ #ifndef _MT7621_REGS_H_
+ #define _MT7621_REGS_H_
+
++#define IOMEM(x) ((void __iomem *)(KSEG1ADDR(x)))
++
+ #define MT7621_PALMBUS_BASE 0x1C000000
+ #define MT7621_PALMBUS_SIZE 0x03FFFFFF
+
+-#define MT7621_SYSC_BASE 0x1E000000
++#define MT7621_SYSC_BASE IOMEM(0x1E000000)
+
+ #define SYSC_REG_CHIP_NAME0 0x00
+ #define SYSC_REG_CHIP_NAME1 0x04
+diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c
+index e673603e11e5d..92140edb3ce3e 100644
+--- a/arch/mips/kernel/vpe-cmp.c
++++ b/arch/mips/kernel/vpe-cmp.c
+@@ -75,7 +75,6 @@ ATTRIBUTE_GROUPS(vpe);
+
+ static void vpe_device_release(struct device *cd)
+ {
+- kfree(cd);
+ }
+
+ static struct class vpe_class = {
+@@ -157,6 +156,7 @@ out_dev:
+ device_del(&vpe_device);
+
+ out_class:
++ put_device(&vpe_device);
+ class_unregister(&vpe_class);
+
+ out_chrdev:
+@@ -169,7 +169,7 @@ void __exit vpe_module_exit(void)
+ {
+ struct vpe *v, *n;
+
+- device_del(&vpe_device);
++ device_unregister(&vpe_device);
+ class_unregister(&vpe_class);
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
+index bad6b0891b2b5..84a82b551ec35 100644
+--- a/arch/mips/kernel/vpe-mt.c
++++ b/arch/mips/kernel/vpe-mt.c
+@@ -313,7 +313,6 @@ ATTRIBUTE_GROUPS(vpe);
+
+ static void vpe_device_release(struct device *cd)
+ {
+- kfree(cd);
+ }
+
+ static struct class vpe_class = {
+@@ -497,6 +496,7 @@ out_dev:
+ device_del(&vpe_device);
+
+ out_class:
++ put_device(&vpe_device);
+ class_unregister(&vpe_class);
+
+ out_chrdev:
+@@ -509,7 +509,7 @@ void __exit vpe_module_exit(void)
+ {
+ struct vpe *v, *n;
+
+- device_del(&vpe_device);
++ device_unregister(&vpe_device);
+ class_unregister(&vpe_class);
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c
+index fb0565bc34fda..bbf5811afbf2c 100644
+--- a/arch/mips/ralink/mt7621.c
++++ b/arch/mips/ralink/mt7621.c
+@@ -25,6 +25,7 @@
+ #define MT7621_MEM_TEST_PATTERN 0xaa5555aa
+
+ static u32 detect_magic __initdata;
++static struct ralink_soc_info *soc_info_ptr;
+
+ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+ {
+@@ -97,41 +98,83 @@ void __init ralink_of_remap(void)
+ panic("Failed to remap core resources");
+ }
+
+-static void soc_dev_init(struct ralink_soc_info *soc_info, u32 rev)
++static unsigned int __init mt7621_get_soc_name0(void)
++{
++ return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_NAME0);
++}
++
++static unsigned int __init mt7621_get_soc_name1(void)
++{
++ return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_NAME1);
++}
++
++static bool __init mt7621_soc_valid(void)
++{
++ if (mt7621_get_soc_name0() == MT7621_CHIP_NAME0 &&
++ mt7621_get_soc_name1() == MT7621_CHIP_NAME1)
++ return true;
++ else
++ return false;
++}
++
++static const char __init *mt7621_get_soc_id(void)
++{
++ if (mt7621_soc_valid())
++ return "MT7621";
++ else
++ return "invalid";
++}
++
++static unsigned int __init mt7621_get_soc_rev(void)
++{
++ return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_REV);
++}
++
++static unsigned int __init mt7621_get_soc_ver(void)
++{
++ return (mt7621_get_soc_rev() >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK;
++}
++
++static unsigned int __init mt7621_get_soc_eco(void)
++{
++ return (mt7621_get_soc_rev() & CHIP_REV_ECO_MASK);
++}
++
++static const char __init *mt7621_get_soc_revision(void)
++{
++ if (mt7621_get_soc_rev() == 1 && mt7621_get_soc_eco() == 1)
++ return "E2";
++ else
++ return "E1";
++}
++
++static int __init mt7621_soc_dev_init(void)
+ {
+ struct soc_device *soc_dev;
+ struct soc_device_attribute *soc_dev_attr;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+- return;
++ return -ENOMEM;
+
+ soc_dev_attr->soc_id = "mt7621";
+ soc_dev_attr->family = "Ralink";
++ soc_dev_attr->revision = mt7621_get_soc_revision();
+
+- if (((rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK) == 1 &&
+- (rev & CHIP_REV_ECO_MASK) == 1)
+- soc_dev_attr->revision = "E2";
+- else
+- soc_dev_attr->revision = "E1";
+-
+- soc_dev_attr->data = soc_info;
++ soc_dev_attr->data = soc_info_ptr;
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr);
+- return;
++ return PTR_ERR(soc_dev);
+ }
++
++ return 0;
+ }
++device_initcall(mt7621_soc_dev_init);
+
+ void __init prom_soc_init(struct ralink_soc_info *soc_info)
+ {
+- void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE);
+- unsigned char *name = NULL;
+- u32 n0;
+- u32 n1;
+- u32 rev;
+-
+ /* Early detection of CMP support */
+ mips_cm_probe();
+ mips_cpc_probe();
+@@ -154,27 +197,23 @@ void __init prom_soc_init(struct ralink_soc_info *soc_info)
+ __sync();
+ }
+
+- n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+- n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+-
+- if (n0 == MT7621_CHIP_NAME0 && n1 == MT7621_CHIP_NAME1) {
+- name = "MT7621";
++ if (mt7621_soc_valid())
+ soc_info->compatible = "mediatek,mt7621-soc";
+- } else {
+- panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+- }
++ else
++ panic("mt7621: unknown SoC, n0:%08x n1:%08x\n",
++ mt7621_get_soc_name0(),
++ mt7621_get_soc_name1());
+ ralink_soc = MT762X_SOC_MT7621AT;
+- rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
+
+ snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+ "MediaTek %s ver:%u eco:%u",
+- name,
+- (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
+- (rev & CHIP_REV_ECO_MASK));
++ mt7621_get_soc_id(),
++ mt7621_get_soc_ver(),
++ mt7621_get_soc_eco());
+
+ soc_info->mem_detect = mt7621_memory_detect;
+
+- soc_dev_init(soc_info, rev);
++ soc_info_ptr = soc_info;
+
+ if (!register_cps_smp_ops())
+ return;
+diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
+index ea8072acf8d94..01c132bc33d54 100644
+--- a/arch/mips/ralink/of.c
++++ b/arch/mips/ralink/of.c
+@@ -21,6 +21,7 @@
+ #include <asm/bootinfo.h>
+ #include <asm/addrspace.h>
+ #include <asm/prom.h>
++#include <asm/mach-ralink/ralink_regs.h>
+
+ #include "common.h"
+
+@@ -81,7 +82,8 @@ static int __init plat_of_setup(void)
+ __dt_register_buses(soc_info.compatible, "palmbus");
+
+ /* make sure that the reset controller is setup early */
+- ralink_rst_init();
++ if (ralink_soc != MT762X_SOC_MT7621AT)
++ ralink_rst_init();
+
+ return 0;
+ }
+diff --git a/arch/powerpc/boot/dts/turris1x.dts b/arch/powerpc/boot/dts/turris1x.dts
+index 045af668e9284..e9cda34a140e0 100644
+--- a/arch/powerpc/boot/dts/turris1x.dts
++++ b/arch/powerpc/boot/dts/turris1x.dts
+@@ -69,6 +69,20 @@
+ interrupt-parent = <&gpio>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>, /* GPIO12 - ALERT pin */
+ <13 IRQ_TYPE_LEVEL_LOW>; /* GPIO13 - CRIT pin */
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ /* Local temperature sensor (SA56004ED internal) */
++ channel@0 {
++ reg = <0>;
++ label = "board";
++ };
++
++ /* Remote temperature sensor (D+/D- connected to P2020 CPU Temperature Diode) */
++ channel@1 {
++ reg = <1>;
++ label = "cpu";
++ };
+ };
+
+ /* DDR3 SPD/EEPROM */
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index 8abae463f6c12..95fd7f9485d55 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -79,7 +79,7 @@
+ #define H_NOT_ENOUGH_RESOURCES -44
+ #define H_R_STATE -45
+ #define H_RESCINDED -46
+-#define H_P1 -54
++#define H_ABORTED -54
+ #define H_P2 -55
+ #define H_P3 -56
+ #define H_P4 -57
+@@ -100,7 +100,6 @@
+ #define H_COP_HW -74
+ #define H_STATE -75
+ #define H_IN_USE -77
+-#define H_ABORTED -78
+ #define H_UNSUPPORTED_FLAG_START -256
+ #define H_UNSUPPORTED_FLAG_END -511
+ #define H_MULTI_THREADS_ACTIVE -9005
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index e847f9b1c5b9f..767ab166933ba 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -889,6 +889,7 @@ void __noreturn rtas_halt(void)
+
+ /* Must be in the RMO region, so we place it here */
+ static char rtas_os_term_buf[2048];
++static s32 ibm_os_term_token = RTAS_UNKNOWN_SERVICE;
+
+ void rtas_os_term(char *str)
+ {
+@@ -900,16 +901,20 @@ void rtas_os_term(char *str)
+ * this property may terminate the partition which we want to avoid
+ * since it interferes with panic_timeout.
+ */
+- if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
+- RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
++ if (ibm_os_term_token == RTAS_UNKNOWN_SERVICE)
+ return;
+
+ snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
+
++ /*
++ * Keep calling as long as RTAS returns a "try again" status,
++ * but don't use rtas_busy_delay(), which potentially
++ * schedules.
++ */
+ do {
+- status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
++ status = rtas_call(ibm_os_term_token, 1, 1, NULL,
+ __pa(rtas_os_term_buf));
+- } while (rtas_busy_delay(status));
++ } while (rtas_busy_delay_time(status));
+
+ if (status != 0)
+ printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
+@@ -1277,6 +1282,13 @@ void __init rtas_initialize(void)
+ no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
+ rtas.entry = no_entry ? rtas.base : entry;
+
++ /*
++ * Discover these now to avoid device tree lookups in the
++ * panic path.
++ */
++ if (of_property_read_bool(rtas.dev, "ibm,extended-os-term"))
++ ibm_os_term_token = rtas_token("ibm,os-term");
++
+ /* If RTAS was found, allocate the RMO buffer for it and look for
+ * the stop-self token if any
+ */
+diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
+index 082f6d0308a47..8718289c051dd 100644
+--- a/arch/powerpc/perf/callchain.c
++++ b/arch/powerpc/perf/callchain.c
+@@ -61,6 +61,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
+ next_sp = fp[0];
+
+ if (next_sp == sp + STACK_INT_FRAME_SIZE &&
++ validate_sp(sp, current, STACK_INT_FRAME_SIZE) &&
+ fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
+ /*
+ * This looks like an interrupt frame for an
+diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h
+index 8965b4463d433..5e86371a20c78 100644
+--- a/arch/powerpc/perf/hv-gpci-requests.h
++++ b/arch/powerpc/perf/hv-gpci-requests.h
+@@ -79,6 +79,7 @@ REQUEST(__field(0, 8, partition_id)
+ )
+ #include I(REQUEST_END)
+
++#ifdef ENABLE_EVENTS_COUNTERINFO_V6
+ /*
+ * Not available for counter_info_version >= 0x8, use
+ * run_instruction_cycles_by_partition(0x100) instead.
+@@ -92,6 +93,7 @@ REQUEST(__field(0, 8, partition_id)
+ __count(0x10, 8, cycles)
+ )
+ #include I(REQUEST_END)
++#endif
+
+ #define REQUEST_NAME system_performance_capabilities
+ #define REQUEST_NUM 0x40
+@@ -103,6 +105,7 @@ REQUEST(__field(0, 1, perf_collect_privileged)
+ )
+ #include I(REQUEST_END)
+
++#ifdef ENABLE_EVENTS_COUNTERINFO_V6
+ #define REQUEST_NAME processor_bus_utilization_abc_links
+ #define REQUEST_NUM 0x50
+ #define REQUEST_IDX_KIND "hw_chip_id=?"
+@@ -194,6 +197,7 @@ REQUEST(__field(0, 4, phys_processor_idx)
+ __count(0x28, 8, instructions_completed)
+ )
+ #include I(REQUEST_END)
++#endif
+
+ /* Processor_core_power_mode (0x95) skipped, no counters */
+ /* Affinity_domain_information_by_virtual_processor (0xA0) skipped,
+diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
+index 5eb60ed5b5e8a..7ff8ff3509f5f 100644
+--- a/arch/powerpc/perf/hv-gpci.c
++++ b/arch/powerpc/perf/hv-gpci.c
+@@ -70,9 +70,9 @@ static const struct attribute_group format_group = {
+ .attrs = format_attrs,
+ };
+
+-static const struct attribute_group event_group = {
++static struct attribute_group event_group = {
+ .name = "events",
+- .attrs = hv_gpci_event_attrs,
++ /* .attrs is set in init */
+ };
+
+ #define HV_CAPS_ATTR(_name, _format) \
+@@ -330,6 +330,7 @@ static int hv_gpci_init(void)
+ int r;
+ unsigned long hret;
+ struct hv_perf_caps caps;
++ struct hv_gpci_request_buffer *arg;
+
+ hv_gpci_assert_offsets_correct();
+
+@@ -353,6 +354,36 @@ static int hv_gpci_init(void)
+ /* sampling not supported */
+ h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
++ arg = (void *)get_cpu_var(hv_gpci_reqb);
++ memset(arg, 0, HGPCI_REQ_BUFFER_SIZE);
++
++ /*
++ * hcall H_GET_PERF_COUNTER_INFO populates the output
++ * counter_info_version value based on the system hypervisor.
++ * Pass the counter request 0x10 corresponds to request type
++ * 'Dispatch_timebase_by_processor', to get the supported
++ * counter_info_version.
++ */
++ arg->params.counter_request = cpu_to_be32(0x10);
++
++ r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
++ virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
++ if (r) {
++ pr_devel("hcall failed, can't get supported counter_info_version: 0x%x\n", r);
++ arg->params.counter_info_version_out = 0x8;
++ }
++
++ /*
++ * Use counter_info_version_out value to assign
++ * required hv-gpci event list.
++ */
++ if (arg->params.counter_info_version_out >= 0x8)
++ event_group.attrs = hv_gpci_event_attrs;
++ else
++ event_group.attrs = hv_gpci_event_attrs_v6;
++
++ put_cpu_var(hv_gpci_reqb);
++
+ r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
+ if (r)
+ return r;
+diff --git a/arch/powerpc/perf/hv-gpci.h b/arch/powerpc/perf/hv-gpci.h
+index 4d108262bed79..c72020912dea5 100644
+--- a/arch/powerpc/perf/hv-gpci.h
++++ b/arch/powerpc/perf/hv-gpci.h
+@@ -26,6 +26,7 @@ enum {
+ #define REQUEST_FILE "../hv-gpci-requests.h"
+ #define NAME_LOWER hv_gpci
+ #define NAME_UPPER HV_GPCI
++#define ENABLE_EVENTS_COUNTERINFO_V6
+ #include "req-gen/perf.h"
+ #undef REQUEST_FILE
+ #undef NAME_LOWER
+diff --git a/arch/powerpc/perf/req-gen/perf.h b/arch/powerpc/perf/req-gen/perf.h
+index fa9bc804e67af..6b2a59fefffa7 100644
+--- a/arch/powerpc/perf/req-gen/perf.h
++++ b/arch/powerpc/perf/req-gen/perf.h
+@@ -139,6 +139,26 @@ PMU_EVENT_ATTR_STRING( \
+ #define REQUEST_(r_name, r_value, r_idx_1, r_fields) \
+ r_fields
+
++/* Generate event list for platforms with counter_info_version 0x6 or below */
++static __maybe_unused struct attribute *hv_gpci_event_attrs_v6[] = {
++#include REQUEST_FILE
++ NULL
++};
++
++/*
++ * Based on getPerfCountInfo v1.018 documentation, some of the hv-gpci
++ * events were deprecated for platform firmware that supports
++ * counter_info_version 0x8 or above.
++ * Those deprecated events are still part of platform firmware that
++ * support counter_info_version 0x6 and below. As per the getPerfCountInfo
++ * v1.018 documentation there is no counter_info_version 0x7.
++ * Undefining macro ENABLE_EVENTS_COUNTERINFO_V6, to disable the addition of
++ * deprecated events in "hv_gpci_event_attrs" attribute group, for platforms
++ * that supports counter_info_version 0x8 or above.
++ */
++#undef ENABLE_EVENTS_COUNTERINFO_V6
++
++/* Generate event list for platforms with counter_info_version 0x8 or above*/
+ static __maybe_unused struct attribute *hv_gpci_event_attrs[] = {
+ #include REQUEST_FILE
+ NULL
+diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+index 48038aaedbd36..2875c206ac0f8 100644
+--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
++++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+@@ -531,6 +531,7 @@ static int mpc52xx_lpbfifo_probe(struct platform_device *op)
+ err_bcom_rx_irq:
+ bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
+ err_bcom_rx:
++ free_irq(lpbfifo.irq, &lpbfifo);
+ err_irq:
+ iounmap(lpbfifo.regs);
+ lpbfifo.regs = NULL;
+diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+index e12cb44e717f1..caa96edf0e72a 100644
+--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
++++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+@@ -107,7 +107,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk,
+
+ goto next;
+ unreg:
+- platform_device_del(pdev);
++ platform_device_put(pdev);
+ err:
+ pr_err("%pOF: registration failed\n", np);
+ next:
+diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
+index 8e40ccac0f44e..e5a58a9b2fe9f 100644
+--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
+@@ -848,16 +848,7 @@ static int __init eeh_pseries_init(void)
+ }
+
+ /* Initialize error log size */
+- eeh_error_buf_size = rtas_token("rtas-error-log-max");
+- if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
+- pr_info("%s: unknown EEH error log size\n",
+- __func__);
+- eeh_error_buf_size = 1024;
+- } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
+- pr_info("%s: EEH error log size %d exceeds the maximal %d\n",
+- __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
+- eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
+- }
++ eeh_error_buf_size = rtas_get_error_log_max();
+
+ /* Set EEH probe mode */
+ eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);
+diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c
+index f4b5b5a64db3d..63a1e1fe01851 100644
+--- a/arch/powerpc/platforms/pseries/plpks.c
++++ b/arch/powerpc/platforms/pseries/plpks.c
+@@ -75,7 +75,7 @@ static int pseries_status_to_err(int rc)
+ case H_FUNCTION:
+ err = -ENXIO;
+ break;
+- case H_P1:
++ case H_PARAMETER:
+ case H_P2:
+ case H_P3:
+ case H_P4:
+@@ -111,7 +111,7 @@ static int pseries_status_to_err(int rc)
+ err = -EEXIST;
+ break;
+ case H_ABORTED:
+- err = -EINTR;
++ err = -EIO;
+ break;
+ default:
+ err = -EINVAL;
+@@ -366,22 +366,24 @@ static int plpks_read_var(u8 consumer, struct plpks_var *var)
+ {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+ struct plpks_auth *auth;
+- struct label *label;
++ struct label *label = NULL;
+ u8 *output;
+ int rc;
+
+ if (var->namelen > MAX_NAME_SIZE)
+ return -EINVAL;
+
+- auth = construct_auth(PKS_OS_OWNER);
++ auth = construct_auth(consumer);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+- label = construct_label(var->component, var->os, var->name,
+- var->namelen);
+- if (IS_ERR(label)) {
+- rc = PTR_ERR(label);
+- goto out_free_auth;
++ if (consumer == PKS_OS_OWNER) {
++ label = construct_label(var->component, var->os, var->name,
++ var->namelen);
++ if (IS_ERR(label)) {
++ rc = PTR_ERR(label);
++ goto out_free_auth;
++ }
+ }
+
+ output = kzalloc(maxobjsize, GFP_KERNEL);
+@@ -390,9 +392,15 @@ static int plpks_read_var(u8 consumer, struct plpks_var *var)
+ goto out_free_label;
+ }
+
+- rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth),
+- virt_to_phys(label), label->size, virt_to_phys(output),
+- maxobjsize);
++ if (consumer == PKS_OS_OWNER)
++ rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth),
++ virt_to_phys(label), label->size, virt_to_phys(output),
++ maxobjsize);
++ else
++ rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth),
++ virt_to_phys(var->name), var->namelen, virt_to_phys(output),
++ maxobjsize);
++
+
+ if (rc != H_SUCCESS) {
+ pr_err("Failed to read variable %s for component %s with error %d\n",
+diff --git a/arch/powerpc/platforms/pseries/plpks.h b/arch/powerpc/platforms/pseries/plpks.h
+index c6a291367bb13..275ccd86bfb5e 100644
+--- a/arch/powerpc/platforms/pseries/plpks.h
++++ b/arch/powerpc/platforms/pseries/plpks.h
+@@ -17,7 +17,7 @@
+ #define WORLDREADABLE 0x08000000
+ #define SIGNEDUPDATE 0x01000000
+
+-#define PLPKS_VAR_LINUX 0x01
++#define PLPKS_VAR_LINUX 0x02
+ #define PLPKS_VAR_COMMON 0x04
+
+ struct plpks_var {
+diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
+index e2c8f93b535ba..e454192643910 100644
+--- a/arch/powerpc/sysdev/xive/spapr.c
++++ b/arch/powerpc/sysdev/xive/spapr.c
+@@ -439,6 +439,7 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
+
+ data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
+ if (!data->trig_mmio) {
++ iounmap(data->eoi_mmio);
+ pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
+ return -ENOMEM;
+ }
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index f51c882bf9023..e34d7809f6c9f 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -1525,9 +1525,9 @@ bpt_cmds(void)
+ cmd = inchar();
+
+ switch (cmd) {
+- static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n";
+- int mode;
+- case 'd': /* bd - hardware data breakpoint */
++ case 'd': { /* bd - hardware data breakpoint */
++ static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n";
++ int mode;
+ if (xmon_is_ro) {
+ printf(xmon_ro_msg);
+ break;
+@@ -1560,6 +1560,7 @@ bpt_cmds(void)
+
+ force_enable_xmon();
+ break;
++ }
+
+ case 'i': /* bi - hardware instr breakpoint */
+ if (xmon_is_ro) {
+diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi
+index 24b1cfb9a73e4..5d3e5240e33ae 100644
+--- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi
++++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi
+@@ -9,7 +9,7 @@
+ compatible = "microchip,corepwm-rtl-v4";
+ reg = <0x0 0x40000000 0x0 0xF0>;
+ microchip,sync-update-mask = /bits/ 32 <0>;
+- #pwm-cells = <2>;
++ #pwm-cells = <3>;
+ clocks = <&fabric_clk3>;
+ status = "disabled";
+ };
+diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
+index ec7b7c2a3ce28..8ced67c3b00b2 100644
+--- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
++++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
+@@ -37,7 +37,7 @@
+ status = "okay";
+ };
+
+- ddrc_cache_hi: memory@1000000000 {
++ ddrc_cache_hi: memory@1040000000 {
+ device_type = "memory";
+ reg = <0x10 0x40000000 0x0 0x40000000>;
+ status = "okay";
+diff --git a/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi
+index 8545baf4d1290..39a77df489abf 100644
+--- a/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi
++++ b/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi
+@@ -13,33 +13,4 @@
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ };
+-
+- pcie: pcie@2000000000 {
+- compatible = "microchip,pcie-host-1.0";
+- #address-cells = <0x3>;
+- #interrupt-cells = <0x1>;
+- #size-cells = <0x2>;
+- device_type = "pci";
+- reg = <0x20 0x0 0x0 0x8000000>, <0x0 0x43000000 0x0 0x10000>;
+- reg-names = "cfg", "apb";
+- bus-range = <0x0 0x7f>;
+- interrupt-parent = <&plic>;
+- interrupts = <119>;
+- interrupt-map = <0 0 0 1 &pcie_intc 0>,
+- <0 0 0 2 &pcie_intc 1>,
+- <0 0 0 3 &pcie_intc 2>,
+- <0 0 0 4 &pcie_intc 3>;
+- interrupt-map-mask = <0 0 0 7>;
+- clocks = <&fabric_clk1>, <&fabric_clk1>, <&fabric_clk3>;
+- clock-names = "fic0", "fic1", "fic3";
+- ranges = <0x3000000 0x0 0x8000000 0x20 0x8000000 0x0 0x80000000>;
+- msi-parent = <&pcie>;
+- msi-controller;
+- status = "disabled";
+- pcie_intc: interrupt-controller {
+- #address-cells = <0>;
+- #interrupt-cells = <1>;
+- interrupt-controller;
+- };
+- };
+ };
+diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
+index a5c2ca1d1cd8b..ec19d6afc8965 100644
+--- a/arch/riscv/include/asm/hugetlb.h
++++ b/arch/riscv/include/asm/hugetlb.h
+@@ -5,4 +5,10 @@
+ #include <asm-generic/hugetlb.h>
+ #include <asm/page.h>
+
++static inline void arch_clear_hugepage_flags(struct page *page)
++{
++ clear_bit(PG_dcache_clean, &page->flags);
++}
++#define arch_clear_hugepage_flags arch_clear_hugepage_flags
++
+ #endif /* _ASM_RISCV_HUGETLB_H */
+diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
+index 92080a2279372..42497d487a174 100644
+--- a/arch/riscv/include/asm/io.h
++++ b/arch/riscv/include/asm/io.h
+@@ -135,4 +135,9 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
+
+ #include <asm-generic/io.h>
+
++#ifdef CONFIG_MMU
++#define arch_memremap_wb(addr, size) \
++ ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL))
++#endif
++
+ #endif /* _ASM_RISCV_IO_H */
+diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
+index dc42375c23571..42a042c0e13ed 100644
+--- a/arch/riscv/include/asm/pgtable-64.h
++++ b/arch/riscv/include/asm/pgtable-64.h
+@@ -25,7 +25,11 @@ extern bool pgtable_l5_enabled;
+ #define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+ /* p4d is folded into pgd in case of 4-level page table */
+-#define P4D_SHIFT 39
++#define P4D_SHIFT_L3 30
++#define P4D_SHIFT_L4 39
++#define P4D_SHIFT_L5 39
++#define P4D_SHIFT (pgtable_l5_enabled ? P4D_SHIFT_L5 : \
++ (pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3))
+ #define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
+ #define P4D_MASK (~(P4D_SIZE - 1))
+
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index 186abd146eaff..3221a9e5f3724 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -263,12 +263,11 @@ ret_from_exception:
+ #endif
+ bnez s0, resume_kernel
+
+-resume_userspace:
+ /* Interrupts must be disabled here so flags are checked atomically */
+ REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
+ andi s1, s0, _TIF_WORK_MASK
+- bnez s1, work_pending
+-
++ bnez s1, resume_userspace_slow
++resume_userspace:
+ #ifdef CONFIG_CONTEXT_TRACKING_USER
+ call user_enter_callable
+ #endif
+@@ -368,19 +367,12 @@ resume_kernel:
+ j restore_all
+ #endif
+
+-work_pending:
++resume_userspace_slow:
+ /* Enter slow path for supplementary processing */
+- la ra, ret_from_exception
+- andi s1, s0, _TIF_NEED_RESCHED
+- bnez s1, work_resched
+-work_notifysig:
+- /* Handle pending signals and notify-resume requests */
+- csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
+ move a0, sp /* pt_regs */
+ move a1, s0 /* current_thread_info->flags */
+- tail do_notify_resume
+-work_resched:
+- tail schedule
++ call do_work_pending
++ j resume_userspace
+
+ /* Slow paths for ptrace. */
+ handle_syscall_trace_enter:
+diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
+index 5c591123c4409..bfb2afa4135f8 100644
+--- a/arch/riscv/kernel/signal.c
++++ b/arch/riscv/kernel/signal.c
+@@ -313,19 +313,27 @@ static void do_signal(struct pt_regs *regs)
+ }
+
+ /*
+- * notification of userspace execution resumption
+- * - triggered by the _TIF_WORK_MASK flags
++ * Handle any pending work on the resume-to-userspace path, as indicated by
++ * _TIF_WORK_MASK. Entered from assembly with IRQs off.
+ */
+-asmlinkage __visible void do_notify_resume(struct pt_regs *regs,
+- unsigned long thread_info_flags)
++asmlinkage __visible void do_work_pending(struct pt_regs *regs,
++ unsigned long thread_info_flags)
+ {
+- if (thread_info_flags & _TIF_UPROBE)
+- uprobe_notify_resume(regs);
+-
+- /* Handle pending signal delivery */
+- if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+- do_signal(regs);
+-
+- if (thread_info_flags & _TIF_NOTIFY_RESUME)
+- resume_user_mode_work(regs);
++ do {
++ if (thread_info_flags & _TIF_NEED_RESCHED) {
++ schedule();
++ } else {
++ local_irq_enable();
++ if (thread_info_flags & _TIF_UPROBE)
++ uprobe_notify_resume(regs);
++ /* Handle pending signal delivery */
++ if (thread_info_flags & (_TIF_SIGPENDING |
++ _TIF_NOTIFY_SIGNAL))
++ do_signal(regs);
++ if (thread_info_flags & _TIF_NOTIFY_RESUME)
++ resume_user_mode_work(regs);
++ }
++ local_irq_disable();
++ thread_info_flags = read_thread_flags();
++ } while (thread_info_flags & _TIF_WORK_MASK);
+ }
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index 7abd8e4c4df63..f77cb8e42bd2a 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -214,7 +214,7 @@ static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
+ * shadow stack, handled_ kernel_ stack_ overflow(in kernel/entry.S) is used
+ * to get per-cpu overflow stack(get_overflow_stack).
+ */
+-long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)];
++long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
+ asmlinkage unsigned long get_overflow_stack(void)
+ {
+ return (unsigned long)this_cpu_ptr(overflow_stack) +
+diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
+index 71ebbc4821f0e..5174ef54ad1d9 100644
+--- a/arch/riscv/kvm/vcpu.c
++++ b/arch/riscv/kvm/vcpu.c
+@@ -296,12 +296,15 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
+ if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+- /* This ONE REG interface is only defined for single letter extensions */
+- if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
+- return -EINVAL;
+-
+ switch (reg_num) {
+ case KVM_REG_RISCV_CONFIG_REG(isa):
++ /*
++ * This ONE REG interface is only defined for
++ * single letter extensions.
++ */
++ if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
++ return -EINVAL;
++
+ if (!vcpu->arch.ran_atleast_once) {
+ /* Ignore the enable/disable request for certain extensions */
+ for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
+diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c
+index 19cf25a74ee29..9b18bda74154e 100644
+--- a/arch/riscv/mm/physaddr.c
++++ b/arch/riscv/mm/physaddr.c
+@@ -22,7 +22,7 @@ EXPORT_SYMBOL(__virt_to_phys);
+ phys_addr_t __phys_addr_symbol(unsigned long x)
+ {
+ unsigned long kernel_start = kernel_map.virt_addr;
+- unsigned long kernel_end = (unsigned long)_end;
++ unsigned long kernel_end = kernel_start + kernel_map.size;
+
+ /*
+ * Boundary checking aginst the kernel image mapping.
+diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
+index 00df3a8f92acd..f2417ac54edd6 100644
+--- a/arch/riscv/net/bpf_jit_comp64.c
++++ b/arch/riscv/net/bpf_jit_comp64.c
+@@ -136,6 +136,25 @@ static bool in_auipc_jalr_range(s64 val)
+ val < ((1L << 31) - (1L << 11));
+ }
+
++/* Emit fixed-length instructions for address */
++static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx)
++{
++ u64 ip = (u64)(ctx->insns + ctx->ninsns);
++ s64 off = addr - ip;
++ s64 upper = (off + (1 << 11)) >> 12;
++ s64 lower = off & 0xfff;
++
++ if (extra_pass && !in_auipc_jalr_range(off)) {
++ pr_err("bpf-jit: target offset 0x%llx is out of range\n", off);
++ return -ERANGE;
++ }
++
++ emit(rv_auipc(rd, upper), ctx);
++ emit(rv_addi(rd, rd, lower), ctx);
++ return 0;
++}
++
++/* Emit variable-length instructions for 32-bit and 64-bit imm */
+ static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
+ {
+ /* Note that the immediate from the add is sign-extended,
+@@ -1050,7 +1069,15 @@ out_be:
+ u64 imm64;
+
+ imm64 = (u64)insn1.imm << 32 | (u32)imm;
+- emit_imm(rd, imm64, ctx);
++ if (bpf_pseudo_func(insn)) {
++ /* fixed-length insns for extra jit pass */
++ ret = emit_addr(rd, imm64, extra_pass, ctx);
++ if (ret)
++ return ret;
++ } else {
++ emit_imm(rd, imm64, ctx);
++ }
++
+ return 1;
+ }
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 67745ceab0dbc..b2c0fce3f257c 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -462,8 +462,8 @@ config X86_X2APIC
+
+ Some Intel systems circa 2022 and later are locked into x2APIC mode
+ and can not fall back to the legacy APIC modes if SGX or TDX are
+- enabled in the BIOS. They will be unable to boot without enabling
+- this option.
++ enabled in the BIOS. They will boot with very reduced functionality
++ without enabling this option.
+
+ If you don't know what to do here, say N.
+
+diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S
+index b48ddebb47489..cdf3215ec272c 100644
+--- a/arch/x86/crypto/aegis128-aesni-asm.S
++++ b/arch/x86/crypto/aegis128-aesni-asm.S
+@@ -7,6 +7,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ #include <asm/frame.h>
+
+ #define STATE0 %xmm0
+@@ -402,7 +403,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_ad)
+ * void crypto_aegis128_aesni_enc(void *state, unsigned int length,
+ * const void *src, void *dst);
+ */
+-SYM_FUNC_START(crypto_aegis128_aesni_enc)
++SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc)
+ FRAME_BEGIN
+
+ cmp $0x10, LEN
+@@ -499,7 +500,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_enc)
+ * void crypto_aegis128_aesni_enc_tail(void *state, unsigned int length,
+ * const void *src, void *dst);
+ */
+-SYM_FUNC_START(crypto_aegis128_aesni_enc_tail)
++SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc_tail)
+ FRAME_BEGIN
+
+ /* load the state: */
+@@ -556,7 +557,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_enc_tail)
+ * void crypto_aegis128_aesni_dec(void *state, unsigned int length,
+ * const void *src, void *dst);
+ */
+-SYM_FUNC_START(crypto_aegis128_aesni_dec)
++SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec)
+ FRAME_BEGIN
+
+ cmp $0x10, LEN
+@@ -653,7 +654,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_dec)
+ * void crypto_aegis128_aesni_dec_tail(void *state, unsigned int length,
+ * const void *src, void *dst);
+ */
+-SYM_FUNC_START(crypto_aegis128_aesni_dec_tail)
++SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail)
+ FRAME_BEGIN
+
+ /* load the state: */
+diff --git a/arch/x86/crypto/aria-aesni-avx-asm_64.S b/arch/x86/crypto/aria-aesni-avx-asm_64.S
+index c75fd7d015ed8..03ae4cd1d976a 100644
+--- a/arch/x86/crypto/aria-aesni-avx-asm_64.S
++++ b/arch/x86/crypto/aria-aesni-avx-asm_64.S
+@@ -7,6 +7,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ #include <asm/frame.h>
+
+ /* struct aria_ctx: */
+@@ -913,7 +914,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_crypt_16way)
+ RET;
+ SYM_FUNC_END(__aria_aesni_avx_crypt_16way)
+
+-SYM_FUNC_START(aria_aesni_avx_encrypt_16way)
++SYM_TYPED_FUNC_START(aria_aesni_avx_encrypt_16way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+@@ -938,7 +939,7 @@ SYM_FUNC_START(aria_aesni_avx_encrypt_16way)
+ RET;
+ SYM_FUNC_END(aria_aesni_avx_encrypt_16way)
+
+-SYM_FUNC_START(aria_aesni_avx_decrypt_16way)
++SYM_TYPED_FUNC_START(aria_aesni_avx_decrypt_16way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+@@ -1039,7 +1040,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_ctr_gen_keystream_16way)
+ RET;
+ SYM_FUNC_END(__aria_aesni_avx_ctr_gen_keystream_16way)
+
+-SYM_FUNC_START(aria_aesni_avx_ctr_crypt_16way)
++SYM_TYPED_FUNC_START(aria_aesni_avx_ctr_crypt_16way)
+ /* input:
+ * %rdi: ctx
+ * %rsi: dst
+@@ -1208,7 +1209,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way)
+ RET;
+ SYM_FUNC_END(__aria_aesni_avx_gfni_crypt_16way)
+
+-SYM_FUNC_START(aria_aesni_avx_gfni_encrypt_16way)
++SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_encrypt_16way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+@@ -1233,7 +1234,7 @@ SYM_FUNC_START(aria_aesni_avx_gfni_encrypt_16way)
+ RET;
+ SYM_FUNC_END(aria_aesni_avx_gfni_encrypt_16way)
+
+-SYM_FUNC_START(aria_aesni_avx_gfni_decrypt_16way)
++SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_decrypt_16way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+@@ -1258,7 +1259,7 @@ SYM_FUNC_START(aria_aesni_avx_gfni_decrypt_16way)
+ RET;
+ SYM_FUNC_END(aria_aesni_avx_gfni_decrypt_16way)
+
+-SYM_FUNC_START(aria_aesni_avx_gfni_ctr_crypt_16way)
++SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_ctr_crypt_16way)
+ /* input:
+ * %rdi: ctx
+ * %rsi: dst
+diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S
+index 2f94ec0e763bf..3cae5a1bb3d6e 100644
+--- a/arch/x86/crypto/sha1_ni_asm.S
++++ b/arch/x86/crypto/sha1_ni_asm.S
+@@ -54,6 +54,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+
+ #define DIGEST_PTR %rdi /* 1st arg */
+ #define DATA_PTR %rsi /* 2nd arg */
+@@ -93,7 +94,7 @@
+ */
+ .text
+ .align 32
+-SYM_FUNC_START(sha1_ni_transform)
++SYM_TYPED_FUNC_START(sha1_ni_transform)
+ push %rbp
+ mov %rsp, %rbp
+ sub $FRAME_SIZE, %rsp
+diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
+index 263f916362e02..f54988c80eb40 100644
+--- a/arch/x86/crypto/sha1_ssse3_asm.S
++++ b/arch/x86/crypto/sha1_ssse3_asm.S
+@@ -25,6 +25,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+
+ #define CTX %rdi // arg1
+ #define BUF %rsi // arg2
+@@ -67,7 +68,7 @@
+ * param: function's name
+ */
+ .macro SHA1_VECTOR_ASM name
+- SYM_FUNC_START(\name)
++ SYM_TYPED_FUNC_START(\name)
+
+ push %rbx
+ push %r12
+diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
+index 3baa1ec390974..06ea30c20828d 100644
+--- a/arch/x86/crypto/sha256-avx-asm.S
++++ b/arch/x86/crypto/sha256-avx-asm.S
+@@ -48,6 +48,7 @@
+ ########################################################################
+
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+
+ ## assume buffers not aligned
+ #define VMOVDQ vmovdqu
+@@ -346,7 +347,7 @@ a = TMP_
+ ## arg 3 : Num blocks
+ ########################################################################
+ .text
+-SYM_FUNC_START(sha256_transform_avx)
++SYM_TYPED_FUNC_START(sha256_transform_avx)
+ .align 32
+ pushq %rbx
+ pushq %r12
+diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
+index 9bcdbc47b8b4b..2d2be531a11ed 100644
+--- a/arch/x86/crypto/sha256-avx2-asm.S
++++ b/arch/x86/crypto/sha256-avx2-asm.S
+@@ -49,6 +49,7 @@
+ ########################################################################
+
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+
+ ## assume buffers not aligned
+ #define VMOVDQ vmovdqu
+@@ -523,7 +524,7 @@ STACK_SIZE = _CTX + _CTX_SIZE
+ ## arg 3 : Num blocks
+ ########################################################################
+ .text
+-SYM_FUNC_START(sha256_transform_rorx)
++SYM_TYPED_FUNC_START(sha256_transform_rorx)
+ .align 32
+ pushq %rbx
+ pushq %r12
+diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
+index c4a5db612c327..7db28839108dd 100644
+--- a/arch/x86/crypto/sha256-ssse3-asm.S
++++ b/arch/x86/crypto/sha256-ssse3-asm.S
+@@ -47,6 +47,7 @@
+ ########################################################################
+
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+
+ ## assume buffers not aligned
+ #define MOVDQ movdqu
+@@ -355,7 +356,7 @@ a = TMP_
+ ## arg 3 : Num blocks
+ ########################################################################
+ .text
+-SYM_FUNC_START(sha256_transform_ssse3)
++SYM_TYPED_FUNC_START(sha256_transform_ssse3)
+ .align 32
+ pushq %rbx
+ pushq %r12
+diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S
+index 94d50dd27cb53..47f93937f798a 100644
+--- a/arch/x86/crypto/sha256_ni_asm.S
++++ b/arch/x86/crypto/sha256_ni_asm.S
+@@ -54,6 +54,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+
+ #define DIGEST_PTR %rdi /* 1st arg */
+ #define DATA_PTR %rsi /* 2nd arg */
+@@ -97,7 +98,7 @@
+
+ .text
+ .align 32
+-SYM_FUNC_START(sha256_ni_transform)
++SYM_TYPED_FUNC_START(sha256_ni_transform)
+
+ shl $6, NUM_BLKS /* convert to bytes */
+ jz .Ldone_hash
+diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
+index 1fefe6dd3a9e2..b0984f19fdb40 100644
+--- a/arch/x86/crypto/sha512-avx-asm.S
++++ b/arch/x86/crypto/sha512-avx-asm.S
+@@ -48,6 +48,7 @@
+ ########################################################################
+
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+
+ .text
+
+@@ -273,7 +274,7 @@ frame_size = frame_WK + WK_SIZE
+ # of SHA512 message blocks.
+ # "blocks" is the message length in SHA512 blocks
+ ########################################################################
+-SYM_FUNC_START(sha512_transform_avx)
++SYM_TYPED_FUNC_START(sha512_transform_avx)
+ test msglen, msglen
+ je nowork
+
+diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
+index 5cdaab7d69015..b1ca99055ef99 100644
+--- a/arch/x86/crypto/sha512-avx2-asm.S
++++ b/arch/x86/crypto/sha512-avx2-asm.S
+@@ -50,6 +50,7 @@
+ ########################################################################
+
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+
+ .text
+
+@@ -565,7 +566,7 @@ frame_size = frame_CTX + CTX_SIZE
+ # of SHA512 message blocks.
+ # "blocks" is the message length in SHA512 blocks
+ ########################################################################
+-SYM_FUNC_START(sha512_transform_rorx)
++SYM_TYPED_FUNC_START(sha512_transform_rorx)
+ # Save GPRs
+ push %rbx
+ push %r12
+diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
+index b84c22e06c5f7..c06afb5270e5f 100644
+--- a/arch/x86/crypto/sha512-ssse3-asm.S
++++ b/arch/x86/crypto/sha512-ssse3-asm.S
+@@ -48,6 +48,7 @@
+ ########################################################################
+
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+
+ .text
+
+@@ -274,7 +275,7 @@ frame_size = frame_WK + WK_SIZE
+ # of SHA512 message blocks.
+ # "blocks" is the message length in SHA512 blocks.
+ ########################################################################
+-SYM_FUNC_START(sha512_transform_ssse3)
++SYM_TYPED_FUNC_START(sha512_transform_ssse3)
+
+ test msglen, msglen
+ je nowork
+diff --git a/arch/x86/crypto/sm3-avx-asm_64.S b/arch/x86/crypto/sm3-avx-asm_64.S
+index b12b9efb5ec51..8fc5ac681fd63 100644
+--- a/arch/x86/crypto/sm3-avx-asm_64.S
++++ b/arch/x86/crypto/sm3-avx-asm_64.S
+@@ -12,6 +12,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ #include <asm/frame.h>
+
+ /* Context structure */
+@@ -328,7 +329,7 @@
+ * const u8 *data, int nblocks);
+ */
+ .align 16
+-SYM_FUNC_START(sm3_transform_avx)
++SYM_TYPED_FUNC_START(sm3_transform_avx)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: data (64*nblks bytes)
+diff --git a/arch/x86/crypto/sm4-aesni-avx-asm_64.S b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
+index 4767ab61ff489..22b6560eb9e1e 100644
+--- a/arch/x86/crypto/sm4-aesni-avx-asm_64.S
++++ b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
+@@ -14,6 +14,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ #include <asm/frame.h>
+
+ #define rRIP (%rip)
+@@ -420,7 +421,7 @@ SYM_FUNC_END(sm4_aesni_avx_crypt8)
+ * const u8 *src, u8 *iv)
+ */
+ .align 8
+-SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
++SYM_TYPED_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
+ /* input:
+ * %rdi: round key array, CTX
+ * %rsi: dst (8 blocks)
+@@ -495,7 +496,7 @@ SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8)
+ * const u8 *src, u8 *iv)
+ */
+ .align 8
+-SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
++SYM_TYPED_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
+ /* input:
+ * %rdi: round key array, CTX
+ * %rsi: dst (8 blocks)
+@@ -545,7 +546,7 @@ SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8)
+ * const u8 *src, u8 *iv)
+ */
+ .align 8
+-SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
++SYM_TYPED_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
+ /* input:
+ * %rdi: round key array, CTX
+ * %rsi: dst (8 blocks)
+diff --git a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
+index 4732fe8bb65b6..23ee39a8ada8c 100644
+--- a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
++++ b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
+@@ -14,6 +14,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ #include <asm/frame.h>
+
+ #define rRIP (%rip)
+@@ -282,7 +283,7 @@ SYM_FUNC_END(__sm4_crypt_blk16)
+ * const u8 *src, u8 *iv)
+ */
+ .align 8
+-SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
++SYM_TYPED_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
+ /* input:
+ * %rdi: round key array, CTX
+ * %rsi: dst (16 blocks)
+@@ -395,7 +396,7 @@ SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16)
+ * const u8 *src, u8 *iv)
+ */
+ .align 8
+-SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
++SYM_TYPED_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
+ /* input:
+ * %rdi: round key array, CTX
+ * %rsi: dst (16 blocks)
+@@ -449,7 +450,7 @@ SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)
+ * const u8 *src, u8 *iv)
+ */
+ .align 8
+-SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
++SYM_TYPED_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
+ /* input:
+ * %rdi: round key array, CTX
+ * %rsi: dst (16 blocks)
+diff --git a/arch/x86/entry/vdso/vdso.lds.S b/arch/x86/entry/vdso/vdso.lds.S
+index 4bf48462fca7a..e8c60ae7a7c83 100644
+--- a/arch/x86/entry/vdso/vdso.lds.S
++++ b/arch/x86/entry/vdso/vdso.lds.S
+@@ -27,7 +27,9 @@ VERSION {
+ __vdso_time;
+ clock_getres;
+ __vdso_clock_getres;
++#ifdef CONFIG_X86_SGX
+ __vdso_sgx_enter_enclave;
++#endif
+ local: *;
+ };
+ }
+diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
+index 1ef4f7861e2ec..1f4869227efb9 100644
+--- a/arch/x86/events/intel/uncore_snb.c
++++ b/arch/x86/events/intel/uncore_snb.c
+@@ -1338,6 +1338,7 @@ static void __uncore_imc_init_box(struct intel_uncore_box *box,
+ /* MCHBAR is disabled */
+ if (!(mch_bar & BIT(0))) {
+ pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n");
++ pci_dev_put(pdev);
+ return;
+ }
+ mch_bar &= ~BIT(0);
+@@ -1352,6 +1353,8 @@ static void __uncore_imc_init_box(struct intel_uncore_box *box,
+ box->io_addr = ioremap(addr, type->mmio_map_size);
+ if (!box->io_addr)
+ pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
++
++ pci_dev_put(pdev);
+ }
+
+ static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index ed869443efb21..fcd95e93f479a 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -2891,6 +2891,7 @@ static bool hswep_has_limit_sbox(unsigned int device)
+ return false;
+
+ pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
++ pci_dev_put(dev);
+ if (!hswep_get_chop(capid4))
+ return true;
+
+@@ -4492,6 +4493,8 @@ static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_map
+ type->topology = NULL;
+ }
+
++ pci_dev_put(dev);
++
+ return ret;
+ }
+
+@@ -4857,6 +4860,8 @@ static int snr_uncore_mmio_map(struct intel_uncore_box *box,
+
+ addr += box_ctl;
+
++ pci_dev_put(pdev);
++
+ box->io_addr = ioremap(addr, type->mmio_map_size);
+ if (!box->io_addr) {
+ pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
+diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
+index a269049a43ce3..85863b9c9e684 100644
+--- a/arch/x86/hyperv/hv_init.c
++++ b/arch/x86/hyperv/hv_init.c
+@@ -535,8 +535,6 @@ void hyperv_cleanup(void)
+ union hv_x64_msr_hypercall_contents hypercall_msr;
+ union hv_reference_tsc_msr tsc_msr;
+
+- unregister_syscore_ops(&hv_syscore_ops);
+-
+ /* Reset our OS id */
+ wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
+ hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 3415321c8240c..3216da7074bad 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -249,7 +249,6 @@ static inline u64 native_x2apic_icr_read(void)
+ extern int x2apic_mode;
+ extern int x2apic_phys;
+ extern void __init x2apic_set_max_apicid(u32 apicid);
+-extern void __init check_x2apic(void);
+ extern void x2apic_setup(void);
+ static inline int x2apic_enabled(void)
+ {
+@@ -258,13 +257,13 @@ static inline int x2apic_enabled(void)
+
+ #define x2apic_supported() (boot_cpu_has(X86_FEATURE_X2APIC))
+ #else /* !CONFIG_X86_X2APIC */
+-static inline void check_x2apic(void) { }
+ static inline void x2apic_setup(void) { }
+ static inline int x2apic_enabled(void) { return 0; }
+
+ #define x2apic_mode (0)
+ #define x2apic_supported() (0)
+ #endif /* !CONFIG_X86_X2APIC */
++extern void __init check_x2apic(void);
+
+ struct irq_data;
+
+diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
+index fd6f6e5b755a7..a336feef0af14 100644
+--- a/arch/x86/include/asm/realmode.h
++++ b/arch/x86/include/asm/realmode.h
+@@ -91,6 +91,7 @@ static inline void set_real_mode_mem(phys_addr_t mem)
+
+ void reserve_real_mode(void);
+ void load_trampoline_pgtable(void);
++void init_real_mode(void);
+
+ #endif /* __ASSEMBLY__ */
+
+diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
+index e9170457697e4..c1c8c581759d6 100644
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -285,6 +285,8 @@ struct x86_hyper_runtime {
+ * possible in x86_early_init_platform_quirks() by
+ * only using the current x86_hardware_subarch
+ * semantics.
++ * @realmode_reserve: reserve memory for realmode trampoline
++ * @realmode_init: initialize realmode trampoline
+ * @hyper: x86 hypervisor specific runtime callbacks
+ */
+ struct x86_platform_ops {
+@@ -301,6 +303,8 @@ struct x86_platform_ops {
+ void (*apic_post_init)(void);
+ struct x86_legacy_features legacy;
+ void (*set_legacy_features)(void);
++ void (*realmode_reserve)(void);
++ void (*realmode_init)(void);
+ struct x86_hyper_runtime hyper;
+ struct x86_guest guest;
+ };
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index c6876d3ea4b17..20d9a604da7c4 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1931,16 +1931,19 @@ void __init check_x2apic(void)
+ }
+ }
+ #else /* CONFIG_X86_X2APIC */
+-static int __init validate_x2apic(void)
++void __init check_x2apic(void)
+ {
+ if (!apic_is_x2apic_enabled())
+- return 0;
++ return;
+ /*
+- * Checkme: Can we simply turn off x2apic here instead of panic?
++ * Checkme: Can we simply turn off x2APIC here instead of disabling the APIC?
+ */
+- panic("BIOS has enabled x2apic but kernel doesn't support x2apic, please disable x2apic in BIOS.\n");
++ pr_err("Kernel does not support x2APIC, please recompile with CONFIG_X86_X2APIC.\n");
++ pr_err("Disabling APIC, expect reduced performance and functionality.\n");
++
++ disable_apic = 1;
++ setup_clear_cpu_cap(X86_FEATURE_APIC);
+ }
+-early_initcall(validate_x2apic);
+
+ static inline void try_to_enable_x2apic(int remap_mode) { }
+ static inline void __x2apic_enable(void) { }
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 2d7ea5480ec33..4278996504833 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -1034,8 +1034,32 @@ static const struct {
+
+ static struct ratelimit_state bld_ratelimit;
+
++static unsigned int sysctl_sld_mitigate = 1;
+ static DEFINE_SEMAPHORE(buslock_sem);
+
++#ifdef CONFIG_PROC_SYSCTL
++static struct ctl_table sld_sysctls[] = {
++ {
++ .procname = "split_lock_mitigate",
++ .data = &sysctl_sld_mitigate,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_douintvec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = SYSCTL_ONE,
++ },
++ {}
++};
++
++static int __init sld_mitigate_sysctl_init(void)
++{
++ register_sysctl_init("kernel", sld_sysctls);
++ return 0;
++}
++
++late_initcall(sld_mitigate_sysctl_init);
++#endif
++
+ static inline bool match_option(const char *arg, int arglen, const char *opt)
+ {
+ int len = strlen(opt), ratelimit;
+@@ -1146,12 +1170,20 @@ static void split_lock_init(void)
+ split_lock_verify_msr(sld_state != sld_off);
+ }
+
+-static void __split_lock_reenable(struct work_struct *work)
++static void __split_lock_reenable_unlock(struct work_struct *work)
+ {
+ sld_update_msr(true);
+ up(&buslock_sem);
+ }
+
++static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock);
++
++static void __split_lock_reenable(struct work_struct *work)
++{
++ sld_update_msr(true);
++}
++static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);
++
+ /*
+ * If a CPU goes offline with pending delayed work to re-enable split lock
+ * detection then the delayed work will be executed on some other CPU. That
+@@ -1169,10 +1201,9 @@ static int splitlock_cpu_offline(unsigned int cpu)
+ return 0;
+ }
+
+-static DECLARE_DELAYED_WORK(split_lock_reenable, __split_lock_reenable);
+-
+ static void split_lock_warn(unsigned long ip)
+ {
++ struct delayed_work *work;
+ int cpu;
+
+ if (!current->reported_split_lock)
+@@ -1180,14 +1211,26 @@ static void split_lock_warn(unsigned long ip)
+ current->comm, current->pid, ip);
+ current->reported_split_lock = 1;
+
+- /* misery factor #1, sleep 10ms before trying to execute split lock */
+- if (msleep_interruptible(10) > 0)
+- return;
+- /* Misery factor #2, only allow one buslocked disabled core at a time */
+- if (down_interruptible(&buslock_sem) == -EINTR)
+- return;
++ if (sysctl_sld_mitigate) {
++ /*
++ * misery factor #1:
++ * sleep 10ms before trying to execute split lock.
++ */
++ if (msleep_interruptible(10) > 0)
++ return;
++ /*
++ * Misery factor #2:
++ * only allow one buslocked disabled core at a time.
++ */
++ if (down_interruptible(&buslock_sem) == -EINTR)
++ return;
++ work = &sl_reenable_unlock;
++ } else {
++ work = &sl_reenable;
++ }
++
+ cpu = get_cpu();
+- schedule_delayed_work_on(cpu, &split_lock_reenable, 2);
++ schedule_delayed_work_on(cpu, work, 2);
+
+ /* Disable split lock detection on this CPU to make progress */
+ sld_update_msr(false);
+diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
+index 1ec20807de1e8..2c258255a6296 100644
+--- a/arch/x86/kernel/cpu/sgx/encl.c
++++ b/arch/x86/kernel/cpu/sgx/encl.c
+@@ -680,11 +680,15 @@ const struct vm_operations_struct sgx_vm_ops = {
+ void sgx_encl_release(struct kref *ref)
+ {
+ struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
++ unsigned long max_page_index = PFN_DOWN(encl->base + encl->size - 1);
+ struct sgx_va_page *va_page;
+ struct sgx_encl_page *entry;
+- unsigned long index;
++ unsigned long count = 0;
++
++ XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base));
+
+- xa_for_each(&encl->page_array, index, entry) {
++ xas_lock(&xas);
++ xas_for_each(&xas, entry, max_page_index) {
+ if (entry->epc_page) {
+ /*
+ * The page and its radix tree entry cannot be freed
+@@ -699,9 +703,20 @@ void sgx_encl_release(struct kref *ref)
+ }
+
+ kfree(entry);
+- /* Invoke scheduler to prevent soft lockups. */
+- cond_resched();
++ /*
++ * Invoke scheduler on every XA_CHECK_SCHED iteration
++ * to prevent soft lockups.
++ */
++ if (!(++count % XA_CHECK_SCHED)) {
++ xas_pause(&xas);
++ xas_unlock(&xas);
++
++ cond_resched();
++
++ xas_lock(&xas);
++ }
+ }
++ xas_unlock(&xas);
+
+ xa_destroy(&encl->page_array);
+
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 216fee7144eef..892609cde4a20 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1175,7 +1175,7 @@ void __init setup_arch(char **cmdline_p)
+ * Moreover, on machines with SandyBridge graphics or in setups that use
+ * crashkernel the entire 1M is reserved anyway.
+ */
+- reserve_real_mode();
++ x86_platform.realmode_reserve();
+
+ init_mem_mapping();
+
+diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
+index b63cf8f7745ee..6c07f6daaa227 100644
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -722,8 +722,9 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+ switch (opc1) {
+ case 0xeb: /* jmp 8 */
+ case 0xe9: /* jmp 32 */
+- case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */
+ break;
++ case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */
++ goto setup;
+
+ case 0xe8: /* call relative */
+ branch_clear_offset(auprobe, insn);
+@@ -753,6 +754,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+ return -ENOTSUPP;
+ }
+
++setup:
+ auprobe->branch.opc1 = opc1;
+ auprobe->branch.ilen = insn->length;
+ auprobe->branch.offs = insn->immediate.value;
+diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
+index 57353519bc119..ef80d361b4632 100644
+--- a/arch/x86/kernel/x86_init.c
++++ b/arch/x86/kernel/x86_init.c
+@@ -25,6 +25,7 @@
+ #include <asm/iommu.h>
+ #include <asm/mach_traps.h>
+ #include <asm/irqdomain.h>
++#include <asm/realmode.h>
+
+ void x86_init_noop(void) { }
+ void __init x86_init_uint_noop(unsigned int unused) { }
+@@ -145,6 +146,8 @@ struct x86_platform_ops x86_platform __ro_after_init = {
+ .get_nmi_reason = default_get_nmi_reason,
+ .save_sched_clock_state = tsc_save_sched_clock_state,
+ .restore_sched_clock_state = tsc_restore_sched_clock_state,
++ .realmode_reserve = reserve_real_mode,
++ .realmode_init = init_real_mode,
+ .hyper.pin_vcpu = x86_op_int_noop,
+
+ .guest = {
+diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
+index 41d7669a97ad1..af565816d2ba6 100644
+--- a/arch/x86/realmode/init.c
++++ b/arch/x86/realmode/init.c
+@@ -200,14 +200,18 @@ static void __init set_real_mode_permissions(void)
+ set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
+ }
+
+-static int __init init_real_mode(void)
++void __init init_real_mode(void)
+ {
+ if (!real_mode_header)
+ panic("Real mode trampoline was not allocated");
+
+ setup_real_mode();
+ set_real_mode_permissions();
++}
+
++static int __init do_init_real_mode(void)
++{
++ x86_platform.realmode_init();
+ return 0;
+ }
+-early_initcall(init_real_mode);
++early_initcall(do_init_real_mode);
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 038da45f057a7..8944726255c9c 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1266,6 +1266,8 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
+ xen_vcpu_info_reset(0);
+
+ x86_platform.get_nmi_reason = xen_get_nmi_reason;
++ x86_platform.realmode_reserve = x86_init_noop;
++ x86_platform.realmode_init = x86_init_noop;
+
+ x86_init.resources.memory_setup = xen_memory_setup;
+ x86_init.irqs.intr_mode_select = x86_init_noop;
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index c3e1f9a7d43aa..4b0d6fff88de5 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -32,30 +32,30 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
+
+ void xen_smp_intr_free(unsigned int cpu)
+ {
++ kfree(per_cpu(xen_resched_irq, cpu).name);
++ per_cpu(xen_resched_irq, cpu).name = NULL;
+ if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
+ per_cpu(xen_resched_irq, cpu).irq = -1;
+- kfree(per_cpu(xen_resched_irq, cpu).name);
+- per_cpu(xen_resched_irq, cpu).name = NULL;
+ }
++ kfree(per_cpu(xen_callfunc_irq, cpu).name);
++ per_cpu(xen_callfunc_irq, cpu).name = NULL;
+ if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
+ per_cpu(xen_callfunc_irq, cpu).irq = -1;
+- kfree(per_cpu(xen_callfunc_irq, cpu).name);
+- per_cpu(xen_callfunc_irq, cpu).name = NULL;
+ }
++ kfree(per_cpu(xen_debug_irq, cpu).name);
++ per_cpu(xen_debug_irq, cpu).name = NULL;
+ if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
+ per_cpu(xen_debug_irq, cpu).irq = -1;
+- kfree(per_cpu(xen_debug_irq, cpu).name);
+- per_cpu(xen_debug_irq, cpu).name = NULL;
+ }
++ kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
++ per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
+ if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
+ NULL);
+ per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
+- kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
+- per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
+ }
+ }
+
+@@ -65,6 +65,7 @@ int xen_smp_intr_init(unsigned int cpu)
+ char *resched_name, *callfunc_name, *debug_name;
+
+ resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
++ per_cpu(xen_resched_irq, cpu).name = resched_name;
+ rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
+ cpu,
+ xen_reschedule_interrupt,
+@@ -74,9 +75,9 @@ int xen_smp_intr_init(unsigned int cpu)
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_resched_irq, cpu).irq = rc;
+- per_cpu(xen_resched_irq, cpu).name = resched_name;
+
+ callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
++ per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
+ rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
+ cpu,
+ xen_call_function_interrupt,
+@@ -86,10 +87,10 @@ int xen_smp_intr_init(unsigned int cpu)
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_callfunc_irq, cpu).irq = rc;
+- per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
+
+ if (!xen_fifo_events) {
+ debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
++ per_cpu(xen_debug_irq, cpu).name = debug_name;
+ rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
+ xen_debug_interrupt,
+ IRQF_PERCPU | IRQF_NOBALANCING,
+@@ -97,10 +98,10 @@ int xen_smp_intr_init(unsigned int cpu)
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_debug_irq, cpu).irq = rc;
+- per_cpu(xen_debug_irq, cpu).name = debug_name;
+ }
+
+ callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
++ per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
+ rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+ cpu,
+ xen_call_function_single_interrupt,
+@@ -110,7 +111,6 @@ int xen_smp_intr_init(unsigned int cpu)
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
+- per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
+
+ return 0;
+
+diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
+index 480be82e9b7be..6175f2c5c8224 100644
+--- a/arch/x86/xen/smp_pv.c
++++ b/arch/x86/xen/smp_pv.c
+@@ -97,18 +97,18 @@ asmlinkage __visible void cpu_bringup_and_idle(void)
+
+ void xen_smp_intr_free_pv(unsigned int cpu)
+ {
++ kfree(per_cpu(xen_irq_work, cpu).name);
++ per_cpu(xen_irq_work, cpu).name = NULL;
+ if (per_cpu(xen_irq_work, cpu).irq >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
+ per_cpu(xen_irq_work, cpu).irq = -1;
+- kfree(per_cpu(xen_irq_work, cpu).name);
+- per_cpu(xen_irq_work, cpu).name = NULL;
+ }
+
++ kfree(per_cpu(xen_pmu_irq, cpu).name);
++ per_cpu(xen_pmu_irq, cpu).name = NULL;
+ if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
+ per_cpu(xen_pmu_irq, cpu).irq = -1;
+- kfree(per_cpu(xen_pmu_irq, cpu).name);
+- per_cpu(xen_pmu_irq, cpu).name = NULL;
+ }
+ }
+
+@@ -118,6 +118,7 @@ int xen_smp_intr_init_pv(unsigned int cpu)
+ char *callfunc_name, *pmu_name;
+
+ callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
++ per_cpu(xen_irq_work, cpu).name = callfunc_name;
+ rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
+ cpu,
+ xen_irq_work_interrupt,
+@@ -127,10 +128,10 @@ int xen_smp_intr_init_pv(unsigned int cpu)
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_irq_work, cpu).irq = rc;
+- per_cpu(xen_irq_work, cpu).name = callfunc_name;
+
+ if (is_xen_pmu) {
+ pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
++ per_cpu(xen_pmu_irq, cpu).name = pmu_name;
+ rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
+ xen_pmu_irq_handler,
+ IRQF_PERCPU|IRQF_NOBALANCING,
+@@ -138,7 +139,6 @@ int xen_smp_intr_init_pv(unsigned int cpu)
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_pmu_irq, cpu).irq = rc;
+- per_cpu(xen_pmu_irq, cpu).name = pmu_name;
+ }
+
+ return 0;
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index 043c73dfd2c98..5c6fc16e4b925 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -75,6 +75,7 @@ void xen_init_lock_cpu(int cpu)
+ cpu, per_cpu(lock_kicker_irq, cpu));
+
+ name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
++ per_cpu(irq_name, cpu) = name;
+ irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
+ cpu,
+ dummy_handler,
+@@ -85,7 +86,6 @@ void xen_init_lock_cpu(int cpu)
+ if (irq >= 0) {
+ disable_irq(irq); /* make sure it's never delivered */
+ per_cpu(lock_kicker_irq, cpu) = irq;
+- per_cpu(irq_name, cpu) = name;
+ }
+
+ printk("cpu %d spinlock event irq %d\n", cpu, irq);
+@@ -98,6 +98,8 @@ void xen_uninit_lock_cpu(int cpu)
+ if (!xen_pvspin)
+ return;
+
++ kfree(per_cpu(irq_name, cpu));
++ per_cpu(irq_name, cpu) = NULL;
+ /*
+ * When booting the kernel with 'mitigations=auto,nosmt', the secondary
+ * CPUs are not activated, and lock_kicker_irq is not initialized.
+@@ -108,8 +110,6 @@ void xen_uninit_lock_cpu(int cpu)
+
+ unbind_from_irqhandler(irq, NULL);
+ per_cpu(lock_kicker_irq, cpu) = -1;
+- kfree(per_cpu(irq_name, cpu));
+- per_cpu(irq_name, cpu) = NULL;
+ }
+
+ PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 7ea427817f7f5..7b894df32e320 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -386,6 +386,12 @@ static void bfq_put_stable_ref(struct bfq_queue *bfqq);
+
+ void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
+ {
++ struct bfq_queue *old_bfqq = bic->bfqq[is_sync];
++
++ /* Clear bic pointer if bfqq is detached from this bic */
++ if (old_bfqq && old_bfqq->bic == bic)
++ old_bfqq->bic = NULL;
++
+ /*
+ * If bfqq != NULL, then a non-stable queue merge between
+ * bic->bfqq and bfqq is happening here. This causes troubles
+@@ -5377,9 +5383,8 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfqd->lock, flags);
+- bfqq->bic = NULL;
+- bfq_exit_bfqq(bfqd, bfqq);
+ bic_set_bfqq(bic, NULL, is_sync);
++ bfq_exit_bfqq(bfqd, bfqq);
+ spin_unlock_irqrestore(&bfqd->lock, flags);
+ }
+ }
+@@ -6784,6 +6789,12 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
+ true, is_sync,
+ NULL);
++ if (unlikely(bfqq == &bfqd->oom_bfqq))
++ bfqq_already_existing = true;
++ } else
++ bfqq_already_existing = true;
++
++ if (!bfqq_already_existing) {
+ bfqq->waker_bfqq = old_bfqq->waker_bfqq;
+ bfqq->tentative_waker_bfqq = NULL;
+
+@@ -6797,8 +6808,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
+ if (bfqq->waker_bfqq)
+ hlist_add_head(&bfqq->woken_list_node,
+ &bfqq->waker_bfqq->woken_list);
+- } else
+- bfqq_already_existing = true;
++ }
+ }
+ }
+
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index ed761c62ad0a7..fcf9cf49f5de1 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -33,6 +33,7 @@
+ #include "blk-cgroup.h"
+ #include "blk-ioprio.h"
+ #include "blk-throttle.h"
++#include "blk-rq-qos.h"
+
+ /*
+ * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
+@@ -1275,6 +1276,7 @@ err_unlock:
+ void blkcg_exit_disk(struct gendisk *disk)
+ {
+ blkg_destroy_all(disk);
++ rq_qos_exit(disk->queue);
+ blk_throtl_exit(disk);
+ }
+
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
+index 93997d297d427..4515288fbe351 100644
+--- a/block/blk-mq-sysfs.c
++++ b/block/blk-mq-sysfs.c
+@@ -185,7 +185,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
+ {
+ struct request_queue *q = hctx->queue;
+ struct blk_mq_ctx *ctx;
+- int i, ret;
++ int i, j, ret;
+
+ if (!hctx->nr_ctx)
+ return 0;
+@@ -197,9 +197,16 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
+ hctx_for_each_ctx(hctx, ctx, i) {
+ ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
+ if (ret)
+- break;
++ goto out;
+ }
+
++ return 0;
++out:
++ hctx_for_each_ctx(hctx, ctx, j) {
++ if (j < i)
++ kobject_del(&ctx->kobj);
++ }
++ kobject_del(&hctx->kobj);
+ return ret;
+ }
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 228a6696d8351..0b855e033a834 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1529,7 +1529,13 @@ static void blk_mq_rq_timed_out(struct request *req)
+ blk_add_timer(req);
+ }
+
+-static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
++struct blk_expired_data {
++ bool has_timedout_rq;
++ unsigned long next;
++ unsigned long timeout_start;
++};
++
++static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
+ {
+ unsigned long deadline;
+
+@@ -1539,13 +1545,13 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
+ return false;
+
+ deadline = READ_ONCE(rq->deadline);
+- if (time_after_eq(jiffies, deadline))
++ if (time_after_eq(expired->timeout_start, deadline))
+ return true;
+
+- if (*next == 0)
+- *next = deadline;
+- else if (time_after(*next, deadline))
+- *next = deadline;
++ if (expired->next == 0)
++ expired->next = deadline;
++ else if (time_after(expired->next, deadline))
++ expired->next = deadline;
+ return false;
+ }
+
+@@ -1561,7 +1567,7 @@ void blk_mq_put_rq_ref(struct request *rq)
+
+ static bool blk_mq_check_expired(struct request *rq, void *priv)
+ {
+- unsigned long *next = priv;
++ struct blk_expired_data *expired = priv;
+
+ /*
+ * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
+@@ -1570,7 +1576,18 @@ static bool blk_mq_check_expired(struct request *rq, void *priv)
+ * it was completed and reallocated as a new request after returning
+ * from blk_mq_check_expired().
+ */
+- if (blk_mq_req_expired(rq, next))
++ if (blk_mq_req_expired(rq, expired)) {
++ expired->has_timedout_rq = true;
++ return false;
++ }
++ return true;
++}
++
++static bool blk_mq_handle_expired(struct request *rq, void *priv)
++{
++ struct blk_expired_data *expired = priv;
++
++ if (blk_mq_req_expired(rq, expired))
+ blk_mq_rq_timed_out(rq);
+ return true;
+ }
+@@ -1579,7 +1596,9 @@ static void blk_mq_timeout_work(struct work_struct *work)
+ {
+ struct request_queue *q =
+ container_of(work, struct request_queue, timeout_work);
+- unsigned long next = 0;
++ struct blk_expired_data expired = {
++ .timeout_start = jiffies,
++ };
+ struct blk_mq_hw_ctx *hctx;
+ unsigned long i;
+
+@@ -1599,10 +1618,23 @@ static void blk_mq_timeout_work(struct work_struct *work)
+ if (!percpu_ref_tryget(&q->q_usage_counter))
+ return;
+
+- blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
++ /* check if there is any timed-out request */
++ blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
++ if (expired.has_timedout_rq) {
++ /*
++ * Before walking tags, we must ensure any submit started
++ * before the current time has finished. Since the submit
++ * uses srcu or rcu, wait for a synchronization point to
++ * ensure all running submits have finished
++ */
++ blk_mq_wait_quiesce_done(q);
++
++ expired.next = 0;
++ blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
++ }
+
+- if (next != 0) {
+- mod_timer(&q->timeout, next);
++ if (expired.next != 0) {
++ mod_timer(&q->timeout, expired.next);
+ } else {
+ /*
+ * Request timeouts are handled as a forward rolling timer. If
+diff --git a/block/blk.h b/block/blk.h
+index a186ea20f39d8..8b75a95b28d60 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -436,7 +436,7 @@ static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
+ }
+ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);
+
+-int disk_scan_partitions(struct gendisk *disk, fmode_t mode);
++int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner);
+
+ int disk_alloc_events(struct gendisk *disk);
+ void disk_add_events(struct gendisk *disk);
+diff --git a/block/genhd.c b/block/genhd.c
+index 0f9769db2de83..c4765681a8b4b 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -356,7 +356,7 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action)
+ }
+ EXPORT_SYMBOL_GPL(disk_uevent);
+
+-int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
++int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner)
+ {
+ struct block_device *bdev;
+
+@@ -366,6 +366,9 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
+ return -EINVAL;
+ if (disk->open_partitions)
+ return -EBUSY;
++ /* Someone else has bdev exclusively open? */
++ if (disk->part0->bd_holder && disk->part0->bd_holder != owner)
++ return -EBUSY;
+
+ set_bit(GD_NEED_PART_SCAN, &disk->state);
+ bdev = blkdev_get_by_dev(disk_devt(disk), mode, NULL);
+@@ -500,7 +503,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+
+ bdev_add(disk->part0, ddev->devt);
+ if (get_capacity(disk))
+- disk_scan_partitions(disk, FMODE_READ);
++ disk_scan_partitions(disk, FMODE_READ, NULL);
+
+ /*
+ * Announce the disk and partitions after all partitions are
+@@ -530,6 +533,7 @@ out_unregister_queue:
+ rq_qos_exit(disk->queue);
+ out_put_slave_dir:
+ kobject_put(disk->slave_dir);
++ disk->slave_dir = NULL;
+ out_put_holder_dir:
+ kobject_put(disk->part0->bd_holder_dir);
+ out_del_integrity:
+@@ -629,6 +633,7 @@ void del_gendisk(struct gendisk *disk)
+
+ kobject_put(disk->part0->bd_holder_dir);
+ kobject_put(disk->slave_dir);
++ disk->slave_dir = NULL;
+
+ part_stat_set_all(disk->part0, 0);
+ disk->part0->bd_stamp = 0;
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 60121e89052bc..96617512982e5 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -467,9 +467,10 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode,
+ * user space. Note the separate arg/argp parameters that are needed
+ * to deal with the compat_ptr() conversion.
+ */
+-static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
+- unsigned cmd, unsigned long arg, void __user *argp)
++static int blkdev_common_ioctl(struct file *file, fmode_t mode, unsigned cmd,
++ unsigned long arg, void __user *argp)
+ {
++ struct block_device *bdev = I_BDEV(file->f_mapping->host);
+ unsigned int max_sectors;
+
+ switch (cmd) {
+@@ -527,7 +528,8 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
+ return -EACCES;
+ if (bdev_is_partition(bdev))
+ return -EINVAL;
+- return disk_scan_partitions(bdev->bd_disk, mode & ~FMODE_EXCL);
++ return disk_scan_partitions(bdev->bd_disk, mode & ~FMODE_EXCL,
++ file);
+ case BLKTRACESTART:
+ case BLKTRACESTOP:
+ case BLKTRACETEARDOWN:
+@@ -605,7 +607,7 @@ long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ break;
+ }
+
+- ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
++ ret = blkdev_common_ioctl(file, mode, cmd, arg, argp);
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+@@ -674,7 +676,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ break;
+ }
+
+- ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
++ ret = blkdev_common_ioctl(file, mode, cmd, arg, argp);
+ if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
+ ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
+
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 668095eca0faf..ca3a40fc7da91 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -68,11 +68,12 @@ struct aead_instance_ctx {
+
+ struct cryptd_skcipher_ctx {
+ refcount_t refcnt;
+- struct crypto_sync_skcipher *child;
++ struct crypto_skcipher *child;
+ };
+
+ struct cryptd_skcipher_request_ctx {
+ crypto_completion_t complete;
++ struct skcipher_request req;
+ };
+
+ struct cryptd_hash_ctx {
+@@ -227,13 +228,13 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
+ const u8 *key, unsigned int keylen)
+ {
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
+- struct crypto_sync_skcipher *child = ctx->child;
++ struct crypto_skcipher *child = ctx->child;
+
+- crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+- crypto_sync_skcipher_set_flags(child,
+- crypto_skcipher_get_flags(parent) &
+- CRYPTO_TFM_REQ_MASK);
+- return crypto_sync_skcipher_setkey(child, key, keylen);
++ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
++ crypto_skcipher_set_flags(child,
++ crypto_skcipher_get_flags(parent) &
++ CRYPTO_TFM_REQ_MASK);
++ return crypto_skcipher_setkey(child, key, keylen);
+ }
+
+ static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
+@@ -258,13 +259,13 @@ static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
+ struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+- struct crypto_sync_skcipher *child = ctx->child;
+- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
++ struct skcipher_request *subreq = &rctx->req;
++ struct crypto_skcipher *child = ctx->child;
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+- skcipher_request_set_sync_tfm(subreq, child);
++ skcipher_request_set_tfm(subreq, child);
+ skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+@@ -286,13 +287,13 @@ static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
+ struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+- struct crypto_sync_skcipher *child = ctx->child;
+- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
++ struct skcipher_request *subreq = &rctx->req;
++ struct crypto_skcipher *child = ctx->child;
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+- skcipher_request_set_sync_tfm(subreq, child);
++ skcipher_request_set_tfm(subreq, child);
+ skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+@@ -343,9 +344,10 @@ static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+- ctx->child = (struct crypto_sync_skcipher *)cipher;
++ ctx->child = cipher;
+ crypto_skcipher_set_reqsize(
+- tfm, sizeof(struct cryptd_skcipher_request_ctx));
++ tfm, sizeof(struct cryptd_skcipher_request_ctx) +
++ crypto_skcipher_reqsize(cipher));
+ return 0;
+ }
+
+@@ -353,7 +355,7 @@ static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
+ {
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+- crypto_free_sync_skcipher(ctx->child);
++ crypto_free_skcipher(ctx->child);
+ }
+
+ static void cryptd_skcipher_free(struct skcipher_instance *inst)
+@@ -931,7 +933,7 @@ struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
+ {
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
+
+- return &ctx->child->base;
++ return ctx->child;
+ }
+ EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
+
+diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
+index a82679b576bb4..b23235d58a122 100644
+--- a/crypto/tcrypt.c
++++ b/crypto/tcrypt.c
+@@ -1090,15 +1090,6 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
+ goto out_free_tfm;
+ }
+
+-
+- for (i = 0; i < num_mb; ++i)
+- if (testmgr_alloc_buf(data[i].xbuf)) {
+- while (i--)
+- testmgr_free_buf(data[i].xbuf);
+- goto out_free_tfm;
+- }
+-
+-
+ for (i = 0; i < num_mb; ++i) {
+ data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!data[i].req) {
+@@ -1471,387 +1462,387 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
+ }
+
+ for (i = 1; i < 200; i++)
+- ret += do_test(NULL, 0, 0, i, num_mb);
++ ret = min(ret, do_test(NULL, 0, 0, i, num_mb));
+ break;
+
+ case 1:
+- ret += tcrypt_test("md5");
++ ret = min(ret, tcrypt_test("md5"));
+ break;
+
+ case 2:
+- ret += tcrypt_test("sha1");
++ ret = min(ret, tcrypt_test("sha1"));
+ break;
+
+ case 3:
+- ret += tcrypt_test("ecb(des)");
+- ret += tcrypt_test("cbc(des)");
+- ret += tcrypt_test("ctr(des)");
++ ret = min(ret, tcrypt_test("ecb(des)"));
++ ret = min(ret, tcrypt_test("cbc(des)"));
++ ret = min(ret, tcrypt_test("ctr(des)"));
+ break;
+
+ case 4:
+- ret += tcrypt_test("ecb(des3_ede)");
+- ret += tcrypt_test("cbc(des3_ede)");
+- ret += tcrypt_test("ctr(des3_ede)");
++ ret = min(ret, tcrypt_test("ecb(des3_ede)"));
++ ret = min(ret, tcrypt_test("cbc(des3_ede)"));
++ ret = min(ret, tcrypt_test("ctr(des3_ede)"));
+ break;
+
+ case 5:
+- ret += tcrypt_test("md4");
++ ret = min(ret, tcrypt_test("md4"));
+ break;
+
+ case 6:
+- ret += tcrypt_test("sha256");
++ ret = min(ret, tcrypt_test("sha256"));
+ break;
+
+ case 7:
+- ret += tcrypt_test("ecb(blowfish)");
+- ret += tcrypt_test("cbc(blowfish)");
+- ret += tcrypt_test("ctr(blowfish)");
++ ret = min(ret, tcrypt_test("ecb(blowfish)"));
++ ret = min(ret, tcrypt_test("cbc(blowfish)"));
++ ret = min(ret, tcrypt_test("ctr(blowfish)"));
+ break;
+
+ case 8:
+- ret += tcrypt_test("ecb(twofish)");
+- ret += tcrypt_test("cbc(twofish)");
+- ret += tcrypt_test("ctr(twofish)");
+- ret += tcrypt_test("lrw(twofish)");
+- ret += tcrypt_test("xts(twofish)");
++ ret = min(ret, tcrypt_test("ecb(twofish)"));
++ ret = min(ret, tcrypt_test("cbc(twofish)"));
++ ret = min(ret, tcrypt_test("ctr(twofish)"));
++ ret = min(ret, tcrypt_test("lrw(twofish)"));
++ ret = min(ret, tcrypt_test("xts(twofish)"));
+ break;
+
+ case 9:
+- ret += tcrypt_test("ecb(serpent)");
+- ret += tcrypt_test("cbc(serpent)");
+- ret += tcrypt_test("ctr(serpent)");
+- ret += tcrypt_test("lrw(serpent)");
+- ret += tcrypt_test("xts(serpent)");
++ ret = min(ret, tcrypt_test("ecb(serpent)"));
++ ret = min(ret, tcrypt_test("cbc(serpent)"));
++ ret = min(ret, tcrypt_test("ctr(serpent)"));
++ ret = min(ret, tcrypt_test("lrw(serpent)"));
++ ret = min(ret, tcrypt_test("xts(serpent)"));
+ break;
+
+ case 10:
+- ret += tcrypt_test("ecb(aes)");
+- ret += tcrypt_test("cbc(aes)");
+- ret += tcrypt_test("lrw(aes)");
+- ret += tcrypt_test("xts(aes)");
+- ret += tcrypt_test("ctr(aes)");
+- ret += tcrypt_test("rfc3686(ctr(aes))");
+- ret += tcrypt_test("ofb(aes)");
+- ret += tcrypt_test("cfb(aes)");
+- ret += tcrypt_test("xctr(aes)");
++ ret = min(ret, tcrypt_test("ecb(aes)"));
++ ret = min(ret, tcrypt_test("cbc(aes)"));
++ ret = min(ret, tcrypt_test("lrw(aes)"));
++ ret = min(ret, tcrypt_test("xts(aes)"));
++ ret = min(ret, tcrypt_test("ctr(aes)"));
++ ret = min(ret, tcrypt_test("rfc3686(ctr(aes))"));
++ ret = min(ret, tcrypt_test("ofb(aes)"));
++ ret = min(ret, tcrypt_test("cfb(aes)"));
++ ret = min(ret, tcrypt_test("xctr(aes)"));
+ break;
+
+ case 11:
+- ret += tcrypt_test("sha384");
++ ret = min(ret, tcrypt_test("sha384"));
+ break;
+
+ case 12:
+- ret += tcrypt_test("sha512");
++ ret = min(ret, tcrypt_test("sha512"));
+ break;
+
+ case 13:
+- ret += tcrypt_test("deflate");
++ ret = min(ret, tcrypt_test("deflate"));
+ break;
+
+ case 14:
+- ret += tcrypt_test("ecb(cast5)");
+- ret += tcrypt_test("cbc(cast5)");
+- ret += tcrypt_test("ctr(cast5)");
++ ret = min(ret, tcrypt_test("ecb(cast5)"));
++ ret = min(ret, tcrypt_test("cbc(cast5)"));
++ ret = min(ret, tcrypt_test("ctr(cast5)"));
+ break;
+
+ case 15:
+- ret += tcrypt_test("ecb(cast6)");
+- ret += tcrypt_test("cbc(cast6)");
+- ret += tcrypt_test("ctr(cast6)");
+- ret += tcrypt_test("lrw(cast6)");
+- ret += tcrypt_test("xts(cast6)");
++ ret = min(ret, tcrypt_test("ecb(cast6)"));
++ ret = min(ret, tcrypt_test("cbc(cast6)"));
++ ret = min(ret, tcrypt_test("ctr(cast6)"));
++ ret = min(ret, tcrypt_test("lrw(cast6)"));
++ ret = min(ret, tcrypt_test("xts(cast6)"));
+ break;
+
+ case 16:
+- ret += tcrypt_test("ecb(arc4)");
++ ret = min(ret, tcrypt_test("ecb(arc4)"));
+ break;
+
+ case 17:
+- ret += tcrypt_test("michael_mic");
++ ret = min(ret, tcrypt_test("michael_mic"));
+ break;
+
+ case 18:
+- ret += tcrypt_test("crc32c");
++ ret = min(ret, tcrypt_test("crc32c"));
+ break;
+
+ case 19:
+- ret += tcrypt_test("ecb(tea)");
++ ret = min(ret, tcrypt_test("ecb(tea)"));
+ break;
+
+ case 20:
+- ret += tcrypt_test("ecb(xtea)");
++ ret = min(ret, tcrypt_test("ecb(xtea)"));
+ break;
+
+ case 21:
+- ret += tcrypt_test("ecb(khazad)");
++ ret = min(ret, tcrypt_test("ecb(khazad)"));
+ break;
+
+ case 22:
+- ret += tcrypt_test("wp512");
++ ret = min(ret, tcrypt_test("wp512"));
+ break;
+
+ case 23:
+- ret += tcrypt_test("wp384");
++ ret = min(ret, tcrypt_test("wp384"));
+ break;
+
+ case 24:
+- ret += tcrypt_test("wp256");
++ ret = min(ret, tcrypt_test("wp256"));
+ break;
+
+ case 26:
+- ret += tcrypt_test("ecb(anubis)");
+- ret += tcrypt_test("cbc(anubis)");
++ ret = min(ret, tcrypt_test("ecb(anubis)"));
++ ret = min(ret, tcrypt_test("cbc(anubis)"));
+ break;
+
+ case 30:
+- ret += tcrypt_test("ecb(xeta)");
++ ret = min(ret, tcrypt_test("ecb(xeta)"));
+ break;
+
+ case 31:
+- ret += tcrypt_test("pcbc(fcrypt)");
++ ret = min(ret, tcrypt_test("pcbc(fcrypt)"));
+ break;
+
+ case 32:
+- ret += tcrypt_test("ecb(camellia)");
+- ret += tcrypt_test("cbc(camellia)");
+- ret += tcrypt_test("ctr(camellia)");
+- ret += tcrypt_test("lrw(camellia)");
+- ret += tcrypt_test("xts(camellia)");
++ ret = min(ret, tcrypt_test("ecb(camellia)"));
++ ret = min(ret, tcrypt_test("cbc(camellia)"));
++ ret = min(ret, tcrypt_test("ctr(camellia)"));
++ ret = min(ret, tcrypt_test("lrw(camellia)"));
++ ret = min(ret, tcrypt_test("xts(camellia)"));
+ break;
+
+ case 33:
+- ret += tcrypt_test("sha224");
++ ret = min(ret, tcrypt_test("sha224"));
+ break;
+
+ case 35:
+- ret += tcrypt_test("gcm(aes)");
++ ret = min(ret, tcrypt_test("gcm(aes)"));
+ break;
+
+ case 36:
+- ret += tcrypt_test("lzo");
++ ret = min(ret, tcrypt_test("lzo"));
+ break;
+
+ case 37:
+- ret += tcrypt_test("ccm(aes)");
++ ret = min(ret, tcrypt_test("ccm(aes)"));
+ break;
+
+ case 38:
+- ret += tcrypt_test("cts(cbc(aes))");
++ ret = min(ret, tcrypt_test("cts(cbc(aes))"));
+ break;
+
+ case 39:
+- ret += tcrypt_test("xxhash64");
++ ret = min(ret, tcrypt_test("xxhash64"));
+ break;
+
+ case 40:
+- ret += tcrypt_test("rmd160");
++ ret = min(ret, tcrypt_test("rmd160"));
+ break;
+
+ case 42:
+- ret += tcrypt_test("blake2b-512");
++ ret = min(ret, tcrypt_test("blake2b-512"));
+ break;
+
+ case 43:
+- ret += tcrypt_test("ecb(seed)");
++ ret = min(ret, tcrypt_test("ecb(seed)"));
+ break;
+
+ case 45:
+- ret += tcrypt_test("rfc4309(ccm(aes))");
++ ret = min(ret, tcrypt_test("rfc4309(ccm(aes))"));
+ break;
+
+ case 46:
+- ret += tcrypt_test("ghash");
++ ret = min(ret, tcrypt_test("ghash"));
+ break;
+
+ case 47:
+- ret += tcrypt_test("crct10dif");
++ ret = min(ret, tcrypt_test("crct10dif"));
+ break;
+
+ case 48:
+- ret += tcrypt_test("sha3-224");
++ ret = min(ret, tcrypt_test("sha3-224"));
+ break;
+
+ case 49:
+- ret += tcrypt_test("sha3-256");
++ ret = min(ret, tcrypt_test("sha3-256"));
+ break;
+
+ case 50:
+- ret += tcrypt_test("sha3-384");
++ ret = min(ret, tcrypt_test("sha3-384"));
+ break;
+
+ case 51:
+- ret += tcrypt_test("sha3-512");
++ ret = min(ret, tcrypt_test("sha3-512"));
+ break;
+
+ case 52:
+- ret += tcrypt_test("sm3");
++ ret = min(ret, tcrypt_test("sm3"));
+ break;
+
+ case 53:
+- ret += tcrypt_test("streebog256");
++ ret = min(ret, tcrypt_test("streebog256"));
+ break;
+
+ case 54:
+- ret += tcrypt_test("streebog512");
++ ret = min(ret, tcrypt_test("streebog512"));
+ break;
+
+ case 55:
+- ret += tcrypt_test("gcm(sm4)");
++ ret = min(ret, tcrypt_test("gcm(sm4)"));
+ break;
+
+ case 56:
+- ret += tcrypt_test("ccm(sm4)");
++ ret = min(ret, tcrypt_test("ccm(sm4)"));
+ break;
+
+ case 57:
+- ret += tcrypt_test("polyval");
++ ret = min(ret, tcrypt_test("polyval"));
+ break;
+
+ case 58:
+- ret += tcrypt_test("gcm(aria)");
++ ret = min(ret, tcrypt_test("gcm(aria)"));
+ break;
+
+ case 100:
+- ret += tcrypt_test("hmac(md5)");
++ ret = min(ret, tcrypt_test("hmac(md5)"));
+ break;
+
+ case 101:
+- ret += tcrypt_test("hmac(sha1)");
++ ret = min(ret, tcrypt_test("hmac(sha1)"));
+ break;
+
+ case 102:
+- ret += tcrypt_test("hmac(sha256)");
++ ret = min(ret, tcrypt_test("hmac(sha256)"));
+ break;
+
+ case 103:
+- ret += tcrypt_test("hmac(sha384)");
++ ret = min(ret, tcrypt_test("hmac(sha384)"));
+ break;
+
+ case 104:
+- ret += tcrypt_test("hmac(sha512)");
++ ret = min(ret, tcrypt_test("hmac(sha512)"));
+ break;
+
+ case 105:
+- ret += tcrypt_test("hmac(sha224)");
++ ret = min(ret, tcrypt_test("hmac(sha224)"));
+ break;
+
+ case 106:
+- ret += tcrypt_test("xcbc(aes)");
++ ret = min(ret, tcrypt_test("xcbc(aes)"));
+ break;
+
+ case 108:
+- ret += tcrypt_test("hmac(rmd160)");
++ ret = min(ret, tcrypt_test("hmac(rmd160)"));
+ break;
+
+ case 109:
+- ret += tcrypt_test("vmac64(aes)");
++ ret = min(ret, tcrypt_test("vmac64(aes)"));
+ break;
+
+ case 111:
+- ret += tcrypt_test("hmac(sha3-224)");
++ ret = min(ret, tcrypt_test("hmac(sha3-224)"));
+ break;
+
+ case 112:
+- ret += tcrypt_test("hmac(sha3-256)");
++ ret = min(ret, tcrypt_test("hmac(sha3-256)"));
+ break;
+
+ case 113:
+- ret += tcrypt_test("hmac(sha3-384)");
++ ret = min(ret, tcrypt_test("hmac(sha3-384)"));
+ break;
+
+ case 114:
+- ret += tcrypt_test("hmac(sha3-512)");
++ ret = min(ret, tcrypt_test("hmac(sha3-512)"));
+ break;
+
+ case 115:
+- ret += tcrypt_test("hmac(streebog256)");
++ ret = min(ret, tcrypt_test("hmac(streebog256)"));
+ break;
+
+ case 116:
+- ret += tcrypt_test("hmac(streebog512)");
++ ret = min(ret, tcrypt_test("hmac(streebog512)"));
+ break;
+
+ case 150:
+- ret += tcrypt_test("ansi_cprng");
++ ret = min(ret, tcrypt_test("ansi_cprng"));
+ break;
+
+ case 151:
+- ret += tcrypt_test("rfc4106(gcm(aes))");
++ ret = min(ret, tcrypt_test("rfc4106(gcm(aes))"));
+ break;
+
+ case 152:
+- ret += tcrypt_test("rfc4543(gcm(aes))");
++ ret = min(ret, tcrypt_test("rfc4543(gcm(aes))"));
+ break;
+
+ case 153:
+- ret += tcrypt_test("cmac(aes)");
++ ret = min(ret, tcrypt_test("cmac(aes)"));
+ break;
+
+ case 154:
+- ret += tcrypt_test("cmac(des3_ede)");
++ ret = min(ret, tcrypt_test("cmac(des3_ede)"));
+ break;
+
+ case 155:
+- ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))");
++ ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(aes))"));
+ break;
+
+ case 156:
+- ret += tcrypt_test("authenc(hmac(md5),ecb(cipher_null))");
++ ret = min(ret, tcrypt_test("authenc(hmac(md5),ecb(cipher_null))"));
+ break;
+
+ case 157:
+- ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))");
++ ret = min(ret, tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))"));
+ break;
+
+ case 158:
+- ret += tcrypt_test("cbcmac(sm4)");
++ ret = min(ret, tcrypt_test("cbcmac(sm4)"));
+ break;
+
+ case 159:
+- ret += tcrypt_test("cmac(sm4)");
++ ret = min(ret, tcrypt_test("cmac(sm4)"));
+ break;
+
+ case 181:
+- ret += tcrypt_test("authenc(hmac(sha1),cbc(des))");
++ ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(des))"));
+ break;
+ case 182:
+- ret += tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))");
++ ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))"));
+ break;
+ case 183:
+- ret += tcrypt_test("authenc(hmac(sha224),cbc(des))");
++ ret = min(ret, tcrypt_test("authenc(hmac(sha224),cbc(des))"));
+ break;
+ case 184:
+- ret += tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))");
++ ret = min(ret, tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))"));
+ break;
+ case 185:
+- ret += tcrypt_test("authenc(hmac(sha256),cbc(des))");
++ ret = min(ret, tcrypt_test("authenc(hmac(sha256),cbc(des))"));
+ break;
+ case 186:
+- ret += tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))");
++ ret = min(ret, tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))"));
+ break;
+ case 187:
+- ret += tcrypt_test("authenc(hmac(sha384),cbc(des))");
++ ret = min(ret, tcrypt_test("authenc(hmac(sha384),cbc(des))"));
+ break;
+ case 188:
+- ret += tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))");
++ ret = min(ret, tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))"));
+ break;
+ case 189:
+- ret += tcrypt_test("authenc(hmac(sha512),cbc(des))");
++ ret = min(ret, tcrypt_test("authenc(hmac(sha512),cbc(des))"));
+ break;
+ case 190:
+- ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
++ ret = min(ret, tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))"));
+ break;
+ case 191:
+- ret += tcrypt_test("ecb(sm4)");
+- ret += tcrypt_test("cbc(sm4)");
+- ret += tcrypt_test("cfb(sm4)");
+- ret += tcrypt_test("ctr(sm4)");
++ ret = min(ret, tcrypt_test("ecb(sm4)"));
++ ret = min(ret, tcrypt_test("cbc(sm4)"));
++ ret = min(ret, tcrypt_test("cfb(sm4)"));
++ ret = min(ret, tcrypt_test("ctr(sm4)"));
+ break;
+ case 192:
+- ret += tcrypt_test("ecb(aria)");
+- ret += tcrypt_test("cbc(aria)");
+- ret += tcrypt_test("cfb(aria)");
+- ret += tcrypt_test("ctr(aria)");
++ ret = min(ret, tcrypt_test("ecb(aria)"));
++ ret = min(ret, tcrypt_test("cbc(aria)"));
++ ret = min(ret, tcrypt_test("cfb(aria)"));
++ ret = min(ret, tcrypt_test("ctr(aria)"));
+ break;
+ case 200:
+ test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
+diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
+index ae2e768830bfc..9332bc688713c 100644
+--- a/drivers/acpi/acpica/dsmethod.c
++++ b/drivers/acpi/acpica/dsmethod.c
+@@ -517,7 +517,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ status = AE_NO_MEMORY;
+- goto cleanup;
++ goto pop_walk_state;
+ }
+
+ info->parameters = &this_walk_state->operands[0];
+@@ -529,7 +529,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
+
+ ACPI_FREE(info);
+ if (ACPI_FAILURE(status)) {
+- goto cleanup;
++ goto pop_walk_state;
+ }
+
+ next_walk_state->method_nesting_depth =
+@@ -575,6 +575,12 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
+
+ return_ACPI_STATUS(status);
+
++pop_walk_state:
++
++ /* On error, pop the walk state to be deleted from thread */
++
++ acpi_ds_pop_walk_state(thread);
++
+ cleanup:
+
+ /* On error, we must terminate the method properly */
+diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
+index 400b9e15a709c..63c17f420fb86 100644
+--- a/drivers/acpi/acpica/utcopy.c
++++ b/drivers/acpi/acpica/utcopy.c
+@@ -916,13 +916,6 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
+ status = acpi_ut_walk_package_tree(source_obj, dest_obj,
+ acpi_ut_copy_ielement_to_ielement,
+ walk_state);
+- if (ACPI_FAILURE(status)) {
+-
+- /* On failure, delete the destination package object */
+-
+- acpi_ut_remove_reference(dest_obj);
+- }
+-
+ return_ACPI_STATUS(status);
+ }
+
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 9b42628cf21b3..9751b84c1b221 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1875,6 +1875,16 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),
+ },
+ },
++ {
++ /*
++ * HP Pavilion Gaming Laptop 15-cx0041ur
++ */
++ .callback = ec_honor_dsdt_gpe,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP 15-cx0041ur"),
++ },
++ },
+ {
+ /*
+ * Samsung hardware
+diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
+index 1cc4647f78b86..c2c786eb95abc 100644
+--- a/drivers/acpi/irq.c
++++ b/drivers/acpi/irq.c
+@@ -94,6 +94,7 @@ EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
+ /**
+ * acpi_get_irq_source_fwhandle() - Retrieve fwhandle from IRQ resource source.
+ * @source: acpi_resource_source to use for the lookup.
++ * @gsi: GSI IRQ number
+ *
+ * Description:
+ * Retrieve the fwhandle of the device referenced by the given IRQ resource
+@@ -297,8 +298,8 @@ EXPORT_SYMBOL_GPL(acpi_irq_get);
+ /**
+ * acpi_set_irq_model - Setup the GSI irqdomain information
+ * @model: the value assigned to acpi_irq_model
+- * @fwnode: the irq_domain identifier for mapping and looking up
+- * GSI interrupts
++ * @fn: a dispatcher function that will return the domain fwnode
++ * for a given GSI
+ */
+ void __init acpi_set_irq_model(enum acpi_irq_model_id model,
+ struct fwnode_handle *(*fn)(u32))
+diff --git a/drivers/acpi/pfr_telemetry.c b/drivers/acpi/pfr_telemetry.c
+index 9abf350bd7a5a..27fb6cdad75f9 100644
+--- a/drivers/acpi/pfr_telemetry.c
++++ b/drivers/acpi/pfr_telemetry.c
+@@ -144,7 +144,7 @@ static int get_pfrt_log_data_info(struct pfrt_log_data_info *data_info,
+ ret = 0;
+
+ free_acpi_buffer:
+- kfree(out_obj);
++ ACPI_FREE(out_obj);
+
+ return ret;
+ }
+@@ -180,7 +180,7 @@ static int set_pfrt_log_level(int level, struct pfrt_log_device *pfrt_log_dev)
+ ret = -EBUSY;
+ }
+
+- kfree(out_obj);
++ ACPI_FREE(out_obj);
+
+ return ret;
+ }
+@@ -218,7 +218,7 @@ static int get_pfrt_log_level(struct pfrt_log_device *pfrt_log_dev)
+ ret = obj->integer.value;
+
+ free_acpi_buffer:
+- kfree(out_obj);
++ ACPI_FREE(out_obj);
+
+ return ret;
+ }
+diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c
+index 6bb0b778b5da5..9d2bdc13253a5 100644
+--- a/drivers/acpi/pfr_update.c
++++ b/drivers/acpi/pfr_update.c
+@@ -178,7 +178,7 @@ static int query_capability(struct pfru_update_cap_info *cap_hdr,
+ ret = 0;
+
+ free_acpi_buffer:
+- kfree(out_obj);
++ ACPI_FREE(out_obj);
+
+ return ret;
+ }
+@@ -224,7 +224,7 @@ static int query_buffer(struct pfru_com_buf_info *info,
+ ret = 0;
+
+ free_acpi_buffer:
+- kfree(out_obj);
++ ACPI_FREE(out_obj);
+
+ return ret;
+ }
+@@ -385,7 +385,7 @@ static int start_update(int action, struct pfru_device *pfru_dev)
+ ret = 0;
+
+ free_acpi_buffer:
+- kfree(out_obj);
++ ACPI_FREE(out_obj);
+
+ return ret;
+ }
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index acfabfe07c4fa..fc5b5b2c9e819 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -1134,6 +1134,9 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
+ status = acpi_get_parent(handle, &pr_ahandle);
+ while (ACPI_SUCCESS(status)) {
+ d = acpi_fetch_acpi_dev(pr_ahandle);
++ if (!d)
++ break;
++
+ handle = pr_ahandle;
+
+ if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index f27914aedbd5a..16dcd31d124fe 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -432,10 +432,24 @@ static const struct dmi_system_id asus_laptop[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
+ },
+ },
++ {
++ .ident = "Asus ExpertBook B2502",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
++ },
++ },
+ { }
+ };
+
+-static const struct dmi_system_id lenovo_82ra[] = {
++static const struct dmi_system_id lenovo_laptop[] = {
++ {
++ .ident = "LENOVO IdeaPad Flex 5 14ALC7",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "82R9"),
++ },
++ },
+ {
+ .ident = "LENOVO IdeaPad Flex 5 16ALC7",
+ .matches = {
+@@ -446,6 +460,17 @@ static const struct dmi_system_id lenovo_82ra[] = {
+ { }
+ };
+
++static const struct dmi_system_id schenker_gm_rg[] = {
++ {
++ .ident = "XMG CORE 15 (M22)",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
++ DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
++ },
++ },
++ { }
++};
++
+ struct irq_override_cmp {
+ const struct dmi_system_id *system;
+ unsigned char irq;
+@@ -458,8 +483,9 @@ struct irq_override_cmp {
+ static const struct irq_override_cmp override_table[] = {
+ { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+ { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+- { lenovo_82ra, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+- { lenovo_82ra, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
++ { lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
++ { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
++ { schenker_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
+ };
+
+ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index b2a6162876387..13f10fbcd7f03 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -34,6 +34,7 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
++#include <linux/pnp.h>
+ #include <linux/types.h>
+ #include <linux/workqueue.h>
+ #include <acpi/video.h>
+@@ -105,6 +106,26 @@ static bool nvidia_wmi_ec_supported(void)
+ }
+ #endif
+
++static bool apple_gmux_backlight_present(void)
++{
++ struct acpi_device *adev;
++ struct device *dev;
++
++ adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
++ if (!adev)
++ return false;
++
++ dev = acpi_get_first_physical_node(adev);
++ if (!dev)
++ return false;
++
++ /*
++ * drivers/platform/x86/apple-gmux.c only supports old style
++ * Apple GMUX with an IO-resource.
++ */
++ return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL;
++}
++
+ /* Force to use vendor driver when the ACPI device is known to be
+ * buggy */
+ static int video_detect_force_vendor(const struct dmi_system_id *d)
+@@ -132,6 +153,10 @@ static int video_detect_force_none(const struct dmi_system_id *d)
+ }
+
+ static const struct dmi_system_id video_detect_dmi_table[] = {
++ /*
++ * Models which should use the vendor backlight interface,
++ * because of broken ACPI video backlight control.
++ */
+ {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1128309 */
+ .callback = video_detect_force_vendor,
+@@ -197,14 +222,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "1015CX"),
+ },
+ },
+- {
+- .callback = video_detect_force_vendor,
+- /* GIGABYTE GB-BXBT-2807 */
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"),
+- },
+- },
+ {
+ .callback = video_detect_force_vendor,
+ /* Samsung N150/N210/N220 */
+@@ -234,18 +251,23 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ },
+ {
+ .callback = video_detect_force_vendor,
+- /* Sony VPCEH3U1E */
++ /* Xiaomi Mi Pad 2 */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
+ },
+ },
++
++ /*
++ * Models which should use the vendor backlight interface,
++ * because of broken native backlight control.
++ */
+ {
+ .callback = video_detect_force_vendor,
+- /* Xiaomi Mi Pad 2 */
++ /* Sony Vaio PCG-FRV35 */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "PCG-FRV35"),
+ },
+ },
+
+@@ -609,6 +631,23 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "N250P"),
+ },
+ },
++ {
++ /* https://bugzilla.kernel.org/show_bug.cgi?id=202401 */
++ .callback = video_detect_force_native,
++ /* Sony Vaio VPCEH3U1E */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"),
++ },
++ },
++ {
++ .callback = video_detect_force_native,
++ /* Sony Vaio VPCY11S1E */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCY11S1E"),
++ },
++ },
+
+ /*
+ * These Toshibas have a broken acpi-video interface for brightness
+@@ -671,6 +710,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
+ },
+ },
++ {
++ .callback = video_detect_force_none,
++ /* GIGABYTE GB-BXBT-2807 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"),
++ },
++ },
+ {
+ .callback = video_detect_force_none,
+ /* MSI MS-7721 */
+@@ -729,7 +776,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ if (nvidia_wmi_ec_present)
+ return acpi_backlight_nvidia_wmi_ec;
+
+- if (apple_gmux_present())
++ if (apple_gmux_backlight_present())
+ return acpi_backlight_apple_gmux;
+
+ /* Chromebooks should always prefer native backlight control. */
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index 5350c73564b60..c7afce465a071 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -28,10 +28,6 @@ static bool sleep_no_lps0 __read_mostly;
+ module_param(sleep_no_lps0, bool, 0644);
+ MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
+
+-static bool prefer_microsoft_dsm_guid __read_mostly;
+-module_param(prefer_microsoft_dsm_guid, bool, 0644);
+-MODULE_PARM_DESC(prefer_microsoft_dsm_guid, "Prefer using Microsoft GUID in LPS0 device _DSM evaluation");
+-
+ static const struct acpi_device_id lps0_device_ids[] = {
+ {"PNP0D80", },
+ {"", },
+@@ -369,27 +365,15 @@ out:
+ }
+
+ struct amd_lps0_hid_device_data {
+- const unsigned int rev_id;
+ const bool check_off_by_one;
+- const bool prefer_amd_guid;
+ };
+
+ static const struct amd_lps0_hid_device_data amd_picasso = {
+- .rev_id = 0,
+ .check_off_by_one = true,
+- .prefer_amd_guid = false,
+ };
+
+ static const struct amd_lps0_hid_device_data amd_cezanne = {
+- .rev_id = 0,
+- .check_off_by_one = false,
+- .prefer_amd_guid = false,
+-};
+-
+-static const struct amd_lps0_hid_device_data amd_rembrandt = {
+- .rev_id = 2,
+ .check_off_by_one = false,
+- .prefer_amd_guid = true,
+ };
+
+ static const struct acpi_device_id amd_hid_ids[] = {
+@@ -397,69 +381,27 @@ static const struct acpi_device_id amd_hid_ids[] = {
+ {"AMD0005", (kernel_ulong_t)&amd_picasso, },
+ {"AMDI0005", (kernel_ulong_t)&amd_picasso, },
+ {"AMDI0006", (kernel_ulong_t)&amd_cezanne, },
+- {"AMDI0007", (kernel_ulong_t)&amd_rembrandt, },
+ {}
+ };
+
+-static int lps0_prefer_microsoft(const struct dmi_system_id *id)
++static int lps0_prefer_amd(const struct dmi_system_id *id)
+ {
+- pr_debug("Preferring Microsoft GUID.\n");
+- prefer_microsoft_dsm_guid = true;
++ pr_debug("Using AMD GUID w/ _REV 2.\n");
++ rev_id = 2;
+ return 0;
+ }
+-
+ static const struct dmi_system_id s2idle_dmi_table[] __initconst = {
+ {
+ /*
+- * ASUS TUF Gaming A17 FA707RE
+- * https://bugzilla.kernel.org/show_bug.cgi?id=216101
+- */
+- .callback = lps0_prefer_microsoft,
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "ASUS TUF Gaming A17"),
+- },
+- },
+- {
+- /* ASUS ROG Zephyrus G14 (2022) */
+- .callback = lps0_prefer_microsoft,
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus G14 GA402"),
+- },
+- },
+- {
+- /*
+- * Lenovo Yoga Slim 7 Pro X 14ARH7
+- * https://bugzilla.kernel.org/show_bug.cgi?id=216473 : 82V2
+- * https://bugzilla.kernel.org/show_bug.cgi?id=216438 : 82TL
+- */
+- .callback = lps0_prefer_microsoft,
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "82"),
+- },
+- },
+- {
+- /*
+- * ASUSTeK COMPUTER INC. ROG Flow X13 GV301RE_GV301RE
+- * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
++ * AMD Rembrandt based HP EliteBook 835/845/865 G9
++ * Contains specialized AML in AMD/_REV 2 path to avoid
++ * triggering a bug in Qualcomm WLAN firmware. This may be
++ * removed in the future if that firmware is fixed.
+ */
+- .callback = lps0_prefer_microsoft,
++ .callback = lps0_prefer_amd,
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X13 GV301"),
+- },
+- },
+- {
+- /*
+- * ASUSTeK COMPUTER INC. ROG Flow X16 GV601RW_GV601RW
+- * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
+- */
+- .callback = lps0_prefer_microsoft,
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X16 GV601"),
++ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++ DMI_MATCH(DMI_BOARD_NAME, "8990"),
+ },
+ },
+ {}
+@@ -484,16 +426,14 @@ static int lps0_device_attach(struct acpi_device *adev,
+ if (dev_id->id[0])
+ data = (const struct amd_lps0_hid_device_data *) dev_id->driver_data;
+ else
+- data = &amd_rembrandt;
+- rev_id = data->rev_id;
++ data = &amd_cezanne;
+ lps0_dsm_func_mask = validate_dsm(adev->handle,
+ ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
+ if (lps0_dsm_func_mask > 0x3 && data->check_off_by_one) {
+ lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
+ acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
+ ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
+- } else if (lps0_dsm_func_mask_microsoft > 0 && data->prefer_amd_guid &&
+- !prefer_microsoft_dsm_guid) {
++ } else if (lps0_dsm_func_mask_microsoft > 0 && rev_id) {
+ lps0_dsm_func_mask_microsoft = -EINVAL;
+ acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
+ }
+@@ -501,8 +441,7 @@ static int lps0_device_attach(struct acpi_device *adev,
+ rev_id = 1;
+ lps0_dsm_func_mask = validate_dsm(adev->handle,
+ ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
+- if (!prefer_microsoft_dsm_guid)
+- lps0_dsm_func_mask_microsoft = -EINVAL;
++ lps0_dsm_func_mask_microsoft = -EINVAL;
+ }
+
+ if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index d7d3f1669d4c0..4e816bb402f68 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -308,7 +308,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+ {
+- /* Lenovo Yoga Tablet 1050F/L */
++ /* Lenovo Yoga Tablet 2 1050F/L */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
+@@ -319,6 +319,27 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
++ {
++ /* Lenovo Yoga Tab 3 Pro X90F */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
++ },
++ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
++ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
++ },
++ {
++ /* Medion Lifetab S10346 */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
++ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
++ /* Way too generic, also match on BIOS data */
++ DMI_MATCH(DMI_BIOS_DATE, "10/22/2015"),
++ },
++ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
++ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
++ },
+ {
+ /* Nextbook Ares 8 */
+ .matches = {
+@@ -348,6 +369,7 @@ static const struct acpi_device_id i2c_acpi_known_good_ids[] = {
+ { "10EC5640", 0 }, /* RealTek ALC5640 audio codec */
+ { "INT33F4", 0 }, /* X-Powers AXP288 PMIC */
+ { "INT33FD", 0 }, /* Intel Crystal Cove PMIC */
++ { "INT34D3", 0 }, /* Intel Whiskey Cove PMIC */
+ { "NPCE69A", 0 }, /* Asus Transformer keyboard dock */
+ {}
+ };
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 639de2d75d636..53ab2306da009 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -84,6 +84,7 @@ enum board_ids {
+ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static void ahci_remove_one(struct pci_dev *dev);
+ static void ahci_shutdown_one(struct pci_dev *dev);
++static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv);
+ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+@@ -677,6 +678,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ ahci_save_initial_config(&pdev->dev, hpriv);
+ }
+
++static int ahci_pci_reset_controller(struct ata_host *host)
++{
++ struct pci_dev *pdev = to_pci_dev(host->dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ int rc;
++
++ rc = ahci_reset_controller(host);
++ if (rc)
++ return rc;
++
++ /*
++ * If platform firmware failed to enable ports, try to enable
++ * them here.
++ */
++ ahci_intel_pcs_quirk(pdev, hpriv);
++
++ return 0;
++}
++
+ static void ahci_pci_init_controller(struct ata_host *host)
+ {
+ struct ahci_host_priv *hpriv = host->private_data;
+@@ -871,7 +891,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
+ struct ata_host *host = pci_get_drvdata(pdev);
+ int rc;
+
+- rc = ahci_reset_controller(host);
++ rc = ahci_pci_reset_controller(host);
+ if (rc)
+ return rc;
+ ahci_pci_init_controller(host);
+@@ -907,7 +927,7 @@ static int ahci_pci_device_resume(struct device *dev)
+ ahci_mcp89_apple_enable(pdev);
+
+ if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+- rc = ahci_reset_controller(host);
++ rc = ahci_pci_reset_controller(host);
+ if (rc)
+ return rc;
+
+@@ -1785,12 +1805,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* save initial config */
+ ahci_pci_save_initial_config(pdev, hpriv);
+
+- /*
+- * If platform firmware failed to enable ports, try to enable
+- * them here.
+- */
+- ahci_intel_pcs_quirk(pdev, hpriv);
+-
+ /* prepare host */
+ if (hpriv->cap & HOST_CAP_NCQ) {
+ pi.flags |= ATA_FLAG_NCQ;
+@@ -1900,7 +1914,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ if (rc)
+ return rc;
+
+- rc = ahci_reset_controller(host);
++ rc = ahci_pci_reset_controller(host);
+ if (rc)
+ return rc;
+
+diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
+index b6806d41a8c50..fd4dccc253896 100644
+--- a/drivers/ata/libata-sata.c
++++ b/drivers/ata/libata-sata.c
+@@ -1392,7 +1392,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
+ tf->hob_lbah = buf[10];
+ tf->nsect = buf[12];
+ tf->hob_nsect = buf[13];
+- if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
++ if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id) &&
++ (tf->status & ATA_SENSE))
+ tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
+
+ return 0;
+@@ -1456,8 +1457,12 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
+ memcpy(&qc->result_tf, &tf, sizeof(tf));
+ qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+ qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
+- if (dev->class == ATA_DEV_ZAC &&
+- ((qc->result_tf.status & ATA_SENSE) || qc->result_tf.auxiliary)) {
++
++ /*
++ * If the device supports NCQ autosense, ata_eh_read_log_10h() will have
++ * stored the sense data in qc->result_tf.auxiliary.
++ */
++ if (qc->result_tf.auxiliary) {
+ char sense_key, asc, ascq;
+
+ sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
+diff --git a/drivers/base/class.c b/drivers/base/class.c
+index 64f7b9a0970f7..8ceafb7d0203b 100644
+--- a/drivers/base/class.c
++++ b/drivers/base/class.c
+@@ -192,6 +192,11 @@ int __class_register(struct class *cls, struct lock_class_key *key)
+ }
+ error = class_add_groups(class_get(cls), cls->class_groups);
+ class_put(cls);
++ if (error) {
++ kobject_del(&cp->subsys.kobj);
++ kfree_const(cp->subsys.kobj.name);
++ kfree(cp);
++ }
+ return error;
+ }
+ EXPORT_SYMBOL_GPL(__class_register);
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index b52049098d4ee..14088b5adb556 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -484,7 +484,17 @@ static int rpm_idle(struct device *dev, int rpmflags)
+
+ dev->power.idle_notification = true;
+
+- retval = __rpm_callback(callback, dev);
++ if (dev->power.irq_safe)
++ spin_unlock(&dev->power.lock);
++ else
++ spin_unlock_irq(&dev->power.lock);
++
++ retval = callback(dev);
++
++ if (dev->power.irq_safe)
++ spin_lock(&dev->power.lock);
++ else
++ spin_lock_irq(&dev->power.lock);
+
+ dev->power.idle_notification = false;
+ wake_up_all(&dev->power.wait_queue);
+diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
+index 4ef9488d05cde..3de89795f5843 100644
+--- a/drivers/base/regmap/regmap-irq.c
++++ b/drivers/base/regmap/regmap-irq.c
+@@ -722,6 +722,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ int i;
+ int ret = -ENOMEM;
+ int num_type_reg;
++ int num_regs;
+ u32 reg;
+
+ if (chip->num_regs <= 0)
+@@ -796,14 +797,20 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ goto err_alloc;
+ }
+
+- num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
+- if (num_type_reg) {
+- d->type_buf_def = kcalloc(num_type_reg,
++ /*
++ * Use num_config_regs if defined, otherwise fall back to num_type_reg
++ * to maintain backward compatibility.
++ */
++ num_type_reg = chip->num_config_regs ? chip->num_config_regs
++ : chip->num_type_reg;
++ num_regs = chip->type_in_mask ? chip->num_regs : num_type_reg;
++ if (num_regs) {
++ d->type_buf_def = kcalloc(num_regs,
+ sizeof(*d->type_buf_def), GFP_KERNEL);
+ if (!d->type_buf_def)
+ goto err_alloc;
+
+- d->type_buf = kcalloc(num_type_reg, sizeof(*d->type_buf),
++ d->type_buf = kcalloc(num_regs, sizeof(*d->type_buf),
+ GFP_KERNEL);
+ if (!d->type_buf)
+ goto err_alloc;
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index 8532b839a3435..6772402326842 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -2217,7 +2217,8 @@ void drbd_destroy_device(struct kref *kref)
+ kref_put(&peer_device->connection->kref, drbd_destroy_connection);
+ kfree(peer_device);
+ }
+- memset(device, 0xfd, sizeof(*device));
++ if (device->submit.wq)
++ destroy_workqueue(device->submit.wq);
+ kfree(device);
+ kref_put(&resource->kref, drbd_destroy_resource);
+ }
+@@ -2309,7 +2310,6 @@ void drbd_destroy_resource(struct kref *kref)
+ idr_destroy(&resource->devices);
+ free_cpumask_var(resource->cpu_mask);
+ kfree(resource->name);
+- memset(resource, 0xf2, sizeof(*resource));
+ kfree(resource);
+ }
+
+@@ -2650,7 +2650,6 @@ void drbd_destroy_connection(struct kref *kref)
+ drbd_free_socket(&connection->data);
+ kfree(connection->int_dig_in);
+ kfree(connection->int_dig_vv);
+- memset(connection, 0xfc, sizeof(*connection));
+ kfree(connection);
+ kref_put(&resource->kref, drbd_destroy_resource);
+ }
+@@ -2774,7 +2773,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
+
+ err = add_disk(disk);
+ if (err)
+- goto out_idr_remove_from_resource;
++ goto out_destroy_workqueue;
+
+ /* inherit the connection state */
+ device->state.conn = first_connection(resource)->cstate;
+@@ -2788,6 +2787,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
+ drbd_debugfs_device_add(device);
+ return NO_ERROR;
+
++out_destroy_workqueue:
++ destroy_workqueue(device->submit.wq);
+ out_idr_remove_from_resource:
+ for_each_connection_safe(connection, n, resource) {
+ peer_device = idr_remove(&connection->peer_devices, vnr);
+diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
+index 864c98e748757..249eba7d21c28 100644
+--- a/drivers/block/drbd/drbd_nl.c
++++ b/drivers/block/drbd/drbd_nl.c
+@@ -1210,6 +1210,7 @@ static void decide_on_discard_support(struct drbd_device *device,
+ struct drbd_connection *connection =
+ first_peer_device(device)->connection;
+ struct request_queue *q = device->rq_queue;
++ unsigned int max_discard_sectors;
+
+ if (bdev && !bdev_max_discard_sectors(bdev->backing_bdev))
+ goto not_supported;
+@@ -1230,15 +1231,14 @@ static void decide_on_discard_support(struct drbd_device *device,
+ * topology on all peers.
+ */
+ blk_queue_discard_granularity(q, 512);
+- q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
+- q->limits.max_write_zeroes_sectors =
+- drbd_max_discard_sectors(connection);
++ max_discard_sectors = drbd_max_discard_sectors(connection);
++ blk_queue_max_discard_sectors(q, max_discard_sectors);
++ blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+ return;
+
+ not_supported:
+ blk_queue_discard_granularity(q, 0);
+- q->limits.max_discard_sectors = 0;
+- q->limits.max_write_zeroes_sectors = 0;
++ blk_queue_max_discard_sectors(q, 0);
+ }
+
+ static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index ccad3d7b3ddd9..487840e3564df 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -4593,8 +4593,10 @@ static int __init do_floppy_init(void)
+ goto out_put_disk;
+
+ err = floppy_alloc_disk(drive, 0);
+- if (err)
++ if (err) {
++ blk_mq_free_tag_set(&tag_sets[drive]);
+ goto out_put_disk;
++ }
+
+ timer_setup(&motor_off_timer[drive], motor_off_callback, 0);
+ }
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index ad92192c7d617..d12d3d171ec4c 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1773,7 +1773,16 @@ static const struct block_device_operations lo_fops = {
+ /*
+ * And now the modules code and kernel interface.
+ */
+-static int max_loop;
++
++/*
++ * If max_loop is specified, create that many devices upfront.
++ * This also becomes a hard limit. If max_loop is not specified,
++ * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
++ * init time. Loop devices can be requested on-demand with the
++ * /dev/loop-control interface, or be instantiated by accessing
++ * a 'dead' device node.
++ */
++static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
+ module_param(max_loop, int, 0444);
+ MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
+ module_param(max_part, int, 0444);
+@@ -2181,7 +2190,7 @@ MODULE_ALIAS("devname:loop-control");
+
+ static int __init loop_init(void)
+ {
+- int i, nr;
++ int i;
+ int err;
+
+ part_shift = 0;
+@@ -2209,19 +2218,6 @@ static int __init loop_init(void)
+ goto err_out;
+ }
+
+- /*
+- * If max_loop is specified, create that many devices upfront.
+- * This also becomes a hard limit. If max_loop is not specified,
+- * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
+- * init time. Loop devices can be requested on-demand with the
+- * /dev/loop-control interface, or be instantiated by accessing
+- * a 'dead' device node.
+- */
+- if (max_loop)
+- nr = max_loop;
+- else
+- nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
+-
+ err = misc_register(&loop_misc);
+ if (err < 0)
+ goto err_out;
+@@ -2233,7 +2229,7 @@ static int __init loop_init(void)
+ }
+
+ /* pre-create number of devices given by config or max_loop */
+- for (i = 0; i < nr; i++)
++ for (i = 0; i < max_loop; i++)
+ loop_add(i);
+
+ printk(KERN_INFO "loop: module loaded\n");
+diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
+index a657e9a3e96a5..f6b4b7a1be4cc 100644
+--- a/drivers/bluetooth/btintel.c
++++ b/drivers/bluetooth/btintel.c
+@@ -2524,7 +2524,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
+ */
+ err = btintel_read_version(hdev, &ver);
+ if (err)
+- return err;
++ break;
+
+ /* Apply the device specific HCI quirks
+ *
+@@ -2566,7 +2566,8 @@ static int btintel_setup_combined(struct hci_dev *hdev)
+ default:
+ bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
+ INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
+- return -EINVAL;
++ err = -EINVAL;
++ break;
+ }
+
+ exit_error:
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index f05018988a177..6beafd62d7226 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -802,13 +802,13 @@ static inline void btusb_free_frags(struct btusb_data *data)
+
+ spin_lock_irqsave(&data->rxlock, flags);
+
+- kfree_skb(data->evt_skb);
++ dev_kfree_skb_irq(data->evt_skb);
+ data->evt_skb = NULL;
+
+- kfree_skb(data->acl_skb);
++ dev_kfree_skb_irq(data->acl_skb);
+ data->acl_skb = NULL;
+
+- kfree_skb(data->sco_skb);
++ dev_kfree_skb_irq(data->sco_skb);
+ data->sco_skb = NULL;
+
+ spin_unlock_irqrestore(&data->rxlock, flags);
+diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
+index d7e0b75db8a60..2b6c0e1922cb3 100644
+--- a/drivers/bluetooth/hci_bcm.c
++++ b/drivers/bluetooth/hci_bcm.c
+@@ -53,11 +53,13 @@
+ * struct bcm_device_data - device specific data
+ * @no_early_set_baudrate: Disallow set baudrate before driver setup()
+ * @drive_rts_on_open: drive RTS signal on ->open() when platform requires it
++ * @no_uart_clock_set: UART clock set command for >3Mbps mode is unavailable
+ * @max_autobaud_speed: max baudrate supported by device in autobaud mode
+ */
+ struct bcm_device_data {
+ bool no_early_set_baudrate;
+ bool drive_rts_on_open;
++ bool no_uart_clock_set;
+ u32 max_autobaud_speed;
+ };
+
+@@ -100,6 +102,7 @@ struct bcm_device_data {
+ * @is_suspended: whether flow control is currently disabled
+ * @no_early_set_baudrate: don't set_baudrate before setup()
+ * @drive_rts_on_open: drive RTS signal on ->open() when platform requires it
++ * @no_uart_clock_set: UART clock set command for >3Mbps mode is unavailable
+ * @pcm_int_params: keep the initial PCM configuration
+ * @use_autobaud_mode: start Bluetooth device in autobaud mode
+ * @max_autobaud_speed: max baudrate supported by device in autobaud mode
+@@ -140,6 +143,7 @@ struct bcm_device {
+ #endif
+ bool no_early_set_baudrate;
+ bool drive_rts_on_open;
++ bool no_uart_clock_set;
+ bool use_autobaud_mode;
+ u8 pcm_int_params[5];
+ u32 max_autobaud_speed;
+@@ -172,10 +176,11 @@ static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
+ static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
+ {
+ struct hci_dev *hdev = hu->hdev;
++ struct bcm_data *bcm = hu->priv;
+ struct sk_buff *skb;
+ struct bcm_update_uart_baud_rate param;
+
+- if (speed > 3000000) {
++ if (speed > 3000000 && !bcm->dev->no_uart_clock_set) {
+ struct bcm_write_uart_clock_setting clock;
+
+ clock.type = BCM_UART_CLOCK_48MHZ;
+@@ -1529,6 +1534,7 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
+ bcmdev->max_autobaud_speed = data->max_autobaud_speed;
+ bcmdev->no_early_set_baudrate = data->no_early_set_baudrate;
+ bcmdev->drive_rts_on_open = data->drive_rts_on_open;
++ bcmdev->no_uart_clock_set = data->no_uart_clock_set;
+ }
+
+ return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto);
+@@ -1550,6 +1556,10 @@ static struct bcm_device_data bcm43438_device_data = {
+ .drive_rts_on_open = true,
+ };
+
++static struct bcm_device_data cyw4373a0_device_data = {
++ .no_uart_clock_set = true,
++};
++
+ static struct bcm_device_data cyw55572_device_data = {
+ .max_autobaud_speed = 921600,
+ };
+@@ -1566,6 +1576,7 @@ static const struct of_device_id bcm_bluetooth_of_match[] = {
+ { .compatible = "brcm,bcm4349-bt", .data = &bcm43438_device_data },
+ { .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data },
+ { .compatible = "brcm,bcm4335a0" },
++ { .compatible = "cypress,cyw4373a0-bt", .data = &cyw4373a0_device_data },
+ { .compatible = "infineon,cyw55572-bt", .data = &cyw55572_device_data },
+ { },
+ };
+diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
+index cf4a560958173..8055f63603f45 100644
+--- a/drivers/bluetooth/hci_bcsp.c
++++ b/drivers/bluetooth/hci_bcsp.c
+@@ -378,7 +378,7 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
+ i++;
+
+ __skb_unlink(skb, &bcsp->unack);
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ }
+
+ if (skb_queue_empty(&bcsp->unack))
+diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
+index c5a0409ef84fd..6455bc4fb5bb3 100644
+--- a/drivers/bluetooth/hci_h5.c
++++ b/drivers/bluetooth/hci_h5.c
+@@ -313,7 +313,7 @@ static void h5_pkt_cull(struct h5 *h5)
+ break;
+
+ __skb_unlink(skb, &h5->unack);
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ }
+
+ if (skb_queue_empty(&h5->unack))
+diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
+index 4eb420a9ed04e..5abc01a2acf72 100644
+--- a/drivers/bluetooth/hci_ll.c
++++ b/drivers/bluetooth/hci_ll.c
+@@ -345,7 +345,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+ default:
+ BT_ERR("illegal hcill state: %ld (losing packet)",
+ ll->hcill_state);
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ break;
+ }
+
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 8df11016fd51b..bae9b2a408d95 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -912,7 +912,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+ default:
+ BT_ERR("Illegal tx state: %d (losing packet)",
+ qca->tx_ibs_state);
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ break;
+ }
+
+diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
+index c22d4184bb612..0555e3838bce1 100644
+--- a/drivers/char/hw_random/amd-rng.c
++++ b/drivers/char/hw_random/amd-rng.c
+@@ -143,15 +143,19 @@ static int __init amd_rng_mod_init(void)
+ found:
+ err = pci_read_config_dword(pdev, 0x58, &pmbase);
+ if (err)
+- return err;
++ goto put_dev;
+
+ pmbase &= 0x0000FF00;
+- if (pmbase == 0)
+- return -EIO;
++ if (pmbase == 0) {
++ err = -EIO;
++ goto put_dev;
++ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+- if (!priv)
+- return -ENOMEM;
++ if (!priv) {
++ err = -ENOMEM;
++ goto put_dev;
++ }
+
+ if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
+ dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
+@@ -185,6 +189,8 @@ err_iomap:
+ release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+ out:
+ kfree(priv);
++put_dev:
++ pci_dev_put(pdev);
+ return err;
+ }
+
+@@ -200,6 +206,8 @@ static void __exit amd_rng_mod_exit(void)
+
+ release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+
++ pci_dev_put(priv->pcidev);
++
+ kfree(priv);
+ }
+
+diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
+index 138ce434f86b2..12fbe80918319 100644
+--- a/drivers/char/hw_random/geode-rng.c
++++ b/drivers/char/hw_random/geode-rng.c
+@@ -51,6 +51,10 @@ static const struct pci_device_id pci_tbl[] = {
+ };
+ MODULE_DEVICE_TABLE(pci, pci_tbl);
+
++struct amd_geode_priv {
++ struct pci_dev *pcidev;
++ void __iomem *membase;
++};
+
+ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+ {
+@@ -90,6 +94,7 @@ static int __init geode_rng_init(void)
+ const struct pci_device_id *ent;
+ void __iomem *mem;
+ unsigned long rng_base;
++ struct amd_geode_priv *priv;
+
+ for_each_pci_dev(pdev) {
+ ent = pci_match_id(pci_tbl, pdev);
+@@ -97,17 +102,26 @@ static int __init geode_rng_init(void)
+ goto found;
+ }
+ /* Device not found. */
+- goto out;
++ return err;
+
+ found:
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++ if (!priv) {
++ err = -ENOMEM;
++ goto put_dev;
++ }
++
+ rng_base = pci_resource_start(pdev, 0);
+ if (rng_base == 0)
+- goto out;
++ goto free_priv;
+ err = -ENOMEM;
+ mem = ioremap(rng_base, 0x58);
+ if (!mem)
+- goto out;
+- geode_rng.priv = (unsigned long)mem;
++ goto free_priv;
++
++ geode_rng.priv = (unsigned long)priv;
++ priv->membase = mem;
++ priv->pcidev = pdev;
+
+ pr_info("AMD Geode RNG detected\n");
+ err = hwrng_register(&geode_rng);
+@@ -116,20 +130,26 @@ found:
+ err);
+ goto err_unmap;
+ }
+-out:
+ return err;
+
+ err_unmap:
+ iounmap(mem);
+- goto out;
++free_priv:
++ kfree(priv);
++put_dev:
++ pci_dev_put(pdev);
++ return err;
+ }
+
+ static void __exit geode_rng_exit(void)
+ {
+- void __iomem *mem = (void __iomem *)geode_rng.priv;
++ struct amd_geode_priv *priv;
+
++ priv = (struct amd_geode_priv *)geode_rng.priv;
+ hwrng_unregister(&geode_rng);
+- iounmap(mem);
++ iounmap(priv->membase);
++ pci_dev_put(priv->pcidev);
++ kfree(priv);
+ }
+
+ module_init(geode_rng_init);
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 49a1707693c9f..d5ee52be176d3 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -3704,12 +3704,16 @@ static void deliver_smi_err_response(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg,
+ unsigned char err)
+ {
++ int rv;
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = err;
+ msg->rsp_size = 3;
+- /* It's an error, so it will never requeue, no need to check return. */
+- handle_one_recv_msg(intf, msg);
++
++ /* This will never requeue, but it may ask us to free the message. */
++ rv = handle_one_recv_msg(intf, msg);
++ if (rv == 0)
++ ipmi_free_smi_msg(msg);
+ }
+
+ static void cleanup_smi_msgs(struct ipmi_smi *intf)
+diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
+index 19c32bf50e0e9..2dea8cd5a09ac 100644
+--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
++++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
+@@ -406,13 +406,31 @@ static void aspeed_kcs_check_obe(struct timer_list *timer)
+ static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
+ {
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
++ int rc;
++ u8 str;
+
+ /* We don't have an OBE IRQ, emulate it */
+ if (mask & KCS_BMC_EVENT_TYPE_OBE) {
+- if (KCS_BMC_EVENT_TYPE_OBE & state)
+- mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
+- else
++ if (KCS_BMC_EVENT_TYPE_OBE & state) {
++ /*
++ * Given we don't have an OBE IRQ, delay by polling briefly to see if we can
++ * observe such an event before returning to the caller. This is not
++ * incorrect because OBF may have already become clear before enabling the
++ * IRQ if we had one, under which circumstance no event will be propagated
++ * anyway.
++ *
++ * The onus is on the client to perform a race-free check that it hasn't
++ * missed the event.
++ */
++ rc = read_poll_timeout_atomic(aspeed_kcs_inb, str,
++ !(str & KCS_BMC_STR_OBF), 1, 100, false,
++ &priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
++ /* Time for the slow path? */
++ if (rc == -ETIMEDOUT)
++ mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
++ } else {
+ del_timer(&priv->obe.timer);
++ }
+ }
+
+ if (mask & KCS_BMC_EVENT_TYPE_IBF) {
+diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
+index 1b18ce5ebab1e..0913d3eb8d518 100644
+--- a/drivers/char/tpm/eventlog/acpi.c
++++ b/drivers/char/tpm/eventlog/acpi.c
+@@ -90,16 +90,21 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ return -ENODEV;
+
+ if (tbl->header.length <
+- sizeof(*tbl) + sizeof(struct acpi_tpm2_phy))
++ sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) {
++ acpi_put_table((struct acpi_table_header *)tbl);
+ return -ENODEV;
++ }
+
+ tpm2_phy = (void *)tbl + sizeof(*tbl);
+ len = tpm2_phy->log_area_minimum_length;
+
+ start = tpm2_phy->log_area_start_address;
+- if (!start || !len)
++ if (!start || !len) {
++ acpi_put_table((struct acpi_table_header *)tbl);
+ return -ENODEV;
++ }
+
++ acpi_put_table((struct acpi_table_header *)tbl);
+ format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+ } else {
+ /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
+@@ -120,8 +125,10 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ break;
+ }
+
++ acpi_put_table((struct acpi_table_header *)buff);
+ format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ }
++
+ if (!len) {
+ dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__);
+ return -EIO;
+@@ -156,5 +163,4 @@ err:
+ kfree(log->bios_event_log);
+ log->bios_event_log = NULL;
+ return ret;
+-
+ }
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 18606651d1aa4..16fc481d60950 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -252,7 +252,7 @@ static int __crb_relinquish_locality(struct device *dev,
+ iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
+ if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
+ TPM2_TIMEOUT_C)) {
+- dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
++ dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n");
+ return -ETIME;
+ }
+
+@@ -676,12 +676,16 @@ static int crb_acpi_add(struct acpi_device *device)
+
+ /* Should the FIFO driver handle this? */
+ sm = buf->start_method;
+- if (sm == ACPI_TPM2_MEMORY_MAPPED)
+- return -ENODEV;
++ if (sm == ACPI_TPM2_MEMORY_MAPPED) {
++ rc = -ENODEV;
++ goto out;
++ }
+
+ priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
+- if (!priv)
+- return -ENOMEM;
++ if (!priv) {
++ rc = -ENOMEM;
++ goto out;
++ }
+
+ if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
+ if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
+@@ -689,7 +693,8 @@ static int crb_acpi_add(struct acpi_device *device)
+ FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ buf->header.length,
+ ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
+- return -EINVAL;
++ rc = -EINVAL;
++ goto out;
+ }
+ crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
+ priv->smc_func_id = crb_smc->smc_func_id;
+@@ -700,17 +705,23 @@ static int crb_acpi_add(struct acpi_device *device)
+
+ rc = crb_map_io(device, priv, buf);
+ if (rc)
+- return rc;
++ goto out;
+
+ chip = tpmm_chip_alloc(dev, &tpm_crb);
+- if (IS_ERR(chip))
+- return PTR_ERR(chip);
++ if (IS_ERR(chip)) {
++ rc = PTR_ERR(chip);
++ goto out;
++ }
+
+ dev_set_drvdata(&chip->dev, priv);
+ chip->acpi_dev_handle = device->handle;
+ chip->flags = TPM_CHIP_FLAG_TPM2;
+
+- return tpm_chip_register(chip);
++ rc = tpm_chip_register(chip);
++
++out:
++ acpi_put_table((struct acpi_table_header *)buf);
++ return rc;
+ }
+
+ static int crb_acpi_remove(struct acpi_device *device)
+diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
+index 5c233423c56fa..deff23bb54bf1 100644
+--- a/drivers/char/tpm/tpm_ftpm_tee.c
++++ b/drivers/char/tpm/tpm_ftpm_tee.c
+@@ -397,7 +397,13 @@ static int __init ftpm_mod_init(void)
+ if (rc)
+ return rc;
+
+- return driver_register(&ftpm_tee_driver.driver);
++ rc = driver_register(&ftpm_tee_driver.driver);
++ if (rc) {
++ platform_driver_unregister(&ftpm_tee_plat_driver);
++ return rc;
++ }
++
++ return 0;
+ }
+
+ static void __exit ftpm_mod_exit(void)
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index bcff6429e0b4f..ed5dabd3c72d6 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -125,6 +125,7 @@ static int check_acpi_tpm2(struct device *dev)
+ const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev);
+ struct acpi_table_tpm2 *tbl;
+ acpi_status st;
++ int ret = 0;
+
+ if (!aid || aid->driver_data != DEVICE_IS_TPM2)
+ return 0;
+@@ -132,8 +133,7 @@ static int check_acpi_tpm2(struct device *dev)
+ /* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2
+ * table is mandatory
+ */
+- st =
+- acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
++ st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
+ if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
+ dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
+ return -EINVAL;
+@@ -141,9 +141,10 @@ static int check_acpi_tpm2(struct device *dev)
+
+ /* The tpm2_crb driver handles this device */
+ if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
+- return -ENODEV;
++ ret = -ENODEV;
+
+- return 0;
++ acpi_put_table((struct acpi_table_header *)tbl);
++ return ret;
+ }
+ #else
+ static int check_acpi_tpm2(struct device *dev)
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 757623bacfd50..3f98e587b3e84 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -682,15 +682,19 @@ static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
+ {
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+- switch (priv->manufacturer_id) {
+- case TPM_VID_WINBOND:
+- return ((status == TPM_STS_VALID) ||
+- (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
+- case TPM_VID_STM:
+- return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
+- default:
+- return (status == TPM_STS_COMMAND_READY);
++ if (!test_bit(TPM_TIS_DEFAULT_CANCELLATION, &priv->flags)) {
++ switch (priv->manufacturer_id) {
++ case TPM_VID_WINBOND:
++ return ((status == TPM_STS_VALID) ||
++ (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
++ case TPM_VID_STM:
++ return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
++ default:
++ break;
++ }
+ }
++
++ return status == TPM_STS_COMMAND_READY;
+ }
+
+ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
+diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
+index 66a5a13cd1df2..b68479e0de10f 100644
+--- a/drivers/char/tpm/tpm_tis_core.h
++++ b/drivers/char/tpm/tpm_tis_core.h
+@@ -86,6 +86,7 @@ enum tis_defaults {
+ enum tpm_tis_flags {
+ TPM_TIS_ITPM_WORKAROUND = BIT(0),
+ TPM_TIS_INVALID_STATUS = BIT(1),
++ TPM_TIS_DEFAULT_CANCELLATION = BIT(2),
+ };
+
+ struct tpm_tis_data {
+diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
+index 0692510dfcab9..f3a7251c8e38f 100644
+--- a/drivers/char/tpm/tpm_tis_i2c.c
++++ b/drivers/char/tpm/tpm_tis_i2c.c
+@@ -49,7 +49,7 @@
+
+ /* Masks with bits that must be read zero */
+ #define TPM_ACCESS_READ_ZERO 0x48
+-#define TPM_INT_ENABLE_ZERO 0x7FFFFF6
++#define TPM_INT_ENABLE_ZERO 0x7FFFFF60
+ #define TPM_STS_READ_ZERO 0x23
+ #define TPM_INTF_CAPABILITY_ZERO 0x0FFFF000
+ #define TPM_I2C_INTERFACE_CAPABILITY_ZERO 0x80000000
+@@ -329,6 +329,7 @@ static int tpm_tis_i2c_probe(struct i2c_client *dev,
+ if (!phy->io_buf)
+ return -ENOMEM;
+
++ set_bit(TPM_TIS_DEFAULT_CANCELLATION, &phy->priv.flags);
+ phy->i2c_client = dev;
+
+ /* must precede all communication with the tpm */
+diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
+index d37c45b676abe..2afea905f7f3c 100644
+--- a/drivers/clk/imx/clk-imx8mn.c
++++ b/drivers/clk/imx/clk-imx8mn.c
+@@ -27,10 +27,10 @@ static u32 share_count_nand;
+ static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
+ static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
+ static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
+-static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
++static const char * const video_pll_bypass_sels[] = {"video_pll", "video_pll_ref_sel", };
+ static const char * const dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", };
+ static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
+-static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
++static const char * const m7_alt_pll_bypass_sels[] = {"m7_alt_pll", "m7_alt_pll_ref_sel", };
+ static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
+ static const char * const sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
+
+@@ -40,24 +40,24 @@ static const char * const imx8mn_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pl
+
+ static const char * const imx8mn_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", };
+
+-static const char * const imx8mn_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "vpu_pll_out",
+- "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", };
++static const char * const imx8mn_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "m7_alt_pll_out",
++ "sys_pll1_800m", "audio_pll1_out", "video_pll_out", "sys_pll3_out", };
+
+ static const char * const imx8mn_gpu_core_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+- "video_pll1_out", "audio_pll2_out", };
++ "video_pll_out", "audio_pll2_out", };
+
+ static const char * const imx8mn_gpu_shader_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+- "video_pll1_out", "audio_pll2_out", };
++ "video_pll_out", "audio_pll2_out", };
+
+ static const char * const imx8mn_main_axi_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll1_800m",
+ "sys_pll2_250m", "sys_pll2_1000m", "audio_pll1_out",
+- "video_pll1_out", "sys_pll1_100m",};
++ "video_pll_out", "sys_pll1_100m",};
+
+ static const char * const imx8mn_enet_axi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m",
+ "sys_pll2_250m", "sys_pll2_200m", "audio_pll1_out",
+- "video_pll1_out", "sys_pll3_out", };
++ "video_pll_out", "sys_pll3_out", };
+
+ static const char * const imx8mn_nand_usdhc_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m",
+ "sys_pll2_200m", "sys_pll1_133m", "sys_pll3_out",
+@@ -77,23 +77,23 @@ static const char * const imx8mn_usb_bus_sels[] = {"osc_24m", "sys_pll2_500m", "
+
+ static const char * const imx8mn_gpu_axi_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+- "video_pll1_out", "audio_pll2_out", };
++ "video_pll_out", "audio_pll2_out", };
+
+ static const char * const imx8mn_gpu_ahb_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+- "video_pll1_out", "audio_pll2_out", };
++ "video_pll_out", "audio_pll2_out", };
+
+ static const char * const imx8mn_noc_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_1000m", "sys_pll2_500m", "audio_pll1_out",
+- "video_pll1_out", "audio_pll2_out", };
++ "video_pll_out", "audio_pll2_out", };
+
+ static const char * const imx8mn_ahb_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_800m",
+ "sys_pll1_400m", "sys_pll2_125m", "sys_pll3_out",
+- "audio_pll1_out", "video_pll1_out", };
++ "audio_pll1_out", "video_pll_out", };
+
+ static const char * const imx8mn_audio_ahb_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll2_166m", "sys_pll3_out",
+- "audio_pll1_out", "video_pll1_out", };
++ "audio_pll1_out", "video_pll_out", };
+
+ static const char * const imx8mn_dram_alt_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll1_100m",
+ "sys_pll2_500m", "sys_pll2_1000m", "sys_pll3_out",
+@@ -103,49 +103,49 @@ static const char * const imx8mn_dram_apb_sels[] = {"osc_24m", "sys_pll2_200m",
+ "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_250m", "audio_pll2_out", };
+
+-static const char * const imx8mn_disp_pixel_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out",
++static const char * const imx8mn_disp_pixel_sels[] = {"osc_24m", "video_pll_out", "audio_pll2_out",
+ "audio_pll1_out", "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll3_out", "clk_ext4", };
+
+ static const char * const imx8mn_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+- "clk_ext3", "clk_ext4", };
++ "video_pll_out", "sys_pll1_133m", "dummy",
++ "clk_ext2", "clk_ext3", };
+
+ static const char * const imx8mn_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++ "video_pll_out", "sys_pll1_133m", "dummy",
+ "clk_ext3", "clk_ext4", };
+
+ static const char * const imx8mn_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++ "video_pll_out", "sys_pll1_133m", "dummy",
+ "clk_ext2", "clk_ext3", };
+
+ static const char * const imx8mn_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++ "video_pll_out", "sys_pll1_133m", "dummy",
+ "clk_ext3", "clk_ext4", };
+
+ static const char * const imx8mn_sai7_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++ "video_pll_out", "sys_pll1_133m", "dummy",
+ "clk_ext3", "clk_ext4", };
+
+ static const char * const imx8mn_spdif1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++ "video_pll_out", "sys_pll1_133m", "dummy",
+ "clk_ext2", "clk_ext3", };
+
+ static const char * const imx8mn_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m",
+ "sys_pll2_100m", "sys_pll1_160m", "audio_pll1_out",
+- "video_pll1_out", "clk_ext4", };
++ "video_pll_out", "clk_ext4", };
+
+ static const char * const imx8mn_enet_timer_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out",
+ "clk_ext1", "clk_ext2", "clk_ext3",
+- "clk_ext4", "video_pll1_out", };
++ "clk_ext4", "video_pll_out", };
+
+ static const char * const imx8mn_enet_phy_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll2_125m",
+- "sys_pll2_200m", "sys_pll2_500m", "video_pll1_out",
+- "audio_pll2_out", };
++ "sys_pll2_200m", "sys_pll2_500m", "audio_pll1_out",
++ "video_pll_out", "audio_pll2_out", };
+
+ static const char * const imx8mn_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out",
+ "sys_pll1_400m", "audio_pll2_out", "sys_pll3_out",
+- "sys_pll2_250m", "video_pll1_out", };
++ "sys_pll2_250m", "video_pll_out", };
+
+ static const char * const imx8mn_qspi_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll2_333m",
+ "sys_pll2_500m", "audio_pll2_out", "sys_pll1_266m",
+@@ -160,19 +160,19 @@ static const char * const imx8mn_usdhc2_sels[] = {"osc_24m", "sys_pll1_400m", "s
+ "audio_pll2_out", "sys_pll1_100m", };
+
+ static const char * const imx8mn_i2c1_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+- "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
++ "sys_pll3_out", "audio_pll1_out", "video_pll_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+ static const char * const imx8mn_i2c2_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+- "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
++ "sys_pll3_out", "audio_pll1_out", "video_pll_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+ static const char * const imx8mn_i2c3_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+- "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
++ "sys_pll3_out", "audio_pll1_out", "video_pll_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+ static const char * const imx8mn_i2c4_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+- "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
++ "sys_pll3_out", "audio_pll1_out", "video_pll_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+ static const char * const imx8mn_uart1_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+@@ -213,63 +213,63 @@ static const char * const imx8mn_ecspi2_sels[] = {"osc_24m", "sys_pll2_200m", "s
+
+ static const char * const imx8mn_pwm1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ "sys_pll1_40m", "sys_pll3_out", "clk_ext1",
+- "sys_pll1_80m", "video_pll1_out", };
++ "sys_pll1_80m", "video_pll_out", };
+
+ static const char * const imx8mn_pwm2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ "sys_pll1_40m", "sys_pll3_out", "clk_ext1",
+- "sys_pll1_80m", "video_pll1_out", };
++ "sys_pll1_80m", "video_pll_out", };
+
+ static const char * const imx8mn_pwm3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ "sys_pll1_40m", "sys_pll3_out", "clk_ext2",
+- "sys_pll1_80m", "video_pll1_out", };
++ "sys_pll1_80m", "video_pll_out", };
+
+ static const char * const imx8mn_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ "sys_pll1_40m", "sys_pll3_out", "clk_ext2",
+- "sys_pll1_80m", "video_pll1_out", };
++ "sys_pll1_80m", "video_pll_out", };
+
+ static const char * const imx8mn_gpt1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+- "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
++ "sys_pll1_40m", "video_pll_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext1", };
+
+ static const char * const imx8mn_gpt2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+- "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
++ "sys_pll1_40m", "video_pll_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext1", };
+
+ static const char * const imx8mn_gpt3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+- "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
++ "sys_pll1_40m", "video_pll_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext1", };
+
+ static const char * const imx8mn_gpt4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+- "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
++ "sys_pll1_40m", "video_pll_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext1", };
+
+ static const char * const imx8mn_gpt5_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+- "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
++ "sys_pll1_40m", "video_pll_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext1", };
+
+ static const char * const imx8mn_gpt6_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+- "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
++ "sys_pll1_40m", "video_pll_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext1", };
+
+ static const char * const imx8mn_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m",
+- "vpu_pll_out", "sys_pll2_125m", "sys_pll3_out",
++ "m7_alt_pll_out", "sys_pll2_125m", "sys_pll3_out",
+ "sys_pll1_80m", "sys_pll2_166m", };
+
+-static const char * const imx8mn_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "vpu_pll_out",
++static const char * const imx8mn_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "m7_alt_pll_out",
+ "sys_pll3_out", "sys_pll2_200m", "sys_pll1_266m",
+ "sys_pll2_500m", "sys_pll1_100m", };
+
+ static const char * const imx8mn_dsi_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+- "audio_pll2_out", "video_pll1_out", };
++ "audio_pll2_out", "video_pll_out", };
+
+ static const char * const imx8mn_dsi_phy_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_100m",
+ "sys_pll1_800m", "sys_pll2_1000m", "clk_ext2",
+- "audio_pll2_out", "video_pll1_out", };
++ "audio_pll2_out", "video_pll_out", };
+
+ static const char * const imx8mn_dsi_dbi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_100m",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+- "audio_pll2_out", "video_pll1_out", };
++ "audio_pll2_out", "video_pll_out", };
+
+ static const char * const imx8mn_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+ "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m",
+@@ -277,15 +277,15 @@ static const char * const imx8mn_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "s
+
+ static const char * const imx8mn_camera_pixel_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+- "audio_pll2_out", "video_pll1_out", };
++ "audio_pll2_out", "video_pll_out", };
+
+ static const char * const imx8mn_csi1_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m",
+ "sys_pll1_800m", "sys_pll2_1000m", "clk_ext2",
+- "audio_pll2_out", "video_pll1_out", };
++ "audio_pll2_out", "video_pll_out", };
+
+ static const char * const imx8mn_csi2_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m",
+ "sys_pll1_800m", "sys_pll2_1000m", "clk_ext2",
+- "audio_pll2_out", "video_pll1_out", };
++ "audio_pll2_out", "video_pll_out", };
+
+ static const char * const imx8mn_csi2_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+@@ -306,9 +306,9 @@ static const char * const imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "du
+ "dummy", "sys_pll1_80m", };
+ static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m",
+ "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out",
+- "video_pll1_out", "osc_32k", };
++ "video_pll_out", "osc_32k", };
+
+-static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out",
++static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll_out",
+ "dummy", "dummy", "gpu_pll_out", "dummy",
+ "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3",
+ "dummy", "dummy", "osc_24m", "dummy", "osc_32k"};
+@@ -349,19 +349,19 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
+
+ hws[IMX8MN_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MN_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+- hws[IMX8MN_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
++ hws[IMX8MN_VIDEO_PLL_REF_SEL] = imx_clk_hw_mux("video_pll_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MN_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+- hws[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
++ hws[IMX8MN_M7_ALT_PLL_REF_SEL] = imx_clk_hw_mux("m7_alt_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
+ hws[IMX8MN_AUDIO_PLL1] = imx_clk_hw_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
+ hws[IMX8MN_AUDIO_PLL2] = imx_clk_hw_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
+- hws[IMX8MN_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
++ hws[IMX8MN_VIDEO_PLL] = imx_clk_hw_pll14xx("video_pll", "video_pll_ref_sel", base + 0x28, &imx_1443x_pll);
+ hws[IMX8MN_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_dram_pll);
+ hws[IMX8MN_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
+- hws[IMX8MN_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
++ hws[IMX8MN_M7_ALT_PLL] = imx_clk_hw_pll14xx("m7_alt_pll", "m7_alt_pll_ref_sel", base + 0x74, &imx_1416x_pll);
+ hws[IMX8MN_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
+ hws[IMX8MN_SYS_PLL1] = imx_clk_hw_fixed("sys_pll1", 800000000);
+ hws[IMX8MN_SYS_PLL2] = imx_clk_hw_fixed("sys_pll2", 1000000000);
+@@ -370,20 +370,20 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
+ /* PLL bypass out */
+ hws[IMX8MN_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MN_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+- hws[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
++ hws[IMX8MN_VIDEO_PLL_BYPASS] = imx_clk_hw_mux_flags("video_pll_bypass", base + 0x28, 16, 1, video_pll_bypass_sels, ARRAY_SIZE(video_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MN_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+- hws[IMX8MN_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
++ hws[IMX8MN_M7_ALT_PLL_BYPASS] = imx_clk_hw_mux_flags("m7_alt_pll_bypass", base + 0x74, 28, 1, m7_alt_pll_bypass_sels, ARRAY_SIZE(m7_alt_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MN_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+
+ /* PLL out gate */
+ hws[IMX8MN_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
+ hws[IMX8MN_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
+- hws[IMX8MN_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
++ hws[IMX8MN_VIDEO_PLL_OUT] = imx_clk_hw_gate("video_pll_out", "video_pll_bypass", base + 0x28, 13);
+ hws[IMX8MN_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
+ hws[IMX8MN_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
+- hws[IMX8MN_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
++ hws[IMX8MN_M7_ALT_PLL_OUT] = imx_clk_hw_gate("m7_alt_pll_out", "m7_alt_pll_bypass", base + 0x74, 11);
+ hws[IMX8MN_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
+ hws[IMX8MN_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
+
+diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
+index 652ae58c2735f..5d68d975b4eb1 100644
+--- a/drivers/clk/imx/clk-imx8mp.c
++++ b/drivers/clk/imx/clk-imx8mp.c
+@@ -17,6 +17,7 @@
+
+ static u32 share_count_nand;
+ static u32 share_count_media;
++static u32 share_count_usb;
+
+ static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
+ static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
+@@ -673,7 +674,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
+ hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0);
+ hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0);
+ hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0);
+- hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0);
++ hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate2_shared2("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0, &share_count_usb);
++ hws[IMX8MP_CLK_USB_SUSP] = imx_clk_hw_gate2_shared2("usb_suspend_clk", "osc_32k", ccm_base + 0x44d0, 0, &share_count_usb);
+ hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0);
+ hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0);
+ hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0);
+diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
+index 99cff1fd108b5..02d6a9894521d 100644
+--- a/drivers/clk/imx/clk-imx93.c
++++ b/drivers/clk/imx/clk-imx93.c
+@@ -170,7 +170,7 @@ static const struct imx93_clk_ccgr {
+ { IMX93_CLK_MU2_B_GATE, "mu2_b", "bus_wakeup_root", 0x8500, 0, &share_count_mub },
+ { IMX93_CLK_EDMA1_GATE, "edma1", "m33_root", 0x8540, },
+ { IMX93_CLK_EDMA2_GATE, "edma2", "wakeup_axi_root", 0x8580, },
+- { IMX93_CLK_FLEXSPI1_GATE, "flexspi", "flexspi_root", 0x8640, },
++ { IMX93_CLK_FLEXSPI1_GATE, "flexspi1", "flexspi1_root", 0x8640, },
+ { IMX93_CLK_GPIO1_GATE, "gpio1", "m33_root", 0x8880, },
+ { IMX93_CLK_GPIO2_GATE, "gpio2", "bus_wakeup_root", 0x88c0, },
+ { IMX93_CLK_GPIO3_GATE, "gpio3", "bus_wakeup_root", 0x8900, },
+@@ -240,7 +240,7 @@ static const struct imx93_clk_ccgr {
+ { IMX93_CLK_AUD_XCVR_GATE, "aud_xcvr", "audio_xcvr_root", 0x9b80, },
+ { IMX93_CLK_SPDIF_GATE, "spdif", "spdif_root", 0x9c00, },
+ { IMX93_CLK_HSIO_32K_GATE, "hsio_32k", "osc_32k", 0x9dc0, },
+- { IMX93_CLK_ENET1_GATE, "enet1", "enet_root", 0x9e00, },
++ { IMX93_CLK_ENET1_GATE, "enet1", "wakeup_axi_root", 0x9e00, },
+ { IMX93_CLK_ENET_QOS_GATE, "enet_qos", "wakeup_axi_root", 0x9e40, },
+ { IMX93_CLK_SYS_CNT_GATE, "sys_cnt", "osc_24m", 0x9e80, },
+ { IMX93_CLK_TSTMR1_GATE, "tstmr1", "bus_aon_root", 0x9ec0, },
+@@ -258,7 +258,7 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ struct device_node *np = dev->of_node;
+ const struct imx93_clk_root *root;
+ const struct imx93_clk_ccgr *ccgr;
+- void __iomem *base = NULL;
++ void __iomem *base, *anatop_base;
+ int i, ret;
+
+ clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+@@ -285,20 +285,22 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ "sys_pll_pfd2", 1, 2);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx93-anatop");
+- base = of_iomap(np, 0);
++ anatop_base = of_iomap(np, 0);
+ of_node_put(np);
+- if (WARN_ON(!base))
++ if (WARN_ON(!anatop_base))
+ return -ENOMEM;
+
+- clks[IMX93_CLK_AUDIO_PLL] = imx_clk_fracn_gppll("audio_pll", "osc_24m", base + 0x1200,
++ clks[IMX93_CLK_AUDIO_PLL] = imx_clk_fracn_gppll("audio_pll", "osc_24m", anatop_base + 0x1200,
+ &imx_fracn_gppll);
+- clks[IMX93_CLK_VIDEO_PLL] = imx_clk_fracn_gppll("video_pll", "osc_24m", base + 0x1400,
++ clks[IMX93_CLK_VIDEO_PLL] = imx_clk_fracn_gppll("video_pll", "osc_24m", anatop_base + 0x1400,
+ &imx_fracn_gppll);
+
+ np = dev->of_node;
+ base = devm_platform_ioremap_resource(pdev, 0);
+- if (WARN_ON(IS_ERR(base)))
++ if (WARN_ON(IS_ERR(base))) {
++ iounmap(anatop_base);
+ return PTR_ERR(base);
++ }
+
+ for (i = 0; i < ARRAY_SIZE(root_array); i++) {
+ root = &root_array[i];
+@@ -327,6 +329,7 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+
+ unregister_hws:
+ imx_unregister_hw_clocks(clks, IMX93_CLK_END);
++ iounmap(anatop_base);
+
+ return ret;
+ }
+diff --git a/drivers/clk/imx/clk-imxrt1050.c b/drivers/clk/imx/clk-imxrt1050.c
+index 9539d35588ee9..26108e9f7e67a 100644
+--- a/drivers/clk/imx/clk-imxrt1050.c
++++ b/drivers/clk/imx/clk-imxrt1050.c
+@@ -140,7 +140,7 @@ static int imxrt1050_clocks_probe(struct platform_device *pdev)
+ hws[IMXRT1050_CLK_USDHC1] = imx_clk_hw_gate2("usdhc1", "usdhc1_podf", ccm_base + 0x80, 2);
+ hws[IMXRT1050_CLK_USDHC2] = imx_clk_hw_gate2("usdhc2", "usdhc2_podf", ccm_base + 0x80, 4);
+ hws[IMXRT1050_CLK_LPUART1] = imx_clk_hw_gate2("lpuart1", "lpuart_podf", ccm_base + 0x7c, 24);
+- hws[IMXRT1050_CLK_LCDIF_APB] = imx_clk_hw_gate2("lcdif", "lcdif_podf", ccm_base + 0x74, 10);
++ hws[IMXRT1050_CLK_LCDIF_APB] = imx_clk_hw_gate2("lcdif", "lcdif_podf", ccm_base + 0x70, 28);
+ hws[IMXRT1050_CLK_DMA] = imx_clk_hw_gate("dma", "ipg", ccm_base + 0x7C, 6);
+ hws[IMXRT1050_CLK_DMA_MUX] = imx_clk_hw_gate("dmamux0", "ipg", ccm_base + 0x7C, 7);
+ imx_check_clk_hws(hws, IMXRT1050_CLK_END);
+diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c
+index d90727a53283c..49666047bf0ed 100644
+--- a/drivers/clk/mediatek/clk-mt7986-infracfg.c
++++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c
+@@ -153,7 +153,7 @@ static const struct mtk_gate infra_clks[] = {
+ 18),
+ GATE_INFRA1(CLK_INFRA_MSDC_66M_CK, "infra_msdc_66m", "infra_sysaxi_d2",
+ 19),
+- GATE_INFRA1(CLK_INFRA_ADC_26M_CK, "infra_adc_26m", "csw_f26m_sel", 20),
++ GATE_INFRA1(CLK_INFRA_ADC_26M_CK, "infra_adc_26m", "infra_adc_frc", 20),
+ GATE_INFRA1(CLK_INFRA_ADC_FRC_CK, "infra_adc_frc", "csw_f26m_sel", 21),
+ GATE_INFRA1(CLK_INFRA_FBIST2FPC_CK, "infra_fbist2fpc", "nfi1x_sel", 23),
+ /* INFRA2 */
+diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c
+index 7be028dced63d..32aae880a14f3 100644
+--- a/drivers/clk/microchip/clk-mpfs-ccc.c
++++ b/drivers/clk/microchip/clk-mpfs-ccc.c
+@@ -166,6 +166,9 @@ static int mpfs_ccc_register_outputs(struct device *dev, struct mpfs_ccc_out_hw_
+ struct mpfs_ccc_out_hw_clock *out_hw = &out_hws[i];
+ char *name = devm_kzalloc(dev, 23, GFP_KERNEL);
+
++ if (!name)
++ return -ENOMEM;
++
+ snprintf(name, 23, "%s_out%u", parent->name, i);
+ out_hw->divider.hw.init = CLK_HW_INIT_HW(name, &parent->hw, &clk_divider_ops, 0);
+ out_hw->divider.reg = data->pll_base[i / MPFS_CCC_OUTPUTS_PER_PLL] +
+@@ -200,6 +203,9 @@ static int mpfs_ccc_register_plls(struct device *dev, struct mpfs_ccc_pll_hw_clo
+ struct mpfs_ccc_pll_hw_clock *pll_hw = &pll_hws[i];
+ char *name = devm_kzalloc(dev, 18, GFP_KERNEL);
+
++ if (!name)
++ return -ENOMEM;
++
+ pll_hw->base = data->pll_base[i];
+ snprintf(name, 18, "ccc%s_pll%u", strchrnul(dev->of_node->full_name, '@'), i);
+ pll_hw->name = (const char *)name;
+diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c
+index 45da736bd5f4c..293a9dfa7151a 100644
+--- a/drivers/clk/qcom/clk-krait.c
++++ b/drivers/clk/qcom/clk-krait.c
+@@ -114,6 +114,8 @@ static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate,
+
+ if (d->lpl)
+ mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift;
++ else
++ mask <<= d->shift;
+
+ spin_lock_irqsave(&krait_clock_reg_lock, flags);
+ val = krait_get_l2_indirect_reg(d->offset);
+diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
+index 0c3c2e26ede90..ea6f54ed846ec 100644
+--- a/drivers/clk/qcom/dispcc-sm6350.c
++++ b/drivers/clk/qcom/dispcc-sm6350.c
+@@ -306,7 +306,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+- .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
++ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_pixel_ops,
+ },
+ };
+@@ -385,7 +385,7 @@ static struct clk_branch disp_cc_mdss_byte0_clk = {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
++ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_branch2_ops,
+ },
+ },
+diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
+index 718de17a1e600..6447f3e81b555 100644
+--- a/drivers/clk/qcom/gcc-ipq806x.c
++++ b/drivers/clk/qcom/gcc-ipq806x.c
+@@ -79,7 +79,9 @@ static struct clk_regmap pll4_vote = {
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "pll4_vote",
+- .parent_names = (const char *[]){ "pll4" },
++ .parent_data = &(const struct clk_parent_data){
++ .fw_name = "pll4", .name = "pll4",
++ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
+index 9755ef4888c19..a0ba37656b07b 100644
+--- a/drivers/clk/qcom/gcc-sm8250.c
++++ b/drivers/clk/qcom/gcc-sm8250.c
+@@ -3267,7 +3267,7 @@ static struct gdsc usb30_prim_gdsc = {
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+- .pwrsts = PWRSTS_OFF_ON,
++ .pwrsts = PWRSTS_RET_ON,
+ };
+
+ static struct gdsc usb30_sec_gdsc = {
+@@ -3275,7 +3275,7 @@ static struct gdsc usb30_sec_gdsc = {
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+- .pwrsts = PWRSTS_OFF_ON,
++ .pwrsts = PWRSTS_RET_ON,
+ };
+
+ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
+index 063e0365f3119..1339f9211a149 100644
+--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
++++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
+@@ -722,33 +722,17 @@ static const struct of_device_id lpass_audio_cc_sc7280_match_table[] = {
+ };
+ MODULE_DEVICE_TABLE(of, lpass_audio_cc_sc7280_match_table);
+
+-static void lpassaudio_pm_runtime_disable(void *data)
+-{
+- pm_runtime_disable(data);
+-}
+-
+-static void lpassaudio_pm_clk_destroy(void *data)
+-{
+- pm_clk_destroy(data);
+-}
+-
+-static int lpassaudio_create_pm_clks(struct platform_device *pdev)
++static int lpass_audio_setup_runtime_pm(struct platform_device *pdev)
+ {
+ int ret;
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+- pm_runtime_enable(&pdev->dev);
+-
+- ret = devm_add_action_or_reset(&pdev->dev, lpassaudio_pm_runtime_disable, &pdev->dev);
+- if (ret)
+- return ret;
+-
+- ret = pm_clk_create(&pdev->dev);
++ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+- ret = devm_add_action_or_reset(&pdev->dev, lpassaudio_pm_clk_destroy, &pdev->dev);
++ ret = devm_pm_clk_create(&pdev->dev);
+ if (ret)
+ return ret;
+
+@@ -756,7 +740,7 @@ static int lpassaudio_create_pm_clks(struct platform_device *pdev)
+ if (ret < 0)
+ dev_err(&pdev->dev, "failed to acquire iface clock\n");
+
+- return ret;
++ return pm_runtime_resume_and_get(&pdev->dev);
+ }
+
+ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
+@@ -765,7 +749,7 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
+ struct regmap *regmap;
+ int ret;
+
+- ret = lpassaudio_create_pm_clks(pdev);
++ ret = lpass_audio_setup_runtime_pm(pdev);
+ if (ret)
+ return ret;
+
+@@ -775,8 +759,8 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
+
+ regmap = qcom_cc_map(pdev, desc);
+ if (IS_ERR(regmap)) {
+- pm_runtime_disable(&pdev->dev);
+- return PTR_ERR(regmap);
++ ret = PTR_ERR(regmap);
++ goto exit;
+ }
+
+ clk_zonda_pll_configure(&lpass_audio_cc_pll, regmap, &lpass_audio_cc_pll_config);
+@@ -788,20 +772,18 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
+ ret = qcom_cc_really_probe(pdev, &lpass_audio_cc_sc7280_desc, regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC clocks\n");
+- pm_runtime_disable(&pdev->dev);
+- return ret;
++ goto exit;
+ }
+
+ ret = qcom_cc_probe_by_index(pdev, 1, &lpass_audio_cc_reset_sc7280_desc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC Resets\n");
+- pm_runtime_disable(&pdev->dev);
+- return ret;
++ goto exit;
+ }
+
+ pm_runtime_mark_last_busy(&pdev->dev);
++exit:
+ pm_runtime_put_autosuspend(&pdev->dev);
+- pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+ }
+@@ -839,14 +821,15 @@ static int lpass_aon_cc_sc7280_probe(struct platform_device *pdev)
+ struct regmap *regmap;
+ int ret;
+
+- ret = lpassaudio_create_pm_clks(pdev);
++ ret = lpass_audio_setup_runtime_pm(pdev);
+ if (ret)
+ return ret;
+
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,adsp-pil-mode")) {
+ lpass_audio_cc_sc7280_regmap_config.name = "cc";
+ desc = &lpass_cc_sc7280_desc;
+- return qcom_cc_probe(pdev, desc);
++ ret = qcom_cc_probe(pdev, desc);
++ goto exit;
+ }
+
+ lpass_audio_cc_sc7280_regmap_config.name = "lpasscc_aon";
+@@ -854,18 +837,22 @@ static int lpass_aon_cc_sc7280_probe(struct platform_device *pdev)
+ desc = &lpass_aon_cc_sc7280_desc;
+
+ regmap = qcom_cc_map(pdev, desc);
+- if (IS_ERR(regmap))
+- return PTR_ERR(regmap);
++ if (IS_ERR(regmap)) {
++ ret = PTR_ERR(regmap);
++ goto exit;
++ }
+
+ clk_lucid_pll_configure(&lpass_aon_cc_pll, regmap, &lpass_aon_cc_pll_config);
+
+ ret = qcom_cc_really_probe(pdev, &lpass_aon_cc_sc7280_desc, regmap);
+- if (ret)
++ if (ret) {
+ dev_err(&pdev->dev, "Failed to register LPASS AON CC clocks\n");
++ goto exit;
++ }
+
+ pm_runtime_mark_last_busy(&pdev->dev);
++exit:
+ pm_runtime_put_autosuspend(&pdev->dev);
+- pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+ }
+diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
+index ac09b7b840aba..a5731994cbed1 100644
+--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
++++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
+@@ -356,7 +356,7 @@ static const struct qcom_cc_desc lpass_audio_hm_sc7180_desc = {
+ .num_gdscs = ARRAY_SIZE(lpass_audio_hm_sc7180_gdscs),
+ };
+
+-static int lpass_create_pm_clks(struct platform_device *pdev)
++static int lpass_setup_runtime_pm(struct platform_device *pdev)
+ {
+ int ret;
+
+@@ -375,7 +375,7 @@ static int lpass_create_pm_clks(struct platform_device *pdev)
+ if (ret < 0)
+ dev_err(&pdev->dev, "failed to acquire iface clock\n");
+
+- return ret;
++ return pm_runtime_resume_and_get(&pdev->dev);
+ }
+
+ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
+@@ -384,7 +384,7 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
+ struct regmap *regmap;
+ int ret;
+
+- ret = lpass_create_pm_clks(pdev);
++ ret = lpass_setup_runtime_pm(pdev);
+ if (ret)
+ return ret;
+
+@@ -392,12 +392,14 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
+ desc = &lpass_audio_hm_sc7180_desc;
+ ret = qcom_cc_probe_by_index(pdev, 1, desc);
+ if (ret)
+- return ret;
++ goto exit;
+
+ lpass_core_cc_sc7180_regmap_config.name = "lpass_core_cc";
+ regmap = qcom_cc_map(pdev, &lpass_core_cc_sc7180_desc);
+- if (IS_ERR(regmap))
+- return PTR_ERR(regmap);
++ if (IS_ERR(regmap)) {
++ ret = PTR_ERR(regmap);
++ goto exit;
++ }
+
+ /*
+ * Keep the CLK always-ON
+@@ -415,6 +417,7 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
+ ret = qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap);
+
+ pm_runtime_mark_last_busy(&pdev->dev);
++exit:
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return ret;
+@@ -425,14 +428,19 @@ static int lpass_hm_core_probe(struct platform_device *pdev)
+ const struct qcom_cc_desc *desc;
+ int ret;
+
+- ret = lpass_create_pm_clks(pdev);
++ ret = lpass_setup_runtime_pm(pdev);
+ if (ret)
+ return ret;
+
+ lpass_core_cc_sc7180_regmap_config.name = "lpass_hm_core";
+ desc = &lpass_core_hm_sc7180_desc;
+
+- return qcom_cc_probe_by_index(pdev, 0, desc);
++ ret = qcom_cc_probe_by_index(pdev, 0, desc);
++
++ pm_runtime_mark_last_busy(&pdev->dev);
++ pm_runtime_put_autosuspend(&pdev->dev);
++
++ return ret;
+ }
+
+ static const struct of_device_id lpass_hm_sc7180_match_table[] = {
+diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+index d74d46833012f..e02542ca24a06 100644
+--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+@@ -116,7 +116,7 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
+ DEF_FIXED("cp", R8A779A0_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cl16mck", R8A779A0_CLK_CL16MCK, CLK_PLL1_DIV2, 64, 1),
+
+- DEF_GEN4_SDH("sdh0", R8A779A0_CLK_SD0H, CLK_SDSRC, 0x870),
++ DEF_GEN4_SDH("sd0h", R8A779A0_CLK_SD0H, CLK_SDSRC, 0x870),
+ DEF_GEN4_SD("sd0", R8A779A0_CLK_SD0, R8A779A0_CLK_SD0H, 0x870),
+
+ DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
+diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+index 4baf355e26d88..27b668def357f 100644
+--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+@@ -113,7 +113,7 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
+ DEF_FIXED("sasyncperd2", R8A779F0_CLK_SASYNCPERD2, R8A779F0_CLK_SASYNCPERD1, 2, 1),
+ DEF_FIXED("sasyncperd4", R8A779F0_CLK_SASYNCPERD4, R8A779F0_CLK_SASYNCPERD1, 4, 1),
+
+- DEF_GEN4_SDH("sdh0", R8A779F0_CLK_SD0H, CLK_SDSRC, 0x870),
++ DEF_GEN4_SDH("sd0h", R8A779F0_CLK_SD0H, CLK_SDSRC, 0x870),
+ DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, R8A779F0_CLK_SD0H, 0x870),
+
+ DEF_BASE("rpc", R8A779F0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
+@@ -126,10 +126,10 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
+ };
+
+ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
+- DEF_MOD("hscif0", 514, R8A779F0_CLK_S0D3),
+- DEF_MOD("hscif1", 515, R8A779F0_CLK_S0D3),
+- DEF_MOD("hscif2", 516, R8A779F0_CLK_S0D3),
+- DEF_MOD("hscif3", 517, R8A779F0_CLK_S0D3),
++ DEF_MOD("hscif0", 514, R8A779F0_CLK_SASYNCPERD1),
++ DEF_MOD("hscif1", 515, R8A779F0_CLK_SASYNCPERD1),
++ DEF_MOD("hscif2", 516, R8A779F0_CLK_SASYNCPERD1),
++ DEF_MOD("hscif3", 517, R8A779F0_CLK_SASYNCPERD1),
+ DEF_MOD("i2c0", 518, R8A779F0_CLK_S0D6_PER),
+ DEF_MOD("i2c1", 519, R8A779F0_CLK_S0D6_PER),
+ DEF_MOD("i2c2", 520, R8A779F0_CLK_S0D6_PER),
+@@ -142,10 +142,10 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
+ DEF_MOD("msiof3", 621, R8A779F0_CLK_MSO),
+ DEF_MOD("pcie0", 624, R8A779F0_CLK_S0D2),
+ DEF_MOD("pcie1", 625, R8A779F0_CLK_S0D2),
+- DEF_MOD("scif0", 702, R8A779F0_CLK_S0D12_PER),
+- DEF_MOD("scif1", 703, R8A779F0_CLK_S0D12_PER),
+- DEF_MOD("scif3", 704, R8A779F0_CLK_S0D12_PER),
+- DEF_MOD("scif4", 705, R8A779F0_CLK_S0D12_PER),
++ DEF_MOD("scif0", 702, R8A779F0_CLK_SASYNCPERD4),
++ DEF_MOD("scif1", 703, R8A779F0_CLK_SASYNCPERD4),
++ DEF_MOD("scif3", 704, R8A779F0_CLK_SASYNCPERD4),
++ DEF_MOD("scif4", 705, R8A779F0_CLK_SASYNCPERD4),
+ DEF_MOD("sdhi0", 706, R8A779F0_CLK_SD0),
+ DEF_MOD("sys-dmac0", 709, R8A779F0_CLK_S0D3_PER),
+ DEF_MOD("sys-dmac1", 710, R8A779F0_CLK_S0D3_PER),
+diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
+index 1488c9d6e6394..983faa5707b9c 100644
+--- a/drivers/clk/renesas/r9a06g032-clocks.c
++++ b/drivers/clk/renesas/r9a06g032-clocks.c
+@@ -412,7 +412,7 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd,
+ int error;
+ int index;
+
+- while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
++ while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i++,
+ &clkspec)) {
+ if (clkspec.np != pd->dev.of_node)
+ continue;
+@@ -425,7 +425,6 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd,
+ if (error)
+ return error;
+ }
+- i++;
+ }
+
+ return 0;
+diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
+index f7827b3b7fc1c..6e5e502be44a6 100644
+--- a/drivers/clk/rockchip/clk-pll.c
++++ b/drivers/clk/rockchip/clk-pll.c
+@@ -981,6 +981,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
+ return mux_clk;
+
+ err_pll:
++ kfree(pll->rate_table);
+ clk_unregister(mux_clk);
+ mux_clk = pll_clk;
+ err_mux:
+diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
+index fe383471c5f0a..0ff28938943f0 100644
+--- a/drivers/clk/samsung/clk-pll.c
++++ b/drivers/clk/samsung/clk-pll.c
+@@ -1583,6 +1583,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
+ if (ret) {
+ pr_err("%s: failed to register pll clock %s : %d\n",
+ __func__, pll_clk->name, ret);
++ kfree(pll->rate_table);
+ kfree(pll);
+ return;
+ }
+diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
+index 53d6e3ec4309f..c94b59b80dd43 100644
+--- a/drivers/clk/socfpga/clk-gate.c
++++ b/drivers/clk/socfpga/clk-gate.c
+@@ -188,8 +188,10 @@ void __init socfpga_gate_init(struct device_node *node)
+ return;
+
+ ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL);
+- if (WARN_ON(!ops))
++ if (WARN_ON(!ops)) {
++ kfree(socfpga_clk);
+ return;
++ }
+
+ rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
+ if (rc)
+@@ -243,6 +245,7 @@ void __init socfpga_gate_init(struct device_node *node)
+
+ err = clk_hw_register(NULL, hw_clk);
+ if (err) {
++ kfree(ops);
+ kfree(socfpga_clk);
+ return;
+ }
+diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
+index d820292a381d0..40df1db102a77 100644
+--- a/drivers/clk/st/clkgen-fsyn.c
++++ b/drivers/clk/st/clkgen-fsyn.c
+@@ -1020,9 +1020,10 @@ static void __init st_of_quadfs_setup(struct device_node *np,
+
+ clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, datac->data,
+ reg, lock);
+- if (IS_ERR(clk))
++ if (IS_ERR(clk)) {
++ kfree(lock);
+ goto err_exit;
+- else
++ } else
+ pr_debug("%s: parent %s rate %u\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c
+index a484cb945d67b..1f3234f226674 100644
+--- a/drivers/clk/visconti/pll.c
++++ b/drivers/clk/visconti/pll.c
+@@ -277,6 +277,7 @@ static struct clk_hw *visconti_register_pll(struct visconti_pll_provider *ctx,
+ ret = clk_hw_register(NULL, &pll->hw);
+ if (ret) {
+ pr_err("failed to register pll clock %s : %d\n", name, ret);
++ kfree(pll->rate_table);
+ kfree(pll);
+ pll_hw_clk = ERR_PTR(ret);
+ }
+diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
+index 64dcb082d4cf6..7b952aa52c0b9 100644
+--- a/drivers/clocksource/sh_cmt.c
++++ b/drivers/clocksource/sh_cmt.c
+@@ -13,6 +13,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/ioport.h>
+ #include <linux/irq.h>
+ #include <linux/module.h>
+@@ -116,6 +117,7 @@ struct sh_cmt_device {
+ void __iomem *mapbase;
+ struct clk *clk;
+ unsigned long rate;
++ unsigned int reg_delay;
+
+ raw_spinlock_t lock; /* Protect the shared start/stop register */
+
+@@ -247,10 +249,17 @@ static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
+
+ static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
+ {
+- if (ch->iostart)
+- ch->cmt->info->write_control(ch->iostart, 0, value);
+- else
+- ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
++ u32 old_value = sh_cmt_read_cmstr(ch);
++
++ if (value != old_value) {
++ if (ch->iostart) {
++ ch->cmt->info->write_control(ch->iostart, 0, value);
++ udelay(ch->cmt->reg_delay);
++ } else {
++ ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
++ udelay(ch->cmt->reg_delay);
++ }
++ }
+ }
+
+ static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
+@@ -260,7 +269,12 @@ static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
+
+ static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
+ {
+- ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
++ u32 old_value = sh_cmt_read_cmcsr(ch);
++
++ if (value != old_value) {
++ ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
++ udelay(ch->cmt->reg_delay);
++ }
+ }
+
+ static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
+@@ -268,14 +282,33 @@ static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
+ return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
+ }
+
+-static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
++static inline int sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
+ {
++ /* Tests showed that we need to wait 3 clocks here */
++ unsigned int cmcnt_delay = DIV_ROUND_UP(3 * ch->cmt->reg_delay, 2);
++ u32 reg;
++
++ if (ch->cmt->info->model > SH_CMT_16BIT) {
++ int ret = read_poll_timeout_atomic(sh_cmt_read_cmcsr, reg,
++ !(reg & SH_CMT32_CMCSR_WRFLG),
++ 1, cmcnt_delay, false, ch);
++ if (ret < 0)
++ return ret;
++ }
++
+ ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
++ udelay(cmcnt_delay);
++ return 0;
+ }
+
+ static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
+ {
+- ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
++ u32 old_value = ch->cmt->info->read_count(ch->ioctrl, CMCOR);
++
++ if (value != old_value) {
++ ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
++ udelay(ch->cmt->reg_delay);
++ }
+ }
+
+ static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
+@@ -319,7 +352,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
+
+ static int sh_cmt_enable(struct sh_cmt_channel *ch)
+ {
+- int k, ret;
++ int ret;
+
+ dev_pm_syscore_device(&ch->cmt->pdev->dev, true);
+
+@@ -347,26 +380,9 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
+ }
+
+ sh_cmt_write_cmcor(ch, 0xffffffff);
+- sh_cmt_write_cmcnt(ch, 0);
+-
+- /*
+- * According to the sh73a0 user's manual, as CMCNT can be operated
+- * only by the RCLK (Pseudo 32 kHz), there's one restriction on
+- * modifying CMCNT register; two RCLK cycles are necessary before
+- * this register is either read or any modification of the value
+- * it holds is reflected in the LSI's actual operation.
+- *
+- * While at it, we're supposed to clear out the CMCNT as of this
+- * moment, so make sure it's processed properly here. This will
+- * take RCLKx2 at maximum.
+- */
+- for (k = 0; k < 100; k++) {
+- if (!sh_cmt_read_cmcnt(ch))
+- break;
+- udelay(1);
+- }
++ ret = sh_cmt_write_cmcnt(ch, 0);
+
+- if (sh_cmt_read_cmcnt(ch)) {
++ if (ret || sh_cmt_read_cmcnt(ch)) {
+ dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
+ ch->index);
+ ret = -ETIMEDOUT;
+@@ -995,8 +1011,8 @@ MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
+
+ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
+ {
+- unsigned int mask;
+- unsigned int i;
++ unsigned int mask, i;
++ unsigned long rate;
+ int ret;
+
+ cmt->pdev = pdev;
+@@ -1032,10 +1048,16 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
+ if (ret < 0)
+ goto err_clk_unprepare;
+
+- if (cmt->info->width == 16)
+- cmt->rate = clk_get_rate(cmt->clk) / 512;
+- else
+- cmt->rate = clk_get_rate(cmt->clk) / 8;
++ rate = clk_get_rate(cmt->clk);
++ if (!rate) {
++ ret = -EINVAL;
++ goto err_clk_disable;
++ }
++
++ /* We shall wait 2 input clks after register writes */
++ if (cmt->info->model >= SH_CMT_48BIT)
++ cmt->reg_delay = DIV_ROUND_UP(2UL * USEC_PER_SEC, rate);
++ cmt->rate = rate / (cmt->info->width == 16 ? 512 : 8);
+
+ /* Map the memory resource(s). */
+ ret = sh_cmt_map_memory(cmt);
+diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
+index 2737407ff0698..632523c1232f6 100644
+--- a/drivers/clocksource/timer-ti-dm-systimer.c
++++ b/drivers/clocksource/timer-ti-dm-systimer.c
+@@ -345,8 +345,10 @@ static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t,
+ return error;
+
+ r = clk_get_rate(clock);
+- if (!r)
++ if (!r) {
++ clk_disable_unprepare(clock);
+ return -ENODEV;
++ }
+
+ if (is_ick)
+ t->ick = clock;
+diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
+index cad29ded3a48f..00af1a8e34fbd 100644
+--- a/drivers/clocksource/timer-ti-dm.c
++++ b/drivers/clocksource/timer-ti-dm.c
+@@ -1258,7 +1258,7 @@ static struct platform_driver omap_dm_timer_driver = {
+ .remove = omap_dm_timer_remove,
+ .driver = {
+ .name = "omap_timer",
+- .of_match_table = of_match_ptr(omap_timer_match),
++ .of_match_table = omap_timer_match,
+ .pm = &omap_dm_timer_pm_ops,
+ },
+ };
+diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
+index d6b80b6dfc287..8439755559b21 100644
+--- a/drivers/counter/stm32-lptimer-cnt.c
++++ b/drivers/counter/stm32-lptimer-cnt.c
+@@ -69,7 +69,7 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv,
+
+ /* ensure CMP & ARR registers are properly written */
+ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
+- (val & STM32_LPTIM_CMPOK_ARROK),
++ (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
+ 100, 1000);
+ if (ret)
+ return ret;
+diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
+index 6448e03bcf488..59b19b9975e8c 100644
+--- a/drivers/cpufreq/amd_freq_sensitivity.c
++++ b/drivers/cpufreq/amd_freq_sensitivity.c
+@@ -125,6 +125,8 @@ static int __init amd_freq_sensitivity_init(void)
+ if (!pcidev) {
+ if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+ return -ENODEV;
++ } else {
++ pci_dev_put(pcidev);
+ }
+
+ if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
+index 833589bc95e40..3c623a0bc147f 100644
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
+@@ -125,7 +125,35 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
+ return 0;
+ }
+
++static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
++{
++ unsigned int lval;
++
++ if (data->soc_data->reg_current_vote)
++ lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
++ else
++ lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
++
++ return lval * xo_rate;
++}
++
++/* Get the current frequency of the CPU (after throttling) */
+ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
++{
++ struct qcom_cpufreq_data *data;
++ struct cpufreq_policy *policy;
++
++ policy = cpufreq_cpu_get_raw(cpu);
++ if (!policy)
++ return 0;
++
++ data = policy->driver_data;
++
++ return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
++}
++
++/* Get the frequency requested by the cpufreq core for the CPU */
++static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+ {
+ struct qcom_cpufreq_data *data;
+ const struct qcom_cpufreq_soc_data *soc_data;
+@@ -193,6 +221,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(cpu_dev, "Invalid opp table in device tree\n");
++ kfree(table);
+ return ret;
+ } else {
+ policy->fast_switch_possible = true;
+@@ -286,18 +315,6 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
+ }
+ }
+
+-static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
+-{
+- unsigned int lval;
+-
+- if (data->soc_data->reg_current_vote)
+- lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
+- else
+- lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
+-
+- return lval * xo_rate;
+-}
+-
+ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
+ {
+ struct cpufreq_policy *policy = data->policy;
+@@ -341,7 +358,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
+ * If h/w throttled frequency is higher than what cpufreq has requested
+ * for, then stop polling and switch back to interrupt mechanism.
+ */
+- if (throttled_freq >= qcom_cpufreq_hw_get(cpu))
++ if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
+ enable_irq(data->throttle_irq);
+ else
+ mod_delayed_work(system_highpri_wq, &data->throttle_work,
+diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
+index 252f2a9686a62..448bc796b0b40 100644
+--- a/drivers/cpuidle/dt_idle_states.c
++++ b/drivers/cpuidle/dt_idle_states.c
+@@ -223,6 +223,6 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
+ * also be 0 on platforms with missing DT idle states or legacy DT
+ * configuration predating the DT idle states bindings.
+ */
+- return i;
++ return state_idx - start_idx;
+ }
+ EXPORT_SYMBOL_GPL(dt_init_idle_driver);
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index 55e75fbb658ee..c30b5a39c2ac2 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -669,7 +669,12 @@ config CRYPTO_DEV_IMGTEC_HASH
+ config CRYPTO_DEV_ROCKCHIP
+ tristate "Rockchip's Cryptographic Engine driver"
+ depends on OF && ARCH_ROCKCHIP
++ depends on PM
++ select CRYPTO_ECB
++ select CRYPTO_CBC
++ select CRYPTO_DES
+ select CRYPTO_AES
++ select CRYPTO_ENGINE
+ select CRYPTO_LIB_DES
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+index 910d6751644cf..902f6be057ec6 100644
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+@@ -124,7 +124,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
+ int i = 0;
+- u32 a;
++ dma_addr_t a;
+ int err;
+
+ rctx->ivlen = ivsize;
+diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
+index 6e7ae896717cd..937187027ad57 100644
+--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
++++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
+@@ -237,7 +237,6 @@ static int meson_crypto_probe(struct platform_device *pdev)
+ return err;
+ }
+
+- mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL);
+ for (i = 0; i < MAXFLOW; i++) {
+ mc->irqs[i] = platform_get_irq(pdev, i);
+ if (mc->irqs[i] < 0)
+diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h
+index dc0f142324a3c..8c0746a1d6d43 100644
+--- a/drivers/crypto/amlogic/amlogic-gxl.h
++++ b/drivers/crypto/amlogic/amlogic-gxl.h
+@@ -95,7 +95,7 @@ struct meson_dev {
+ struct device *dev;
+ struct meson_flow *chanlist;
+ atomic_t flow;
+- int *irqs;
++ int irqs[MAXFLOW];
+ #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ struct dentry *dbgfs_dir;
+ #endif
+diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+index 9e7308e39b304..d4e06999af9b7 100644
+--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c
++++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+@@ -195,6 +195,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev)
+ ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
+ if (!ndev->iov.pf2vf_wq) {
+ kfree(ndev->iov.vfdev);
++ ndev->iov.vfdev = NULL;
+ return -ENOMEM;
+ }
+ /* enable pf2vf mailbox interrupts */
+diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
+index 7083767602fcf..8f008f024f8f1 100644
+--- a/drivers/crypto/ccree/cc_debugfs.c
++++ b/drivers/crypto/ccree/cc_debugfs.c
+@@ -55,7 +55,7 @@ void __init cc_debugfs_global_init(void)
+ cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
+ }
+
+-void __exit cc_debugfs_global_fini(void)
++void cc_debugfs_global_fini(void)
+ {
+ debugfs_remove(cc_debugfs_dir);
+ }
+diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
+index cadead18b59e8..d489c6f808925 100644
+--- a/drivers/crypto/ccree/cc_driver.c
++++ b/drivers/crypto/ccree/cc_driver.c
+@@ -651,9 +651,17 @@ static struct platform_driver ccree_driver = {
+
+ static int __init ccree_init(void)
+ {
++ int rc;
++
+ cc_debugfs_global_init();
+
+- return platform_driver_register(&ccree_driver);
++ rc = platform_driver_register(&ccree_driver);
++ if (rc) {
++ cc_debugfs_global_fini();
++ return rc;
++ }
++
++ return 0;
+ }
+ module_init(ccree_init);
+
+diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
+index 471e5ca720f57..baf1faec7046f 100644
+--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
++++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
+@@ -1437,18 +1437,12 @@ err_with_qm_init:
+ static void hpre_remove(struct pci_dev *pdev)
+ {
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+- int ret;
+
+ hisi_qm_pm_uninit(qm);
+ hisi_qm_wait_task_finish(qm, &hpre_devices);
+ hisi_qm_alg_unregister(qm, &hpre_devices);
+- if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
+- ret = hisi_qm_sriov_disable(pdev, true);
+- if (ret) {
+- pci_err(pdev, "Disable SRIOV fail!\n");
+- return;
+- }
+- }
++ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
++ hisi_qm_sriov_disable(pdev, true);
+
+ hpre_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
+index 8b387de69d229..07e1e39a5e378 100644
+--- a/drivers/crypto/hisilicon/qm.c
++++ b/drivers/crypto/hisilicon/qm.c
+@@ -250,7 +250,6 @@
+ #define QM_QOS_MIN_CIR_B 100
+ #define QM_QOS_MAX_CIR_U 6
+ #define QM_QOS_MAX_CIR_S 11
+-#define QM_QOS_VAL_MAX_LEN 32
+ #define QM_DFX_BASE 0x0100000
+ #define QM_DFX_STATE1 0x0104000
+ #define QM_DFX_STATE2 0x01040C8
+@@ -359,7 +358,7 @@ static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
+ static const struct hisi_qm_cap_info qm_basic_info[] = {
+ {QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400},
+ {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400},
+- {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(15, 0), 0x800, 0x4000800, 0x4000800},
++ {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800},
+ {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
+ {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000},
+ {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001},
+@@ -909,8 +908,8 @@ static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
+ u32 depth;
+
+ depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
+- *high_bits = depth & QM_XQ_DEPTH_MASK;
+- *low_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
++ *low_bits = depth & QM_XQ_DEPTH_MASK;
++ *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
+ }
+
+ static u32 qm_get_irq_num(struct hisi_qm *qm)
+@@ -4614,7 +4613,7 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
+ unsigned int *fun_index)
+ {
+ char tbuf_bdf[QM_DBG_READ_LEN] = {0};
+- char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
++ char val_buf[QM_DBG_READ_LEN] = {0};
+ u32 tmp1, device, function;
+ int ret, bus;
+
+@@ -5725,6 +5724,7 @@ static void qm_pf_reset_vf_done(struct hisi_qm *qm)
+ cmd = QM_VF_START_FAIL;
+ }
+
++ qm_cmd_init(qm);
+ ret = qm_ping_pf(qm, cmd);
+ if (ret)
+ dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");
+@@ -5786,7 +5786,6 @@ static void qm_pf_reset_vf_process(struct hisi_qm *qm,
+ goto err_get_status;
+
+ qm_pf_reset_vf_done(qm);
+- qm_cmd_init(qm);
+
+ dev_info(dev, "device reset done.\n");
+
+diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
+index d8e82d69745d8..9629e98bd68b7 100644
+--- a/drivers/crypto/img-hash.c
++++ b/drivers/crypto/img-hash.c
+@@ -358,12 +358,16 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
+ static void img_hash_dma_task(unsigned long d)
+ {
+ struct img_hash_dev *hdev = (struct img_hash_dev *)d;
+- struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
++ struct img_hash_request_ctx *ctx;
+ u8 *addr;
+ size_t nbytes, bleft, wsend, len, tbc;
+ struct scatterlist tsg;
+
+- if (!hdev->req || !ctx->sg)
++ if (!hdev->req)
++ return;
++
++ ctx = ahash_request_ctx(hdev->req);
++ if (!ctx->sg)
+ return;
+
+ addr = sg_virt(ctx->sg);
+diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
+index 655a7f5a406a1..cbeda59c6b191 100644
+--- a/drivers/crypto/omap-sham.c
++++ b/drivers/crypto/omap-sham.c
+@@ -2114,7 +2114,7 @@ static int omap_sham_probe(struct platform_device *pdev)
+
+ pm_runtime_enable(dev);
+
+- err = pm_runtime_get_sync(dev);
++ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get sync: %d\n", err);
+ goto err_pm;
+diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
+index 2f212561acc47..670a58b25cb16 100644
+--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
++++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c
+@@ -261,6 +261,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+ if (!hw_data->accel_capabilities_mask) {
+ dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
++ ret = -EINVAL;
+ goto out_err;
+ }
+
+diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
+index 35d73061d1569..14a0aef18ab13 100644
+--- a/drivers/crypto/rockchip/rk3288_crypto.c
++++ b/drivers/crypto/rockchip/rk3288_crypto.c
+@@ -65,186 +65,24 @@ static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
+ clk_disable_unprepare(dev->sclk);
+ }
+
+-static int check_alignment(struct scatterlist *sg_src,
+- struct scatterlist *sg_dst,
+- int align_mask)
+-{
+- int in, out, align;
+-
+- in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
+- IS_ALIGNED((uint32_t)sg_src->length, align_mask);
+- if (!sg_dst)
+- return in;
+- out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
+- IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
+- align = in && out;
+-
+- return (align && (sg_src->length == sg_dst->length));
+-}
+-
+-static int rk_load_data(struct rk_crypto_info *dev,
+- struct scatterlist *sg_src,
+- struct scatterlist *sg_dst)
+-{
+- unsigned int count;
+-
+- dev->aligned = dev->aligned ?
+- check_alignment(sg_src, sg_dst, dev->align_size) :
+- dev->aligned;
+- if (dev->aligned) {
+- count = min(dev->left_bytes, sg_src->length);
+- dev->left_bytes -= count;
+-
+- if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
+- dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
+- __func__, __LINE__);
+- return -EINVAL;
+- }
+- dev->addr_in = sg_dma_address(sg_src);
+-
+- if (sg_dst) {
+- if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
+- dev_err(dev->dev,
+- "[%s:%d] dma_map_sg(dst) error\n",
+- __func__, __LINE__);
+- dma_unmap_sg(dev->dev, sg_src, 1,
+- DMA_TO_DEVICE);
+- return -EINVAL;
+- }
+- dev->addr_out = sg_dma_address(sg_dst);
+- }
+- } else {
+- count = (dev->left_bytes > PAGE_SIZE) ?
+- PAGE_SIZE : dev->left_bytes;
+-
+- if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
+- dev->addr_vir, count,
+- dev->total - dev->left_bytes)) {
+- dev_err(dev->dev, "[%s:%d] pcopy err\n",
+- __func__, __LINE__);
+- return -EINVAL;
+- }
+- dev->left_bytes -= count;
+- sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
+- if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
+- dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
+- __func__, __LINE__);
+- return -ENOMEM;
+- }
+- dev->addr_in = sg_dma_address(&dev->sg_tmp);
+-
+- if (sg_dst) {
+- if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
+- DMA_FROM_DEVICE)) {
+- dev_err(dev->dev,
+- "[%s:%d] dma_map_sg(sg_tmp) error\n",
+- __func__, __LINE__);
+- dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
+- DMA_TO_DEVICE);
+- return -ENOMEM;
+- }
+- dev->addr_out = sg_dma_address(&dev->sg_tmp);
+- }
+- }
+- dev->count = count;
+- return 0;
+-}
+-
+-static void rk_unload_data(struct rk_crypto_info *dev)
+-{
+- struct scatterlist *sg_in, *sg_out;
+-
+- sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
+- dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);
+-
+- if (dev->sg_dst) {
+- sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
+- dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
+- }
+-}
+-
+ static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
+ {
+ struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
+ u32 interrupt_status;
+
+- spin_lock(&dev->lock);
+ interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
+
++ dev->status = 1;
+ if (interrupt_status & 0x0a) {
+ dev_warn(dev->dev, "DMA Error\n");
+- dev->err = -EFAULT;
++ dev->status = 0;
+ }
+- tasklet_schedule(&dev->done_task);
++ complete(&dev->complete);
+
+- spin_unlock(&dev->lock);
+ return IRQ_HANDLED;
+ }
+
+-static int rk_crypto_enqueue(struct rk_crypto_info *dev,
+- struct crypto_async_request *async_req)
+-{
+- unsigned long flags;
+- int ret;
+-
+- spin_lock_irqsave(&dev->lock, flags);
+- ret = crypto_enqueue_request(&dev->queue, async_req);
+- if (dev->busy) {
+- spin_unlock_irqrestore(&dev->lock, flags);
+- return ret;
+- }
+- dev->busy = true;
+- spin_unlock_irqrestore(&dev->lock, flags);
+- tasklet_schedule(&dev->queue_task);
+-
+- return ret;
+-}
+-
+-static void rk_crypto_queue_task_cb(unsigned long data)
+-{
+- struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
+- struct crypto_async_request *async_req, *backlog;
+- unsigned long flags;
+- int err = 0;
+-
+- dev->err = 0;
+- spin_lock_irqsave(&dev->lock, flags);
+- backlog = crypto_get_backlog(&dev->queue);
+- async_req = crypto_dequeue_request(&dev->queue);
+-
+- if (!async_req) {
+- dev->busy = false;
+- spin_unlock_irqrestore(&dev->lock, flags);
+- return;
+- }
+- spin_unlock_irqrestore(&dev->lock, flags);
+-
+- if (backlog) {
+- backlog->complete(backlog, -EINPROGRESS);
+- backlog = NULL;
+- }
+-
+- dev->async_req = async_req;
+- err = dev->start(dev);
+- if (err)
+- dev->complete(dev->async_req, err);
+-}
+-
+-static void rk_crypto_done_task_cb(unsigned long data)
+-{
+- struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
+-
+- if (dev->err) {
+- dev->complete(dev->async_req, dev->err);
+- return;
+- }
+-
+- dev->err = dev->update(dev);
+- if (dev->err)
+- dev->complete(dev->async_req, dev->err);
+-}
+-
+ static struct rk_crypto_tmp *rk_cipher_algs[] = {
+ &rk_ecb_aes_alg,
+ &rk_cbc_aes_alg,
+@@ -337,8 +175,6 @@ static int rk_crypto_probe(struct platform_device *pdev)
+ if (err)
+ goto err_crypto;
+
+- spin_lock_init(&crypto_info->lock);
+-
+ crypto_info->reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(crypto_info->reg)) {
+ err = PTR_ERR(crypto_info->reg);
+@@ -389,18 +225,11 @@ static int rk_crypto_probe(struct platform_device *pdev)
+ crypto_info->dev = &pdev->dev;
+ platform_set_drvdata(pdev, crypto_info);
+
+- tasklet_init(&crypto_info->queue_task,
+- rk_crypto_queue_task_cb, (unsigned long)crypto_info);
+- tasklet_init(&crypto_info->done_task,
+- rk_crypto_done_task_cb, (unsigned long)crypto_info);
+- crypto_init_queue(&crypto_info->queue, 50);
++ crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true);
++ crypto_engine_start(crypto_info->engine);
++ init_completion(&crypto_info->complete);
+
+- crypto_info->enable_clk = rk_crypto_enable_clk;
+- crypto_info->disable_clk = rk_crypto_disable_clk;
+- crypto_info->load_data = rk_load_data;
+- crypto_info->unload_data = rk_unload_data;
+- crypto_info->enqueue = rk_crypto_enqueue;
+- crypto_info->busy = false;
++ rk_crypto_enable_clk(crypto_info);
+
+ err = rk_crypto_register(crypto_info);
+ if (err) {
+@@ -412,9 +241,9 @@ static int rk_crypto_probe(struct platform_device *pdev)
+ return 0;
+
+ err_register_alg:
+- tasklet_kill(&crypto_info->queue_task);
+- tasklet_kill(&crypto_info->done_task);
++ crypto_engine_exit(crypto_info->engine);
+ err_crypto:
++ dev_err(dev, "Crypto Accelerator not successfully registered\n");
+ return err;
+ }
+
+@@ -423,8 +252,8 @@ static int rk_crypto_remove(struct platform_device *pdev)
+ struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
+
+ rk_crypto_unregister();
+- tasklet_kill(&crypto_tmp->done_task);
+- tasklet_kill(&crypto_tmp->queue_task);
++ rk_crypto_disable_clk(crypto_tmp);
++ crypto_engine_exit(crypto_tmp->engine);
+ return 0;
+ }
+
+diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
+index 97278c2574ff9..045e811b4af84 100644
+--- a/drivers/crypto/rockchip/rk3288_crypto.h
++++ b/drivers/crypto/rockchip/rk3288_crypto.h
+@@ -5,9 +5,11 @@
+ #include <crypto/aes.h>
+ #include <crypto/internal/des.h>
+ #include <crypto/algapi.h>
++#include <linux/dma-mapping.h>
+ #include <linux/interrupt.h>
+ #include <linux/delay.h>
+ #include <linux/scatterlist.h>
++#include <crypto/engine.h>
+ #include <crypto/internal/hash.h>
+ #include <crypto/internal/skcipher.h>
+
+@@ -193,45 +195,15 @@ struct rk_crypto_info {
+ struct reset_control *rst;
+ void __iomem *reg;
+ int irq;
+- struct crypto_queue queue;
+- struct tasklet_struct queue_task;
+- struct tasklet_struct done_task;
+- struct crypto_async_request *async_req;
+- int err;
+- /* device lock */
+- spinlock_t lock;
+-
+- /* the public variable */
+- struct scatterlist *sg_src;
+- struct scatterlist *sg_dst;
+- struct scatterlist sg_tmp;
+- struct scatterlist *first;
+- unsigned int left_bytes;
+- void *addr_vir;
+- int aligned;
+- int align_size;
+- size_t src_nents;
+- size_t dst_nents;
+- unsigned int total;
+- unsigned int count;
+- dma_addr_t addr_in;
+- dma_addr_t addr_out;
+- bool busy;
+- int (*start)(struct rk_crypto_info *dev);
+- int (*update)(struct rk_crypto_info *dev);
+- void (*complete)(struct crypto_async_request *base, int err);
+- int (*enable_clk)(struct rk_crypto_info *dev);
+- void (*disable_clk)(struct rk_crypto_info *dev);
+- int (*load_data)(struct rk_crypto_info *dev,
+- struct scatterlist *sg_src,
+- struct scatterlist *sg_dst);
+- void (*unload_data)(struct rk_crypto_info *dev);
+- int (*enqueue)(struct rk_crypto_info *dev,
+- struct crypto_async_request *async_req);
++
++ struct crypto_engine *engine;
++ struct completion complete;
++ int status;
+ };
+
+ /* the private variable of hash */
+ struct rk_ahash_ctx {
++ struct crypto_engine_ctx enginectx;
+ struct rk_crypto_info *dev;
+ /* for fallback */
+ struct crypto_ahash *fallback_tfm;
+@@ -241,14 +213,23 @@ struct rk_ahash_ctx {
+ struct rk_ahash_rctx {
+ struct ahash_request fallback_req;
+ u32 mode;
++ int nrsg;
+ };
+
+ /* the private variable of cipher */
+ struct rk_cipher_ctx {
++ struct crypto_engine_ctx enginectx;
+ struct rk_crypto_info *dev;
+ unsigned int keylen;
+- u32 mode;
++ u8 key[AES_MAX_KEY_SIZE];
+ u8 iv[AES_BLOCK_SIZE];
++ struct crypto_skcipher *fallback_tfm;
++};
++
++struct rk_cipher_rctx {
++ u8 backup_iv[AES_BLOCK_SIZE];
++ u32 mode;
++ struct skcipher_request fallback_req; // keep at the end
+ };
+
+ enum alg_type {
+diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+index ed03058497bc2..edd40e16a3f0a 100644
+--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+@@ -9,6 +9,7 @@
+ * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
+ */
+ #include <linux/device.h>
++#include <asm/unaligned.h>
+ #include "rk3288_crypto.h"
+
+ /*
+@@ -16,6 +17,40 @@
+ * so we put the fixed hash out when met zero message.
+ */
+
++static bool rk_ahash_need_fallback(struct ahash_request *req)
++{
++ struct scatterlist *sg;
++
++ sg = req->src;
++ while (sg) {
++ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
++ return true;
++ }
++ if (sg->length % 4) {
++ return true;
++ }
++ sg = sg_next(sg);
++ }
++ return false;
++}
++
++static int rk_ahash_digest_fb(struct ahash_request *areq)
++{
++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++ struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
++
++ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
++ rctx->fallback_req.base.flags = areq->base.flags &
++ CRYPTO_TFM_REQ_MAY_SLEEP;
++
++ rctx->fallback_req.nbytes = areq->nbytes;
++ rctx->fallback_req.src = areq->src;
++ rctx->fallback_req.result = areq->result;
++
++ return crypto_ahash_digest(&rctx->fallback_req);
++}
++
+ static int zero_message_process(struct ahash_request *req)
+ {
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+@@ -38,16 +73,12 @@ static int zero_message_process(struct ahash_request *req)
+ return 0;
+ }
+
+-static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
+-{
+- if (base->complete)
+- base->complete(base, err);
+-}
+-
+-static void rk_ahash_reg_init(struct rk_crypto_info *dev)
++static void rk_ahash_reg_init(struct ahash_request *req)
+ {
+- struct ahash_request *req = ahash_request_cast(dev->async_req);
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
++ struct rk_crypto_info *dev = tctx->dev;
+ int reg_status;
+
+ reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
+@@ -74,7 +105,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev)
+ RK_CRYPTO_BYTESWAP_BRFIFO |
+ RK_CRYPTO_BYTESWAP_BTFIFO);
+
+- CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
++ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes);
+ }
+
+ static int rk_ahash_init(struct ahash_request *req)
+@@ -167,48 +198,64 @@ static int rk_ahash_digest(struct ahash_request *req)
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct rk_crypto_info *dev = tctx->dev;
+
++ if (rk_ahash_need_fallback(req))
++ return rk_ahash_digest_fb(req);
++
+ if (!req->nbytes)
+ return zero_message_process(req);
+- else
+- return dev->enqueue(dev, &req->base);
++
++ return crypto_transfer_hash_request_to_engine(dev->engine, req);
+ }
+
+-static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
++static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg)
+ {
+- CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
+- CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
++ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg));
++ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
+ (RK_CRYPTO_HASH_START << 16));
+ }
+
+-static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
++static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
++{
++ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
++ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
++ int ret;
++
++ ret = dma_map_sg(tctx->dev->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
++ if (ret <= 0)
++ return -EINVAL;
++
++ rctx->nrsg = ret;
++
++ return 0;
++}
++
++static int rk_hash_unprepare(struct crypto_engine *engine, void *breq)
+ {
+- int err;
++ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
++ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+
+- err = dev->load_data(dev, dev->sg_src, NULL);
+- if (!err)
+- crypto_ahash_dma_start(dev);
+- return err;
++ dma_unmap_sg(tctx->dev->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
++ return 0;
+ }
+
+-static int rk_ahash_start(struct rk_crypto_info *dev)
++static int rk_hash_run(struct crypto_engine *engine, void *breq)
+ {
+- struct ahash_request *req = ahash_request_cast(dev->async_req);
+- struct crypto_ahash *tfm;
+- struct rk_ahash_rctx *rctx;
+-
+- dev->total = req->nbytes;
+- dev->left_bytes = req->nbytes;
+- dev->aligned = 0;
+- dev->align_size = 4;
+- dev->sg_dst = NULL;
+- dev->sg_src = req->src;
+- dev->first = req->src;
+- dev->src_nents = sg_nents(req->src);
+- rctx = ahash_request_ctx(req);
++ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
++ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
++ struct scatterlist *sg = areq->src;
++ int err = 0;
++ int i;
++ u32 v;
++
+ rctx->mode = 0;
+
+- tfm = crypto_ahash_reqtfm(req);
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
+ rctx->mode = RK_CRYPTO_HASH_SHA1;
+@@ -220,32 +267,26 @@ static int rk_ahash_start(struct rk_crypto_info *dev)
+ rctx->mode = RK_CRYPTO_HASH_MD5;
+ break;
+ default:
+- return -EINVAL;
++ err = -EINVAL;
++ goto theend;
+ }
+
+- rk_ahash_reg_init(dev);
+- return rk_ahash_set_data_start(dev);
+-}
+-
+-static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
+-{
+- int err = 0;
+- struct ahash_request *req = ahash_request_cast(dev->async_req);
+- struct crypto_ahash *tfm;
+-
+- dev->unload_data(dev);
+- if (dev->left_bytes) {
+- if (dev->aligned) {
+- if (sg_is_last(dev->sg_src)) {
+- dev_warn(dev->dev, "[%s:%d], Lack of data\n",
+- __func__, __LINE__);
+- err = -ENOMEM;
+- goto out_rx;
+- }
+- dev->sg_src = sg_next(dev->sg_src);
++ rk_ahash_reg_init(areq);
++
++ while (sg) {
++ reinit_completion(&tctx->dev->complete);
++ tctx->dev->status = 0;
++ crypto_ahash_dma_start(tctx->dev, sg);
++ wait_for_completion_interruptible_timeout(&tctx->dev->complete,
++ msecs_to_jiffies(2000));
++ if (!tctx->dev->status) {
++ dev_err(tctx->dev->dev, "DMA timeout\n");
++ err = -EFAULT;
++ goto theend;
+ }
+- err = rk_ahash_set_data_start(dev);
+- } else {
++ sg = sg_next(sg);
++ }
++
+ /*
+ * it will take some time to process date after last dma
+ * transmission.
+@@ -256,18 +297,20 @@ static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
+ * efficiency, and make it response quickly when dma
+ * complete.
+ */
+- while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
+- udelay(10);
+-
+- tfm = crypto_ahash_reqtfm(req);
+- memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
+- crypto_ahash_digestsize(tfm));
+- dev->complete(dev->async_req, 0);
+- tasklet_schedule(&dev->queue_task);
++ while (!CRYPTO_READ(tctx->dev, RK_CRYPTO_HASH_STS))
++ udelay(10);
++
++ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) {
++ v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
++ put_unaligned_le32(v, areq->result + i * 4);
+ }
+
+-out_rx:
+- return err;
++theend:
++ local_bh_disable();
++ crypto_finalize_hash_request(engine, breq, err);
++ local_bh_enable();
++
++ return 0;
+ }
+
+ static int rk_cra_hash_init(struct crypto_tfm *tfm)
+@@ -281,14 +324,6 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm)
+ algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+
+ tctx->dev = algt->dev;
+- tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
+- if (!tctx->dev->addr_vir) {
+- dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n");
+- return -ENOMEM;
+- }
+- tctx->dev->start = rk_ahash_start;
+- tctx->dev->update = rk_ahash_crypto_rx;
+- tctx->dev->complete = rk_ahash_crypto_complete;
+
+ /* for fallback */
+ tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
+@@ -297,19 +332,23 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm)
+ dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
+ return PTR_ERR(tctx->fallback_tfm);
+ }
++
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct rk_ahash_rctx) +
+ crypto_ahash_reqsize(tctx->fallback_tfm));
+
+- return tctx->dev->enable_clk(tctx->dev);
++ tctx->enginectx.op.do_one_request = rk_hash_run;
++ tctx->enginectx.op.prepare_request = rk_hash_prepare;
++ tctx->enginectx.op.unprepare_request = rk_hash_unprepare;
++
++ return 0;
+ }
+
+ static void rk_cra_hash_exit(struct crypto_tfm *tfm)
+ {
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+
+- free_page((unsigned long)tctx->dev->addr_vir);
+- return tctx->dev->disable_clk(tctx->dev);
++ crypto_free_ahash(tctx->fallback_tfm);
+ }
+
+ struct rk_crypto_tmp rk_ahash_sha1 = {
+diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+index 5bbf0d2722e11..67a7e05d5ae31 100644
+--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+@@ -9,23 +9,77 @@
+ * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
+ */
+ #include <linux/device.h>
++#include <crypto/scatterwalk.h>
+ #include "rk3288_crypto.h"
+
+ #define RK_CRYPTO_DEC BIT(0)
+
+-static void rk_crypto_complete(struct crypto_async_request *base, int err)
++static int rk_cipher_need_fallback(struct skcipher_request *req)
+ {
+- if (base->complete)
+- base->complete(base, err);
++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
++ unsigned int bs = crypto_skcipher_blocksize(tfm);
++ struct scatterlist *sgs, *sgd;
++ unsigned int stodo, dtodo, len;
++
++ if (!req->cryptlen)
++ return true;
++
++ len = req->cryptlen;
++ sgs = req->src;
++ sgd = req->dst;
++ while (sgs && sgd) {
++ if (!IS_ALIGNED(sgs->offset, sizeof(u32))) {
++ return true;
++ }
++ if (!IS_ALIGNED(sgd->offset, sizeof(u32))) {
++ return true;
++ }
++ stodo = min(len, sgs->length);
++ if (stodo % bs) {
++ return true;
++ }
++ dtodo = min(len, sgd->length);
++ if (dtodo % bs) {
++ return true;
++ }
++ if (stodo != dtodo) {
++ return true;
++ }
++ len -= stodo;
++ sgs = sg_next(sgs);
++ sgd = sg_next(sgd);
++ }
++ return false;
++}
++
++static int rk_cipher_fallback(struct skcipher_request *areq)
++{
++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
++ struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
++ int err;
++
++ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
++ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
++ areq->base.complete, areq->base.data);
++ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
++ areq->cryptlen, areq->iv);
++ if (rctx->mode & RK_CRYPTO_DEC)
++ err = crypto_skcipher_decrypt(&rctx->fallback_req);
++ else
++ err = crypto_skcipher_encrypt(&rctx->fallback_req);
++ return err;
+ }
+
+ static int rk_handle_req(struct rk_crypto_info *dev,
+ struct skcipher_request *req)
+ {
+- if (!IS_ALIGNED(req->cryptlen, dev->align_size))
+- return -EINVAL;
+- else
+- return dev->enqueue(dev, &req->base);
++ struct crypto_engine *engine = dev->engine;
++
++ if (rk_cipher_need_fallback(req))
++ return rk_cipher_fallback(req);
++
++ return crypto_transfer_skcipher_request_to_engine(engine, req);
+ }
+
+ static int rk_aes_setkey(struct crypto_skcipher *cipher,
+@@ -38,8 +92,9 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher,
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+ ctx->keylen = keylen;
+- memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
+- return 0;
++ memcpy(ctx->key, key, keylen);
++
++ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+ }
+
+ static int rk_des_setkey(struct crypto_skcipher *cipher,
+@@ -53,8 +108,9 @@ static int rk_des_setkey(struct crypto_skcipher *cipher,
+ return err;
+
+ ctx->keylen = keylen;
+- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+- return 0;
++ memcpy(ctx->key, key, keylen);
++
++ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+ }
+
+ static int rk_tdes_setkey(struct crypto_skcipher *cipher,
+@@ -68,17 +124,19 @@ static int rk_tdes_setkey(struct crypto_skcipher *cipher,
+ return err;
+
+ ctx->keylen = keylen;
+- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+- return 0;
++ memcpy(ctx->key, key, keylen);
++
++ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+ }
+
+ static int rk_aes_ecb_encrypt(struct skcipher_request *req)
+ {
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+- ctx->mode = RK_CRYPTO_AES_ECB_MODE;
++ rctx->mode = RK_CRYPTO_AES_ECB_MODE;
+ return rk_handle_req(dev, req);
+ }
+
+@@ -86,9 +144,10 @@ static int rk_aes_ecb_decrypt(struct skcipher_request *req)
+ {
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+- ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
++ rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+ }
+
+@@ -96,9 +155,10 @@ static int rk_aes_cbc_encrypt(struct skcipher_request *req)
+ {
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+- ctx->mode = RK_CRYPTO_AES_CBC_MODE;
++ rctx->mode = RK_CRYPTO_AES_CBC_MODE;
+ return rk_handle_req(dev, req);
+ }
+
+@@ -106,9 +166,10 @@ static int rk_aes_cbc_decrypt(struct skcipher_request *req)
+ {
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+- ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
++ rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+ }
+
+@@ -116,9 +177,10 @@ static int rk_des_ecb_encrypt(struct skcipher_request *req)
+ {
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+- ctx->mode = 0;
++ rctx->mode = 0;
+ return rk_handle_req(dev, req);
+ }
+
+@@ -126,9 +188,10 @@ static int rk_des_ecb_decrypt(struct skcipher_request *req)
+ {
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+- ctx->mode = RK_CRYPTO_DEC;
++ rctx->mode = RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+ }
+
+@@ -136,9 +199,10 @@ static int rk_des_cbc_encrypt(struct skcipher_request *req)
+ {
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
++ rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+ return rk_handle_req(dev, req);
+ }
+
+@@ -146,9 +210,10 @@ static int rk_des_cbc_decrypt(struct skcipher_request *req)
+ {
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
++ rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+ }
+
+@@ -156,9 +221,10 @@ static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
+ {
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+- ctx->mode = RK_CRYPTO_TDES_SELECT;
++ rctx->mode = RK_CRYPTO_TDES_SELECT;
+ return rk_handle_req(dev, req);
+ }
+
+@@ -166,9 +232,10 @@ static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
+ {
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
++ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+ }
+
+@@ -176,9 +243,10 @@ static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
+ {
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
++ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+ return rk_handle_req(dev, req);
+ }
+
+@@ -186,43 +254,42 @@ static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
+ {
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
++ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+ RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+ }
+
+-static void rk_ablk_hw_init(struct rk_crypto_info *dev)
++static void rk_ablk_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req)
+ {
+- struct skcipher_request *req =
+- skcipher_request_cast(dev->async_req);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+- u32 ivsize, block, conf_reg = 0;
++ u32 block, conf_reg = 0;
+
+ block = crypto_tfm_alg_blocksize(tfm);
+- ivsize = crypto_skcipher_ivsize(cipher);
+
+ if (block == DES_BLOCK_SIZE) {
+- ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
++ rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+ RK_CRYPTO_TDES_BYTESWAP_KEY |
+ RK_CRYPTO_TDES_BYTESWAP_IV;
+- CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
+- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize);
++ CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode);
++ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen);
+ conf_reg = RK_CRYPTO_DESSEL;
+ } else {
+- ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
++ rctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
+ RK_CRYPTO_AES_KEY_CHANGE |
+ RK_CRYPTO_AES_BYTESWAP_KEY |
+ RK_CRYPTO_AES_BYTESWAP_IV;
+ if (ctx->keylen == AES_KEYSIZE_192)
+- ctx->mode |= RK_CRYPTO_AES_192BIT_key;
++ rctx->mode |= RK_CRYPTO_AES_192BIT_key;
+ else if (ctx->keylen == AES_KEYSIZE_256)
+- ctx->mode |= RK_CRYPTO_AES_256BIT_key;
+- CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
+- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize);
++ rctx->mode |= RK_CRYPTO_AES_256BIT_key;
++ CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode);
++ memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen);
+ }
+ conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
+ RK_CRYPTO_BYTESWAP_BRFIFO;
+@@ -231,146 +298,138 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev)
+ RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
+ }
+
+-static void crypto_dma_start(struct rk_crypto_info *dev)
++static void crypto_dma_start(struct rk_crypto_info *dev,
++ struct scatterlist *sgs,
++ struct scatterlist *sgd, unsigned int todo)
+ {
+- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
+- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
+- CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
++ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs));
++ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo);
++ CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd));
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
+ _SBF(RK_CRYPTO_BLOCK_START, 16));
+ }
+
+-static int rk_set_data_start(struct rk_crypto_info *dev)
++static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
+ {
+- int err;
+- struct skcipher_request *req =
+- skcipher_request_cast(dev->async_req);
+- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
++ struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+- u32 ivsize = crypto_skcipher_ivsize(tfm);
+- u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
+- dev->sg_src->offset + dev->sg_src->length - ivsize;
+-
+- /* Store the iv that need to be updated in chain mode.
+- * And update the IV buffer to contain the next IV for decryption mode.
+- */
+- if (ctx->mode & RK_CRYPTO_DEC) {
+- memcpy(ctx->iv, src_last_blk, ivsize);
+- sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv,
+- ivsize, dev->total - ivsize);
+- }
+-
+- err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
+- if (!err)
+- crypto_dma_start(dev);
+- return err;
+-}
+-
+-static int rk_ablk_start(struct rk_crypto_info *dev)
+-{
+- struct skcipher_request *req =
+- skcipher_request_cast(dev->async_req);
+- unsigned long flags;
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
++ struct scatterlist *sgs, *sgd;
+ int err = 0;
++ int ivsize = crypto_skcipher_ivsize(tfm);
++ int offset;
++ u8 iv[AES_BLOCK_SIZE];
++ u8 biv[AES_BLOCK_SIZE];
++ u8 *ivtouse = areq->iv;
++ unsigned int len = areq->cryptlen;
++ unsigned int todo;
++
++ ivsize = crypto_skcipher_ivsize(tfm);
++ if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
++ if (rctx->mode & RK_CRYPTO_DEC) {
++ offset = areq->cryptlen - ivsize;
++ scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
++ offset, ivsize, 0);
++ }
++ }
+
+- dev->left_bytes = req->cryptlen;
+- dev->total = req->cryptlen;
+- dev->sg_src = req->src;
+- dev->first = req->src;
+- dev->src_nents = sg_nents(req->src);
+- dev->sg_dst = req->dst;
+- dev->dst_nents = sg_nents(req->dst);
+- dev->aligned = 1;
+-
+- spin_lock_irqsave(&dev->lock, flags);
+- rk_ablk_hw_init(dev);
+- err = rk_set_data_start(dev);
+- spin_unlock_irqrestore(&dev->lock, flags);
+- return err;
+-}
++ sgs = areq->src;
++ sgd = areq->dst;
+
+-static void rk_iv_copyback(struct rk_crypto_info *dev)
+-{
+- struct skcipher_request *req =
+- skcipher_request_cast(dev->async_req);
+- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+- u32 ivsize = crypto_skcipher_ivsize(tfm);
+-
+- /* Update the IV buffer to contain the next IV for encryption mode. */
+- if (!(ctx->mode & RK_CRYPTO_DEC)) {
+- if (dev->aligned) {
+- memcpy(req->iv, sg_virt(dev->sg_dst) +
+- dev->sg_dst->length - ivsize, ivsize);
++ while (sgs && sgd && len) {
++ if (!sgs->length) {
++ sgs = sg_next(sgs);
++ sgd = sg_next(sgd);
++ continue;
++ }
++ if (rctx->mode & RK_CRYPTO_DEC) {
++ /* we backup last block of source to be used as IV at next step */
++ offset = sgs->length - ivsize;
++ scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0);
++ }
++ if (sgs == sgd) {
++ err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
++ if (err <= 0) {
++ err = -EINVAL;
++ goto theend_iv;
++ }
++ } else {
++ err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
++ if (err <= 0) {
++ err = -EINVAL;
++ goto theend_iv;
++ }
++ err = dma_map_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
++ if (err <= 0) {
++ err = -EINVAL;
++ goto theend_sgs;
++ }
++ }
++ err = 0;
++ rk_ablk_hw_init(ctx->dev, areq);
++ if (ivsize) {
++ if (ivsize == DES_BLOCK_SIZE)
++ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize);
++ else
++ memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize);
++ }
++ reinit_completion(&ctx->dev->complete);
++ ctx->dev->status = 0;
++
++ todo = min(sg_dma_len(sgs), len);
++ len -= todo;
++ crypto_dma_start(ctx->dev, sgs, sgd, todo / 4);
++ wait_for_completion_interruptible_timeout(&ctx->dev->complete,
++ msecs_to_jiffies(2000));
++ if (!ctx->dev->status) {
++ dev_err(ctx->dev->dev, "DMA timeout\n");
++ err = -EFAULT;
++ goto theend;
++ }
++ if (sgs == sgd) {
++ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
++ } else {
++ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
++ dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
++ }
++ if (rctx->mode & RK_CRYPTO_DEC) {
++ memcpy(iv, biv, ivsize);
++ ivtouse = iv;
+ } else {
+- memcpy(req->iv, dev->addr_vir +
+- dev->count - ivsize, ivsize);
++ offset = sgd->length - ivsize;
++ scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0);
++ ivtouse = iv;
+ }
++ sgs = sg_next(sgs);
++ sgd = sg_next(sgd);
+ }
+-}
+-
+-static void rk_update_iv(struct rk_crypto_info *dev)
+-{
+- struct skcipher_request *req =
+- skcipher_request_cast(dev->async_req);
+- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+- u32 ivsize = crypto_skcipher_ivsize(tfm);
+- u8 *new_iv = NULL;
+
+- if (ctx->mode & RK_CRYPTO_DEC) {
+- new_iv = ctx->iv;
+- } else {
+- new_iv = page_address(sg_page(dev->sg_dst)) +
+- dev->sg_dst->offset + dev->sg_dst->length - ivsize;
++ if (areq->iv && ivsize > 0) {
++ offset = areq->cryptlen - ivsize;
++ if (rctx->mode & RK_CRYPTO_DEC) {
++ memcpy(areq->iv, rctx->backup_iv, ivsize);
++ memzero_explicit(rctx->backup_iv, ivsize);
++ } else {
++ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
++ ivsize, 0);
++ }
+ }
+
+- if (ivsize == DES_BLOCK_SIZE)
+- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
+- else if (ivsize == AES_BLOCK_SIZE)
+- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
+-}
++theend:
++ local_bh_disable();
++ crypto_finalize_skcipher_request(engine, areq, err);
++ local_bh_enable();
++ return 0;
+
+-/* return:
+- * true some err was occurred
+- * fault no err, continue
+- */
+-static int rk_ablk_rx(struct rk_crypto_info *dev)
+-{
+- int err = 0;
+- struct skcipher_request *req =
+- skcipher_request_cast(dev->async_req);
+-
+- dev->unload_data(dev);
+- if (!dev->aligned) {
+- if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
+- dev->addr_vir, dev->count,
+- dev->total - dev->left_bytes -
+- dev->count)) {
+- err = -EINVAL;
+- goto out_rx;
+- }
+- }
+- if (dev->left_bytes) {
+- rk_update_iv(dev);
+- if (dev->aligned) {
+- if (sg_is_last(dev->sg_src)) {
+- dev_err(dev->dev, "[%s:%d] Lack of data\n",
+- __func__, __LINE__);
+- err = -ENOMEM;
+- goto out_rx;
+- }
+- dev->sg_src = sg_next(dev->sg_src);
+- dev->sg_dst = sg_next(dev->sg_dst);
+- }
+- err = rk_set_data_start(dev);
++theend_sgs:
++ if (sgs == sgd) {
++ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
+ } else {
+- rk_iv_copyback(dev);
+- /* here show the calculation is over without any err */
+- dev->complete(dev->async_req, 0);
+- tasklet_schedule(&dev->queue_task);
++ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
++ dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
+ }
+-out_rx:
++theend_iv:
+ return err;
+ }
+
+@@ -378,26 +437,34 @@ static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
+ {
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
++ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct rk_crypto_tmp *algt;
+
+ algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+
+ ctx->dev = algt->dev;
+- ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1;
+- ctx->dev->start = rk_ablk_start;
+- ctx->dev->update = rk_ablk_rx;
+- ctx->dev->complete = rk_crypto_complete;
+- ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
+
+- return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
++ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
++ if (IS_ERR(ctx->fallback_tfm)) {
++ dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
++ name, PTR_ERR(ctx->fallback_tfm));
++ return PTR_ERR(ctx->fallback_tfm);
++ }
++
++ tfm->reqsize = sizeof(struct rk_cipher_rctx) +
++ crypto_skcipher_reqsize(ctx->fallback_tfm);
++
++ ctx->enginectx.op.do_one_request = rk_cipher_run;
++
++ return 0;
+ }
+
+ static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
+ {
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+- free_page((unsigned long)ctx->dev->addr_vir);
+- ctx->dev->disable_clk(ctx->dev);
++ memzero_explicit(ctx->key, ctx->keylen);
++ crypto_free_skcipher(ctx->fallback_tfm);
+ }
+
+ struct rk_crypto_tmp rk_ecb_aes_alg = {
+@@ -406,7 +473,7 @@ struct rk_crypto_tmp rk_ecb_aes_alg = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-rk",
+ .base.cra_priority = 300,
+- .base.cra_flags = CRYPTO_ALG_ASYNC,
++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x0f,
+@@ -428,7 +495,7 @@ struct rk_crypto_tmp rk_cbc_aes_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-rk",
+ .base.cra_priority = 300,
+- .base.cra_flags = CRYPTO_ALG_ASYNC,
++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x0f,
+@@ -451,7 +518,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-rk",
+ .base.cra_priority = 300,
+- .base.cra_flags = CRYPTO_ALG_ASYNC,
++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+@@ -473,7 +540,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-rk",
+ .base.cra_priority = 300,
+- .base.cra_flags = CRYPTO_ALG_ASYNC,
++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+@@ -496,7 +563,7 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-ede-rk",
+ .base.cra_priority = 300,
+- .base.cra_flags = CRYPTO_ALG_ASYNC,
++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+@@ -518,7 +585,7 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-ede-rk",
+ .base.cra_priority = 300,
+- .base.cra_flags = CRYPTO_ALG_ASYNC,
++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
+index 0e5a5662d5a40..0a051d6568800 100644
+--- a/drivers/dio/dio.c
++++ b/drivers/dio/dio.c
+@@ -109,6 +109,12 @@ static char dio_no_name[] = { 0 };
+
+ #endif /* CONFIG_DIO_CONSTANTS */
+
++static void dio_dev_release(struct device *dev)
++{
++ struct dio_dev *ddev = container_of(dev, typeof(struct dio_dev), dev);
++ kfree(ddev);
++}
++
+ int __init dio_find(int deviceid)
+ {
+ /* Called to find a DIO device before the full bus scan has run.
+@@ -225,6 +231,7 @@ static int __init dio_init(void)
+ dev->bus = &dio_bus;
+ dev->dev.parent = &dio_bus.dev;
+ dev->dev.bus = &dio_bus_type;
++ dev->dev.release = dio_dev_release;
+ dev->scode = scode;
+ dev->resource.start = pa;
+ dev->resource.end = pa + DIO_SIZE(scode, va);
+@@ -252,6 +259,7 @@ static int __init dio_init(void)
+ if (error) {
+ pr_err("DIO: Error registering device %s\n",
+ dev->name);
++ put_device(&dev->dev);
+ continue;
+ }
+ error = dio_create_sysfs_dev_files(dev);
+diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
+index a2cc520225d32..90f28bda29c8b 100644
+--- a/drivers/dma/apple-admac.c
++++ b/drivers/dma/apple-admac.c
+@@ -21,6 +21,12 @@
+ #define NCHANNELS_MAX 64
+ #define IRQ_NOUTPUTS 4
+
++/*
++ * For allocation purposes we split the cache
++ * memory into blocks of fixed size (given in bytes).
++ */
++#define SRAM_BLOCK 2048
++
+ #define RING_WRITE_SLOT GENMASK(1, 0)
+ #define RING_READ_SLOT GENMASK(5, 4)
+ #define RING_FULL BIT(9)
+@@ -36,6 +42,9 @@
+ #define REG_TX_STOP 0x0004
+ #define REG_RX_START 0x0008
+ #define REG_RX_STOP 0x000c
++#define REG_IMPRINT 0x0090
++#define REG_TX_SRAM_SIZE 0x0094
++#define REG_RX_SRAM_SIZE 0x0098
+
+ #define REG_CHAN_CTL(ch) (0x8000 + (ch) * 0x200)
+ #define REG_CHAN_CTL_RST_RINGS BIT(0)
+@@ -53,7 +62,9 @@
+ #define BUS_WIDTH_FRAME_2_WORDS 0x10
+ #define BUS_WIDTH_FRAME_4_WORDS 0x20
+
+-#define CHAN_BUFSIZE 0x8000
++#define REG_CHAN_SRAM_CARVEOUT(ch) (0x8050 + (ch) * 0x200)
++#define CHAN_SRAM_CARVEOUT_SIZE GENMASK(31, 16)
++#define CHAN_SRAM_CARVEOUT_BASE GENMASK(15, 0)
+
+ #define REG_CHAN_FIFOCTL(ch) (0x8054 + (ch) * 0x200)
+ #define CHAN_FIFOCTL_LIMIT GENMASK(31, 16)
+@@ -76,6 +87,8 @@ struct admac_chan {
+ struct dma_chan chan;
+ struct tasklet_struct tasklet;
+
++ u32 carveout;
++
+ spinlock_t lock;
+ struct admac_tx *current_tx;
+ int nperiod_acks;
+@@ -92,12 +105,24 @@ struct admac_chan {
+ struct list_head to_free;
+ };
+
++struct admac_sram {
++ u32 size;
++ /*
++ * SRAM_CARVEOUT has 16-bit fields, so the SRAM cannot be larger than
++ * 64K and a 32-bit bitfield over 2K blocks covers it.
++ */
++ u32 allocated;
++};
++
+ struct admac_data {
+ struct dma_device dma;
+ struct device *dev;
+ __iomem void *base;
+ struct reset_control *rstc;
+
++ struct mutex cache_alloc_lock;
++ struct admac_sram txcache, rxcache;
++
+ int irq;
+ int irq_index;
+ int nchannels;
+@@ -118,6 +143,60 @@ struct admac_tx {
+ struct list_head node;
+ };
+
++static int admac_alloc_sram_carveout(struct admac_data *ad,
++ enum dma_transfer_direction dir,
++ u32 *out)
++{
++ struct admac_sram *sram;
++ int i, ret = 0, nblocks;
++
++ if (dir == DMA_MEM_TO_DEV)
++ sram = &ad->txcache;
++ else
++ sram = &ad->rxcache;
++
++ mutex_lock(&ad->cache_alloc_lock);
++
++ nblocks = sram->size / SRAM_BLOCK;
++ for (i = 0; i < nblocks; i++)
++ if (!(sram->allocated & BIT(i)))
++ break;
++
++ if (i < nblocks) {
++ *out = FIELD_PREP(CHAN_SRAM_CARVEOUT_BASE, i * SRAM_BLOCK) |
++ FIELD_PREP(CHAN_SRAM_CARVEOUT_SIZE, SRAM_BLOCK);
++ sram->allocated |= BIT(i);
++ } else {
++ ret = -EBUSY;
++ }
++
++ mutex_unlock(&ad->cache_alloc_lock);
++
++ return ret;
++}
++
++static void admac_free_sram_carveout(struct admac_data *ad,
++ enum dma_transfer_direction dir,
++ u32 carveout)
++{
++ struct admac_sram *sram;
++ u32 base = FIELD_GET(CHAN_SRAM_CARVEOUT_BASE, carveout);
++ int i;
++
++ if (dir == DMA_MEM_TO_DEV)
++ sram = &ad->txcache;
++ else
++ sram = &ad->rxcache;
++
++ if (WARN_ON(base >= sram->size))
++ return;
++
++ mutex_lock(&ad->cache_alloc_lock);
++ i = base / SRAM_BLOCK;
++ sram->allocated &= ~BIT(i);
++ mutex_unlock(&ad->cache_alloc_lock);
++}
++
+ static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val)
+ {
+ void __iomem *addr = ad->base + reg;
+@@ -466,15 +545,28 @@ static void admac_synchronize(struct dma_chan *chan)
+ static int admac_alloc_chan_resources(struct dma_chan *chan)
+ {
+ struct admac_chan *adchan = to_admac_chan(chan);
++ struct admac_data *ad = adchan->host;
++ int ret;
+
+ dma_cookie_init(&adchan->chan);
++ ret = admac_alloc_sram_carveout(ad, admac_chan_direction(adchan->no),
++ &adchan->carveout);
++ if (ret < 0)
++ return ret;
++
++ writel_relaxed(adchan->carveout,
++ ad->base + REG_CHAN_SRAM_CARVEOUT(adchan->no));
+ return 0;
+ }
+
+ static void admac_free_chan_resources(struct dma_chan *chan)
+ {
++ struct admac_chan *adchan = to_admac_chan(chan);
++
+ admac_terminate_all(chan);
+ admac_synchronize(chan);
++ admac_free_sram_carveout(adchan->host, admac_chan_direction(adchan->no),
++ adchan->carveout);
+ }
+
+ static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
+@@ -712,6 +804,7 @@ static int admac_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, ad);
+ ad->dev = &pdev->dev;
+ ad->nchannels = nchannels;
++ mutex_init(&ad->cache_alloc_lock);
+
+ /*
+ * The controller has 4 IRQ outputs. Try them all until
+@@ -801,6 +894,13 @@ static int admac_probe(struct platform_device *pdev)
+ goto free_irq;
+ }
+
++ ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
++ ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
++
++ dev_info(&pdev->dev, "Audio DMA Controller\n");
++ dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n",
++ readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size);
++
+ return 0;
+
+ free_irq:
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 7269bd54554f6..3229dfc786507 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -528,6 +528,22 @@ static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr,
+ !idxd->hw.group_cap.progress_limit;
+ }
+
++static bool idxd_group_attr_read_buffers_invisible(struct attribute *attr,
++ struct idxd_device *idxd)
++{
++ /*
++ * Intel IAA does not support Read Buffer allocation control,
++ * make these attributes invisible.
++ */
++ return (attr == &dev_attr_group_use_token_limit.attr ||
++ attr == &dev_attr_group_use_read_buffer_limit.attr ||
++ attr == &dev_attr_group_tokens_allowed.attr ||
++ attr == &dev_attr_group_read_buffers_allowed.attr ||
++ attr == &dev_attr_group_tokens_reserved.attr ||
++ attr == &dev_attr_group_read_buffers_reserved.attr) &&
++ idxd->data->type == IDXD_TYPE_IAX;
++}
++
+ static umode_t idxd_group_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+ {
+@@ -538,6 +554,9 @@ static umode_t idxd_group_attr_visible(struct kobject *kobj,
+ if (idxd_group_attr_progress_limit_invisible(attr, idxd))
+ return 0;
+
++ if (idxd_group_attr_read_buffers_invisible(attr, idxd))
++ return 0;
++
+ return attr->mode;
+ }
+
+@@ -1233,6 +1252,14 @@ static bool idxd_wq_attr_op_config_invisible(struct attribute *attr,
+ !idxd->hw.wq_cap.op_config;
+ }
+
++static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
++ struct idxd_device *idxd)
++{
++ /* Intel IAA does not support batch processing, make it invisible */
++ return attr == &dev_attr_wq_max_batch_size.attr &&
++ idxd->data->type == IDXD_TYPE_IAX;
++}
++
+ static umode_t idxd_wq_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+ {
+@@ -1243,6 +1270,9 @@ static umode_t idxd_wq_attr_visible(struct kobject *kobj,
+ if (idxd_wq_attr_op_config_invisible(attr, idxd))
+ return 0;
+
++ if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
++ return 0;
++
+ return attr->mode;
+ }
+
+@@ -1533,6 +1563,43 @@ static ssize_t cmd_status_store(struct device *dev, struct device_attribute *att
+ }
+ static DEVICE_ATTR_RW(cmd_status);
+
++static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr,
++ struct idxd_device *idxd)
++{
++ /* Intel IAA does not support batch processing, make it invisible */
++ return attr == &dev_attr_max_batch_size.attr &&
++ idxd->data->type == IDXD_TYPE_IAX;
++}
++
++static bool idxd_device_attr_read_buffers_invisible(struct attribute *attr,
++ struct idxd_device *idxd)
++{
++ /*
++ * Intel IAA does not support Read Buffer allocation control,
++ * make these attributes invisible.
++ */
++ return (attr == &dev_attr_max_tokens.attr ||
++ attr == &dev_attr_max_read_buffers.attr ||
++ attr == &dev_attr_token_limit.attr ||
++ attr == &dev_attr_read_buffer_limit.attr) &&
++ idxd->data->type == IDXD_TYPE_IAX;
++}
++
++static umode_t idxd_device_attr_visible(struct kobject *kobj,
++ struct attribute *attr, int n)
++{
++ struct device *dev = container_of(kobj, struct device, kobj);
++ struct idxd_device *idxd = confdev_to_idxd(dev);
++
++ if (idxd_device_attr_max_batch_size_invisible(attr, idxd))
++ return 0;
++
++ if (idxd_device_attr_read_buffers_invisible(attr, idxd))
++ return 0;
++
++ return attr->mode;
++}
++
+ static struct attribute *idxd_device_attributes[] = {
+ &dev_attr_version.attr,
+ &dev_attr_max_groups.attr,
+@@ -1560,6 +1627,7 @@ static struct attribute *idxd_device_attributes[] = {
+
+ static const struct attribute_group idxd_device_attribute_group = {
+ .attrs = idxd_device_attributes,
++ .is_visible = idxd_device_attr_visible,
+ };
+
+ static const struct attribute_group *idxd_attribute_groups[] = {
+diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
+index a22ea053f8e1c..8af4d2523194a 100644
+--- a/drivers/edac/i10nm_base.c
++++ b/drivers/edac/i10nm_base.c
+@@ -304,11 +304,10 @@ static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
+ if (unlikely(pci_enable_device(pdev) < 0)) {
+ edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
+ bus, dev, fun);
++ pci_dev_put(pdev);
+ return NULL;
+ }
+
+- pci_dev_get(pdev);
+-
+ return pdev;
+ }
+
+diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c
+index 2a120d8d3c272..9dfa545427ca1 100644
+--- a/drivers/extcon/extcon-usbc-tusb320.c
++++ b/drivers/extcon/extcon-usbc-tusb320.c
+@@ -313,9 +313,9 @@ static void tusb320_typec_irq_handler(struct tusb320_priv *priv, u8 reg9)
+ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB);
+ }
+
+-static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
++static irqreturn_t tusb320_state_update_handler(struct tusb320_priv *priv,
++ bool force_update)
+ {
+- struct tusb320_priv *priv = dev_id;
+ unsigned int reg;
+
+ if (regmap_read(priv->regmap, TUSB320_REG9, &reg)) {
+@@ -323,7 +323,7 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
+ return IRQ_NONE;
+ }
+
+- if (!(reg & TUSB320_REG9_INTERRUPT_STATUS))
++ if (!force_update && !(reg & TUSB320_REG9_INTERRUPT_STATUS))
+ return IRQ_NONE;
+
+ tusb320_extcon_irq_handler(priv, reg);
+@@ -340,6 +340,13 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
+ return IRQ_HANDLED;
+ }
+
++static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
++{
++ struct tusb320_priv *priv = dev_id;
++
++ return tusb320_state_update_handler(priv, false);
++}
++
+ static const struct regmap_config tusb320_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+@@ -466,7 +473,7 @@ static int tusb320_probe(struct i2c_client *client,
+ return ret;
+
+ /* update initial state */
+- tusb320_irq_handler(client->irq, priv);
++ tusb320_state_update_handler(priv, true);
+
+ /* Reset chip to its default state */
+ ret = tusb320_reset(priv);
+@@ -477,7 +484,7 @@ static int tusb320_probe(struct i2c_client *client,
+ * State and polarity might change after a reset, so update
+ * them again and make sure the interrupt status bit is cleared.
+ */
+- tusb320_irq_handler(client->irq, priv);
++ tusb320_state_update_handler(priv, true);
+
+ ret = devm_request_threaded_irq(priv->dev, client->irq, NULL,
+ tusb320_irq_handler,
+diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
+index 4b8978b254f9a..dba315f675bc7 100644
+--- a/drivers/firmware/raspberrypi.c
++++ b/drivers/firmware/raspberrypi.c
+@@ -272,6 +272,7 @@ static int rpi_firmware_probe(struct platform_device *pdev)
+ int ret = PTR_ERR(fw->chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get mbox channel: %d\n", ret);
++ kfree(fw);
+ return ret;
+ }
+
+diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
+index ebc32bbd9b833..6281e7153b475 100644
+--- a/drivers/firmware/ti_sci.c
++++ b/drivers/firmware/ti_sci.c
+@@ -429,15 +429,14 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
+ * during noirq phase, so we must manually poll the completion.
+ */
+ ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
+- true, 1,
++ done_state, 1,
+ info->desc->max_rx_timeout_ms * 1000,
+ false, &xfer->done);
+ }
+
+- if (ret == -ETIMEDOUT || !done_state) {
++ if (ret == -ETIMEDOUT)
+ dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
+ (void *)_RET_IP_);
+- }
+
+ /*
+ * NOTE: we might prefer not to need the mailbox ticker to manage the
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
+index 0cb6b468f364f..6ab1cf489d035 100644
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -55,6 +55,50 @@ static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
+ * interface to gpiolib GPIOs via ioctl()s.
+ */
+
++typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *);
++typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long);
++typedef ssize_t (*read_fn)(struct file *, char __user *,
++ size_t count, loff_t *);
++
++static __poll_t call_poll_locked(struct file *file,
++ struct poll_table_struct *wait,
++ struct gpio_device *gdev, poll_fn func)
++{
++ __poll_t ret;
++
++ down_read(&gdev->sem);
++ ret = func(file, wait);
++ up_read(&gdev->sem);
++
++ return ret;
++}
++
++static long call_ioctl_locked(struct file *file, unsigned int cmd,
++ unsigned long arg, struct gpio_device *gdev,
++ ioctl_fn func)
++{
++ long ret;
++
++ down_read(&gdev->sem);
++ ret = func(file, cmd, arg);
++ up_read(&gdev->sem);
++
++ return ret;
++}
++
++static ssize_t call_read_locked(struct file *file, char __user *buf,
++ size_t count, loff_t *f_ps,
++ struct gpio_device *gdev, read_fn func)
++{
++ ssize_t ret;
++
++ down_read(&gdev->sem);
++ ret = func(file, buf, count, f_ps);
++ up_read(&gdev->sem);
++
++ return ret;
++}
++
+ /*
+ * GPIO line handle management
+ */
+@@ -191,8 +235,8 @@ static long linehandle_set_config(struct linehandle_state *lh,
+ return 0;
+ }
+
+-static long linehandle_ioctl(struct file *file, unsigned int cmd,
+- unsigned long arg)
++static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd,
++ unsigned long arg)
+ {
+ struct linehandle_state *lh = file->private_data;
+ void __user *ip = (void __user *)arg;
+@@ -201,6 +245,9 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd,
+ unsigned int i;
+ int ret;
+
++ if (!lh->gdev->chip)
++ return -ENODEV;
++
+ switch (cmd) {
+ case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
+ /* NOTE: It's okay to read values of output lines */
+@@ -247,6 +294,15 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd,
+ }
+ }
+
++static long linehandle_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ struct linehandle_state *lh = file->private_data;
++
++ return call_ioctl_locked(file, cmd, arg, lh->gdev,
++ linehandle_ioctl_unlocked);
++}
++
+ #ifdef CONFIG_COMPAT
+ static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+@@ -1378,12 +1434,15 @@ static long linereq_set_config(struct linereq *lr, void __user *ip)
+ return ret;
+ }
+
+-static long linereq_ioctl(struct file *file, unsigned int cmd,
+- unsigned long arg)
++static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd,
++ unsigned long arg)
+ {
+ struct linereq *lr = file->private_data;
+ void __user *ip = (void __user *)arg;
+
++ if (!lr->gdev->chip)
++ return -ENODEV;
++
+ switch (cmd) {
+ case GPIO_V2_LINE_GET_VALUES_IOCTL:
+ return linereq_get_values(lr, ip);
+@@ -1396,6 +1455,15 @@ static long linereq_ioctl(struct file *file, unsigned int cmd,
+ }
+ }
+
++static long linereq_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ struct linereq *lr = file->private_data;
++
++ return call_ioctl_locked(file, cmd, arg, lr->gdev,
++ linereq_ioctl_unlocked);
++}
++
+ #ifdef CONFIG_COMPAT
+ static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+@@ -1404,12 +1472,15 @@ static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
+ }
+ #endif
+
+-static __poll_t linereq_poll(struct file *file,
+- struct poll_table_struct *wait)
++static __poll_t linereq_poll_unlocked(struct file *file,
++ struct poll_table_struct *wait)
+ {
+ struct linereq *lr = file->private_data;
+ __poll_t events = 0;
+
++ if (!lr->gdev->chip)
++ return EPOLLHUP | EPOLLERR;
++
+ poll_wait(file, &lr->wait, wait);
+
+ if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
+@@ -1419,16 +1490,25 @@ static __poll_t linereq_poll(struct file *file,
+ return events;
+ }
+
+-static ssize_t linereq_read(struct file *file,
+- char __user *buf,
+- size_t count,
+- loff_t *f_ps)
++static __poll_t linereq_poll(struct file *file,
++ struct poll_table_struct *wait)
++{
++ struct linereq *lr = file->private_data;
++
++ return call_poll_locked(file, wait, lr->gdev, linereq_poll_unlocked);
++}
++
++static ssize_t linereq_read_unlocked(struct file *file, char __user *buf,
++ size_t count, loff_t *f_ps)
+ {
+ struct linereq *lr = file->private_data;
+ struct gpio_v2_line_event le;
+ ssize_t bytes_read = 0;
+ int ret;
+
++ if (!lr->gdev->chip)
++ return -ENODEV;
++
+ if (count < sizeof(le))
+ return -EINVAL;
+
+@@ -1473,6 +1553,15 @@ static ssize_t linereq_read(struct file *file,
+ return bytes_read;
+ }
+
++static ssize_t linereq_read(struct file *file, char __user *buf,
++ size_t count, loff_t *f_ps)
++{
++ struct linereq *lr = file->private_data;
++
++ return call_read_locked(file, buf, count, f_ps, lr->gdev,
++ linereq_read_unlocked);
++}
++
+ static void linereq_free(struct linereq *lr)
+ {
+ unsigned int i;
+@@ -1710,12 +1799,15 @@ struct lineevent_state {
+ (GPIOEVENT_REQUEST_RISING_EDGE | \
+ GPIOEVENT_REQUEST_FALLING_EDGE)
+
+-static __poll_t lineevent_poll(struct file *file,
+- struct poll_table_struct *wait)
++static __poll_t lineevent_poll_unlocked(struct file *file,
++ struct poll_table_struct *wait)
+ {
+ struct lineevent_state *le = file->private_data;
+ __poll_t events = 0;
+
++ if (!le->gdev->chip)
++ return EPOLLHUP | EPOLLERR;
++
+ poll_wait(file, &le->wait, wait);
+
+ if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
+@@ -1724,15 +1816,21 @@ static __poll_t lineevent_poll(struct file *file,
+ return events;
+ }
+
++static __poll_t lineevent_poll(struct file *file,
++ struct poll_table_struct *wait)
++{
++ struct lineevent_state *le = file->private_data;
++
++ return call_poll_locked(file, wait, le->gdev, lineevent_poll_unlocked);
++}
++
+ struct compat_gpioeevent_data {
+ compat_u64 timestamp;
+ u32 id;
+ };
+
+-static ssize_t lineevent_read(struct file *file,
+- char __user *buf,
+- size_t count,
+- loff_t *f_ps)
++static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf,
++ size_t count, loff_t *f_ps)
+ {
+ struct lineevent_state *le = file->private_data;
+ struct gpioevent_data ge;
+@@ -1740,6 +1838,9 @@ static ssize_t lineevent_read(struct file *file,
+ ssize_t ge_size;
+ int ret;
+
++ if (!le->gdev->chip)
++ return -ENODEV;
++
+ /*
+ * When compatible system call is being used the struct gpioevent_data,
+ * in case of at least ia32, has different size due to the alignment
+@@ -1797,6 +1898,15 @@ static ssize_t lineevent_read(struct file *file,
+ return bytes_read;
+ }
+
++static ssize_t lineevent_read(struct file *file, char __user *buf,
++ size_t count, loff_t *f_ps)
++{
++ struct lineevent_state *le = file->private_data;
++
++ return call_read_locked(file, buf, count, f_ps, le->gdev,
++ lineevent_read_unlocked);
++}
++
+ static void lineevent_free(struct lineevent_state *le)
+ {
+ if (le->irq)
+@@ -1814,13 +1924,16 @@ static int lineevent_release(struct inode *inode, struct file *file)
+ return 0;
+ }
+
+-static long lineevent_ioctl(struct file *file, unsigned int cmd,
+- unsigned long arg)
++static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd,
++ unsigned long arg)
+ {
+ struct lineevent_state *le = file->private_data;
+ void __user *ip = (void __user *)arg;
+ struct gpiohandle_data ghd;
+
++ if (!le->gdev->chip)
++ return -ENODEV;
++
+ /*
+ * We can get the value for an event line but not set it,
+ * because it is input by definition.
+@@ -1843,6 +1956,15 @@ static long lineevent_ioctl(struct file *file, unsigned int cmd,
+ return -EINVAL;
+ }
+
++static long lineevent_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ struct lineevent_state *le = file->private_data;
++
++ return call_ioctl_locked(file, cmd, arg, le->gdev,
++ lineevent_ioctl_unlocked);
++}
++
+ #ifdef CONFIG_COMPAT
+ static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+@@ -2401,12 +2523,15 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
+ return NOTIFY_OK;
+ }
+
+-static __poll_t lineinfo_watch_poll(struct file *file,
+- struct poll_table_struct *pollt)
++static __poll_t lineinfo_watch_poll_unlocked(struct file *file,
++ struct poll_table_struct *pollt)
+ {
+ struct gpio_chardev_data *cdev = file->private_data;
+ __poll_t events = 0;
+
++ if (!cdev->gdev->chip)
++ return EPOLLHUP | EPOLLERR;
++
+ poll_wait(file, &cdev->wait, pollt);
+
+ if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
+@@ -2416,8 +2541,17 @@ static __poll_t lineinfo_watch_poll(struct file *file,
+ return events;
+ }
+
+-static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
+- size_t count, loff_t *off)
++static __poll_t lineinfo_watch_poll(struct file *file,
++ struct poll_table_struct *pollt)
++{
++ struct gpio_chardev_data *cdev = file->private_data;
++
++ return call_poll_locked(file, pollt, cdev->gdev,
++ lineinfo_watch_poll_unlocked);
++}
++
++static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf,
++ size_t count, loff_t *off)
+ {
+ struct gpio_chardev_data *cdev = file->private_data;
+ struct gpio_v2_line_info_changed event;
+@@ -2425,6 +2559,9 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
+ int ret;
+ size_t event_size;
+
++ if (!cdev->gdev->chip)
++ return -ENODEV;
++
+ #ifndef CONFIG_GPIO_CDEV_V1
+ event_size = sizeof(struct gpio_v2_line_info_changed);
+ if (count < event_size)
+@@ -2492,6 +2629,15 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
+ return bytes_read;
+ }
+
++static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
++ size_t count, loff_t *off)
++{
++ struct gpio_chardev_data *cdev = file->private_data;
++
++ return call_read_locked(file, buf, count, off, cdev->gdev,
++ lineinfo_watch_read_unlocked);
++}
++
+ /**
+ * gpio_chrdev_open() - open the chardev for ioctl operations
+ * @inode: inode for this chardev
+@@ -2505,13 +2651,17 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
+ struct gpio_chardev_data *cdev;
+ int ret = -ENOMEM;
+
++ down_read(&gdev->sem);
++
+ /* Fail on open if the backing gpiochip is gone */
+- if (!gdev->chip)
+- return -ENODEV;
++ if (!gdev->chip) {
++ ret = -ENODEV;
++ goto out_unlock;
++ }
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+- return -ENOMEM;
++ goto out_unlock;
+
+ cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
+ if (!cdev->watched_lines)
+@@ -2534,6 +2684,8 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
+ if (ret)
+ goto out_unregister_notifier;
+
++ up_read(&gdev->sem);
++
+ return ret;
+
+ out_unregister_notifier:
+@@ -2543,6 +2695,8 @@ out_free_bitmap:
+ bitmap_free(cdev->watched_lines);
+ out_free_cdev:
+ kfree(cdev);
++out_unlock:
++ up_read(&gdev->sem);
+ return ret;
+ }
+
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index a70522aef3557..5974cfc61b417 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -735,6 +735,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&gdev->notifier);
++ init_rwsem(&gdev->sem);
+
+ #ifdef CONFIG_PINCTRL
+ INIT_LIST_HEAD(&gdev->pin_ranges);
+@@ -875,6 +876,8 @@ void gpiochip_remove(struct gpio_chip *gc)
+ unsigned long flags;
+ unsigned int i;
+
++ down_write(&gdev->sem);
++
+ /* FIXME: should the legacy sysfs handling be moved to gpio_device? */
+ gpiochip_sysfs_unregister(gdev);
+ gpiochip_free_hogs(gc);
+@@ -909,6 +912,7 @@ void gpiochip_remove(struct gpio_chip *gc)
+ * gone.
+ */
+ gcdev_unregister(gdev);
++ up_write(&gdev->sem);
+ put_device(&gdev->dev);
+ }
+ EXPORT_SYMBOL_GPL(gpiochip_remove);
+diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
+index d900ecdbac46d..9ad68a0adf4a8 100644
+--- a/drivers/gpio/gpiolib.h
++++ b/drivers/gpio/gpiolib.h
+@@ -15,6 +15,7 @@
+ #include <linux/device.h>
+ #include <linux/module.h>
+ #include <linux/cdev.h>
++#include <linux/rwsem.h>
+
+ #define GPIOCHIP_NAME "gpiochip"
+
+@@ -39,6 +40,9 @@
+ * @list: links gpio_device:s together for traversal
+ * @notifier: used to notify subscribers about lines being requested, released
+ * or reconfigured
++ * @sem: protects the structure from a NULL-pointer dereference of @chip by
++ * user-space operations when the device gets unregistered during
++ * a hot-unplug event
+ * @pin_ranges: range of pins served by the GPIO driver
+ *
+ * This state container holds most of the runtime variable data
+@@ -60,6 +64,7 @@ struct gpio_device {
+ void *data;
+ struct list_head list;
+ struct blocking_notifier_head notifier;
++ struct rw_semaphore sem;
+
+ #ifdef CONFIG_PINCTRL
+ /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 1f76e27f1a354..fe87b3402f06a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -2256,7 +2256,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
+
+ ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
+ if (ret) {
+- kfree(mem);
++ kfree(*mem);
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+index e363f56c72af1..30c28a69e847d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+@@ -317,6 +317,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
+
+ if (!found)
+ return false;
++ pci_dev_put(pdev);
+
+ adev->bios = kmalloc(size, GFP_KERNEL);
+ if (!adev->bios) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index f1e9663b40510..913f22d41673d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2462,6 +2462,11 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
+ if (!amdgpu_sriov_vf(adev)) {
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+
++ if (WARN_ON(!hive)) {
++ r = -ENOENT;
++ goto init_failed;
++ }
++
+ if (!hive->reset_domain ||
+ !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
+ r = -ENOENT;
+@@ -5027,6 +5032,8 @@ static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
+ pm_runtime_enable(&(p->dev));
+ pm_runtime_resume(&(p->dev));
+ }
++
++ pci_dev_put(p);
+ }
+
+ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
+@@ -5065,6 +5072,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
+
+ if (expires < ktime_get_mono_fast_ns()) {
+ dev_warn(adev->dev, "failed to suspend display audio\n");
++ pci_dev_put(p);
+ /* TODO: abort the succeeding gpu reset? */
+ return -ETIMEDOUT;
+ }
+@@ -5072,6 +5080,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
+
+ pm_runtime_disable(&(p->dev));
+
++ pci_dev_put(p);
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+index 49c4347d154ce..2b9d806e23afb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+@@ -75,6 +75,8 @@ struct amdgpu_vf_error_buffer {
+ uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ };
+
++enum idh_request;
++
+ /**
+ * struct amdgpu_virt_ops - amdgpu device virt operations
+ */
+@@ -84,7 +86,8 @@ struct amdgpu_virt_ops {
+ int (*req_init_data)(struct amdgpu_device *adev);
+ int (*reset_gpu)(struct amdgpu_device *adev);
+ int (*wait_reset)(struct amdgpu_device *adev);
+- void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
++ void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
++ u32 data1, u32 data2, u32 data3);
+ };
+
+ /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+index 47159e9a08848..4b9e7b050ccd2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+@@ -386,7 +386,6 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
+ if (ret) {
+ dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
+ kobject_put(&hive->kobj);
+- kfree(hive);
+ hive = NULL;
+ goto pro_end;
+ }
+@@ -410,7 +409,6 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
+ dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
+ ret = -ENOMEM;
+ kobject_put(&hive->kobj);
+- kfree(hive);
+ hive = NULL;
+ goto pro_end;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index b3fba8dea63ca..6853b93ac82e7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -82,10 +82,10 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode =
+ /* Navi1x */
+ static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
+ {
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+@@ -100,10 +100,10 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
+ /* Sienna Cichlid */
+ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
+ {
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+@@ -125,10 +125,10 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
+
+ static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
+ {
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+@@ -149,7 +149,7 @@ static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
+
+ /* Beige Goby*/
+ static const struct amdgpu_video_codec_info bg_video_codecs_decode_array[] = {
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ };
+@@ -166,7 +166,7 @@ static const struct amdgpu_video_codecs bg_video_codecs_encode = {
+
+ /* Yellow Carp*/
+ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index e3b2b6b4f1a66..7cd17dda32ceb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -103,10 +103,10 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode =
+ /* Vega */
+ static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
+ {
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ };
+@@ -120,10 +120,10 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode =
+ /* Raven */
+ static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
+ {
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
+@@ -138,10 +138,10 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode =
+ /* Renoir, Arcturus */
+ static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
+ {
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index e08044008186e..8b297ade69a24 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -61,7 +61,7 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode =
+
+ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] =
+ {
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index f0b01c8dc4a6b..f72c013d3a5b0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -42,39 +42,6 @@
+ #include "dm_helpers.h"
+ #include "ddc_service_types.h"
+
+-struct monitor_patch_info {
+- unsigned int manufacturer_id;
+- unsigned int product_id;
+- void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param);
+- unsigned int patch_param;
+-};
+-static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param);
+-
+-static const struct monitor_patch_info monitor_patch_table[] = {
+-{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15},
+-{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15},
+-};
+-
+-static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param)
+-{
+- if (edid_caps)
+- edid_caps->panel_patch.max_dsc_target_bpp_limit = param;
+-}
+-
+-static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps)
+-{
+- int i, ret = 0;
+-
+- for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++)
+- if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id)
+- && (edid_caps->product_id == monitor_patch_table[i].product_id)) {
+- monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param);
+- ret++;
+- }
+-
+- return ret;
+-}
+-
+ /* dm_helpers_parse_edid_caps
+ *
+ * Parse edid caps
+@@ -149,8 +116,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
+ kfree(sads);
+ kfree(sadb);
+
+- amdgpu_dm_patch_edid_caps(edid_caps);
+-
+ return result;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index e0c8d6f09bb4b..074e70a5c458e 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -462,6 +462,7 @@ static enum bp_result get_gpio_i2c_info(
+ uint32_t count = 0;
+ unsigned int table_index = 0;
+ bool find_valid = false;
++ struct atom_gpio_pin_assignment *pin;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+@@ -489,20 +490,17 @@ static enum bp_result get_gpio_i2c_info(
+ - sizeof(struct atom_common_table_header))
+ / sizeof(struct atom_gpio_pin_assignment);
+
++ pin = (struct atom_gpio_pin_assignment *) header->gpio_pin;
++
+ for (table_index = 0; table_index < count; table_index++) {
+- if (((record->i2c_id & I2C_HW_CAP) == (
+- header->gpio_pin[table_index].gpio_id &
+- I2C_HW_CAP)) &&
+- ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) ==
+- (header->gpio_pin[table_index].gpio_id &
+- I2C_HW_ENGINE_ID_MASK)) &&
+- ((record->i2c_id & I2C_HW_LANE_MUX) ==
+- (header->gpio_pin[table_index].gpio_id &
+- I2C_HW_LANE_MUX))) {
++ if (((record->i2c_id & I2C_HW_CAP) == (pin->gpio_id & I2C_HW_CAP)) &&
++ ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == (pin->gpio_id & I2C_HW_ENGINE_ID_MASK)) &&
++ ((record->i2c_id & I2C_HW_LANE_MUX) == (pin->gpio_id & I2C_HW_LANE_MUX))) {
+ /* still valid */
+ find_valid = true;
+ break;
+ }
++ pin = (struct atom_gpio_pin_assignment *)((uint8_t *)pin + sizeof(struct atom_gpio_pin_assignment));
+ }
+
+ /* If we don't find the entry that we are looking for then
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+index 6f77d8e538ab1..9eb9fe5b8d2c5 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+@@ -438,7 +438,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
+ }
+
+ if (!new_clocks->dtbclk_en) {
+- new_clocks->ref_dtbclk_khz = 0;
++ new_clocks->ref_dtbclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
+ }
+
+ /* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 997ab031f816d..5260ad6de8038 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1070,6 +1070,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+ int i, j;
+ struct dc_state *dangling_context = dc_create_state(dc);
+ struct dc_state *current_ctx;
++ struct pipe_ctx *pipe;
+
+ if (dangling_context == NULL)
+ return;
+@@ -1112,6 +1113,16 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+ }
+
+ if (should_disable && old_stream) {
++ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++ /* When disabling plane for a phantom pipe, we must turn on the
++ * phantom OTG so the disable programming gets the double buffer
++ * update. Otherwise the pipe will be left in a partially disabled
++ * state that can result in underflow or hang when enabling it
++ * again for different use.
++ */
++ if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
++ pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
++ }
+ dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+ disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
+
+@@ -1760,6 +1771,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ context->stream_count == 0)
+ dc->hwss.prepare_bandwidth(dc, context);
+
++ /* When SubVP is active, all HW programming must be done while
++ * SubVP lock is acquired
++ */
++ if (dc->hwss.subvp_pipe_control_lock)
++ dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
++
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
+@@ -1787,9 +1804,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
+ }
+
+- if (dc->hwss.subvp_pipe_control_lock)
+- dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+-
+ result = dc->hwss.apply_ctx_to_hw(dc, context);
+
+ if (result != DC_OK) {
+@@ -3576,7 +3590,6 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
+
+ struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
+ bool force_minimal_pipe_splitting = false;
+- uint32_t i;
+
+ *is_plane_addition = false;
+
+@@ -3608,27 +3621,11 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
+ }
+ }
+
+- /* For SubVP pipe split case when adding MPO video
+- * we need to add a minimal transition. In this case
+- * there will be 2 streams (1 main stream, 1 phantom
+- * stream).
++ /* For SubVP when adding MPO video we need to add a minimal transition.
+ */
+- if (cur_stream_status &&
+- dc->current_state->stream_count == 2 &&
+- stream->mall_stream_config.type == SUBVP_MAIN) {
+- bool is_pipe_split = false;
+-
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+- if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream &&
+- (dc->current_state->res_ctx.pipe_ctx[i].bottom_pipe ||
+- dc->current_state->res_ctx.pipe_ctx[i].next_odm_pipe)) {
+- is_pipe_split = true;
+- break;
+- }
+- }
+-
++ if (cur_stream_status && stream->mall_stream_config.type == SUBVP_MAIN) {
+ /* determine if minimal transition is required due to SubVP*/
+- if (surface_count > 0 && is_pipe_split) {
++ if (surface_count > 0) {
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+@@ -3650,10 +3647,32 @@ static bool commit_minimal_transition_state(struct dc *dc,
+ bool temp_subvp_policy;
+ enum dc_status ret = DC_ERROR_UNEXPECTED;
+ unsigned int i, j;
++ unsigned int pipe_in_use = 0;
+
+ if (!transition_context)
+ return false;
+
++ /* check current pipes in use*/
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
++
++ if (pipe->plane_state)
++ pipe_in_use++;
++ }
++
++	/* When the OS adds a new surface while all of the pipes are already in use for ODM
++	 * combine and MPC split, commit_minimal_transition_state is needed to transition safely.
++	 * After the OS exits MPO it goes back to using ODM and MPC split across all of the
++	 * pipes, so it must be called again. Otherwise return true to skip.
++	 *
++	 * This reduces the scenarios that need dc_commit_state_no_check at flip time, especially
++	 * entering/exiting MPO while DCN still has enough resources.
++	 */
++ if (pipe_in_use != dc->res_pool->pipe_count) {
++ dc_release_state(transition_context);
++ return true;
++ }
++
+ if (!dc->config.is_vmin_only_asic) {
+ tmp_mpc_policy = dc->debug.pipe_split_policy;
+ dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+index fc6aa098bda06..8db9f75144662 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+@@ -1128,6 +1128,7 @@ struct resource_pool *dce60_create_resource_pool(
+ if (dce60_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
++ kfree(pool);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+@@ -1325,6 +1326,7 @@ struct resource_pool *dce61_create_resource_pool(
+ if (dce61_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
++ kfree(pool);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+@@ -1518,6 +1520,7 @@ struct resource_pool *dce64_create_resource_pool(
+ if (dce64_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
++ kfree(pool);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index b28025960050c..5825e6f412bd7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -1137,6 +1137,7 @@ struct resource_pool *dce80_create_resource_pool(
+ if (dce80_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
++ kfree(pool);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+@@ -1336,6 +1337,7 @@ struct resource_pool *dce81_create_resource_pool(
+ if (dce81_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
++ kfree(pool);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 11e4c4e469473..c06538c37a11f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -867,6 +867,32 @@ static void false_optc_underflow_wa(
+ tg->funcs->clear_optc_underflow(tg);
+ }
+
++static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
++{
++ struct pipe_ctx *other_pipe;
++ int vready_offset = pipe->pipe_dlg_param.vready_offset;
++
++ /* Always use the largest vready_offset of all connected pipes */
++ for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++ }
++ for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++ }
++ for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++ }
++ for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++ }
++
++ return vready_offset;
++}
++
+ enum dc_status dcn10_enable_stream_timing(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+@@ -910,7 +936,7 @@ enum dc_status dcn10_enable_stream_timing(
+ pipe_ctx->stream_res.tg->funcs->program_timing(
+ pipe_ctx->stream_res.tg,
+ &stream->timing,
+- pipe_ctx->pipe_dlg_param.vready_offset,
++ calculate_vready_offset_for_group(pipe_ctx),
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width,
+@@ -2900,7 +2926,7 @@ void dcn10_program_pipe(
+
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+- pipe_ctx->pipe_dlg_param.vready_offset,
++ calculate_vready_offset_for_group(pipe_ctx),
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index a7e0001a8f46d..f348bc15a9256 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1616,6 +1616,31 @@ static void dcn20_update_dchubp_dpp(
+ hubp->funcs->phantom_hubp_post_enable(hubp);
+ }
+
++static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
++{
++ struct pipe_ctx *other_pipe;
++ int vready_offset = pipe->pipe_dlg_param.vready_offset;
++
++ /* Always use the largest vready_offset of all connected pipes */
++ for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++ }
++ for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++ }
++ for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++ }
++ for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++ }
++
++ return vready_offset;
++}
+
+ static void dcn20_program_pipe(
+ struct dc *dc,
+@@ -1634,16 +1659,14 @@ static void dcn20_program_pipe(
+ && !pipe_ctx->prev_odm_pipe) {
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+- pipe_ctx->pipe_dlg_param.vready_offset,
++ calculate_vready_offset_for_group(pipe_ctx),
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width);
+
+ if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+- pipe_ctx->stream_res.tg->funcs->wait_for_state(
+- pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+- pipe_ctx->stream_res.tg->funcs->wait_for_state(
+- pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ }
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+@@ -2037,7 +2060,7 @@ bool dcn20_update_bandwidth(
+
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+- pipe_ctx->pipe_dlg_param.vready_offset,
++ calculate_vready_offset_for_group(pipe_ctx),
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+index df4f251191424..e4472c6be6c32 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+@@ -225,11 +225,7 @@ static void dccg32_set_dtbclk_dto(
+ } else {
+ REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLK_DTO_ENABLE[params->otg_inst], 0,
+- PIPE_DTO_SRC_SEL[params->otg_inst], 1);
+- if (params->is_hdmi)
+- REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+- PIPE_DTO_SRC_SEL[params->otg_inst], 0);
+-
++ PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 0 : 1);
+ REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
+ REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+index d1598e3131f66..33ab6fdc36175 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -1901,7 +1901,7 @@ int dcn32_populate_dml_pipes_from_context(
+
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ if (context->stream_count == 1 &&
+- context->stream_status[0].plane_count <= 1 &&
++ context->stream_status[0].plane_count == 1 &&
+ !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
+ is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
+ pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index 2abe3967f7fbd..d1bf49d207de4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -531,9 +531,11 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
+ unsigned int i, pipe_idx;
+ struct pipe_ctx *pipe;
+ uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines;
++ unsigned int num_dpp;
+ unsigned int vlevel = context->bw_ctx.dml.vba.VoltageLevel;
+ unsigned int dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ unsigned int socclk = context->bw_ctx.dml.vba.SOCCLKPerState[vlevel];
++ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+
+ dc_assert_fp_enabled();
+
+@@ -569,6 +571,11 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
+ phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +
+ pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines;
+
++ // W/A for DCC corruption with certain high resolution timings.
++	// Determining if pipesplit is used. If so, add meta_row_height to the phantom vactive.
++ num_dpp = vba->NoOfDPP[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]];
++ phantom_vactive += num_dpp > 1 ? vba->meta_row_height[vba->pipe_plane[pipe_idx]] : 0;
++
+ // For backporch of phantom pipe, use vstartup of the main pipe
+ phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+index a40ead44778af..d18162e9ed1da 100644
+--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+@@ -354,7 +354,8 @@ struct amd_pm_funcs {
+ int (*get_power_profile_mode)(void *handle, char *buf);
+ int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
+ int (*set_fine_grain_clk_vol)(void *handle, uint32_t type, long *input, uint32_t size);
+- int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
++ int (*odn_edit_dpm_table)(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
++ long *input, uint32_t size);
+ int (*set_mp1_state)(void *handle, enum pp_mp1_state mp1_state);
+ int (*smu_i2c_bus_access)(void *handle, bool acquire);
+ int (*gfx_state_change_set)(void *handle, uint32_t state);
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+index ec055858eb95a..1159ae114dd02 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+@@ -838,7 +838,8 @@ static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, u
+ return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
+ }
+
+-static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
++static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
++ long *input, uint32_t size)
+ {
+ struct pp_hwmgr *hwmgr = handle;
+
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+index 67d7da0b6fed5..1d829402cd2e2 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+@@ -75,8 +75,10 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
+ for (i = 0; i < table_entries; i++) {
+ result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state);
+ if (result) {
++ kfree(hwmgr->current_ps);
+ kfree(hwmgr->request_ps);
+ kfree(hwmgr->ps);
++ hwmgr->current_ps = NULL;
+ hwmgr->request_ps = NULL;
+ hwmgr->ps = NULL;
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+index 190af79f3236f..dad3e3741a4e8 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+@@ -67,21 +67,22 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+ int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
+ uint32_t *speed)
+ {
+- struct amdgpu_device *adev = hwmgr->adev;
+- uint32_t duty100, duty;
+- uint64_t tmp64;
++ uint32_t current_rpm;
++ uint32_t percent = 0;
+
+- duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+- CG_FDO_CTRL1, FMAX_DUTY100);
+- duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
+- CG_THERMAL_STATUS, FDO_PWM_DUTY);
++ if (hwmgr->thermal_controller.fanInfo.bNoFan)
++ return 0;
+
+- if (!duty100)
+- return -EINVAL;
++ if (vega10_get_current_rpm(hwmgr, &current_rpm))
++ return -1;
++
++ if (hwmgr->thermal_controller.
++ advanceFanControlParameters.usMaxFanRPM != 0)
++ percent = current_rpm * 255 /
++ hwmgr->thermal_controller.
++ advanceFanControlParameters.usMaxFanRPM;
+
+- tmp64 = (uint64_t)duty * 255;
+- do_div(tmp64, duty100);
+- *speed = MIN((uint32_t)tmp64, 255);
++ *speed = MIN(percent, 255);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+index 97b3ad3690467..b30684c84e20e 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+@@ -2961,7 +2961,8 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ data->od8_settings.od8_settings_array;
+ OverDriveTable_t *od_table =
+ &(data->smc_state_table.overdrive_table);
+- int32_t input_index, input_clk, input_vol, i;
++ int32_t input_clk, input_vol, i;
++ uint32_t input_index;
+ int od8_id;
+ int ret;
+
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+index 70b560737687e..ad5f6a15a1d7d 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+@@ -1588,6 +1588,10 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
+ if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
+ return false;
+
++ /* return true if ASIC is in BACO state already */
++ if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
++ return true;
++
+ /* Arcturus does not support this bit mask */
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+ !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index d74debc584f89..39deb06a86ba3 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -1436,7 +1436,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
+
+ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf)
+ {
+- DpmActivityMonitorCoeffIntExternal_t activity_monitor_external[PP_SMC_POWER_PROFILE_COUNT];
++ DpmActivityMonitorCoeffIntExternal_t *activity_monitor_external;
+ uint32_t i, j, size = 0;
+ int16_t workload_type = 0;
+ int result = 0;
+@@ -1444,6 +1444,12 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
+ if (!buf)
+ return -EINVAL;
+
++ activity_monitor_external = kcalloc(PP_SMC_POWER_PROFILE_COUNT,
++ sizeof(*activity_monitor_external),
++ GFP_KERNEL);
++ if (!activity_monitor_external)
++ return -ENOMEM;
++
+ size += sysfs_emit_at(buf, size, " ");
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++)
+ size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i],
+@@ -1456,15 +1462,17 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ i);
+- if (workload_type < 0)
+- return -EINVAL;
++ if (workload_type < 0) {
++ result = -EINVAL;
++ goto out;
++ }
+
+ result = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
+ (void *)(&activity_monitor_external[i]), false);
+ if (result) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+- return result;
++ goto out;
+ }
+ }
+
+@@ -1492,7 +1500,10 @@ do { \
+ PRINT_DPM_MONITOR(Fclk_BoosterFreq);
+ #undef PRINT_DPM_MONITOR
+
+- return size;
++ result = size;
++out:
++ kfree(activity_monitor_external);
++ return result;
+ }
+
+ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
+index 94de73cbeb2dd..17445800248dd 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
+@@ -402,7 +402,8 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+
+ void adv7533_dsi_power_on(struct adv7511 *adv);
+ void adv7533_dsi_power_off(struct adv7511 *adv);
+-void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
++enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
++ const struct drm_display_mode *mode);
+ int adv7533_patch_registers(struct adv7511 *adv);
+ int adv7533_patch_cec_registers(struct adv7511 *adv);
+ int adv7533_attach_dsi(struct adv7511 *adv);
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+index f887200e8abc9..78b72739e5c3e 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+@@ -697,7 +697,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
+ }
+
+ static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
+- struct drm_display_mode *mode)
++ const struct drm_display_mode *mode)
+ {
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+@@ -791,9 +791,6 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
+ regmap_update_bits(adv7511->regmap, 0x17,
+ 0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+
+- if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
+- adv7533_mode_set(adv7511, adj_mode);
+-
+ drm_mode_copy(&adv7511->curr_mode, adj_mode);
+
+ /*
+@@ -913,6 +910,18 @@ static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
+ adv7511_mode_set(adv, mode, adj_mode);
+ }
+
++static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
++ const struct drm_display_info *info,
++ const struct drm_display_mode *mode)
++{
++ struct adv7511 *adv = bridge_to_adv7511(bridge);
++
++ if (adv->type == ADV7533 || adv->type == ADV7535)
++ return adv7533_mode_valid(adv, mode);
++ else
++ return adv7511_mode_valid(adv, mode);
++}
++
+ static int adv7511_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+ {
+@@ -960,6 +969,7 @@ static const struct drm_bridge_funcs adv7511_bridge_funcs = {
+ .enable = adv7511_bridge_enable,
+ .disable = adv7511_bridge_disable,
+ .mode_set = adv7511_bridge_mode_set,
++ .mode_valid = adv7511_bridge_mode_valid,
+ .attach = adv7511_bridge_attach,
+ .detect = adv7511_bridge_detect,
+ .get_edid = adv7511_bridge_get_edid,
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+index ef6270806d1d3..258c79d4dab0a 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+@@ -100,26 +100,27 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
+ regmap_write(adv->regmap_cec, 0x27, 0x0b);
+ }
+
+-void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode)
++enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
++ const struct drm_display_mode *mode)
+ {
++ int lanes;
+ struct mipi_dsi_device *dsi = adv->dsi;
+- int lanes, ret;
+-
+- if (adv->num_dsi_lanes != 4)
+- return;
+
+ if (mode->clock > 80000)
+ lanes = 4;
+ else
+ lanes = 3;
+
+- if (lanes != dsi->lanes) {
+- mipi_dsi_detach(dsi);
+- dsi->lanes = lanes;
+- ret = mipi_dsi_attach(dsi);
+- if (ret)
+- dev_err(&dsi->dev, "failed to change host lanes\n");
+- }
++ /*
++ * TODO: add support for dynamic switching of lanes
++ * by using the bridge pre_enable() op . Till then filter
++	 * by using the bridge pre_enable() op. Until then, filter
++	 * out the modes that would need a different number of lanes
++	 * than what was configured in the device tree.
++ if (lanes != dsi->lanes)
++ return MODE_BAD;
++
++ return MODE_OK;
+ }
+
+ int adv7533_patch_registers(struct adv7511 *adv)
+diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
+index dfe4351c9bdd3..99123eec45511 100644
+--- a/drivers/gpu/drm/bridge/ite-it6505.c
++++ b/drivers/gpu/drm/bridge/ite-it6505.c
+@@ -2860,10 +2860,7 @@ static int it6505_bridge_attach(struct drm_bridge *bridge,
+ }
+
+ /* Register aux channel */
+- it6505->aux.name = "DP-AUX";
+- it6505->aux.dev = dev;
+ it6505->aux.drm_dev = bridge->dev;
+- it6505->aux.transfer = it6505_aux_transfer;
+
+ ret = drm_dp_aux_register(&it6505->aux);
+
+@@ -3316,6 +3313,11 @@ static int it6505_i2c_probe(struct i2c_client *client,
+ DRM_DEV_DEBUG_DRIVER(dev, "it6505 device name: %s", dev_name(dev));
+ debugfs_init(it6505);
+
++ it6505->aux.name = "DP-AUX";
++ it6505->aux.dev = dev;
++ it6505->aux.transfer = it6505_aux_transfer;
++ drm_dp_aux_init(&it6505->aux);
++
+ it6505->bridge.funcs = &it6505_bridge_funcs;
+ it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+ it6505->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
+diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+index 98cc3137c0625..02b4a7dc92f5e 100644
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -945,7 +945,6 @@ int drm_atomic_helper_check_crtc_state(struct drm_crtc_state *crtc_state,
+ bool can_disable_primary_planes)
+ {
+ struct drm_device *dev = crtc_state->crtc->dev;
+- struct drm_atomic_state *state = crtc_state->state;
+
+ if (!crtc_state->enable)
+ return 0;
+@@ -956,14 +955,7 @@ int drm_atomic_helper_check_crtc_state(struct drm_crtc_state *crtc_state,
+ struct drm_plane *plane;
+
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+- struct drm_plane_state *plane_state;
+-
+- if (plane->type != DRM_PLANE_TYPE_PRIMARY)
+- continue;
+- plane_state = drm_atomic_get_plane_state(state, plane);
+- if (IS_ERR(plane_state))
+- return PTR_ERR(plane_state);
+- if (plane_state->fb && plane_state->crtc) {
++ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ has_primary_plane = true;
+ break;
+ }
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 4005dab6147d9..b36abfa915813 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -87,6 +87,8 @@ static int oui(u8 first, u8 second, u8 third)
+ #define EDID_QUIRK_FORCE_10BPC (1 << 11)
+ /* Non desktop display (i.e. HMD) */
+ #define EDID_QUIRK_NON_DESKTOP (1 << 12)
++/* Cap the DSC target bitrate to 15bpp */
++#define EDID_QUIRK_CAP_DSC_15BPP (1 << 13)
+
+ #define MICROSOFT_IEEE_OUI 0xca125c
+
+@@ -147,6 +149,12 @@ static const struct edid_quirk {
+ EDID_QUIRK('F', 'C', 'M', 13600, EDID_QUIRK_PREFER_LARGE_75 |
+ EDID_QUIRK_DETAILED_IN_CM),
+
++ /* LG 27GP950 */
++ EDID_QUIRK('G', 'S', 'M', 0x5bbf, EDID_QUIRK_CAP_DSC_15BPP),
++
++ /* LG 27GN950 */
++ EDID_QUIRK('G', 'S', 'M', 0x5b9a, EDID_QUIRK_CAP_DSC_15BPP),
++
+ /* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
+ EDID_QUIRK('L', 'G', 'D', 764, EDID_QUIRK_FORCE_10BPC),
+
+@@ -6166,6 +6174,7 @@ static void drm_reset_display_info(struct drm_connector *connector)
+
+ info->mso_stream_count = 0;
+ info->mso_pixel_overlap = 0;
++ info->max_dsc_bpp = 0;
+ }
+
+ static u32 update_display_info(struct drm_connector *connector,
+@@ -6252,6 +6261,9 @@ out:
+ info->non_desktop = true;
+ }
+
++ if (quirks & EDID_QUIRK_CAP_DSC_15BPP)
++ info->max_dsc_bpp = 15;
++
+ return quirks;
+ }
+
+diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
+index e09331bb3bc73..6242dfbe92402 100644
+--- a/drivers/gpu/drm/drm_fourcc.c
++++ b/drivers/gpu/drm/drm_fourcc.c
+@@ -297,12 +297,12 @@ const struct drm_format_info *__drm_format_info(u32 format)
+ .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_Q410, .depth = 0,
+ .num_planes = 3, .char_per_block = { 2, 2, 2 },
+- .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0,
+- .vsub = 0, .is_yuv = true },
++ .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1,
++ .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_Q401, .depth = 0,
+ .num_planes = 3, .char_per_block = { 2, 2, 2 },
+- .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0,
+- .vsub = 0, .is_yuv = true },
++ .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1,
++ .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2,
+ .char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+index 37018bc55810d..f667e7906d1f4 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+@@ -416,6 +416,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
+ if (gpu->identity.model == chipModel_GC700)
+ gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
+
++ /* These models/revisions don't have the 2D pipe bit */
++ if ((gpu->identity.model == chipModel_GC500 &&
++ gpu->identity.revision <= 2) ||
++ gpu->identity.model == chipModel_GC300)
++ gpu->identity.features |= chipFeatures_PIPE_2D;
++
+ if ((gpu->identity.model == chipModel_GC500 &&
+ gpu->identity.revision < 2) ||
+ (gpu->identity.model == chipModel_GC300 &&
+@@ -449,8 +455,9 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
+ }
+
+- /* GC600 idle register reports zero bits where modules aren't present */
+- if (gpu->identity.model == chipModel_GC600)
++ /* GC600/300 idle register reports zero bits where modules aren't present */
++ if (gpu->identity.model == chipModel_GC600 ||
++ gpu->identity.model == chipModel_GC300)
+ gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
+ VIVS_HI_IDLE_STATE_RA |
+ VIVS_HI_IDLE_STATE_SE |
+diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+index 4d4a715b429d1..2c2b92324a2e9 100644
+--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
++++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+@@ -60,8 +60,9 @@ static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
+ return drm_panel_get_modes(fsl_connector->panel, connector);
+ }
+
+-static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
+- struct drm_display_mode *mode)
++static enum drm_mode_status
++fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
+ {
+ if (mode->hdisplay & 0xf)
+ return MODE_ERROR;
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
+index 28bdb936cd1fc..edbdb949b6ced 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.c
++++ b/drivers/gpu/drm/i915/display/intel_bios.c
+@@ -414,7 +414,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
+ ptrs->lvds_entries++;
+
+ if (size != 0 || ptrs->lvds_entries != 3) {
+- kfree(ptrs);
++ kfree(ptrs_block);
+ return NULL;
+ }
+
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 2b5bc95a8b0df..78b3427471bd7 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -3675,61 +3675,6 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
+ }
+ }
+
+-static void
+-intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
+- const struct intel_crtc_state *crtc_state)
+-{
+- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+- struct drm_device *dev = dig_port->base.base.dev;
+- struct drm_i915_private *dev_priv = to_i915(dev);
+- struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+- enum pipe pipe = crtc->pipe;
+- u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
+-
+- trans_ddi_func_ctl_value = intel_de_read(dev_priv,
+- TRANS_DDI_FUNC_CTL(pipe));
+- trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
+- dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
+-
+- trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
+- TGL_TRANS_DDI_PORT_MASK);
+- trans_conf_value &= ~PIPECONF_ENABLE;
+- dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;
+-
+- intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
+- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
+- trans_ddi_func_ctl_value);
+- intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
+-}
+-
+-static void
+-intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
+- const struct intel_crtc_state *crtc_state)
+-{
+- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+- struct drm_device *dev = dig_port->base.base.dev;
+- struct drm_i915_private *dev_priv = to_i915(dev);
+- enum port port = dig_port->base.port;
+- struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+- enum pipe pipe = crtc->pipe;
+- u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
+-
+- trans_ddi_func_ctl_value = intel_de_read(dev_priv,
+- TRANS_DDI_FUNC_CTL(pipe));
+- trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
+- dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
+-
+- trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
+- TGL_TRANS_DDI_SELECT_PORT(port);
+- trans_conf_value |= PIPECONF_ENABLE;
+- dp_tp_ctl_value |= DP_TP_CTL_ENABLE;
+-
+- intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
+- intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
+- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
+- trans_ddi_func_ctl_value);
+-}
+-
+ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+ {
+@@ -3748,14 +3693,10 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
+ intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
+ link_status);
+
+- intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
+-
+ intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
+
+ intel_dp_phy_pattern_update(intel_dp, crtc_state);
+
+- intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
+-
+ drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
+ intel_dp->train_set, crtc_state->lane_count);
+
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+index 73d9eda1d6b7a..e63329bc80659 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -413,7 +413,7 @@ retry:
+ vma->mmo = mmo;
+
+ if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+- intel_wakeref_auto(&to_gt(i915)->userfault_wakeref,
++ intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref,
+ msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
+
+ if (write) {
+@@ -557,11 +557,13 @@ void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *
+
+ drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
+
+- if (obj->userfault_count) {
+- /* rpm wakeref provide exclusive access */
+- list_del(&obj->userfault_link);
+- obj->userfault_count = 0;
+- }
++ /*
++ * We have exclusive access here via runtime suspend. All other callers
++ * must first grab the rpm wakeref.
++ */
++ GEM_BUG_ON(!obj->userfault_count);
++ list_del(&obj->userfault_link);
++ obj->userfault_count = 0;
+ }
+
+ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
+@@ -587,13 +589,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
+ spin_lock(&obj->mmo.lock);
+ }
+ spin_unlock(&obj->mmo.lock);
+-
+- if (obj->userfault_count) {
+- mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+- list_del(&obj->userfault_link);
+- mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+- obj->userfault_count = 0;
+- }
+ }
+
+ static struct i915_mmap_offset *
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+index 3428f735e786c..8d30db5e678c4 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+@@ -24,7 +24,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
+ {
+ GEM_TRACE("%s\n", dev_name(i915->drm.dev));
+
+- intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0);
++ intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0);
+ flush_workqueue(i915->wq);
+
+ /*
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+index 0d6d640225fc8..be4c081e7e13d 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+@@ -279,7 +279,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
+ struct i915_ttm_tt *i915_tt;
+ int ret;
+
+- if (!obj)
++ if (i915_ttm_is_ghost_object(bo))
+ return NULL;
+
+ i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
+@@ -362,7 +362,7 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
+ {
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+
+- if (!obj)
++ if (i915_ttm_is_ghost_object(bo))
+ return false;
+
+ /*
+@@ -509,18 +509,9 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
+ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
+ {
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+- intel_wakeref_t wakeref = 0;
+-
+- if (bo->resource && likely(obj)) {
+- /* ttm_bo_release() already has dma_resv_lock */
+- if (i915_ttm_cpu_maps_iomem(bo->resource))
+- wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
++ if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
+ __i915_gem_object_pages_fini(obj);
+-
+- if (wakeref)
+- intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
+-
+ i915_ttm_free_cached_io_rsgt(obj);
+ }
+ }
+@@ -628,7 +619,7 @@ static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ int ret;
+
+- if (!obj)
++ if (i915_ttm_is_ghost_object(bo))
+ return;
+
+ ret = i915_ttm_move_notify(bo);
+@@ -661,7 +652,7 @@ static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(mem->bo);
+ bool unknown_state;
+
+- if (!obj)
++ if (i915_ttm_is_ghost_object(mem->bo))
+ return -EINVAL;
+
+ if (!kref_get_unless_zero(&obj->base.refcount))
+@@ -694,7 +685,7 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long base;
+ unsigned int ofs;
+
+- GEM_BUG_ON(!obj);
++ GEM_BUG_ON(i915_ttm_is_ghost_object(bo));
+ GEM_WARN_ON(bo->ttm);
+
+ base = obj->mm.region->iomap.base - obj->mm.region->region.start;
+@@ -994,13 +985,12 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
+ struct vm_area_struct *area = vmf->vma;
+ struct ttm_buffer_object *bo = area->vm_private_data;
+ struct drm_device *dev = bo->base.dev;
+- struct drm_i915_gem_object *obj;
++ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ intel_wakeref_t wakeref = 0;
+ vm_fault_t ret;
+ int idx;
+
+- obj = i915_ttm_to_gem(bo);
+- if (!obj)
++ if (i915_ttm_is_ghost_object(bo))
+ return VM_FAULT_SIGBUS;
+
+ /* Sanity check that we allow writing into this object */
+@@ -1057,16 +1047,19 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ goto out_rpm;
+
+- /* ttm_bo_vm_reserve() already has dma_resv_lock */
++ /*
++ * ttm_bo_vm_reserve() already has dma_resv_lock.
++ * userfault_count is protected by dma_resv lock and rpm wakeref.
++ */
+ if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
+ obj->userfault_count = 1;
+- mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+- list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
+- mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
++ spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
++ list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
++ spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+ }
+
+ if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+- intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref,
++ intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
+ msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
+
+ i915_ttm_adjust_lru(obj);
+@@ -1098,7 +1091,7 @@ static void ttm_vm_open(struct vm_area_struct *vma)
+ struct drm_i915_gem_object *obj =
+ i915_ttm_to_gem(vma->vm_private_data);
+
+- GEM_BUG_ON(!obj);
++ GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data));
+ i915_gem_object_get(obj);
+ }
+
+@@ -1107,7 +1100,7 @@ static void ttm_vm_close(struct vm_area_struct *vma)
+ struct drm_i915_gem_object *obj =
+ i915_ttm_to_gem(vma->vm_private_data);
+
+- GEM_BUG_ON(!obj);
++ GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data));
+ i915_gem_object_put(obj);
+ }
+
+@@ -1128,7 +1121,27 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
+
+ static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
+ {
++ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
++ intel_wakeref_t wakeref = 0;
++
++ assert_object_held_shared(obj);
++
++ if (i915_ttm_cpu_maps_iomem(bo->resource)) {
++ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
++
++ /* userfault_count is protected by obj lock and rpm wakeref. */
++ if (obj->userfault_count) {
++ spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
++ list_del(&obj->userfault_link);
++ spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
++ obj->userfault_count = 0;
++ }
++ }
++
+ ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
++
++ if (wakeref)
++ intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
+ }
+
+ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+index e4842b4296fc2..2a94a99ef76b4 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+@@ -27,19 +27,27 @@ i915_gem_to_ttm(struct drm_i915_gem_object *obj)
+ */
+ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo);
+
++/**
++ * i915_ttm_is_ghost_object - Check if the ttm bo is a ghost object.
++ * @bo: Pointer to the ttm buffer object
++ *
++ * Return: True if the ttm bo is not a i915 object but a ghost ttm object,
++ * False otherwise.
++ */
++static inline bool i915_ttm_is_ghost_object(struct ttm_buffer_object *bo)
++{
++ return bo->destroy != i915_ttm_bo_destroy;
++}
++
+ /**
+ * i915_ttm_to_gem - Convert a struct ttm_buffer_object to an embedding
+ * struct drm_i915_gem_object.
+ *
+- * Return: Pointer to the embedding struct ttm_buffer_object, or NULL
+- * if the object was not an i915 ttm object.
++ * Return: Pointer to the embedding struct ttm_buffer_object.
+ */
+ static inline struct drm_i915_gem_object *
+ i915_ttm_to_gem(struct ttm_buffer_object *bo)
+ {
+- if (bo->destroy != i915_ttm_bo_destroy)
+- return NULL;
+-
+ return container_of(bo, struct drm_i915_gem_object, __do_not_access);
+ }
+
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+index 9a7e50534b84b..f59f812dc6d29 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+@@ -560,7 +560,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
+ bool clear;
+ int ret;
+
+- if (GEM_WARN_ON(!obj)) {
++ if (GEM_WARN_ON(i915_ttm_is_ghost_object(bo))) {
+ ttm_bo_move_null(bo, dst_mem);
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
+index 04e435bce79bd..cbc8b857d5f7a 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine.h
++++ b/drivers/gpu/drm/i915/gt/intel_engine.h
+@@ -348,4 +348,10 @@ intel_engine_get_hung_context(struct intel_engine_cs *engine)
+ return engine->hung_ce;
+ }
+
++u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value);
++u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value);
++u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value);
++u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value);
++u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value);
++
+ #endif /* _INTEL_RINGBUFFER_H_ */
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+index 1f7188129cd1f..83bfeb872bdaa 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+@@ -486,6 +486,17 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
+ engine->logical_mask = BIT(logical_instance);
+ __sprint_engine_name(engine);
+
++ if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
++ __ffs(CCS_MASK(engine->gt)) == engine->instance) ||
++ engine->class == RENDER_CLASS)
++ engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
++
++ /* features common between engines sharing EUs */
++ if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) {
++ engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE;
++ engine->flags |= I915_ENGINE_HAS_EU_PRIORITY;
++ }
++
+ engine->props.heartbeat_interval_ms =
+ CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
+ engine->props.max_busywait_duration_ns =
+@@ -498,19 +509,28 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
+ CONFIG_DRM_I915_TIMESLICE_DURATION;
+
+ /* Override to uninterruptible for OpenCL workloads. */
+- if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS)
++ if (GRAPHICS_VER(i915) == 12 && (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE))
+ engine->props.preempt_timeout_ms = 0;
+
+- if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
+- __ffs(CCS_MASK(engine->gt)) == engine->instance) ||
+- engine->class == RENDER_CLASS)
+- engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
+-
+- /* features common between engines sharing EUs */
+- if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) {
+- engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE;
+- engine->flags |= I915_ENGINE_HAS_EU_PRIORITY;
+- }
++ /* Cap properties according to any system limits */
++#define CLAMP_PROP(field) \
++ do { \
++ u64 clamp = intel_clamp_##field(engine, engine->props.field); \
++ if (clamp != engine->props.field) { \
++ drm_notice(&engine->i915->drm, \
++ "Warning, clamping %s to %lld to prevent overflow\n", \
++ #field, clamp); \
++ engine->props.field = clamp; \
++ } \
++ } while (0)
++
++ CLAMP_PROP(heartbeat_interval_ms);
++ CLAMP_PROP(max_busywait_duration_ns);
++ CLAMP_PROP(preempt_timeout_ms);
++ CLAMP_PROP(stop_timeout_ms);
++ CLAMP_PROP(timeslice_duration_ms);
++
++#undef CLAMP_PROP
+
+ engine->defaults = engine->props; /* never to change again */
+
+@@ -534,6 +554,55 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
+ return 0;
+ }
+
++u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value)
++{
++ value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
++
++ return value;
++}
++
++u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value)
++{
++ value = min(value, jiffies_to_nsecs(2));
++
++ return value;
++}
++
++u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value)
++{
++ /*
++ * NB: The GuC API only supports 32bit values. However, the limit is further
++ * reduced due to internal calculations which would otherwise overflow.
++ */
++ if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
++ value = min_t(u64, value, guc_policy_max_preempt_timeout_ms());
++
++ value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
++
++ return value;
++}
++
++u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value)
++{
++ value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
++
++ return value;
++}
++
++u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value)
++{
++ /*
++ * NB: The GuC API only supports 32bit values. However, the limit is further
++ * reduced due to internal calculations which would otherwise overflow.
++ */
++ if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
++ value = min_t(u64, value, guc_policy_max_exec_quantum_ms());
++
++ value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
++
++ return value;
++}
++
+ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
+ {
+ struct drm_i915_private *i915 = engine->i915;
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
+index 7caa3412a2446..c7db49749a636 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt.c
+@@ -40,8 +40,6 @@ void intel_gt_common_init_early(struct intel_gt *gt)
+ {
+ spin_lock_init(gt->irq_lock);
+
+- INIT_LIST_HEAD(&gt->lmem_userfault_list);
+- mutex_init(&gt->lmem_userfault_lock);
+ INIT_LIST_HEAD(&gt->closed_vma);
+ spin_lock_init(&gt->closed_lock);
+
+@@ -812,7 +810,6 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
+ }
+
+ intel_uncore_init_early(gt->uncore, gt);
+- intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore->rpm);
+
+ ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
+ if (ret)
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
+index f19c2de77ff66..184ee9b11a4da 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
++++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
+@@ -141,20 +141,6 @@ struct intel_gt {
+ struct intel_wakeref wakeref;
+ atomic_t user_wakeref;
+
+- /**
+- * Protects access to lmem usefault list.
+- * It is required, if we are outside of the runtime suspend path,
+- * access to @lmem_userfault_list requires always first grabbing the
+- * runtime pm, to ensure we can't race against runtime suspend.
+- * Once we have that we also need to grab @lmem_userfault_lock,
+- * at which point we have exclusive access.
+- * The runtime suspend path is special since it doesn't really hold any locks,
+- * but instead has exclusive access by virtue of all other accesses requiring
+- * holding the runtime pm wakeref.
+- */
+- struct mutex lmem_userfault_lock;
+- struct list_head lmem_userfault_list;
+-
+ struct list_head closed_vma;
+ spinlock_t closed_lock; /* guards the list of closed_vma */
+
+@@ -170,9 +156,6 @@ struct intel_gt {
+ */
+ intel_wakeref_t awake;
+
+- /* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
+- struct intel_wakeref_auto userfault_wakeref;
+-
+ u32 clock_frequency;
+ u32 clock_period_ns;
+
+diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
+index 9670310562029..f2d9858d827c2 100644
+--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
++++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
+@@ -144,7 +144,7 @@ max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+ {
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+- unsigned long long duration;
++ unsigned long long duration, clamped;
+ int err;
+
+ /*
+@@ -168,7 +168,8 @@ max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
+ if (err)
+ return err;
+
+- if (duration > jiffies_to_nsecs(2))
++ clamped = intel_clamp_max_busywait_duration_ns(engine, duration);
++ if (duration != clamped)
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
+@@ -203,7 +204,7 @@ timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+ {
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+- unsigned long long duration;
++ unsigned long long duration, clamped;
+ int err;
+
+ /*
+@@ -218,7 +219,8 @@ timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
+ if (err)
+ return err;
+
+- if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
++ clamped = intel_clamp_timeslice_duration_ms(engine, duration);
++ if (duration != clamped)
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
+@@ -256,7 +258,7 @@ stop_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+ {
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+- unsigned long long duration;
++ unsigned long long duration, clamped;
+ int err;
+
+ /*
+@@ -272,7 +274,8 @@ stop_store(struct kobject *kobj, struct kobj_attribute *attr,
+ if (err)
+ return err;
+
+- if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
++ clamped = intel_clamp_stop_timeout_ms(engine, duration);
++ if (duration != clamped)
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.stop_timeout_ms, duration);
+@@ -306,7 +309,7 @@ preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+ {
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+- unsigned long long timeout;
++ unsigned long long timeout, clamped;
+ int err;
+
+ /*
+@@ -322,7 +325,8 @@ preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
+ if (err)
+ return err;
+
+- if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
++ clamped = intel_clamp_preempt_timeout_ms(engine, timeout);
++ if (timeout != clamped)
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
+@@ -362,7 +366,7 @@ heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+ {
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+- unsigned long long delay;
++ unsigned long long delay, clamped;
+ int err;
+
+ /*
+@@ -379,7 +383,8 @@ heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
+ if (err)
+ return err;
+
+- if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
++ clamped = intel_clamp_heartbeat_interval_ms(engine, delay);
++ if (delay != clamped)
+ return -EINVAL;
+
+ err = intel_engine_set_heartbeat(engine, delay);
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+index 8f11651460131..685ddccc0f26a 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+@@ -165,7 +165,7 @@ static const struct __guc_mmio_reg_descr empty_regs_list[] = {
+ }
+
+ /* List of lists */
+-static struct __guc_mmio_reg_descr_group default_lists[] = {
++static const struct __guc_mmio_reg_descr_group default_lists[] = {
+ MAKE_REGLIST(default_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(default_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS),
+ MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
+@@ -419,6 +419,44 @@ guc_capture_get_device_reglist(struct intel_guc *guc)
+ return default_lists;
+ }
+
++static const char *
++__stringify_type(u32 type)
++{
++ switch (type) {
++ case GUC_CAPTURE_LIST_TYPE_GLOBAL:
++ return "Global";
++ case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
++ return "Class";
++ case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
++ return "Instance";
++ default:
++ break;
++ }
++
++ return "unknown";
++}
++
++static const char *
++__stringify_engclass(u32 class)
++{
++ switch (class) {
++ case GUC_RENDER_CLASS:
++ return "Render";
++ case GUC_VIDEO_CLASS:
++ return "Video";
++ case GUC_VIDEOENHANCE_CLASS:
++ return "VideoEnhance";
++ case GUC_BLITTER_CLASS:
++ return "Blitter";
++ case GUC_COMPUTE_CLASS:
++ return "Compute";
++ default:
++ break;
++ }
++
++ return "unknown";
++}
++
+ static int
+ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ struct guc_mmio_reg *ptr, u16 num_entries)
+@@ -482,32 +520,55 @@ guc_cap_list_num_regs(struct intel_guc_state_capture *gc, u32 owner, u32 type, u
+ return num_regs;
+ }
+
+-int
+-intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+- size_t *size)
++static int
++guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
++ size_t *size, bool is_purpose_est)
+ {
+ struct intel_guc_state_capture *gc = guc->capture;
++ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
+ int num_regs;
+
+- if (!gc->reglists)
++ if (!gc->reglists) {
++ drm_warn(&i915->drm, "GuC-capture: No reglist on this device\n");
+ return -ENODEV;
++ }
+
+ if (cache->is_valid) {
+ *size = cache->size;
+ return cache->status;
+ }
+
++ if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
++ !guc_capture_get_one_list(gc->reglists, owner, type, classid)) {
++ if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
++ drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist Global!\n");
++ else
++ drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist %s(%u):%s(%u)!\n",
++ __stringify_type(type), type,
++ __stringify_engclass(classid), classid);
++ return -ENODATA;
++ }
++
+ num_regs = guc_cap_list_num_regs(gc, owner, type, classid);
++ /* intentional empty lists can exist depending on hw config */
+ if (!num_regs)
+ return -ENODATA;
+
+- *size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
+- (num_regs * sizeof(struct guc_mmio_reg)));
++ if (size)
++ *size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
++ (num_regs * sizeof(struct guc_mmio_reg)));
+
+ return 0;
+ }
+
++int
++intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
++ size_t *size)
++{
++ return guc_capture_getlistsize(guc, owner, type, classid, size, false);
++}
++
+ static void guc_capture_create_prealloc_nodes(struct intel_guc *guc);
+
+ int
+@@ -606,7 +667,7 @@ guc_capture_output_min_size_est(struct intel_guc *guc)
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+- int worst_min_size = 0, num_regs = 0;
++ int worst_min_size = 0;
+ size_t tmp = 0;
+
+ if (!guc->capture)
+@@ -627,21 +688,19 @@ guc_capture_output_min_size_est(struct intel_guc *guc)
+ worst_min_size += sizeof(struct guc_state_capture_group_header_t) +
+ (3 * sizeof(struct guc_state_capture_header_t));
+
+- if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp))
+- num_regs += tmp;
++ if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp, true))
++ worst_min_size += tmp;
+
+- if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+- engine->class, &tmp)) {
+- num_regs += tmp;
++ if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
++ engine->class, &tmp, true)) {
++ worst_min_size += tmp;
+ }
+- if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+- engine->class, &tmp)) {
+- num_regs += tmp;
++ if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
++ engine->class, &tmp, true)) {
++ worst_min_size += tmp;
+ }
+ }
+
+- worst_min_size += (num_regs * sizeof(struct guc_mmio_reg));
+-
+ return worst_min_size;
+ }
+
+@@ -658,15 +717,23 @@ static void check_guc_capture_size(struct intel_guc *guc)
+ int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
+ u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);
+
++ /*
++ * NOTE: min_size is much smaller than the capture region allocation (DG2: <80K vs 1MB)
++	 * Additionally, it's based on the space needed to fit all engines getting reset at once
++	 * within the same G2H handler task slot. This is very unlikely. However, if GuC really
++	 * does run out of space for whatever reason, we will see a separate warning message
++ * when processing the G2H event capture-notification, search for:
++ * INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
++ */
+ if (min_size < 0)
+ drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n",
+ min_size);
+ else if (min_size > buffer_size)
+- drm_warn(&i915->drm, "GuC error state capture buffer is too small: %d < %d\n",
++ drm_warn(&i915->drm, "GuC error state capture buffer maybe small: %d < %d\n",
+ buffer_size, min_size);
+ else if (spare_size > buffer_size)
+- drm_notice(&i915->drm, "GuC error state capture buffer maybe too small: %d < %d (min = %d)\n",
+- buffer_size, spare_size, min_size);
++ drm_dbg(&i915->drm, "GuC error state capture buffer lacks spare size: %d < %d (min = %d)\n",
++ buffer_size, spare_size, min_size);
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+index 323b055e5db97..502e7cb5a3025 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+@@ -305,6 +305,27 @@ struct guc_update_context_policy {
+
+ #define GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000
+
++/*
++ * GuC converts the timeout to clock ticks internally. Different platforms have
++ * different GuC clocks. Thus, the maximum value before overflow is platform
++ * dependent. Current worst case scenario is about 110s. So, the spec says to
++ * limit to 100s to be safe.
++ */
++#define GUC_POLICY_MAX_EXEC_QUANTUM_US (100 * 1000 * 1000UL)
++#define GUC_POLICY_MAX_PREEMPT_TIMEOUT_US (100 * 1000 * 1000UL)
++
++static inline u32 guc_policy_max_exec_quantum_ms(void)
++{
++ BUILD_BUG_ON(GUC_POLICY_MAX_EXEC_QUANTUM_US >= UINT_MAX);
++ return GUC_POLICY_MAX_EXEC_QUANTUM_US / 1000;
++}
++
++static inline u32 guc_policy_max_preempt_timeout_ms(void)
++{
++ BUILD_BUG_ON(GUC_POLICY_MAX_PREEMPT_TIMEOUT_US >= UINT_MAX);
++ return GUC_POLICY_MAX_PREEMPT_TIMEOUT_US / 1000;
++}
++
+ struct guc_policies {
+ u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
+ /* In micro seconds. How much time to allow before DPC processing is
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+index 55d3ef93e86f8..68331c538b0a7 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+@@ -16,15 +16,15 @@
+ #if defined(CONFIG_DRM_I915_DEBUG_GUC)
+ #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M
+ #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M
+-#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_4M
++#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
+ #elif defined(CONFIG_DRM_I915_DEBUG_GEM)
+ #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_1M
+ #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_2M
+-#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_4M
++#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
+ #else
+ #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_8K
+ #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_64K
+-#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_2M
++#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
+ #endif
+
+ static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index 1db59eeb34db9..1a23e901cc663 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -2429,6 +2429,10 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
+ int ret;
+
+ /* NB: For both of these, zero means disabled. */
++ GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000,
++ execution_quantum));
++ GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000,
++ preemption_timeout));
+ execution_quantum = engine->props.timeslice_duration_ms * 1000;
+ preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+
+@@ -2462,6 +2466,10 @@ static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
+ desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
+
+ /* NB: For both of these, zero means disabled. */
++ GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000,
++ desc->execution_quantum));
++ GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000,
++ desc->preemption_timeout));
+ desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
+ desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
+index f2a15d8155f4a..2ce30cff461a0 100644
+--- a/drivers/gpu/drm/i915/i915_driver.c
++++ b/drivers/gpu/drm/i915/i915_driver.c
+@@ -1662,7 +1662,8 @@ static int intel_runtime_suspend(struct device *kdev)
+
+ intel_runtime_pm_enable_interrupts(dev_priv);
+
+- intel_gt_runtime_resume(to_gt(dev_priv));
++ for_each_gt(gt, dev_priv, i)
++ intel_gt_runtime_resume(gt);
+
+ enable_rpm_wakeref_asserts(rpm);
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 2bdddb61ebd7a..38c26668b9602 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -843,7 +843,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
+ __i915_gem_object_release_mmap_gtt(obj);
+
+ list_for_each_entry_safe(obj, on,
+- &to_gt(i915)->lmem_userfault_list, userfault_link)
++ &i915->runtime_pm.lmem_userfault_list, userfault_link)
+ i915_gem_object_runtime_pm_release_mmap_offset(obj);
+
+ /*
+@@ -1128,6 +1128,8 @@ void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+
+ int i915_gem_init(struct drm_i915_private *dev_priv)
+ {
++ struct intel_gt *gt;
++ unsigned int i;
+ int ret;
+
+ /* We need to fallback to 4K pages if host doesn't support huge gtt. */
+@@ -1158,9 +1160,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
+ */
+ intel_init_clock_gating(dev_priv);
+
+- ret = intel_gt_init(to_gt(dev_priv));
+- if (ret)
+- goto err_unlock;
++ for_each_gt(gt, dev_priv, i) {
++ ret = intel_gt_init(gt);
++ if (ret)
++ goto err_unlock;
++ }
+
+ return 0;
+
+@@ -1173,8 +1177,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
+ err_unlock:
+ i915_gem_drain_workqueue(dev_priv);
+
+- if (ret != -EIO)
+- intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
++ if (ret != -EIO) {
++ for_each_gt(gt, dev_priv, i) {
++ intel_gt_driver_remove(gt);
++ intel_gt_driver_release(gt);
++ intel_uc_cleanup_firmwares(&gt->uc);
++ }
++ }
+
+ if (ret == -EIO) {
+ /*
+@@ -1182,10 +1191,12 @@ err_unlock:
+ * as wedged. But we only want to do this when the GPU is angry,
+ * for all other failure, such as an allocation failure, bail.
+ */
+- if (!intel_gt_is_wedged(to_gt(dev_priv))) {
+- i915_probe_error(dev_priv,
+- "Failed to initialize GPU, declaring it wedged!\n");
+- intel_gt_set_wedged(to_gt(dev_priv));
++ for_each_gt(gt, dev_priv, i) {
++ if (!intel_gt_is_wedged(gt)) {
++ i915_probe_error(dev_priv,
++ "Failed to initialize GPU, declaring it wedged!\n");
++ intel_gt_set_wedged(gt);
++ }
+ }
+
+ /* Minimal basic recovery for KMS */
+@@ -1213,10 +1224,12 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
+
+ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
+ {
+- intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);
++ struct intel_gt *gt;
++ unsigned int i;
+
+ i915_gem_suspend_late(dev_priv);
+- intel_gt_driver_remove(to_gt(dev_priv));
++ for_each_gt(gt, dev_priv, i)
++ intel_gt_driver_remove(gt);
+ dev_priv->uabi_engines = RB_ROOT;
+
+ /* Flush any outstanding unpin_work. */
+@@ -1227,9 +1240,13 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
+
+ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
+ {
+- intel_gt_driver_release(to_gt(dev_priv));
++ struct intel_gt *gt;
++ unsigned int i;
+
+- intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
++ for_each_gt(gt, dev_priv, i) {
++ intel_gt_driver_release(gt);
++ intel_uc_cleanup_firmwares(&gt->uc);
++ }
+
+ /* Flush any outstanding work, including i915_gem_context.release_work. */
+ i915_gem_drain_workqueue(dev_priv);
+diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
+index 744cca507946b..129746713d072 100644
+--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
++++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
+@@ -633,6 +633,8 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
+ runtime_pm);
+ int count = atomic_read(&rpm->wakeref_count);
+
++ intel_wakeref_auto_fini(&rpm->userfault_wakeref);
++
+ drm_WARN(&i915->drm, count,
+ "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
+ intel_rpm_raw_wakeref_count(count),
+@@ -652,4 +654,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
+ rpm->available = HAS_RUNTIME_PM(i915);
+
+ init_intel_runtime_pm_wakeref(rpm);
++ INIT_LIST_HEAD(&rpm->lmem_userfault_list);
++ spin_lock_init(&rpm->lmem_userfault_lock);
++ intel_wakeref_auto_init(&rpm->userfault_wakeref, rpm);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
+index d9160e3ff4afc..98b8b28baaa15 100644
+--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
++++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
+@@ -53,6 +53,28 @@ struct intel_runtime_pm {
+ bool irqs_enabled;
+ bool no_wakeref_tracking;
+
++ /*
++	 * Protects access to the lmem userfault list.
++	 * If we are outside of the runtime suspend path, access to
++	 * @lmem_userfault_list always requires first grabbing the
++	 * runtime pm, to ensure we can't race against runtime suspend.
++ * Once we have that we also need to grab @lmem_userfault_lock,
++ * at which point we have exclusive access.
++ * The runtime suspend path is special since it doesn't really hold any locks,
++ * but instead has exclusive access by virtue of all other accesses requiring
++ * holding the runtime pm wakeref.
++ */
++ spinlock_t lmem_userfault_lock;
++
++ /*
++	 * Keep a list of userfaulted gem objects, which need to release their
++	 * mmap mappings on the runtime suspend path.
++ */
++ struct list_head lmem_userfault_list;
++
++ /* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
++ struct intel_wakeref_auto userfault_wakeref;
++
+ #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ /*
+ * To aide detection of wakeref leaks and general misuse, we
+diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
+index 508a6d994e831..1f5d39a4077cd 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
+@@ -461,9 +461,6 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi)
+ if (--dpi->refcount != 0)
+ return;
+
+- if (dpi->pinctrl && dpi->pins_gpio)
+- pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+-
+ mtk_dpi_disable(dpi);
+ clk_disable_unprepare(dpi->pixel_clk);
+ clk_disable_unprepare(dpi->engine_clk);
+@@ -488,9 +485,6 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
+ goto err_pixel;
+ }
+
+- if (dpi->pinctrl && dpi->pins_dpi)
+- pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
+-
+ return 0;
+
+ err_pixel:
+@@ -721,12 +715,18 @@ static void mtk_dpi_bridge_disable(struct drm_bridge *bridge)
+ struct mtk_dpi *dpi = bridge_to_dpi(bridge);
+
+ mtk_dpi_power_off(dpi);
++
++ if (dpi->pinctrl && dpi->pins_gpio)
++ pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+ }
+
+ static void mtk_dpi_bridge_enable(struct drm_bridge *bridge)
+ {
+ struct mtk_dpi *dpi = bridge_to_dpi(bridge);
+
++ if (dpi->pinctrl && dpi->pins_dpi)
++ pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
++
+ mtk_dpi_power_on(dpi);
+ mtk_dpi_set_display_mode(dpi, &dpi->mode);
+ mtk_dpi_enable(dpi);
+diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+index 4c80b6896dc3d..6e8f99554f548 100644
+--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
++++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+@@ -1202,9 +1202,10 @@ static enum drm_connector_status mtk_hdmi_detect(struct mtk_hdmi *hdmi)
+ return mtk_hdmi_update_plugged_status(hdmi);
+ }
+
+-static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
+- const struct drm_display_info *info,
+- const struct drm_display_mode *mode)
++static enum drm_mode_status
++mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
++ const struct drm_display_info *info,
++ const struct drm_display_mode *mode)
+ {
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+ struct drm_bridge *next_bridge;
+diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+index 5675bc2a92cf8..3f73b211fa8e3 100644
+--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
++++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+@@ -116,9 +116,10 @@ static int meson_encoder_cvbs_get_modes(struct drm_bridge *bridge,
+ return i;
+ }
+
+-static int meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge,
+- const struct drm_display_info *display_info,
+- const struct drm_display_mode *mode)
++static enum drm_mode_status
++meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge,
++ const struct drm_display_info *display_info,
++ const struct drm_display_mode *mode)
+ {
+ if (meson_cvbs_get_mode(mode))
+ return MODE_OK;
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index fdc578016e0bf..e846e629c00d8 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1906,7 +1906,7 @@ static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
+
+ if (val == UINT_MAX) {
+ DRM_DEV_ERROR(dev,
+- "missing support for speed-bin: %u. Some OPPs may not be supported by hardware",
++ "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n",
+ fuse);
+ return UINT_MAX;
+ }
+@@ -1916,7 +1916,7 @@ static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
+
+ static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
+ {
+- u32 supp_hw = UINT_MAX;
++ u32 supp_hw;
+ u32 speedbin;
+ int ret;
+
+@@ -1928,15 +1928,13 @@ static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
+ if (ret == -ENOENT) {
+ return 0;
+ } else if (ret) {
+- DRM_DEV_ERROR(dev,
+- "failed to read speed-bin (%d). Some OPPs may not be supported by hardware",
+- ret);
+- goto done;
++ dev_err_probe(dev, ret,
++ "failed to read speed-bin. Some OPPs may not be supported by hardware\n");
++ return ret;
+ }
+
+ supp_hw = fuse_to_supp_hw(dev, rev, speedbin);
+
+-done:
+ ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
+ if (ret)
+ return ret;
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+index f2ddcfb6f7ee6..3662df698dae5 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+@@ -42,7 +42,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ u32 initial_lines)
+ {
+ struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+- u32 data, lsb, bpp;
++ u32 data;
+ u32 slice_last_group_size;
+ u32 det_thresh_flatness;
+ bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
+@@ -56,14 +56,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ data = (initial_lines << 20);
+ data |= ((slice_last_group_size - 1) << 18);
+ /* bpp is 6.4 format, 4 LSBs bits are for fractional part */
+- data |= dsc->bits_per_pixel << 12;
+- lsb = dsc->bits_per_pixel % 4;
+- bpp = dsc->bits_per_pixel / 4;
+- bpp *= 4;
+- bpp <<= 4;
+- bpp |= lsb;
+-
+- data |= bpp << 8;
++ data |= (dsc->bits_per_pixel << 8);
+ data |= (dsc->block_pred_enable << 7);
+ data |= (dsc->line_buf_depth << 3);
+ data |= (dsc->simple_422 << 2);
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+index b0d21838a1343..29ae5c9613f36 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+@@ -203,7 +203,7 @@ static int mdp5_set_split_display(struct msm_kms *kms,
+ slave_encoder);
+ }
+
+-static void mdp5_destroy(struct platform_device *pdev);
++static void mdp5_destroy(struct mdp5_kms *mdp5_kms);
+
+ static void mdp5_kms_destroy(struct msm_kms *kms)
+ {
+@@ -223,7 +223,7 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
+ }
+
+ mdp_kms_destroy(&mdp5_kms->base);
+- mdp5_destroy(mdp5_kms->pdev);
++ mdp5_destroy(mdp5_kms);
+ }
+
+ #ifdef CONFIG_DEBUG_FS
+@@ -559,6 +559,8 @@ static int mdp5_kms_init(struct drm_device *dev)
+ int irq, i, ret;
+
+ ret = mdp5_init(to_platform_device(dev->dev), dev);
++ if (ret)
++ return ret;
+
+ /* priv->kms would have been populated by the MDP5 driver */
+ kms = priv->kms;
+@@ -632,9 +634,8 @@ fail:
+ return ret;
+ }
+
+-static void mdp5_destroy(struct platform_device *pdev)
++static void mdp5_destroy(struct mdp5_kms *mdp5_kms)
+ {
+- struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+ int i;
+
+ if (mdp5_kms->ctlm)
+@@ -648,7 +649,7 @@ static void mdp5_destroy(struct platform_device *pdev)
+ kfree(mdp5_kms->intfs[i]);
+
+ if (mdp5_kms->rpm_enabled)
+- pm_runtime_disable(&pdev->dev);
++ pm_runtime_disable(&mdp5_kms->pdev->dev);
+
+ drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
+ drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
+@@ -797,8 +798,6 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
+ goto fail;
+ }
+
+- platform_set_drvdata(pdev, mdp5_kms);
+-
+ spin_lock_init(&mdp5_kms->resource_lock);
+
+ mdp5_kms->dev = dev;
+@@ -839,6 +838,9 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
+ */
+ clk_set_rate(mdp5_kms->core_clk, 200000000);
+
++ /* set uninit-ed kms */
++ priv->kms = &mdp5_kms->base.base;
++
+ pm_runtime_enable(&pdev->dev);
+ mdp5_kms->rpm_enabled = true;
+
+@@ -890,13 +892,10 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
+ if (ret)
+ goto fail;
+
+- /* set uninit-ed kms */
+- priv->kms = &mdp5_kms->base.base;
+-
+ return 0;
+ fail:
+ if (mdp5_kms)
+- mdp5_destroy(pdev);
++ mdp5_destroy(mdp5_kms);
+ return ret;
+ }
+
+@@ -953,7 +952,8 @@ static int mdp5_dev_remove(struct platform_device *pdev)
+ static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+- struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
++ struct msm_drm_private *priv = platform_get_drvdata(pdev);
++ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+
+ DBG("");
+
+@@ -963,7 +963,8 @@ static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
+ static __maybe_unused int mdp5_runtime_resume(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+- struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
++ struct msm_drm_private *priv = platform_get_drvdata(pdev);
++ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+
+ DBG("");
+
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index a49f6dbbe8883..c9d9b384ddd03 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -857,7 +857,7 @@ static int dp_display_set_mode(struct msm_dp *dp_display,
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+- dp->panel->dp_mode.drm_mode = mode->drm_mode;
++ drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode);
+ dp->panel->dp_mode.bpp = mode->bpp;
+ dp->panel->dp_mode.capabilities = mode->capabilities;
+ dp_panel_init_panel_info(dp->panel);
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 7fbf391c024f8..89aadd3b3202b 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -21,6 +21,7 @@
+
+ #include <video/mipi_display.h>
+
++#include <drm/display/drm_dsc_helper.h>
+ #include <drm/drm_of.h>
+
+ #include "dsi.h"
+@@ -33,7 +34,7 @@
+
+ #define DSI_RESET_TOGGLE_DELAY_MS 20
+
+-static int dsi_populate_dsc_params(struct drm_dsc_config *dsc);
++static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc_config *dsc);
+
+ static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
+ {
+@@ -842,17 +843,15 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
+ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
+ {
+ struct drm_dsc_config *dsc = msm_host->dsc;
+- u32 reg, intf_width, reg_ctrl, reg_ctrl2;
++ u32 reg, reg_ctrl, reg_ctrl2;
+ u32 slice_per_intf, total_bytes_per_intf;
+ u32 pkt_per_line;
+- u32 bytes_in_slice;
+ u32 eol_byte_num;
+
+ /* first calculate dsc parameters and then program
+ * compress mode registers
+ */
+- intf_width = hdisplay;
+- slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width);
++ slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);
+
+ /* If slice_per_pkt is greater than slice_per_intf
+ * then default to 1. This can happen during partial
+@@ -861,12 +860,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
+ if (slice_per_intf > dsc->slice_count)
+ dsc->slice_count = 1;
+
+- slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);
+- bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bits_per_pixel, 8);
+-
+- dsc->slice_chunk_size = bytes_in_slice;
+-
+- total_bytes_per_intf = bytes_in_slice * slice_per_intf;
++ total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
+
+ eol_byte_num = total_bytes_per_intf % 3;
+ pkt_per_line = slice_per_intf / dsc->slice_count;
+@@ -892,7 +886,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
+ reg_ctrl |= reg;
+
+ reg_ctrl2 &= ~DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK;
+- reg_ctrl2 |= DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(bytes_in_slice);
++ reg_ctrl2 |= DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(dsc->slice_chunk_size);
+
+ dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
+ dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
+@@ -915,6 +909,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+ u32 va_end = va_start + mode->vdisplay;
+ u32 hdisplay = mode->hdisplay;
+ u32 wc;
++ int ret;
+
+ DBG("");
+
+@@ -950,7 +945,9 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+ /* we do the calculations for dsc parameters here so that
+ * panel can use these parameters
+ */
+- dsi_populate_dsc_params(dsc);
++ ret = dsi_populate_dsc_params(msm_host, dsc);
++ if (ret)
++ return;
+
+ /* Divide the display by 3 but keep back/font porch and
+ * pulse width same
+@@ -1754,18 +1751,20 @@ static char bpg_offset[DSC_NUM_BUF_RANGES] = {
+ 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
+ };
+
+-static int dsi_populate_dsc_params(struct drm_dsc_config *dsc)
+-{
+- int mux_words_size;
+- int groups_per_line, groups_total;
+- int min_rate_buffer_size;
+- int hrd_delay;
+- int pre_num_extra_mux_bits, num_extra_mux_bits;
+- int slice_bits;
+- int target_bpp_x16;
+- int data;
+- int final_value, final_scale;
++static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc_config *dsc)
++{
+ int i;
++ u16 bpp = dsc->bits_per_pixel >> 4;
++
++ if (dsc->bits_per_pixel & 0xf) {
++ DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support fractional bits_per_pixel\n");
++ return -EINVAL;
++ }
++
++ if (dsc->bits_per_component != 8) {
++ DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support bits_per_component != 8 yet\n");
++ return -EOPNOTSUPP;
++ }
+
+ dsc->rc_model_size = 8192;
+ dsc->first_line_bpg_offset = 12;
+@@ -1783,16 +1782,21 @@ static int dsi_populate_dsc_params(struct drm_dsc_config *dsc)
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ dsc->rc_range_params[i].range_min_qp = min_qp[i];
+ dsc->rc_range_params[i].range_max_qp = max_qp[i];
+- dsc->rc_range_params[i].range_bpg_offset = bpg_offset[i];
++ /*
++ * Range BPG Offset contains two's-complement signed values that fill
++ * 8 bits, yet the registers and DCS PPS field are only 6 bits wide.
++ */
++ dsc->rc_range_params[i].range_bpg_offset = bpg_offset[i] & DSC_RANGE_BPG_OFFSET_MASK;
+ }
+
+- dsc->initial_offset = 6144; /* Not bpp 12 */
+- if (dsc->bits_per_pixel != 8)
++ dsc->initial_offset = 6144; /* Not bpp 12 */
++ if (bpp != 8)
+ dsc->initial_offset = 2048; /* bpp = 12 */
+
+- mux_words_size = 48; /* bpc == 8/10 */
+- if (dsc->bits_per_component == 12)
+- mux_words_size = 64;
++ if (dsc->bits_per_component <= 10)
++ dsc->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
++ else
++ dsc->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
+
+ dsc->initial_xmit_delay = 512;
+ dsc->initial_scale_value = 32;
+@@ -1804,63 +1808,8 @@ static int dsi_populate_dsc_params(struct drm_dsc_config *dsc)
+ dsc->flatness_max_qp = 12;
+ dsc->rc_quant_incr_limit0 = 11;
+ dsc->rc_quant_incr_limit1 = 11;
+- dsc->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+-
+- /* FIXME: need to call drm_dsc_compute_rc_parameters() so that rest of
+- * params are calculated
+- */
+- groups_per_line = DIV_ROUND_UP(dsc->slice_width, 3);
+- dsc->slice_chunk_size = dsc->slice_width * dsc->bits_per_pixel / 8;
+- if ((dsc->slice_width * dsc->bits_per_pixel) % 8)
+- dsc->slice_chunk_size++;
+
+- /* rbs-min */
+- min_rate_buffer_size = dsc->rc_model_size - dsc->initial_offset +
+- dsc->initial_xmit_delay * dsc->bits_per_pixel +
+- groups_per_line * dsc->first_line_bpg_offset;
+-
+- hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, dsc->bits_per_pixel);
+-
+- dsc->initial_dec_delay = hrd_delay - dsc->initial_xmit_delay;
+-
+- dsc->initial_scale_value = 8 * dsc->rc_model_size /
+- (dsc->rc_model_size - dsc->initial_offset);
+-
+- slice_bits = 8 * dsc->slice_chunk_size * dsc->slice_height;
+-
+- groups_total = groups_per_line * dsc->slice_height;
+-
+- data = dsc->first_line_bpg_offset * 2048;
+-
+- dsc->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->slice_height - 1));
+-
+- pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * dsc->bits_per_component + 4) - 2);
+-
+- num_extra_mux_bits = pre_num_extra_mux_bits - (mux_words_size -
+- ((slice_bits - pre_num_extra_mux_bits) % mux_words_size));
+-
+- data = 2048 * (dsc->rc_model_size - dsc->initial_offset + num_extra_mux_bits);
+- dsc->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
+-
+- /* bpp * 16 + 0.5 */
+- data = dsc->bits_per_pixel * 16;
+- data *= 2;
+- data++;
+- data /= 2;
+- target_bpp_x16 = data;
+-
+- data = (dsc->initial_xmit_delay * target_bpp_x16) / 16;
+- final_value = dsc->rc_model_size - data + num_extra_mux_bits;
+- dsc->final_offset = final_value;
+-
+- final_scale = 8 * dsc->rc_model_size / (dsc->rc_model_size - final_value);
+-
+- data = (final_scale - 9) * (dsc->nfl_bpg_offset + dsc->slice_bpg_offset);
+- dsc->scale_increment_interval = (2048 * dsc->final_offset) / data;
+-
+- dsc->scale_decrement_interval = groups_per_line / (dsc->initial_scale_value - 8);
+-
+- return 0;
++ return drm_dsc_compute_rc_parameters(dsc);
+ }
+
+ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
+index f28fb21e38911..8cd5d50639a53 100644
+--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
++++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
+@@ -252,7 +252,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
+ if (hdmi->hpd_gpiod)
+ gpiod_set_consumer_name(hdmi->hpd_gpiod, "HDMI_HPD");
+
+- pm_runtime_enable(&pdev->dev);
++ devm_pm_runtime_enable(&pdev->dev);
+
+ hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
+
+diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c
+index b1092aab14231..71546a5d0a48c 100644
+--- a/drivers/gpu/drm/mxsfb/lcdif_kms.c
++++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c
+@@ -5,6 +5,7 @@
+ * This code is based on drivers/gpu/drm/mxsfb/mxsfb*
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+@@ -52,16 +53,22 @@ static void lcdif_set_formats(struct lcdif_drm_private *lcdif,
+ writel(DISP_PARA_LINE_PATTERN_UYVY_H,
+ lcdif->base + LCDC_V8_DISP_PARA);
+
+- /* CSC: BT.601 Full Range RGB to YCbCr coefficients. */
+- writel(CSC0_COEF0_A2(0x096) | CSC0_COEF0_A1(0x04c),
++ /*
++ * CSC: BT.601 Limited Range RGB to YCbCr coefficients.
++ *
++ * |Y | | 0.2568 0.5041 0.0979| |R| |16 |
++ * |Cb| = |-0.1482 -0.2910 0.4392| * |G| + |128|
++ * |Cr| | 0.4392 0.4392 -0.3678| |B| |128|
++ */
++ writel(CSC0_COEF0_A2(0x081) | CSC0_COEF0_A1(0x041),
+ lcdif->base + LCDC_V8_CSC0_COEF0);
+- writel(CSC0_COEF1_B1(0x7d5) | CSC0_COEF1_A3(0x01d),
++ writel(CSC0_COEF1_B1(0x7db) | CSC0_COEF1_A3(0x019),
+ lcdif->base + LCDC_V8_CSC0_COEF1);
+- writel(CSC0_COEF2_B3(0x080) | CSC0_COEF2_B2(0x7ac),
++ writel(CSC0_COEF2_B3(0x070) | CSC0_COEF2_B2(0x7b6),
+ lcdif->base + LCDC_V8_CSC0_COEF2);
+- writel(CSC0_COEF3_C2(0x795) | CSC0_COEF3_C1(0x080),
++ writel(CSC0_COEF3_C2(0x7a2) | CSC0_COEF3_C1(0x070),
+ lcdif->base + LCDC_V8_CSC0_COEF3);
+- writel(CSC0_COEF4_D1(0x000) | CSC0_COEF4_C3(0x7ec),
++ writel(CSC0_COEF4_D1(0x010) | CSC0_COEF4_C3(0x7ee),
+ lcdif->base + LCDC_V8_CSC0_COEF4);
+ writel(CSC0_COEF5_D3(0x080) | CSC0_COEF5_D2(0x080),
+ lcdif->base + LCDC_V8_CSC0_COEF5);
+@@ -142,14 +149,36 @@ static void lcdif_set_mode(struct lcdif_drm_private *lcdif, u32 bus_flags)
+ CTRLDESCL0_1_WIDTH(m->hdisplay),
+ lcdif->base + LCDC_V8_CTRLDESCL0_1);
+
+- writel(CTRLDESCL0_3_PITCH(lcdif->crtc.primary->state->fb->pitches[0]),
+- lcdif->base + LCDC_V8_CTRLDESCL0_3);
++ /*
++	 * P_SIZE and T_SIZE are undocumented registers, but according to the
++	 * downstream kernel they control the AXI burst size. As of now
++	 * there are two known values:
++	 * 1 - 128Byte
++	 * 2 - 256Byte
++	 * The downstream kernel sets a 256B burst size to improve memory
++	 * efficiency, so set it here too.
++ */
++ ctrl = CTRLDESCL0_3_P_SIZE(2) | CTRLDESCL0_3_T_SIZE(2) |
++ CTRLDESCL0_3_PITCH(lcdif->crtc.primary->state->fb->pitches[0]);
++ writel(ctrl, lcdif->base + LCDC_V8_CTRLDESCL0_3);
+ }
+
+ static void lcdif_enable_controller(struct lcdif_drm_private *lcdif)
+ {
+ u32 reg;
+
++	/* Set FIFO Panic watermarks, low 1/3, high 2/3. */
++ writel(FIELD_PREP(PANIC0_THRES_LOW_MASK, 1 * PANIC0_THRES_MAX / 3) |
++ FIELD_PREP(PANIC0_THRES_HIGH_MASK, 2 * PANIC0_THRES_MAX / 3),
++ lcdif->base + LCDC_V8_PANIC0_THRES);
++
++ /*
++	 * Enable FIFO Panic. This does not generate an interrupt, but
++	 * boosts NoC priority based on the FIFO Panic watermarks.
++ */
++ writel(INT_ENABLE_D1_PLANE_PANIC_EN,
++ lcdif->base + LCDC_V8_INT_ENABLE_D1);
++
+ reg = readl(lcdif->base + LCDC_V8_DISP_PARA);
+ reg |= DISP_PARA_DISP_ON;
+ writel(reg, lcdif->base + LCDC_V8_DISP_PARA);
+@@ -177,6 +206,9 @@ static void lcdif_disable_controller(struct lcdif_drm_private *lcdif)
+ reg = readl(lcdif->base + LCDC_V8_DISP_PARA);
+ reg &= ~DISP_PARA_DISP_ON;
+ writel(reg, lcdif->base + LCDC_V8_DISP_PARA);
++
++ /* Disable FIFO Panic NoC priority booster. */
++ writel(0, lcdif->base + LCDC_V8_INT_ENABLE_D1);
+ }
+
+ static void lcdif_reset_block(struct lcdif_drm_private *lcdif)
+diff --git a/drivers/gpu/drm/mxsfb/lcdif_regs.h b/drivers/gpu/drm/mxsfb/lcdif_regs.h
+index c70220651e3a5..37f0d9a06b104 100644
+--- a/drivers/gpu/drm/mxsfb/lcdif_regs.h
++++ b/drivers/gpu/drm/mxsfb/lcdif_regs.h
+@@ -190,6 +190,10 @@
+ #define CTRLDESCL0_1_WIDTH(n) ((n) & 0xffff)
+ #define CTRLDESCL0_1_WIDTH_MASK GENMASK(15, 0)
+
++#define CTRLDESCL0_3_P_SIZE(n) (((n) << 20) & CTRLDESCL0_3_P_SIZE_MASK)
++#define CTRLDESCL0_3_P_SIZE_MASK GENMASK(22, 20)
++#define CTRLDESCL0_3_T_SIZE(n) (((n) << 16) & CTRLDESCL0_3_T_SIZE_MASK)
++#define CTRLDESCL0_3_T_SIZE_MASK GENMASK(17, 16)
+ #define CTRLDESCL0_3_PITCH(n) ((n) & 0xffff)
+ #define CTRLDESCL0_3_PITCH_MASK GENMASK(15, 0)
+
+@@ -248,6 +252,7 @@
+
+ #define PANIC0_THRES_LOW_MASK GENMASK(24, 16)
+ #define PANIC0_THRES_HIGH_MASK GENMASK(8, 0)
++#define PANIC0_THRES_MAX 511
+
+ #define LCDIF_MIN_XRES 120
+ #define LCDIF_MIN_YRES 120
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+index c481daa4bbceb..225b9884f61a9 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+@@ -244,7 +244,7 @@ static void st7701_init_sequence(struct st7701 *st7701)
+ DSI_CMD2_BK0_INVSEL_ONES_MASK |
+ FIELD_PREP(DSI_CMD2_BK0_INVSEL_NLINV_MASK, desc->nlinv),
+ FIELD_PREP(DSI_CMD2_BK0_INVSEL_RTNI_MASK,
+- DIV_ROUND_UP(mode->htotal, 16)));
++ (clamp((u32)mode->htotal, 512U, 1008U) - 512) / 16));
+
+ /* Command2, BK1 */
+ ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+@@ -762,7 +762,15 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
+ st7701->dsi = dsi;
+ st7701->desc = desc;
+
+- return mipi_dsi_attach(dsi);
++ ret = mipi_dsi_attach(dsi);
++ if (ret)
++ goto err_attach;
++
++ return 0;
++
++err_attach:
++ drm_panel_remove(&st7701->panel);
++ return ret;
+ }
+
+ static void st7701_dsi_remove(struct mipi_dsi_device *dsi)
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index 33121655d50bb..63bdc9f6fc243 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -227,6 +227,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+
+ if (!found)
+ return false;
++ pci_dev_put(pdev);
+
+ rdev->bios = kmalloc(size, GFP_KERNEL);
+ if (!rdev->bios) {
+@@ -612,13 +613,14 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+ acpi_size tbl_size;
+ UEFI_ACPI_VFCT *vfct;
+ unsigned offset;
++ bool r = false;
+
+ if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
+ return false;
+ tbl_size = hdr->length;
+ if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
+ DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+- return false;
++ goto out;
+ }
+
+ vfct = (UEFI_ACPI_VFCT *)hdr;
+@@ -631,13 +633,13 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+ offset += sizeof(VFCT_IMAGE_HEADER);
+ if (offset > tbl_size) {
+ DRM_ERROR("ACPI VFCT image header truncated\n");
+- return false;
++ goto out;
+ }
+
+ offset += vhdr->ImageLength;
+ if (offset > tbl_size) {
+ DRM_ERROR("ACPI VFCT image truncated\n");
+- return false;
++ goto out;
+ }
+
+ if (vhdr->ImageLength &&
+@@ -649,15 +651,18 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+ rdev->bios = kmemdup(&vbios->VbiosContent,
+ vhdr->ImageLength,
+ GFP_KERNEL);
++ if (rdev->bios)
++ r = true;
+
+- if (!rdev->bios)
+- return false;
+- return true;
++ goto out;
+ }
+ }
+
+ DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+- return false;
++
++out:
++ acpi_put_table(hdr);
++ return r;
+ }
+ #else
+ static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
+index fd2c2eaee26ba..a5518e90d6896 100644
+--- a/drivers/gpu/drm/rcar-du/Kconfig
++++ b/drivers/gpu/drm/rcar-du/Kconfig
+@@ -41,8 +41,6 @@ config DRM_RCAR_LVDS
+ depends on DRM_RCAR_USE_LVDS
+ select DRM_KMS_HELPER
+ select DRM_PANEL
+- select OF_FLATTREE
+- select OF_OVERLAY
+
+ config DRM_RCAR_USE_MIPI_DSI
+ bool "R-Car DU MIPI DSI Encoder Support"
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index 518ee13b1d6f4..8526dda919317 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -571,7 +571,7 @@ static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
+ video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+ video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+
+- memcpy(&dp->mode, adjusted, sizeof(*mode));
++ drm_mode_copy(&dp->mode, adjusted);
+ }
+
+ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
+diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+index f4df9820b295d..912eb4e94c595 100644
+--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
++++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+@@ -1221,7 +1221,7 @@ static int dw_mipi_dsi_dphy_power_on(struct phy *phy)
+ return i;
+ }
+
+- ret = pm_runtime_get_sync(dsi->dev);
++ ret = pm_runtime_resume_and_get(dsi->dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "failed to enable device: %d\n", ret);
+ return ret;
+diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
+index 87b2243ea23e3..f51774866f412 100644
+--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
++++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
+@@ -499,7 +499,7 @@ static void inno_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ inno_hdmi_setup(hdmi, adj_mode);
+
+ /* Store the display mode for plugin/DPMS poweron events */
+- memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode));
++ drm_mode_copy(&hdmi->previous_mode, adj_mode);
+ }
+
+ static void inno_hdmi_encoder_enable(struct drm_encoder *encoder)
+diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+index cf2cf51091a3e..90145ad969841 100644
+--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
++++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+@@ -395,7 +395,7 @@ rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
+
+ /* Store the display mode for plugin/DPMS poweron events. */
+- memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode));
++ drm_mode_copy(&hdmi->previous_mode, adj_mode);
+ }
+
+ static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder)
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index c356de5dd2206..fa1f4ee6d1950 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -602,7 +602,7 @@ static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state)
+ struct vop *vop = to_vop(crtc);
+ int ret, i;
+
+- ret = pm_runtime_get_sync(vop->dev);
++ ret = pm_runtime_resume_and_get(vop->dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
+ return ret;
+@@ -1983,7 +1983,7 @@ static int vop_initial(struct vop *vop)
+ return PTR_ERR(vop->dclk);
+ }
+
+- ret = pm_runtime_get_sync(vop->dev);
++ ret = pm_runtime_resume_and_get(vop->dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
+ return ret;
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 105a548d0abeb..8cecf81a5ae03 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -822,7 +822,7 @@ static void vop2_enable(struct vop2 *vop2)
+ {
+ int ret;
+
+- ret = pm_runtime_get_sync(vop2->dev);
++ ret = pm_runtime_resume_and_get(vop2->dev);
+ if (ret < 0) {
+ drm_err(vop2->drm, "failed to get pm runtime: %d\n", ret);
+ return;
+diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+index 5a284332ec49e..68f6ebb33460b 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
++++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+@@ -152,7 +152,7 @@ static int rk3288_lvds_poweron(struct rockchip_lvds *lvds)
+ DRM_DEV_ERROR(lvds->dev, "failed to enable lvds pclk %d\n", ret);
+ return ret;
+ }
+- ret = pm_runtime_get_sync(lvds->dev);
++ ret = pm_runtime_resume_and_get(lvds->dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
+ clk_disable(lvds->pclk);
+@@ -336,16 +336,20 @@ static int px30_lvds_poweron(struct rockchip_lvds *lvds)
+ {
+ int ret;
+
+- ret = pm_runtime_get_sync(lvds->dev);
++ ret = pm_runtime_resume_and_get(lvds->dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
+ return ret;
+ }
+
+ /* Enable LVDS mode */
+- return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
++ ret = regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+ PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1),
+ PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1));
++ if (ret)
++ pm_runtime_put(lvds->dev);
++
++ return ret;
+ }
+
+ static void px30_lvds_poweroff(struct rockchip_lvds *lvds)
+diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
+index b6ee8a82e656c..577c477b5f467 100644
+--- a/drivers/gpu/drm/sti/sti_dvo.c
++++ b/drivers/gpu/drm/sti/sti_dvo.c
+@@ -288,7 +288,7 @@ static void sti_dvo_set_mode(struct drm_bridge *bridge,
+
+ DRM_DEBUG_DRIVER("\n");
+
+- memcpy(&dvo->mode, mode, sizeof(struct drm_display_mode));
++ drm_mode_copy(&dvo->mode, mode);
+
+ /* According to the path used (main or aux), the dvo clocks should
+ * have a different parent clock. */
+@@ -346,8 +346,9 @@ static int sti_dvo_connector_get_modes(struct drm_connector *connector)
+
+ #define CLK_TOLERANCE_HZ 50
+
+-static int sti_dvo_connector_mode_valid(struct drm_connector *connector,
+- struct drm_display_mode *mode)
++static enum drm_mode_status
++sti_dvo_connector_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
+ {
+ int target = mode->clock * 1000;
+ int target_min = target - CLK_TOLERANCE_HZ;
+diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
+index 03cc401ed5934..15097ac679314 100644
+--- a/drivers/gpu/drm/sti/sti_hda.c
++++ b/drivers/gpu/drm/sti/sti_hda.c
+@@ -524,7 +524,7 @@ static void sti_hda_set_mode(struct drm_bridge *bridge,
+
+ DRM_DEBUG_DRIVER("\n");
+
+- memcpy(&hda->mode, mode, sizeof(struct drm_display_mode));
++ drm_mode_copy(&hda->mode, mode);
+
+ if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
+ DRM_ERROR("Undefined mode\n");
+@@ -601,8 +601,9 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector)
+
+ #define CLK_TOLERANCE_HZ 50
+
+-static int sti_hda_connector_mode_valid(struct drm_connector *connector,
+- struct drm_display_mode *mode)
++static enum drm_mode_status
++sti_hda_connector_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
+ {
+ int target = mode->clock * 1000;
+ int target_min = target - CLK_TOLERANCE_HZ;
+diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
+index cb82622877d20..8539fe1fedc4c 100644
+--- a/drivers/gpu/drm/sti/sti_hdmi.c
++++ b/drivers/gpu/drm/sti/sti_hdmi.c
+@@ -941,7 +941,7 @@ static void sti_hdmi_set_mode(struct drm_bridge *bridge,
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Copy the drm display mode in the connector local structure */
+- memcpy(&hdmi->mode, mode, sizeof(struct drm_display_mode));
++ drm_mode_copy(&hdmi->mode, mode);
+
+ /* Update clock framerate according to the selected mode */
+ ret = clk_set_rate(hdmi->clk_pix, mode->clock * 1000);
+@@ -1004,8 +1004,9 @@ fail:
+
+ #define CLK_TOLERANCE_HZ 50
+
+-static int sti_hdmi_connector_mode_valid(struct drm_connector *connector,
+- struct drm_display_mode *mode)
++static enum drm_mode_status
++sti_hdmi_connector_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
+ {
+ int target = mode->clock * 1000;
+ int target_min = target - CLK_TOLERANCE_HZ;
+diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
+index bd0f60704467f..a67453cee8832 100644
+--- a/drivers/gpu/drm/tegra/dc.c
++++ b/drivers/gpu/drm/tegra/dc.c
+@@ -3205,8 +3205,10 @@ static int tegra_dc_probe(struct platform_device *pdev)
+ usleep_range(2000, 4000);
+
+ err = reset_control_assert(dc->rst);
+- if (err < 0)
++ if (err < 0) {
++ clk_disable_unprepare(dc->clk);
+ return err;
++ }
+
+ usleep_range(2000, 4000);
+
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+index 8275bba636119..ab125f79408f2 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+@@ -237,6 +237,10 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ in_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
+ &cl_data->sensor_dma_addr[i],
+ GFP_KERNEL);
++ if (!in_data->sensor_virt_addr[i]) {
++ rc = -ENOMEM;
++ goto cleanup;
++ }
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ cl_data->sensor_requested_cnt[i] = 0;
+ cl_data->cur_hid_dev = i;
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 6970797cdc56d..c671ce94671ca 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -314,6 +314,7 @@ static const struct apple_key_translation swapped_option_cmd_keys[] = {
+
+ static const struct apple_key_translation swapped_fn_leftctrl_keys[] = {
+ { KEY_FN, KEY_LEFTCTRL },
++ { KEY_LEFTCTRL, KEY_FN },
+ { }
+ };
+
+@@ -375,24 +376,40 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ struct apple_sc *asc = hid_get_drvdata(hid);
+ const struct apple_key_translation *trans, *table;
+ bool do_translate;
+- u16 code = 0;
++ u16 code = usage->code;
+ unsigned int real_fnmode;
+
+- u16 fn_keycode = (swap_fn_leftctrl) ? (KEY_LEFTCTRL) : (KEY_FN);
+-
+- if (usage->code == fn_keycode) {
+- asc->fn_on = !!value;
+- input_event_with_scancode(input, usage->type, KEY_FN,
+- usage->hid, value);
+- return 1;
+- }
+-
+ if (fnmode == 3) {
+ real_fnmode = (asc->quirks & APPLE_IS_NON_APPLE) ? 2 : 1;
+ } else {
+ real_fnmode = fnmode;
+ }
+
++ if (swap_fn_leftctrl) {
++ trans = apple_find_translation(swapped_fn_leftctrl_keys, code);
++
++ if (trans)
++ code = trans->to;
++ }
++
++ if (iso_layout > 0 || (iso_layout < 0 && (asc->quirks & APPLE_ISO_TILDE_QUIRK) &&
++ hid->country == HID_COUNTRY_INTERNATIONAL_ISO)) {
++ trans = apple_find_translation(apple_iso_keyboard, code);
++
++ if (trans)
++ code = trans->to;
++ }
++
++ if (swap_opt_cmd) {
++ trans = apple_find_translation(swapped_option_cmd_keys, code);
++
++ if (trans)
++ code = trans->to;
++ }
++
++ if (code == KEY_FN)
++ asc->fn_on = !!value;
++
+ if (real_fnmode) {
+ if (hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI ||
+ hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO ||
+@@ -430,15 +447,18 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ else
+ table = apple_fn_keys;
+
+- trans = apple_find_translation (table, usage->code);
++ trans = apple_find_translation(table, code);
+
+ if (trans) {
+- if (test_bit(trans->from, input->key))
++ bool from_is_set = test_bit(trans->from, input->key);
++ bool to_is_set = test_bit(trans->to, input->key);
++
++ if (from_is_set)
+ code = trans->from;
+- else if (test_bit(trans->to, input->key))
++ else if (to_is_set)
+ code = trans->to;
+
+- if (!code) {
++ if (!(from_is_set || to_is_set)) {
+ if (trans->flags & APPLE_FLAG_FKEY) {
+ switch (real_fnmode) {
+ case 1:
+@@ -455,62 +475,31 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ do_translate = asc->fn_on;
+ }
+
+- code = do_translate ? trans->to : trans->from;
++ if (do_translate)
++ code = trans->to;
+ }
+-
+- input_event_with_scancode(input, usage->type, code,
+- usage->hid, value);
+- return 1;
+ }
+
+ if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
+- (test_bit(usage->code, asc->pressed_numlock) ||
++ (test_bit(code, asc->pressed_numlock) ||
+ test_bit(LED_NUML, input->led))) {
+- trans = apple_find_translation(powerbook_numlock_keys,
+- usage->code);
++ trans = apple_find_translation(powerbook_numlock_keys, code);
+
+ if (trans) {
+ if (value)
+- set_bit(usage->code,
+- asc->pressed_numlock);
++ set_bit(code, asc->pressed_numlock);
+ else
+- clear_bit(usage->code,
+- asc->pressed_numlock);
++ clear_bit(code, asc->pressed_numlock);
+
+- input_event_with_scancode(input, usage->type,
+- trans->to, usage->hid, value);
++ code = trans->to;
+ }
+-
+- return 1;
+ }
+ }
+
+- if (iso_layout > 0 || (iso_layout < 0 && (asc->quirks & APPLE_ISO_TILDE_QUIRK) &&
+- hid->country == HID_COUNTRY_INTERNATIONAL_ISO)) {
+- trans = apple_find_translation(apple_iso_keyboard, usage->code);
+- if (trans) {
+- input_event_with_scancode(input, usage->type,
+- trans->to, usage->hid, value);
+- return 1;
+- }
+- }
+-
+- if (swap_opt_cmd) {
+- trans = apple_find_translation(swapped_option_cmd_keys, usage->code);
+- if (trans) {
+- input_event_with_scancode(input, usage->type,
+- trans->to, usage->hid, value);
+- return 1;
+- }
+- }
++ if (usage->code != code) {
++ input_event_with_scancode(input, usage->type, code, usage->hid, value);
+
+- if (swap_fn_leftctrl) {
+- trans = apple_find_translation(swapped_fn_leftctrl_keys, usage->code);
+- if (trans) {
+- input_event_with_scancode(input, usage->type,
+- trans->to, usage->hid, value);
+- return 1;
+- }
++ return 1;
+ }
+
+ return 0;
+@@ -640,9 +629,6 @@ static void apple_setup_input(struct input_dev *input)
+ apple_setup_key_translation(input, apple2021_fn_keys);
+ apple_setup_key_translation(input, macbookpro_no_esc_fn_keys);
+ apple_setup_key_translation(input, macbookpro_dedicated_esc_fn_keys);
+-
+- if (swap_fn_leftctrl)
+- apple_setup_key_translation(input, swapped_fn_leftctrl_keys);
+ }
+
+ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+@@ -1011,21 +997,21 @@ static const struct hid_device_id apple_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K),
+- .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL },
++ .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132),
+- .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL },
++ .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680),
+- .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL },
++ .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213),
+- .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL },
++ .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K),
+- .driver_data = APPLE_HAS_FN },
++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223),
+- .driver_data = APPLE_HAS_FN },
++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K),
+- .driver_data = APPLE_HAS_FN },
++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F),
+- .driver_data = APPLE_HAS_FN },
++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 8f58c3c1bec31..e27fb27a36bfa 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -995,7 +995,10 @@
+ #define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003
+
+ #define USB_VENDOR_ID_PLANTRONICS 0x047f
++#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES 0xc055
+ #define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056
++#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES 0xc057
++#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES 0xc058
+
+ #define USB_VENDOR_ID_PANASONIC 0x04da
+ #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 859aeb07542e3..d728a94c642eb 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -340,6 +340,7 @@ static enum power_supply_property hidinput_battery_props[] = {
+ #define HID_BATTERY_QUIRK_PERCENT (1 << 0) /* always reports percent */
+ #define HID_BATTERY_QUIRK_FEATURE (1 << 1) /* ask for feature report */
+ #define HID_BATTERY_QUIRK_IGNORE (1 << 2) /* completely ignore the battery */
++#define HID_BATTERY_QUIRK_AVOID_QUERY (1 << 3) /* do not query the battery */
+
+ static const struct hid_device_id hid_battery_quirks[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+@@ -373,6 +374,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
++ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L),
++ HID_BATTERY_QUIRK_AVOID_QUERY },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100),
+@@ -554,6 +557,9 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
+ dev->battery_avoid_query = report_type == HID_INPUT_REPORT &&
+ field->physical == HID_DG_STYLUS;
+
++ if (quirks & HID_BATTERY_QUIRK_AVOID_QUERY)
++ dev->battery_avoid_query = true;
++
+ dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg);
+ if (IS_ERR(dev->battery)) {
+ error = PTR_ERR(dev->battery);
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 8a2aac18dcc51..656757c79f6b8 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -2548,12 +2548,17 @@ static int hidpp_ff_init(struct hidpp_device *hidpp,
+ struct hid_device *hid = hidpp->hid_dev;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
+- const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
+- const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
++ struct usb_device_descriptor *udesc;
++ u16 bcdDevice;
+ struct ff_device *ff;
+ int error, j, num_slots = data->num_effects;
+ u8 version;
+
++ if (!hid_is_usb(hid)) {
++ hid_err(hid, "device is not USB\n");
++ return -ENODEV;
++ }
++
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+@@ -2567,6 +2572,8 @@ static int hidpp_ff_init(struct hidpp_device *hidpp,
+ }
+
+ /* Get firmware release */
++ udesc = &(hid_to_usb_dev(hid)->descriptor);
++ bcdDevice = le16_to_cpu(udesc->bcdDevice);
+ version = bcdDevice & 255;
+
+ /* Set supported force feedback capabilities */
+diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
+index de52e9f7bb8cb..560eeec4035aa 100644
+--- a/drivers/hid/hid-mcp2221.c
++++ b/drivers/hid/hid-mcp2221.c
+@@ -840,12 +840,19 @@ static int mcp2221_probe(struct hid_device *hdev,
+ return ret;
+ }
+
+- ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
++ /*
++ * This driver uses the .raw_event callback and therefore does not need any
++ * HID_CONNECT_xxx flags.
++ */
++ ret = hid_hw_start(hdev, 0);
+ if (ret) {
+ hid_err(hdev, "can't start hardware\n");
+ return ret;
+ }
+
++ hid_info(hdev, "USB HID v%x.%02x Device [%s] on %s\n", hdev->version >> 8,
++ hdev->version & 0xff, hdev->name, hdev->phys);
++
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "can't open device\n");
+@@ -870,8 +877,7 @@ static int mcp2221_probe(struct hid_device *hdev,
+ mcp->adapter.retries = 1;
+ mcp->adapter.dev.parent = &hdev->dev;
+ snprintf(mcp->adapter.name, sizeof(mcp->adapter.name),
+- "MCP2221 usb-i2c bridge on hidraw%d",
+- ((struct hidraw *)hdev->hidraw)->minor);
++ "MCP2221 usb-i2c bridge");
+
+ ret = i2c_add_adapter(&mcp->adapter);
+ if (ret) {
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 91a4d3fc30e08..372cbdd223e09 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1967,6 +1967,10 @@ static const struct hid_device_id mt_devices[] = {
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_ELAN, 0x313a) },
+
++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++ USB_VENDOR_ID_ELAN, 0x3148) },
++
+ /* Elitegroup panel */
+ { .driver_data = MT_CLS_SERIAL,
+ MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
+diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
+index e81b7cec2d124..3d414ae194acb 100644
+--- a/drivers/hid/hid-plantronics.c
++++ b/drivers/hid/hid-plantronics.c
+@@ -198,9 +198,18 @@ err:
+ }
+
+ static const struct hid_device_id plantronics_devices[] = {
++ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
++ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES),
++ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
+ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
++ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES),
++ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
++ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES),
++ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+ { }
+ };
+diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
+index bb1f423f4ace3..84e7ba5314d3f 100644
+--- a/drivers/hid/hid-rmi.c
++++ b/drivers/hid/hid-rmi.c
+@@ -326,6 +326,8 @@ static int rmi_input_event(struct hid_device *hdev, u8 *data, int size)
+ if (!(test_bit(RMI_STARTED, &hdata->flags)))
+ return 0;
+
++ pm_wakeup_event(hdev->dev.parent, 0);
++
+ local_irq_save(flags);
+
+ rmi_set_attn_data(rmi_dev, data[1], &data[2], size - 2);
+diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
+index 32c2306e240d6..602465ad27458 100644
+--- a/drivers/hid/hid-sensor-custom.c
++++ b/drivers/hid/hid-sensor-custom.c
+@@ -62,7 +62,7 @@ struct hid_sensor_sample {
+ u32 raw_len;
+ } __packed;
+
+-static struct attribute hid_custom_attrs[] = {
++static struct attribute hid_custom_attrs[HID_CUSTOM_TOTAL_ATTRS] = {
+ {.name = "name", .mode = S_IRUGO},
+ {.name = "units", .mode = S_IRUGO},
+ {.name = "unit-expo", .mode = S_IRUGO},
+diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
+index 34fa991e6267e..cd1233d7e2535 100644
+--- a/drivers/hid/hid-uclogic-params.c
++++ b/drivers/hid/hid-uclogic-params.c
+@@ -18,6 +18,7 @@
+ #include "usbhid/usbhid.h"
+ #include "hid-ids.h"
+ #include <linux/ctype.h>
++#include <linux/string.h>
+ #include <asm/unaligned.h>
+
+ /**
+@@ -1211,6 +1212,69 @@ static int uclogic_params_ugee_v2_init_frame_mouse(struct uclogic_params *p)
+ return rc;
+ }
+
++/**
++ * uclogic_params_ugee_v2_has_battery() - check whether a UGEE v2 device has
++ * battery or not.
++ * @hdev: The HID device of the tablet interface.
++ *
++ * Returns:
++ * True if the device has battery, false otherwise.
++ */
++static bool uclogic_params_ugee_v2_has_battery(struct hid_device *hdev)
++{
++ /* The XP-PEN Deco LW vendor, product and version are identical to the
++ * Deco L. The only difference reported by their firmware is the product
++ * name. Add a quirk to support battery reporting on the wireless
++ * version.
++ */
++ if (hdev->vendor == USB_VENDOR_ID_UGEE &&
++ hdev->product == USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L) {
++ struct usb_device *udev = hid_to_usb_dev(hdev);
++
++ if (strstarts(udev->product, "Deco LW"))
++ return true;
++ }
++
++ return false;
++}
++
++/**
++ * uclogic_params_ugee_v2_init_battery() - initialize UGEE v2 battery reporting.
++ * @hdev: The HID device of the tablet interface, cannot be NULL.
++ * @p: Parameters to fill in, cannot be NULL.
++ *
++ * Returns:
++ * Zero, if successful. A negative errno code on error.
++ */
++static int uclogic_params_ugee_v2_init_battery(struct hid_device *hdev,
++ struct uclogic_params *p)
++{
++ int rc = 0;
++
++ if (!hdev || !p)
++ return -EINVAL;
++
++ /* Some tablets contain invalid characters in hdev->uniq, throwing a
++ * "hwmon: '<name>' is not a valid name attribute, please fix" error.
++ * Use the device vendor and product IDs instead.
++ */
++ snprintf(hdev->uniq, sizeof(hdev->uniq), "%x-%x", hdev->vendor,
++ hdev->product);
++
++ rc = uclogic_params_frame_init_with_desc(&p->frame_list[1],
++ uclogic_rdesc_ugee_v2_battery_template_arr,
++ uclogic_rdesc_ugee_v2_battery_template_size,
++ UCLOGIC_RDESC_UGEE_V2_BATTERY_ID);
++ if (rc)
++ return rc;
++
++ p->frame_list[1].suffix = "Battery";
++ p->pen.subreport_list[1].value = 0xf2;
++ p->pen.subreport_list[1].id = UCLOGIC_RDESC_UGEE_V2_BATTERY_ID;
++
++ return rc;
++}
++
+ /**
+ * uclogic_params_ugee_v2_init() - initialize a UGEE graphics tablets by
+ * discovering their parameters.
+@@ -1334,6 +1398,15 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
+ if (rc)
+ goto cleanup;
+
++ /* Initialize the battery interface */
++ if (uclogic_params_ugee_v2_has_battery(hdev)) {
++ rc = uclogic_params_ugee_v2_init_battery(hdev, &p);
++ if (rc) {
++ hid_err(hdev, "error initializing battery: %d\n", rc);
++ goto cleanup;
++ }
++ }
++
+ output:
+ /* Output parameters */
+ memcpy(params, &p, sizeof(*params));
+diff --git a/drivers/hid/hid-uclogic-rdesc.c b/drivers/hid/hid-uclogic-rdesc.c
+index 6b73eb0df6bd7..fb40775f5f5b3 100644
+--- a/drivers/hid/hid-uclogic-rdesc.c
++++ b/drivers/hid/hid-uclogic-rdesc.c
+@@ -1035,6 +1035,40 @@ const __u8 uclogic_rdesc_ugee_v2_frame_mouse_template_arr[] = {
+ const size_t uclogic_rdesc_ugee_v2_frame_mouse_template_size =
+ sizeof(uclogic_rdesc_ugee_v2_frame_mouse_template_arr);
+
++/* Fixed report descriptor template for UGEE v2 battery reports */
++const __u8 uclogic_rdesc_ugee_v2_battery_template_arr[] = {
++ 0x05, 0x01, /* Usage Page (Desktop), */
++ 0x09, 0x07, /* Usage (Keypad), */
++ 0xA1, 0x01, /* Collection (Application), */
++ 0x85, UCLOGIC_RDESC_UGEE_V2_BATTERY_ID,
++ /* Report ID, */
++ 0x75, 0x08, /* Report Size (8), */
++ 0x95, 0x02, /* Report Count (2), */
++ 0x81, 0x01, /* Input (Constant), */
++ 0x05, 0x84, /* Usage Page (Power Device), */
++ 0x05, 0x85, /* Usage Page (Battery System), */
++ 0x09, 0x65, /* Usage (AbsoluteStateOfCharge), */
++ 0x75, 0x08, /* Report Size (8), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0x15, 0x00, /* Logical Minimum (0), */
++ 0x26, 0xff, 0x00, /* Logical Maximum (255), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0x75, 0x01, /* Report Size (1), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0x15, 0x00, /* Logical Minimum (0), */
++ 0x25, 0x01, /* Logical Maximum (1), */
++ 0x09, 0x44, /* Usage (Charging), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0x95, 0x07, /* Report Count (7), */
++ 0x81, 0x01, /* Input (Constant), */
++ 0x75, 0x08, /* Report Size (8), */
++ 0x95, 0x07, /* Report Count (7), */
++ 0x81, 0x01, /* Input (Constant), */
++ 0xC0 /* End Collection */
++};
++const size_t uclogic_rdesc_ugee_v2_battery_template_size =
++ sizeof(uclogic_rdesc_ugee_v2_battery_template_arr);
++
+ /* Fixed report descriptor for Ugee EX07 frame */
+ const __u8 uclogic_rdesc_ugee_ex07_frame_arr[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+diff --git a/drivers/hid/hid-uclogic-rdesc.h b/drivers/hid/hid-uclogic-rdesc.h
+index 0502a06564964..a1f78c07293ff 100644
+--- a/drivers/hid/hid-uclogic-rdesc.h
++++ b/drivers/hid/hid-uclogic-rdesc.h
+@@ -161,6 +161,9 @@ extern const size_t uclogic_rdesc_v2_frame_dial_size;
+ /* Device ID byte offset in v2 frame dial reports */
+ #define UCLOGIC_RDESC_V2_FRAME_DIAL_DEV_ID_BYTE 0x4
+
++/* Report ID for tweaked UGEE v2 battery reports */
++#define UCLOGIC_RDESC_UGEE_V2_BATTERY_ID 0xba
++
+ /* Fixed report descriptor template for UGEE v2 pen reports */
+ extern const __u8 uclogic_rdesc_ugee_v2_pen_template_arr[];
+ extern const size_t uclogic_rdesc_ugee_v2_pen_template_size;
+@@ -177,6 +180,10 @@ extern const size_t uclogic_rdesc_ugee_v2_frame_dial_template_size;
+ extern const __u8 uclogic_rdesc_ugee_v2_frame_mouse_template_arr[];
+ extern const size_t uclogic_rdesc_ugee_v2_frame_mouse_template_size;
+
++/* Fixed report descriptor template for UGEE v2 battery reports */
++extern const __u8 uclogic_rdesc_ugee_v2_battery_template_arr[];
++extern const size_t uclogic_rdesc_ugee_v2_battery_template_size;
++
+ /* Fixed report descriptor for Ugee EX07 frame */
+ extern const __u8 uclogic_rdesc_ugee_ex07_frame_arr[];
+ extern const size_t uclogic_rdesc_ugee_ex07_frame_size;
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index 0667b6022c3b7..a9428b7f34a46 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -554,7 +554,8 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
+ i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
+
+ if (test_bit(I2C_HID_STARTED, &ihid->flags)) {
+- pm_wakeup_event(&ihid->client->dev, 0);
++ if (ihid->hid->group != HID_GROUP_RMI)
++ pm_wakeup_event(&ihid->client->dev, 0);
+
+ hid_input_report(ihid->hid, HID_INPUT_REPORT,
+ ihid->inbuf + sizeof(__le16),
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 634263e4556b0..fb538a6c4add8 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -155,6 +155,9 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
+ {
+ struct wacom *wacom = hid_get_drvdata(hdev);
+
++ if (wacom->wacom_wac.features.type == BOOTLOADER)
++ return 0;
++
+ if (size > WACOM_PKGLEN_MAX)
+ return 1;
+
+@@ -2785,6 +2788,11 @@ static int wacom_probe(struct hid_device *hdev,
+ return error;
+ }
+
++ if (features->type == BOOTLOADER) {
++ hid_warn(hdev, "Using device in hidraw-only mode");
++ return hid_hw_start(hdev, HID_CONNECT_HIDRAW);
++ }
++
+ error = wacom_parse_and_register(wacom, false);
+ if (error)
+ return error;
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 0f3d57b426846..9312d611db8e5 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -4882,6 +4882,9 @@ static const struct wacom_features wacom_features_0x3dd =
+ static const struct wacom_features wacom_features_HID_ANY_ID =
+ { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
+
++static const struct wacom_features wacom_features_0x94 =
++ { "Wacom Bootloader", .type = BOOTLOADER };
++
+ #define USB_DEVICE_WACOM(prod) \
+ HID_DEVICE(BUS_USB, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
+ .driver_data = (kernel_ulong_t)&wacom_features_##prod
+@@ -4955,6 +4958,7 @@ const struct hid_device_id wacom_ids[] = {
+ { USB_DEVICE_WACOM(0x84) },
+ { USB_DEVICE_WACOM(0x90) },
+ { USB_DEVICE_WACOM(0x93) },
++ { USB_DEVICE_WACOM(0x94) },
+ { USB_DEVICE_WACOM(0x97) },
+ { USB_DEVICE_WACOM(0x9A) },
+ { USB_DEVICE_WACOM(0x9F) },
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index 5ca6c06d143be..16f221388563d 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -243,6 +243,7 @@ enum {
+ MTTPC,
+ MTTPC_B,
+ HID_GENERIC,
++ BOOTLOADER,
+ MAX_TYPE
+ };
+
+diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
+index eb98201583185..26f2c3c012978 100644
+--- a/drivers/hsi/controllers/omap_ssi_core.c
++++ b/drivers/hsi/controllers/omap_ssi_core.c
+@@ -502,8 +502,10 @@ static int ssi_probe(struct platform_device *pd)
+ platform_set_drvdata(pd, ssi);
+
+ err = ssi_add_controller(ssi, pd);
+- if (err < 0)
++ if (err < 0) {
++ hsi_put_controller(ssi);
+ goto out1;
++ }
+
+ pm_runtime_enable(&pd->dev);
+
+@@ -536,9 +538,9 @@ out3:
+ device_for_each_child(&pd->dev, NULL, ssi_remove_ports);
+ out2:
+ ssi_remove_controller(ssi);
++ pm_runtime_disable(&pd->dev);
+ out1:
+ platform_set_drvdata(pd, NULL);
+- pm_runtime_disable(&pd->dev);
+
+ return err;
+ }
+@@ -629,7 +631,13 @@ static int __init ssi_init(void) {
+ if (ret)
+ return ret;
+
+- return platform_driver_register(&ssi_port_pdriver);
++ ret = platform_driver_register(&ssi_port_pdriver);
++ if (ret) {
++ platform_driver_unregister(&ssi_pdriver);
++ return ret;
++ }
++
++ return 0;
+ }
+ module_init(ssi_init);
+
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 59a4aa86d1f35..c6692fd5ab155 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -280,6 +280,19 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
+ ring_info->pkt_buffer_size = 0;
+ }
+
++/*
++ * Check if the ring buffer spinlock is available to take or not; used in
++ * atomic contexts, like the panic path (see the Hyper-V framebuffer driver).
++ */
++
++bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel)
++{
++ struct hv_ring_buffer_info *rinfo = &channel->outbound;
++
++ return spin_is_locked(&rinfo->ring_lock);
++}
++EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy);
++
+ /* Write to the ring buffer. */
+ int hv_ringbuffer_write(struct vmbus_channel *channel,
+ const struct kvec *kv_list, u32 kv_count,
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 7ac3daaf59ce0..d3bccc8176c51 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -799,6 +799,7 @@ config SENSORS_IT87
+ config SENSORS_JC42
+ tristate "JEDEC JC42.4 compliant memory module temperature sensors"
+ depends on I2C
++ select REGMAP_I2C
+ help
+ If you say yes here, you get support for JEDEC JC42.4 compliant
+ temperature sensors, which are used on many DDR3 memory modules for
+diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c
+index aa1f25add0b6b..e42ae43f3de46 100644
+--- a/drivers/hwmon/emc2305.c
++++ b/drivers/hwmon/emc2305.c
+@@ -16,7 +16,6 @@ static const unsigned short
+ emc2305_normal_i2c[] = { 0x27, 0x2c, 0x2d, 0x2e, 0x2f, 0x4c, 0x4d, I2C_CLIENT_END };
+
+ #define EMC2305_REG_DRIVE_FAIL_STATUS 0x27
+-#define EMC2305_REG_DEVICE 0xfd
+ #define EMC2305_REG_VENDOR 0xfe
+ #define EMC2305_FAN_MAX 0xff
+ #define EMC2305_FAN_MIN 0x00
+@@ -172,22 +171,12 @@ static int emc2305_get_max_state(struct thermal_cooling_device *cdev, unsigned l
+ return 0;
+ }
+
+-static int emc2305_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
++static int __emc2305_set_cur_state(struct emc2305_data *data, int cdev_idx, unsigned long state)
+ {
+- int cdev_idx, ret;
+- struct emc2305_data *data = cdev->devdata;
++ int ret;
+ struct i2c_client *client = data->client;
+ u8 val, i;
+
+- if (state > data->max_state)
+- return -EINVAL;
+-
+- cdev_idx = emc2305_get_cdev_idx(cdev);
+- if (cdev_idx < 0)
+- return cdev_idx;
+-
+- /* Save thermal state. */
+- data->cdev_data[cdev_idx].last_thermal_state = state;
+ state = max_t(unsigned long, state, data->cdev_data[cdev_idx].last_hwmon_state);
+
+ val = EMC2305_PWM_STATE2DUTY(state, data->max_state, EMC2305_FAN_MAX);
+@@ -212,6 +201,27 @@ static int emc2305_set_cur_state(struct thermal_cooling_device *cdev, unsigned l
+ return 0;
+ }
+
++static int emc2305_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
++{
++ int cdev_idx, ret;
++ struct emc2305_data *data = cdev->devdata;
++
++ if (state > data->max_state)
++ return -EINVAL;
++
++ cdev_idx = emc2305_get_cdev_idx(cdev);
++ if (cdev_idx < 0)
++ return cdev_idx;
++
++ /* Save thermal state. */
++ data->cdev_data[cdev_idx].last_thermal_state = state;
++ ret = __emc2305_set_cur_state(data, cdev_idx, state);
++ if (ret < 0)
++ return ret;
++
++ return 0;
++}
++
+ static const struct thermal_cooling_device_ops emc2305_cooling_ops = {
+ .get_max_state = emc2305_get_max_state,
+ .get_cur_state = emc2305_get_cur_state,
+@@ -402,7 +412,7 @@ emc2305_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int ch
+ */
+ if (data->cdev_data[cdev_idx].last_hwmon_state >=
+ data->cdev_data[cdev_idx].last_thermal_state)
+- return emc2305_set_cur_state(data->cdev_data[cdev_idx].cdev,
++ return __emc2305_set_cur_state(data, cdev_idx,
+ data->cdev_data[cdev_idx].last_hwmon_state);
+ return 0;
+ }
+@@ -524,7 +534,7 @@ static int emc2305_probe(struct i2c_client *client, const struct i2c_device_id *
+ struct device *dev = &client->dev;
+ struct emc2305_data *data;
+ struct emc2305_platform_data *pdata;
+- int vendor, device;
++ int vendor;
+ int ret;
+ int i;
+
+@@ -535,10 +545,6 @@ static int emc2305_probe(struct i2c_client *client, const struct i2c_device_id *
+ if (vendor != EMC2305_VENDOR)
+ return -ENODEV;
+
+- device = i2c_smbus_read_byte_data(client, EMC2305_REG_DEVICE);
+- if (device != EMC2305_DEVICE)
+- return -ENODEV;
+-
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
+index 30888feaf589b..6593d81cb901b 100644
+--- a/drivers/hwmon/jc42.c
++++ b/drivers/hwmon/jc42.c
+@@ -19,6 +19,7 @@
+ #include <linux/err.h>
+ #include <linux/mutex.h>
+ #include <linux/of.h>
++#include <linux/regmap.h>
+
+ /* Addresses to scan */
+ static const unsigned short normal_i2c[] = {
+@@ -199,31 +200,14 @@ static struct jc42_chips jc42_chips[] = {
+ { STM_MANID, STTS3000_DEVID, STTS3000_DEVID_MASK },
+ };
+
+-enum temp_index {
+- t_input = 0,
+- t_crit,
+- t_min,
+- t_max,
+- t_num_temp
+-};
+-
+-static const u8 temp_regs[t_num_temp] = {
+- [t_input] = JC42_REG_TEMP,
+- [t_crit] = JC42_REG_TEMP_CRITICAL,
+- [t_min] = JC42_REG_TEMP_LOWER,
+- [t_max] = JC42_REG_TEMP_UPPER,
+-};
+-
+ /* Each client has this additional data */
+ struct jc42_data {
+- struct i2c_client *client;
+ struct mutex update_lock; /* protect register access */
++ struct regmap *regmap;
+ bool extended; /* true if extended range supported */
+ bool valid;
+- unsigned long last_updated; /* In jiffies */
+ u16 orig_config; /* original configuration */
+ u16 config; /* current configuration */
+- u16 temp[t_num_temp];/* Temperatures */
+ };
+
+ #define JC42_TEMP_MIN_EXTENDED (-40000)
+@@ -248,85 +232,102 @@ static int jc42_temp_from_reg(s16 reg)
+ return reg * 125 / 2;
+ }
+
+-static struct jc42_data *jc42_update_device(struct device *dev)
+-{
+- struct jc42_data *data = dev_get_drvdata(dev);
+- struct i2c_client *client = data->client;
+- struct jc42_data *ret = data;
+- int i, val;
+-
+- mutex_lock(&data->update_lock);
+-
+- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+- for (i = 0; i < t_num_temp; i++) {
+- val = i2c_smbus_read_word_swapped(client, temp_regs[i]);
+- if (val < 0) {
+- ret = ERR_PTR(val);
+- goto abort;
+- }
+- data->temp[i] = val;
+- }
+- data->last_updated = jiffies;
+- data->valid = true;
+- }
+-abort:
+- mutex_unlock(&data->update_lock);
+- return ret;
+-}
+-
+ static int jc42_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+ {
+- struct jc42_data *data = jc42_update_device(dev);
+- int temp, hyst;
++ struct jc42_data *data = dev_get_drvdata(dev);
++ unsigned int regval;
++ int ret, temp, hyst;
+
+- if (IS_ERR(data))
+- return PTR_ERR(data);
++ mutex_lock(&data->update_lock);
+
+ switch (attr) {
+ case hwmon_temp_input:
+- *val = jc42_temp_from_reg(data->temp[t_input]);
+- return 0;
++ ret = regmap_read(data->regmap, JC42_REG_TEMP, &regval);
++ if (ret)
++ break;
++
++ *val = jc42_temp_from_reg(regval);
++ break;
+ case hwmon_temp_min:
+- *val = jc42_temp_from_reg(data->temp[t_min]);
+- return 0;
++ ret = regmap_read(data->regmap, JC42_REG_TEMP_LOWER, &regval);
++ if (ret)
++ break;
++
++ *val = jc42_temp_from_reg(regval);
++ break;
+ case hwmon_temp_max:
+- *val = jc42_temp_from_reg(data->temp[t_max]);
+- return 0;
++ ret = regmap_read(data->regmap, JC42_REG_TEMP_UPPER, &regval);
++ if (ret)
++ break;
++
++ *val = jc42_temp_from_reg(regval);
++ break;
+ case hwmon_temp_crit:
+- *val = jc42_temp_from_reg(data->temp[t_crit]);
+- return 0;
++ ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL,
++ &regval);
++ if (ret)
++ break;
++
++ *val = jc42_temp_from_reg(regval);
++ break;
+ case hwmon_temp_max_hyst:
+- temp = jc42_temp_from_reg(data->temp[t_max]);
++ ret = regmap_read(data->regmap, JC42_REG_TEMP_UPPER, &regval);
++ if (ret)
++ break;
++
++ temp = jc42_temp_from_reg(regval);
+ hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK)
+ >> JC42_CFG_HYST_SHIFT];
+ *val = temp - hyst;
+- return 0;
++ break;
+ case hwmon_temp_crit_hyst:
+- temp = jc42_temp_from_reg(data->temp[t_crit]);
++ ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL,
++ &regval);
++ if (ret)
++ break;
++
++ temp = jc42_temp_from_reg(regval);
+ hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK)
+ >> JC42_CFG_HYST_SHIFT];
+ *val = temp - hyst;
+- return 0;
++ break;
+ case hwmon_temp_min_alarm:
+- *val = (data->temp[t_input] >> JC42_ALARM_MIN_BIT) & 1;
+- return 0;
++ ret = regmap_read(data->regmap, JC42_REG_TEMP, &regval);
++ if (ret)
++ break;
++
++ *val = (regval >> JC42_ALARM_MIN_BIT) & 1;
++ break;
+ case hwmon_temp_max_alarm:
+- *val = (data->temp[t_input] >> JC42_ALARM_MAX_BIT) & 1;
+- return 0;
++ ret = regmap_read(data->regmap, JC42_REG_TEMP, &regval);
++ if (ret)
++ break;
++
++ *val = (regval >> JC42_ALARM_MAX_BIT) & 1;
++ break;
+ case hwmon_temp_crit_alarm:
+- *val = (data->temp[t_input] >> JC42_ALARM_CRIT_BIT) & 1;
+- return 0;
++ ret = regmap_read(data->regmap, JC42_REG_TEMP, &regval);
++ if (ret)
++ break;
++
++ *val = (regval >> JC42_ALARM_CRIT_BIT) & 1;
++ break;
+ default:
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ break;
+ }
++
++ mutex_unlock(&data->update_lock);
++
++ return ret;
+ }
+
+ static int jc42_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+ {
+ struct jc42_data *data = dev_get_drvdata(dev);
+- struct i2c_client *client = data->client;
++ unsigned int regval;
+ int diff, hyst;
+ int ret;
+
+@@ -334,21 +335,23 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type,
+
+ switch (attr) {
+ case hwmon_temp_min:
+- data->temp[t_min] = jc42_temp_to_reg(val, data->extended);
+- ret = i2c_smbus_write_word_swapped(client, temp_regs[t_min],
+- data->temp[t_min]);
++ ret = regmap_write(data->regmap, JC42_REG_TEMP_LOWER,
++ jc42_temp_to_reg(val, data->extended));
+ break;
+ case hwmon_temp_max:
+- data->temp[t_max] = jc42_temp_to_reg(val, data->extended);
+- ret = i2c_smbus_write_word_swapped(client, temp_regs[t_max],
+- data->temp[t_max]);
++ ret = regmap_write(data->regmap, JC42_REG_TEMP_UPPER,
++ jc42_temp_to_reg(val, data->extended));
+ break;
+ case hwmon_temp_crit:
+- data->temp[t_crit] = jc42_temp_to_reg(val, data->extended);
+- ret = i2c_smbus_write_word_swapped(client, temp_regs[t_crit],
+- data->temp[t_crit]);
++ ret = regmap_write(data->regmap, JC42_REG_TEMP_CRITICAL,
++ jc42_temp_to_reg(val, data->extended));
+ break;
+ case hwmon_temp_crit_hyst:
++ ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL,
++ &regval);
++ if (ret)
++ break;
++
+ /*
+ * JC42.4 compliant chips only support four hysteresis values.
+ * Pick best choice and go from there.
+@@ -356,7 +359,7 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type,
+ val = clamp_val(val, (data->extended ? JC42_TEMP_MIN_EXTENDED
+ : JC42_TEMP_MIN) - 6000,
+ JC42_TEMP_MAX);
+- diff = jc42_temp_from_reg(data->temp[t_crit]) - val;
++ diff = jc42_temp_from_reg(regval) - val;
+ hyst = 0;
+ if (diff > 0) {
+ if (diff < 2250)
+@@ -368,9 +371,8 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type,
+ }
+ data->config = (data->config & ~JC42_CFG_HYST_MASK) |
+ (hyst << JC42_CFG_HYST_SHIFT);
+- ret = i2c_smbus_write_word_swapped(data->client,
+- JC42_REG_CONFIG,
+- data->config);
++ ret = regmap_write(data->regmap, JC42_REG_CONFIG,
++ data->config);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+@@ -470,51 +472,80 @@ static const struct hwmon_chip_info jc42_chip_info = {
+ .info = jc42_info,
+ };
+
++static bool jc42_readable_reg(struct device *dev, unsigned int reg)
++{
++ return (reg >= JC42_REG_CAP && reg <= JC42_REG_DEVICEID) ||
++ reg == JC42_REG_SMBUS;
++}
++
++static bool jc42_writable_reg(struct device *dev, unsigned int reg)
++{
++ return (reg >= JC42_REG_CONFIG && reg <= JC42_REG_TEMP_CRITICAL) ||
++ reg == JC42_REG_SMBUS;
++}
++
++static bool jc42_volatile_reg(struct device *dev, unsigned int reg)
++{
++ return reg == JC42_REG_CONFIG || reg == JC42_REG_TEMP;
++}
++
++static const struct regmap_config jc42_regmap_config = {
++ .reg_bits = 8,
++ .val_bits = 16,
++ .val_format_endian = REGMAP_ENDIAN_BIG,
++ .max_register = JC42_REG_SMBUS,
++ .writeable_reg = jc42_writable_reg,
++ .readable_reg = jc42_readable_reg,
++ .volatile_reg = jc42_volatile_reg,
++ .cache_type = REGCACHE_RBTREE,
++};
++
+ static int jc42_probe(struct i2c_client *client)
+ {
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
++ unsigned int config, cap;
+ struct jc42_data *data;
+- int config, cap;
++ int ret;
+
+ data = devm_kzalloc(dev, sizeof(struct jc42_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+- data->client = client;
++ data->regmap = devm_regmap_init_i2c(client, &jc42_regmap_config);
++ if (IS_ERR(data->regmap))
++ return PTR_ERR(data->regmap);
++
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+- cap = i2c_smbus_read_word_swapped(client, JC42_REG_CAP);
+- if (cap < 0)
+- return cap;
++ ret = regmap_read(data->regmap, JC42_REG_CAP, &cap);
++ if (ret)
++ return ret;
+
+ data->extended = !!(cap & JC42_CAP_RANGE);
+
+ if (device_property_read_bool(dev, "smbus-timeout-disable")) {
+- int smbus;
+-
+ /*
+ * Not all chips support this register, but from a
+ * quick read of various datasheets no chip appears
+ * incompatible with the below attempt to disable
+ * the timeout. And the whole thing is opt-in...
+ */
+- smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS);
+- if (smbus < 0)
+- return smbus;
+- i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS,
+- smbus | SMBUS_STMOUT);
++ ret = regmap_set_bits(data->regmap, JC42_REG_SMBUS,
++ SMBUS_STMOUT);
++ if (ret)
++ return ret;
+ }
+
+- config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG);
+- if (config < 0)
+- return config;
++ ret = regmap_read(data->regmap, JC42_REG_CONFIG, &config);
++ if (ret)
++ return ret;
+
+ data->orig_config = config;
+ if (config & JC42_CFG_SHUTDOWN) {
+ config &= ~JC42_CFG_SHUTDOWN;
+- i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, config);
++ regmap_write(data->regmap, JC42_REG_CONFIG, config);
+ }
+ data->config = config;
+
+@@ -535,7 +566,7 @@ static void jc42_remove(struct i2c_client *client)
+
+ config = (data->orig_config & ~JC42_CFG_HYST_MASK)
+ | (data->config & JC42_CFG_HYST_MASK);
+- i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, config);
++ regmap_write(data->regmap, JC42_REG_CONFIG, config);
+ }
+ }
+
+@@ -546,8 +577,11 @@ static int jc42_suspend(struct device *dev)
+ struct jc42_data *data = dev_get_drvdata(dev);
+
+ data->config |= JC42_CFG_SHUTDOWN;
+- i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
+- data->config);
++ regmap_write(data->regmap, JC42_REG_CONFIG, data->config);
++
++ regcache_cache_only(data->regmap, true);
++ regcache_mark_dirty(data->regmap);
++
+ return 0;
+ }
+
+@@ -555,10 +589,13 @@ static int jc42_resume(struct device *dev)
+ {
+ struct jc42_data *data = dev_get_drvdata(dev);
+
++ regcache_cache_only(data->regmap, false);
++
+ data->config &= ~JC42_CFG_SHUTDOWN;
+- i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
+- data->config);
+- return 0;
++ regmap_write(data->regmap, JC42_REG_CONFIG, data->config);
++
++ /* Restore cached register values to hardware */
++ return regcache_sync(data->regmap);
+ }
+
+ static const struct dev_pm_ops jc42_dev_pm_ops = {
+diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
+index b347837842139..bf43f73dc835f 100644
+--- a/drivers/hwmon/nct6775-platform.c
++++ b/drivers/hwmon/nct6775-platform.c
+@@ -1043,7 +1043,9 @@ static struct platform_device *pdev[2];
+
+ static const char * const asus_wmi_boards[] = {
+ "PRO H410T",
++ "ProArt B550-CREATOR",
+ "ProArt X570-CREATOR WIFI",
++ "ProArt Z490-CREATOR 10G",
+ "Pro B550M-C",
+ "Pro WS X570-ACE",
+ "PRIME B360-PLUS",
+@@ -1055,8 +1057,10 @@ static const char * const asus_wmi_boards[] = {
+ "PRIME X570-P",
+ "PRIME X570-PRO",
+ "ROG CROSSHAIR VIII DARK HERO",
++ "ROG CROSSHAIR VIII EXTREME",
+ "ROG CROSSHAIR VIII FORMULA",
+ "ROG CROSSHAIR VIII HERO",
++ "ROG CROSSHAIR VIII HERO (WI-FI)",
+ "ROG CROSSHAIR VIII IMPACT",
+ "ROG STRIX B550-A GAMING",
+ "ROG STRIX B550-E GAMING",
+@@ -1080,8 +1084,11 @@ static const char * const asus_wmi_boards[] = {
+ "ROG STRIX Z490-G GAMING (WI-FI)",
+ "ROG STRIX Z490-H GAMING",
+ "ROG STRIX Z490-I GAMING",
++ "TUF GAMING B550M-E",
++ "TUF GAMING B550M-E (WI-FI)",
+ "TUF GAMING B550M-PLUS",
+ "TUF GAMING B550M-PLUS (WI-FI)",
++ "TUF GAMING B550M-PLUS WIFI II",
+ "TUF GAMING B550-PLUS",
+ "TUF GAMING B550-PLUS WIFI II",
+ "TUF GAMING B550-PRO",
+diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
+index c6e8c6542f24b..d2cf4f4848e1b 100644
+--- a/drivers/hwtracing/coresight/coresight-cti-core.c
++++ b/drivers/hwtracing/coresight/coresight-cti-core.c
+@@ -564,7 +564,7 @@ static void cti_add_assoc_to_csdev(struct coresight_device *csdev)
+ * if we found a matching csdev then update the ECT
+ * association pointer for the device with this CTI.
+ */
+- coresight_set_assoc_ectdev_mutex(csdev->ect_dev,
++ coresight_set_assoc_ectdev_mutex(csdev,
+ ect_item->csdev);
+ break;
+ }
+diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
+index 2b386bb848f8d..1fc4fd79a1c69 100644
+--- a/drivers/hwtracing/coresight/coresight-trbe.c
++++ b/drivers/hwtracing/coresight/coresight-trbe.c
+@@ -1434,6 +1434,7 @@ static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata)
+
+ static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
+ {
++ cpuhp_state_remove_instance(drvdata->trbe_online, &drvdata->hotplug_node);
+ cpuhp_remove_multi_state(drvdata->trbe_online);
+ }
+
+diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
+index fe2349590f75e..c74985d77b0ec 100644
+--- a/drivers/i2c/busses/i2c-ismt.c
++++ b/drivers/i2c/busses/i2c-ismt.c
+@@ -509,6 +509,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
+ if (read_write == I2C_SMBUS_WRITE) {
+ /* Block Write */
+ dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: WRITE\n");
++ if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
++ return -EINVAL;
++
+ dma_size = data->block[0] + 1;
+ dma_direction = DMA_TO_DEVICE;
+ desc->wr_len_cmd = dma_size;
+diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
+index f614cade432bb..30e38bc8b6db8 100644
+--- a/drivers/i2c/busses/i2c-pxa-pci.c
++++ b/drivers/i2c/busses/i2c-pxa-pci.c
+@@ -105,7 +105,7 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
+ int i;
+ struct ce4100_devices *sds;
+
+- ret = pci_enable_device_mem(dev);
++ ret = pcim_enable_device(dev);
+ if (ret)
+ return ret;
+
+@@ -114,10 +114,8 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
+ return -EINVAL;
+ }
+ sds = kzalloc(sizeof(*sds), GFP_KERNEL);
+- if (!sds) {
+- ret = -ENOMEM;
+- goto err_mem;
+- }
++ if (!sds)
++ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
+ sds->pdev[i] = add_i2c_device(dev, i);
+@@ -133,8 +131,6 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
+
+ err_dev_add:
+ kfree(sds);
+-err_mem:
+- pci_disable_device(dev);
+ return ret;
+ }
+
+diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
+index 0e0679f65cf77..30a6de1694e07 100644
+--- a/drivers/i2c/muxes/i2c-mux-reg.c
++++ b/drivers/i2c/muxes/i2c-mux-reg.c
+@@ -183,13 +183,12 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
+ if (!mux->data.reg) {
+ dev_info(&pdev->dev,
+ "Register not set, using platform resource\n");
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- mux->data.reg_size = resource_size(res);
+- mux->data.reg = devm_ioremap_resource(&pdev->dev, res);
++ mux->data.reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(mux->data.reg)) {
+ ret = PTR_ERR(mux->data.reg);
+ goto err_put_parent;
+ }
++ mux->data.reg_size = resource_size(res);
+ }
+
+ if (mux->data.reg_size != 4 && mux->data.reg_size != 2 &&
+diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
+index 261a9a6b45e15..d8570f620785a 100644
+--- a/drivers/iio/adc/ad_sigma_delta.c
++++ b/drivers/iio/adc/ad_sigma_delta.c
+@@ -281,10 +281,10 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
+ unsigned int data_reg;
+ int ret = 0;
+
+- if (iio_buffer_enabled(indio_dev))
+- return -EBUSY;
++ ret = iio_device_claim_direct_mode(indio_dev);
++ if (ret)
++ return ret;
+
+- mutex_lock(&indio_dev->mlock);
+ ad_sigma_delta_set_channel(sigma_delta, chan->address);
+
+ spi_bus_lock(sigma_delta->spi->master);
+@@ -323,7 +323,7 @@ out:
+ ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+ sigma_delta->bus_locked = false;
+ spi_bus_unlock(sigma_delta->spi->master);
+- mutex_unlock(&indio_dev->mlock);
++ iio_device_release_direct_mode(indio_dev);
+
+ if (ret)
+ return ret;
+diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
+index 622fd384983c7..b3d5b9b7255bc 100644
+--- a/drivers/iio/adc/ti-adc128s052.c
++++ b/drivers/iio/adc/ti-adc128s052.c
+@@ -181,13 +181,13 @@ static int adc128_probe(struct spi_device *spi)
+ }
+
+ static const struct of_device_id adc128_of_match[] = {
+- { .compatible = "ti,adc128s052", },
+- { .compatible = "ti,adc122s021", },
+- { .compatible = "ti,adc122s051", },
+- { .compatible = "ti,adc122s101", },
+- { .compatible = "ti,adc124s021", },
+- { .compatible = "ti,adc124s051", },
+- { .compatible = "ti,adc124s101", },
++ { .compatible = "ti,adc128s052", .data = (void*)0L, },
++ { .compatible = "ti,adc122s021", .data = (void*)1L, },
++ { .compatible = "ti,adc122s051", .data = (void*)1L, },
++ { .compatible = "ti,adc122s101", .data = (void*)1L, },
++ { .compatible = "ti,adc124s021", .data = (void*)2L, },
++ { .compatible = "ti,adc124s051", .data = (void*)2L, },
++ { .compatible = "ti,adc124s101", .data = (void*)2L, },
+ { /* sentinel */ },
+ };
+ MODULE_DEVICE_TABLE(of, adc128_of_match);
+diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
+index 899bcd83f40bc..e0e130ba9d3ec 100644
+--- a/drivers/iio/addac/ad74413r.c
++++ b/drivers/iio/addac/ad74413r.c
+@@ -691,7 +691,7 @@ static int ad74413_get_input_current_offset(struct ad74413r_state *st,
+ if (ret)
+ return ret;
+
+- *val = voltage_offset * AD74413R_ADC_RESULT_MAX / voltage_range;
++ *val = voltage_offset * (int)AD74413R_ADC_RESULT_MAX / voltage_range;
+
+ return IIO_VAL_INT;
+ }
+diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
+index f7fcfd04f659d..bc40240b29e26 100644
+--- a/drivers/iio/imu/adis.c
++++ b/drivers/iio/imu/adis.c
+@@ -270,23 +270,19 @@ EXPORT_SYMBOL_NS(adis_debugfs_reg_access, IIO_ADISLIB);
+ #endif
+
+ /**
+- * adis_enable_irq() - Enable or disable data ready IRQ
++ * __adis_enable_irq() - Enable or disable data ready IRQ (unlocked)
+ * @adis: The adis device
+ * @enable: Whether to enable the IRQ
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+-int adis_enable_irq(struct adis *adis, bool enable)
++int __adis_enable_irq(struct adis *adis, bool enable)
+ {
+- int ret = 0;
++ int ret;
+ u16 msc;
+
+- mutex_lock(&adis->state_lock);
+-
+- if (adis->data->enable_irq) {
+- ret = adis->data->enable_irq(adis, enable);
+- goto out_unlock;
+- }
++ if (adis->data->enable_irq)
++ return adis->data->enable_irq(adis, enable);
+
+ if (adis->data->unmasked_drdy) {
+ if (enable)
+@@ -294,12 +290,12 @@ int adis_enable_irq(struct adis *adis, bool enable)
+ else
+ disable_irq(adis->spi->irq);
+
+- goto out_unlock;
++ return 0;
+ }
+
+ ret = __adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc);
+ if (ret)
+- goto out_unlock;
++ return ret;
+
+ msc |= ADIS_MSC_CTRL_DATA_RDY_POL_HIGH;
+ msc &= ~ADIS_MSC_CTRL_DATA_RDY_DIO2;
+@@ -308,13 +304,9 @@ int adis_enable_irq(struct adis *adis, bool enable)
+ else
+ msc &= ~ADIS_MSC_CTRL_DATA_RDY_EN;
+
+- ret = __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc);
+-
+-out_unlock:
+- mutex_unlock(&adis->state_lock);
+- return ret;
++ return __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc);
+ }
+-EXPORT_SYMBOL_NS(adis_enable_irq, IIO_ADISLIB);
++EXPORT_SYMBOL_NS(__adis_enable_irq, IIO_ADISLIB);
+
+ /**
+ * __adis_check_status() - Check the device for error conditions (unlocked)
+@@ -445,7 +437,7 @@ int __adis_initial_startup(struct adis *adis)
+ * with 'IRQF_NO_AUTOEN' anyways.
+ */
+ if (!adis->data->unmasked_drdy)
+- adis_enable_irq(adis, false);
++ __adis_enable_irq(adis, false);
+
+ if (!adis->data->prod_id_reg)
+ return 0;
+diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
+index 3d78da2531a9a..727e2ef66aa4b 100644
+--- a/drivers/iio/industrialio-event.c
++++ b/drivers/iio/industrialio-event.c
+@@ -556,7 +556,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
+
+ ret = iio_device_register_sysfs_group(indio_dev, &ev_int->group);
+ if (ret)
+- goto error_free_setup_event_lines;
++ goto error_free_group_attrs;
+
+ ev_int->ioctl_handler.ioctl = iio_event_ioctl;
+ iio_device_ioctl_handler_register(&iio_dev_opaque->indio_dev,
+@@ -564,6 +564,8 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
+
+ return 0;
+
++error_free_group_attrs:
++ kfree(ev_int->group.attrs);
+ error_free_setup_event_lines:
+ iio_free_chan_devattr_list(&ev_int->dev_attr_list);
+ kfree(ev_int);
+diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
+index a60ccf1836872..1117991ca2ab6 100644
+--- a/drivers/iio/temperature/ltc2983.c
++++ b/drivers/iio/temperature/ltc2983.c
+@@ -209,6 +209,7 @@ struct ltc2983_data {
+ * Holds the converted temperature
+ */
+ __be32 temp __aligned(IIO_DMA_MINALIGN);
++ __be32 chan_val;
+ };
+
+ struct ltc2983_sensor {
+@@ -313,19 +314,18 @@ static int __ltc2983_fault_handler(const struct ltc2983_data *st,
+ return 0;
+ }
+
+-static int __ltc2983_chan_assign_common(const struct ltc2983_data *st,
++static int __ltc2983_chan_assign_common(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor,
+ u32 chan_val)
+ {
+ u32 reg = LTC2983_CHAN_START_ADDR(sensor->chan);
+- __be32 __chan_val;
+
+ chan_val |= LTC2983_CHAN_TYPE(sensor->type);
+ dev_dbg(&st->spi->dev, "Assign reg:0x%04X, val:0x%08X\n", reg,
+ chan_val);
+- __chan_val = cpu_to_be32(chan_val);
+- return regmap_bulk_write(st->regmap, reg, &__chan_val,
+- sizeof(__chan_val));
++ st->chan_val = cpu_to_be32(chan_val);
++ return regmap_bulk_write(st->regmap, reg, &st->chan_val,
++ sizeof(st->chan_val));
+ }
+
+ static int __ltc2983_chan_custom_sensor_assign(struct ltc2983_data *st,
+diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
+index aa36ac618e729..17a2274152771 100644
+--- a/drivers/infiniband/Kconfig
++++ b/drivers/infiniband/Kconfig
+@@ -78,6 +78,7 @@ config INFINIBAND_VIRT_DMA
+ def_bool !HIGHMEM
+
+ if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
++if !UML
+ source "drivers/infiniband/hw/bnxt_re/Kconfig"
+ source "drivers/infiniband/hw/cxgb4/Kconfig"
+ source "drivers/infiniband/hw/efa/Kconfig"
+@@ -94,6 +95,7 @@ source "drivers/infiniband/hw/qib/Kconfig"
+ source "drivers/infiniband/hw/usnic/Kconfig"
+ source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
+ source "drivers/infiniband/sw/rdmavt/Kconfig"
++endif # !UML
+ source "drivers/infiniband/sw/rxe/Kconfig"
+ source "drivers/infiniband/sw/siw/Kconfig"
+ endif
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index b69e2c4e4d2a4..3c422698a51c1 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -2851,8 +2851,8 @@ err:
+ static void __exit ib_core_cleanup(void)
+ {
+ roce_gid_mgmt_cleanup();
+- nldev_exit();
+ rdma_nl_unregister(RDMA_NL_LS);
++ nldev_exit();
+ unregister_pernet_device(&rdma_dev_net_ops);
+ unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
+ ib_sa_cleanup();
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index 1893aa613ad73..674344eb8e2f4 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -59,9 +59,6 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_qp_info *qp_info,
+ struct trace_event_raw_ib_mad_send_template *entry)
+ {
+- u16 pkey;
+- struct ib_device *dev = qp_info->port_priv->device;
+- u32 pnum = qp_info->port_priv->port_num;
+ struct ib_ud_wr *wr = &mad_send_wr->send_wr;
+ struct rdma_ah_attr attr = {};
+
+@@ -69,8 +66,6 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
+
+ /* These are common */
+ entry->sl = attr.sl;
+- ib_query_pkey(dev, pnum, wr->pkey_index, &pkey);
+- entry->pkey = pkey;
+ entry->rqpn = wr->remote_qpn;
+ entry->rqkey = wr->remote_qkey;
+ entry->dlid = rdma_ah_get_dlid(&attr);
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index 12dc97067ed2b..222733a83ddb7 100644
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -513,7 +513,7 @@ static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
+
+ /* In create_qp() port is not set yet */
+ if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port))
+- return -EINVAL;
++ return -EMSGSIZE;
+
+ ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num);
+ if (ret)
+@@ -552,7 +552,7 @@ static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_cm_id *cm_id = &id_priv->id;
+
+ if (port && port != cm_id->port_num)
+- return 0;
++ return -EAGAIN;
+
+ if (cm_id->port_num &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
+@@ -894,6 +894,8 @@ static int fill_stat_counter_qps(struct sk_buff *msg,
+ int ret = 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
++ if (!table_attr)
++ return -EMSGSIZE;
+
+ rt = &counter->device->res[RDMA_RESTRACK_QP];
+ xa_lock(&rt->xa);
+diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
+index 1f935d9f61785..01a499a8b88db 100644
+--- a/drivers/infiniband/core/restrack.c
++++ b/drivers/infiniband/core/restrack.c
+@@ -343,8 +343,6 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
+ rt = &dev->res[res->type];
+
+ old = xa_erase(&rt->xa, res->id);
+- if (res->type == RDMA_RESTRACK_MR)
+- return;
+ WARN_ON(old != res);
+
+ out:
+diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
+index 84c53bd2a52db..ee59d73915689 100644
+--- a/drivers/infiniband/core/sysfs.c
++++ b/drivers/infiniband/core/sysfs.c
+@@ -1213,6 +1213,9 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
+ p->port_num = port_num;
+ kobject_init(&p->kobj, &port_type);
+
++ if (device->port_data && is_full_dev)
++ device->port_data[port_num].sysfs = p;
++
+ cur_group = p->groups_list;
+ ret = alloc_port_table_group("gids", &p->groups[0], p->attrs_list,
+ attr->gid_tbl_len, show_port_gid);
+@@ -1258,9 +1261,6 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
+ }
+
+ list_add_tail(&p->kobj.entry, &coredev->port_list);
+- if (device->port_data && is_full_dev)
+- device->port_data[port_num].sysfs = p;
+-
+ return p;
+
+ err_groups:
+@@ -1268,6 +1268,8 @@ err_groups:
+ err_del:
+ kobject_del(&p->kobj);
+ err_put:
++ if (device->port_data && is_full_dev)
++ device->port_data[port_num].sysfs = NULL;
+ kobject_put(&p->kobj);
+ return ERR_PTR(ret);
+ }
+@@ -1276,14 +1278,17 @@ static void destroy_port(struct ib_core_device *coredev, struct ib_port *port)
+ {
+ bool is_full_dev = &port->ibdev->coredev == coredev;
+
+- if (port->ibdev->port_data &&
+- port->ibdev->port_data[port->port_num].sysfs == port)
+- port->ibdev->port_data[port->port_num].sysfs = NULL;
+ list_del(&port->kobj.entry);
+ if (is_full_dev)
+ sysfs_remove_groups(&port->kobj, port->ibdev->ops.port_groups);
++
+ sysfs_remove_groups(&port->kobj, port->groups_list);
+ kobject_del(&port->kobj);
++
++ if (port->ibdev->port_data &&
++ port->ibdev->port_data[port->port_num].sysfs == port)
++ port->ibdev->port_data[port->port_num].sysfs = NULL;
++
+ kobject_put(&port->kobj);
+ }
+
+diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
+index 877f8e84a672a..77ee77d4000fb 100644
+--- a/drivers/infiniband/hw/hfi1/affinity.c
++++ b/drivers/infiniband/hw/hfi1/affinity.c
+@@ -177,6 +177,8 @@ out:
+ for (node = 0; node < node_affinity.num_possible_nodes; node++)
+ hfi1_per_node_cntr[node] = 1;
+
++ pci_dev_put(dev);
++
+ return 0;
+ }
+
+diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
+index 1d77514ebbee0..0c0cef5b1e0e5 100644
+--- a/drivers/infiniband/hw/hfi1/firmware.c
++++ b/drivers/infiniband/hw/hfi1/firmware.c
+@@ -1743,6 +1743,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+
+ if (!dd->platform_config.data) {
+ dd_dev_err(dd, "%s: Missing config file\n", __func__);
++ ret = -EINVAL;
+ goto bail;
+ }
+ ptr = (u32 *)dd->platform_config.data;
+@@ -1751,6 +1752,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ ptr++;
+ if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
+ dd_dev_err(dd, "%s: Bad config file\n", __func__);
++ ret = -EINVAL;
+ goto bail;
+ }
+
+@@ -1774,6 +1776,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ if (file_length > dd->platform_config.size) {
+ dd_dev_info(dd, "%s:File claims to be larger than read size\n",
+ __func__);
++ ret = -EINVAL;
+ goto bail;
+ } else if (file_length < dd->platform_config.size) {
+ dd_dev_info(dd,
+@@ -1794,6 +1797,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ dd_dev_err(dd, "%s: Failed validation at offset %ld\n",
+ __func__, (ptr - (u32 *)
+ dd->platform_config.data));
++ ret = -EINVAL;
+ goto bail;
+ }
+
+@@ -1837,6 +1841,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ __func__, table_type,
+ (ptr - (u32 *)
+ dd->platform_config.data));
++ ret = -EINVAL;
+ goto bail; /* We don't trust this file now */
+ }
+ pcfgcache->config_tables[table_type].table = ptr;
+@@ -1856,6 +1861,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ __func__, table_type,
+ (ptr -
+ (u32 *)dd->platform_config.data));
++ ret = -EINVAL;
+ goto bail; /* We don't trust this file now */
+ }
+ pcfgcache->config_tables[table_type].table_metadata =
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index 723e55a7de8d9..f701cc86896b3 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -202,6 +202,7 @@ struct hns_roce_ucontext {
+ struct list_head page_list;
+ struct mutex page_mutex;
+ struct hns_user_mmap_entry *db_mmap_entry;
++ u32 config;
+ };
+
+ struct hns_roce_pd {
+@@ -334,6 +335,7 @@ struct hns_roce_wq {
+ u32 head;
+ u32 tail;
+ void __iomem *db_reg;
++ u32 ext_sge_cnt;
+ };
+
+ struct hns_roce_sge {
+@@ -635,6 +637,7 @@ struct hns_roce_qp {
+ struct list_head rq_node; /* all recv qps are on a list */
+ struct list_head sq_node; /* all send qps are on a list */
+ struct hns_user_mmap_entry *dwqe_mmap_entry;
++ u32 config;
+ };
+
+ struct hns_roce_ib_iboe {
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 1435fe2ea176f..b2421883993b1 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -192,7 +192,6 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
+ unsigned int *sge_idx, u32 msg_len)
+ {
+ struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
+- unsigned int ext_sge_sz = qp->sq.max_gs * HNS_ROCE_SGE_SIZE;
+ unsigned int left_len_in_pg;
+ unsigned int idx = *sge_idx;
+ unsigned int i = 0;
+@@ -200,7 +199,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
+ void *addr;
+ void *dseg;
+
+- if (msg_len > ext_sge_sz) {
++ if (msg_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE) {
+ ibdev_err(ibdev,
+ "no enough extended sge space for inline data.\n");
+ return -EINVAL;
+@@ -1274,6 +1273,30 @@ static void update_cmdq_status(struct hns_roce_dev *hr_dev)
+ hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR;
+ }
+
++static int hns_roce_cmd_err_convert_errno(u16 desc_ret)
++{
++ struct hns_roce_cmd_errcode errcode_table[] = {
++ {CMD_EXEC_SUCCESS, 0},
++ {CMD_NO_AUTH, -EPERM},
++ {CMD_NOT_EXIST, -EOPNOTSUPP},
++ {CMD_CRQ_FULL, -EXFULL},
++ {CMD_NEXT_ERR, -ENOSR},
++ {CMD_NOT_EXEC, -ENOTBLK},
++ {CMD_PARA_ERR, -EINVAL},
++ {CMD_RESULT_ERR, -ERANGE},
++ {CMD_TIMEOUT, -ETIME},
++ {CMD_HILINK_ERR, -ENOLINK},
++ {CMD_INFO_ILLEGAL, -ENXIO},
++ {CMD_INVALID, -EBADR},
++ };
++ u16 i;
++
++ for (i = 0; i < ARRAY_SIZE(errcode_table); i++)
++ if (desc_ret == errcode_table[i].return_status)
++ return errcode_table[i].errno;
++ return -EIO;
++}
++
+ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc, int num)
+ {
+@@ -1319,7 +1342,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ dev_err_ratelimited(hr_dev->dev,
+ "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
+ desc->opcode, desc_ret);
+- ret = -EIO;
++ ret = hns_roce_cmd_err_convert_errno(desc_ret);
+ }
+ } else {
+ /* FW/HW reset or incorrect number of desc */
+@@ -2024,13 +2047,14 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
+
+ caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
+ HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
+- HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL | HNS_ROCE_CAP_FLAG_XRC;
++ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
+
+ caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ caps->flags |= HNS_ROCE_CAP_FLAG_STASH |
+- HNS_ROCE_CAP_FLAG_DIRECT_WQE;
++ HNS_ROCE_CAP_FLAG_DIRECT_WQE |
++ HNS_ROCE_CAP_FLAG_XRC;
+ caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
+ } else {
+ caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
+@@ -2342,6 +2366,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
+ caps->wqe_sge_hop_num = hr_reg_read(resp_d, PF_CAPS_D_EX_SGE_HOP_NUM);
+ caps->wqe_rq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_RQWQE_HOP_NUM);
+
++ if (!(caps->page_size_cap & PAGE_SIZE))
++ caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
++
+ return 0;
+ }
+
+@@ -2631,31 +2658,124 @@ static void free_dip_list(struct hns_roce_dev *hr_dev)
+ spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
+ }
+
+-static void free_mr_exit(struct hns_roce_dev *hr_dev)
++static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev)
++{
++ struct hns_roce_v2_priv *priv = hr_dev->priv;
++ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
++ struct ib_device *ibdev = &hr_dev->ib_dev;
++ struct hns_roce_pd *hr_pd;
++ struct ib_pd *pd;
++
++ hr_pd = kzalloc(sizeof(*hr_pd), GFP_KERNEL);
++ if (ZERO_OR_NULL_PTR(hr_pd))
++ return NULL;
++ pd = &hr_pd->ibpd;
++ pd->device = ibdev;
++
++ if (hns_roce_alloc_pd(pd, NULL)) {
++ ibdev_err(ibdev, "failed to create pd for free mr.\n");
++ kfree(hr_pd);
++ return NULL;
++ }
++ free_mr->rsv_pd = to_hr_pd(pd);
++ free_mr->rsv_pd->ibpd.device = &hr_dev->ib_dev;
++ free_mr->rsv_pd->ibpd.uobject = NULL;
++ free_mr->rsv_pd->ibpd.__internal_mr = NULL;
++ atomic_set(&free_mr->rsv_pd->ibpd.usecnt, 0);
++
++ return pd;
++}
++
++static struct ib_cq *free_mr_init_cq(struct hns_roce_dev *hr_dev)
+ {
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
++ struct ib_device *ibdev = &hr_dev->ib_dev;
++ struct ib_cq_init_attr cq_init_attr = {};
++ struct hns_roce_cq *hr_cq;
++ struct ib_cq *cq;
++
++ cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
++
++ hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
++ if (ZERO_OR_NULL_PTR(hr_cq))
++ return NULL;
++
++ cq = &hr_cq->ib_cq;
++ cq->device = ibdev;
++
++ if (hns_roce_create_cq(cq, &cq_init_attr, NULL)) {
++ ibdev_err(ibdev, "failed to create cq for free mr.\n");
++ kfree(hr_cq);
++ return NULL;
++ }
++ free_mr->rsv_cq = to_hr_cq(cq);
++ free_mr->rsv_cq->ib_cq.device = &hr_dev->ib_dev;
++ free_mr->rsv_cq->ib_cq.uobject = NULL;
++ free_mr->rsv_cq->ib_cq.comp_handler = NULL;
++ free_mr->rsv_cq->ib_cq.event_handler = NULL;
++ free_mr->rsv_cq->ib_cq.cq_context = NULL;
++ atomic_set(&free_mr->rsv_cq->ib_cq.usecnt, 0);
++
++ return cq;
++}
++
++static int free_mr_init_qp(struct hns_roce_dev *hr_dev, struct ib_cq *cq,
++ struct ib_qp_init_attr *init_attr, int i)
++{
++ struct hns_roce_v2_priv *priv = hr_dev->priv;
++ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
++ struct ib_device *ibdev = &hr_dev->ib_dev;
++ struct hns_roce_qp *hr_qp;
++ struct ib_qp *qp;
+ int ret;
++
++ hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
++ if (ZERO_OR_NULL_PTR(hr_qp))
++ return -ENOMEM;
++
++ qp = &hr_qp->ibqp;
++ qp->device = ibdev;
++
++ ret = hns_roce_create_qp(qp, init_attr, NULL);
++ if (ret) {
++ ibdev_err(ibdev, "failed to create qp for free mr.\n");
++ kfree(hr_qp);
++ return ret;
++ }
++
++ free_mr->rsv_qp[i] = hr_qp;
++ free_mr->rsv_qp[i]->ibqp.recv_cq = cq;
++ free_mr->rsv_qp[i]->ibqp.send_cq = cq;
++
++ return 0;
++}
++
++static void free_mr_exit(struct hns_roce_dev *hr_dev)
++{
++ struct hns_roce_v2_priv *priv = hr_dev->priv;
++ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
++ struct ib_qp *qp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
+ if (free_mr->rsv_qp[i]) {
+- ret = ib_destroy_qp(free_mr->rsv_qp[i]);
+- if (ret)
+- ibdev_err(&hr_dev->ib_dev,
+- "failed to destroy qp in free mr.\n");
+-
++ qp = &free_mr->rsv_qp[i]->ibqp;
++ hns_roce_v2_destroy_qp(qp, NULL);
++ kfree(free_mr->rsv_qp[i]);
+ free_mr->rsv_qp[i] = NULL;
+ }
+ }
+
+ if (free_mr->rsv_cq) {
+- ib_destroy_cq(free_mr->rsv_cq);
++ hns_roce_destroy_cq(&free_mr->rsv_cq->ib_cq, NULL);
++ kfree(free_mr->rsv_cq);
+ free_mr->rsv_cq = NULL;
+ }
+
+ if (free_mr->rsv_pd) {
+- ib_dealloc_pd(free_mr->rsv_pd);
++ hns_roce_dealloc_pd(&free_mr->rsv_pd->ibpd, NULL);
++ kfree(free_mr->rsv_pd);
+ free_mr->rsv_pd = NULL;
+ }
+ }
+@@ -2664,55 +2784,46 @@ static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
+ {
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+- struct ib_device *ibdev = &hr_dev->ib_dev;
+- struct ib_cq_init_attr cq_init_attr = {};
+ struct ib_qp_init_attr qp_init_attr = {};
+ struct ib_pd *pd;
+ struct ib_cq *cq;
+- struct ib_qp *qp;
+ int ret;
+ int i;
+
+- pd = ib_alloc_pd(ibdev, 0);
+- if (IS_ERR(pd)) {
+- ibdev_err(ibdev, "failed to create pd for free mr.\n");
+- return PTR_ERR(pd);
+- }
+- free_mr->rsv_pd = pd;
++ pd = free_mr_init_pd(hr_dev);
++ if (!pd)
++ return -ENOMEM;
+
+- cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
+- cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_init_attr);
+- if (IS_ERR(cq)) {
+- ibdev_err(ibdev, "failed to create cq for free mr.\n");
+- ret = PTR_ERR(cq);
+- goto create_failed;
++ cq = free_mr_init_cq(hr_dev);
++ if (!cq) {
++ ret = -ENOMEM;
++ goto create_failed_cq;
+ }
+- free_mr->rsv_cq = cq;
+
+ qp_init_attr.qp_type = IB_QPT_RC;
+ qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+- qp_init_attr.send_cq = free_mr->rsv_cq;
+- qp_init_attr.recv_cq = free_mr->rsv_cq;
++ qp_init_attr.send_cq = cq;
++ qp_init_attr.recv_cq = cq;
+ for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
+ qp_init_attr.cap.max_send_wr = HNS_ROCE_FREE_MR_USED_SQWQE_NUM;
+ qp_init_attr.cap.max_send_sge = HNS_ROCE_FREE_MR_USED_SQSGE_NUM;
+ qp_init_attr.cap.max_recv_wr = HNS_ROCE_FREE_MR_USED_RQWQE_NUM;
+ qp_init_attr.cap.max_recv_sge = HNS_ROCE_FREE_MR_USED_RQSGE_NUM;
+
+- qp = ib_create_qp(free_mr->rsv_pd, &qp_init_attr);
+- if (IS_ERR(qp)) {
+- ibdev_err(ibdev, "failed to create qp for free mr.\n");
+- ret = PTR_ERR(qp);
+- goto create_failed;
+- }
+-
+- free_mr->rsv_qp[i] = qp;
++ ret = free_mr_init_qp(hr_dev, cq, &qp_init_attr, i);
++ if (ret)
++ goto create_failed_qp;
+ }
+
+ return 0;
+
+-create_failed:
+- free_mr_exit(hr_dev);
++create_failed_qp:
++ hns_roce_destroy_cq(cq, NULL);
++ kfree(cq);
++
++create_failed_cq:
++ hns_roce_dealloc_pd(pd, NULL);
++ kfree(pd);
+
+ return ret;
+ }
+@@ -2728,14 +2839,17 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
+ int mask;
+ int ret;
+
+- hr_qp = to_hr_qp(free_mr->rsv_qp[sl_num]);
++ hr_qp = to_hr_qp(&free_mr->rsv_qp[sl_num]->ibqp);
+ hr_qp->free_mr_en = 1;
++ hr_qp->ibqp.device = ibdev;
++ hr_qp->ibqp.qp_type = IB_QPT_RC;
+
+ mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS;
+ attr->qp_state = IB_QPS_INIT;
+ attr->port_num = 1;
+ attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
+- ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
++ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
++ IB_QPS_INIT);
+ if (ret) {
+ ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n",
+ ret);
+@@ -2756,7 +2870,8 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
+
+ rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num);
+
+- ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
++ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
++ IB_QPS_RTR);
+ hr_dev->loop_idc = loopback;
+ if (ret) {
+ ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n",
+@@ -2770,7 +2885,8 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
+ attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN;
+ attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT;
+ attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT;
+- ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
++ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_RTR,
++ IB_QPS_RTS);
+ if (ret)
+ ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n",
+ ret);
+@@ -3186,7 +3302,8 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
+ int i, count;
+
+ count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
+- ARRAY_SIZE(pages), &pbl_ba);
++ min_t(int, ARRAY_SIZE(pages), mr->npages),
++ &pbl_ba);
+ if (count < 1) {
+ ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
+ count);
+@@ -3414,7 +3531,7 @@ static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
+ mutex_lock(&free_mr->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
+- hr_qp = to_hr_qp(free_mr->rsv_qp[i]);
++ hr_qp = free_mr->rsv_qp[i];
+
+ ret = free_mr_post_send_lp_wqe(hr_qp);
+ if (ret) {
+@@ -3429,7 +3546,7 @@ static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
+
+ end = msecs_to_jiffies(HNS_ROCE_V2_FREE_MR_TIMEOUT) + jiffies;
+ while (cqe_cnt) {
+- npolled = hns_roce_v2_poll_cq(free_mr->rsv_cq, cqe_cnt, wc);
++ npolled = hns_roce_v2_poll_cq(&free_mr->rsv_cq->ib_cq, cqe_cnt, wc);
+ if (npolled < 0) {
+ ibdev_err(ibdev,
+ "failed to poll cqe for free mr, remain %d cqe.\n",
+@@ -5375,6 +5492,8 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+
+ rdma_ah_set_sl(&qp_attr->ah_attr,
+ hr_reg_read(&context, QPC_SL));
++ rdma_ah_set_port_num(&qp_attr->ah_attr, hr_qp->port + 1);
++ rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
+ grh->flow_label = hr_reg_read(&context, QPC_FL);
+ grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX);
+ grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT);
+@@ -5468,7 +5587,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
+ return ret;
+ }
+
+-static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
++int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index c7bf2d52c1cdb..b1b3e1e0b84e5 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -272,6 +272,11 @@ enum hns_roce_cmd_return_status {
+ CMD_OTHER_ERR = 0xff
+ };
+
++struct hns_roce_cmd_errcode {
++ enum hns_roce_cmd_return_status return_status;
++ int errno;
++};
++
+ enum hns_roce_sgid_type {
+ GID_TYPE_FLAG_ROCE_V1 = 0,
+ GID_TYPE_FLAG_ROCE_V2_IPV4,
+@@ -1327,9 +1332,9 @@ struct hns_roce_link_table {
+ #define HNS_ROCE_EXT_LLM_MIN_PAGES(que_num) ((que_num) * 4 + 2)
+
+ struct hns_roce_v2_free_mr {
+- struct ib_qp *rsv_qp[HNS_ROCE_FREE_MR_USED_QP_NUM];
+- struct ib_cq *rsv_cq;
+- struct ib_pd *rsv_pd;
++ struct hns_roce_qp *rsv_qp[HNS_ROCE_FREE_MR_USED_QP_NUM];
++ struct hns_roce_cq *rsv_cq;
++ struct hns_roce_pd *rsv_pd;
+ struct mutex mutex;
+ };
+
+@@ -1459,6 +1464,8 @@ struct hns_roce_sccc_clr_done {
+ __le32 rsv[5];
+ };
+
++int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
++
+ static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
+ void __iomem *dest)
+ {
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index dcf89689a4c62..8ba68ac12388d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -354,10 +354,11 @@ static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
+ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
+ struct ib_udata *udata)
+ {
+- int ret;
+ struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
+- struct hns_roce_ib_alloc_ucontext_resp resp = {};
+ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
++ struct hns_roce_ib_alloc_ucontext_resp resp = {};
++ struct hns_roce_ib_alloc_ucontext ucmd = {};
++ int ret;
+
+ if (!hr_dev->active)
+ return -EAGAIN;
+@@ -365,6 +366,19 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
+ resp.qp_tab_size = hr_dev->caps.num_qps;
+ resp.srq_tab_size = hr_dev->caps.num_srqs;
+
++ ret = ib_copy_from_udata(&ucmd, udata,
++ min(udata->inlen, sizeof(ucmd)));
++ if (ret)
++ return ret;
++
++ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
++ context->config = ucmd.config & HNS_ROCE_EXSGE_FLAGS;
++
++ if (context->config & HNS_ROCE_EXSGE_FLAGS) {
++ resp.config |= HNS_ROCE_RSP_EXSGE_FLAGS;
++ resp.max_inline_data = hr_dev->caps.max_sq_inline;
++ }
++
+ ret = hns_roce_uar_alloc(hr_dev, &context->uar);
+ if (ret)
+ goto error_fail_uar_alloc;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index 845ac7d3831f4..37a5cf62f88b4 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -392,10 +392,10 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+
+ return &mr->ibmr;
+
+-err_key:
+- free_mr_key(hr_dev, mr);
+ err_pbl:
+ free_mr_pbl(hr_dev, mr);
++err_key:
++ free_mr_key(hr_dev, mr);
+ err_free:
+ kfree(mr);
+ return ERR_PTR(ret);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index f0bd82a18069a..0ae335fb205ca 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -476,38 +476,109 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+ return 0;
+ }
+
+-static u32 get_wqe_ext_sge_cnt(struct hns_roce_qp *qp)
++static u32 get_max_inline_data(struct hns_roce_dev *hr_dev,
++ struct ib_qp_cap *cap)
+ {
+- /* GSI/UD QP only has extended sge */
+- if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD)
+- return qp->sq.max_gs;
+-
+- if (qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
+- return qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE;
++ if (cap->max_inline_data) {
++ cap->max_inline_data = roundup_pow_of_two(cap->max_inline_data);
++ return min(cap->max_inline_data,
++ hr_dev->caps.max_sq_inline);
++ }
+
+ return 0;
+ }
+
++static void update_inline_data(struct hns_roce_qp *hr_qp,
++ struct ib_qp_cap *cap)
++{
++ u32 sge_num = hr_qp->sq.ext_sge_cnt;
++
++ if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
++ if (!(hr_qp->ibqp.qp_type == IB_QPT_GSI ||
++ hr_qp->ibqp.qp_type == IB_QPT_UD))
++ sge_num = max((u32)HNS_ROCE_SGE_IN_WQE, sge_num);
++
++ cap->max_inline_data = max(cap->max_inline_data,
++ sge_num * HNS_ROCE_SGE_SIZE);
++ }
++
++ hr_qp->max_inline_data = cap->max_inline_data;
++}
++
++static u32 get_sge_num_from_max_send_sge(bool is_ud_or_gsi,
++ u32 max_send_sge)
++{
++ unsigned int std_sge_num;
++ unsigned int min_sge;
++
++ std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
++ min_sge = is_ud_or_gsi ? 1 : 0;
++ return max_send_sge > std_sge_num ? (max_send_sge - std_sge_num) :
++ min_sge;
++}
++
++static unsigned int get_sge_num_from_max_inl_data(bool is_ud_or_gsi,
++ u32 max_inline_data)
++{
++ unsigned int inline_sge;
++
++ inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE;
++
++ /*
++ * if max_inline_data less than
++ * HNS_ROCE_SGE_IN_WQE * HNS_ROCE_SGE_SIZE,
++ * In addition to ud's mode, no need to extend sge.
++ */
++ if (!is_ud_or_gsi && inline_sge <= HNS_ROCE_SGE_IN_WQE)
++ inline_sge = 0;
++
++ return inline_sge;
++}
++
+ static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
+ struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap)
+ {
++ bool is_ud_or_gsi = (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
++ hr_qp->ibqp.qp_type == IB_QPT_UD);
++ unsigned int std_sge_num;
++ u32 inline_ext_sge = 0;
++ u32 ext_wqe_sge_cnt;
+ u32 total_sge_cnt;
+- u32 wqe_sge_cnt;
++
++ cap->max_inline_data = get_max_inline_data(hr_dev, cap);
+
+ hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
++ std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
++ ext_wqe_sge_cnt = get_sge_num_from_max_send_sge(is_ud_or_gsi,
++ cap->max_send_sge);
+
+- hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
++ if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
++ inline_ext_sge = max(ext_wqe_sge_cnt,
++ get_sge_num_from_max_inl_data(is_ud_or_gsi,
++ cap->max_inline_data));
++ hr_qp->sq.ext_sge_cnt = inline_ext_sge ?
++ roundup_pow_of_two(inline_ext_sge) : 0;
+
+- wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp);
++ hr_qp->sq.max_gs = max(1U, (hr_qp->sq.ext_sge_cnt + std_sge_num));
++ hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);
++
++ ext_wqe_sge_cnt = hr_qp->sq.ext_sge_cnt;
++ } else {
++ hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
++ hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);
++ hr_qp->sq.ext_sge_cnt = hr_qp->sq.max_gs;
++ }
+
+ /* If the number of extended sge is not zero, they MUST use the
+ * space of HNS_HW_PAGE_SIZE at least.
+ */
+- if (wqe_sge_cnt) {
+- total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * wqe_sge_cnt);
++ if (ext_wqe_sge_cnt) {
++ total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * ext_wqe_sge_cnt);
+ hr_qp->sge.sge_cnt = max(total_sge_cnt,
+ (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE);
+ }
++
++ update_inline_data(hr_qp, cap);
+ }
+
+ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
+@@ -556,6 +627,7 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
+
+ hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+ hr_qp->sq.wqe_cnt = cnt;
++ cap->max_send_sge = hr_qp->sq.max_gs;
+
+ return 0;
+ }
+@@ -986,13 +1058,9 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct hns_roce_ib_create_qp *ucmd)
+ {
+ struct ib_device *ibdev = &hr_dev->ib_dev;
++ struct hns_roce_ucontext *uctx;
+ int ret;
+
+- if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline)
+- init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline;
+-
+- hr_qp->max_inline_data = init_attr->cap.max_inline_data;
+-
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
+ else
+@@ -1015,12 +1083,17 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ return ret;
+ }
+
++ uctx = rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
++ ibucontext);
++ hr_qp->config = uctx->config;
+ ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
+ if (ret)
+ ibdev_err(ibdev,
+ "failed to set user SQ size, ret = %d.\n",
+ ret);
+ } else {
++ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
++ hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
+ ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
+ if (ret)
+ ibdev_err(ibdev,
+diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
+index a6e5d350a94ce..16183e894da77 100644
+--- a/drivers/infiniband/hw/irdma/uk.c
++++ b/drivers/infiniband/hw/irdma/uk.c
+@@ -566,21 +566,37 @@ static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
+
+ /**
+ * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
+- * @dest: pointer to wqe
+- * @src: pointer to inline data
+- * @len: length of inline data to copy
++ * @wqe: pointer to wqe
++ * @sge_list: table of pointers to inline data
++ * @num_sges: Total inline data length
+ * @polarity: compatibility parameter
+ */
+-static void irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len,
+- u8 polarity)
++static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
++ u32 num_sges, u8 polarity)
+ {
+- if (len <= 16) {
+- memcpy(dest, src, len);
+- } else {
+- memcpy(dest, src, 16);
+- src += 16;
+- dest = dest + 32;
+- memcpy(dest, src, len - 16);
++ u32 quanta_bytes_remaining = 16;
++ int i;
++
++ for (i = 0; i < num_sges; i++) {
++ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
++ u32 sge_len = sge_list[i].length;
++
++ while (sge_len) {
++ u32 bytes_copied;
++
++ bytes_copied = min(sge_len, quanta_bytes_remaining);
++ memcpy(wqe, cur_sge, bytes_copied);
++ wqe += bytes_copied;
++ cur_sge += bytes_copied;
++ quanta_bytes_remaining -= bytes_copied;
++ sge_len -= bytes_copied;
++
++ if (!quanta_bytes_remaining) {
++ /* Remaining inline bytes reside after hdr */
++ wqe += 16;
++ quanta_bytes_remaining = 32;
++ }
++ }
+ }
+ }
+
+@@ -612,35 +628,51 @@ static void irdma_set_mw_bind_wqe(__le64 *wqe,
+
+ /**
+ * irdma_copy_inline_data - Copy inline data to wqe
+- * @dest: pointer to wqe
+- * @src: pointer to inline data
+- * @len: length of inline data to copy
++ * @wqe: pointer to wqe
++ * @sge_list: table of pointers to inline data
++ * @num_sges: number of SGE's
+ * @polarity: polarity of wqe valid bit
+ */
+-static void irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity)
++static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
++ u32 num_sges, u8 polarity)
+ {
+ u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
+- u32 copy_size;
+-
+- dest += 8;
+- if (len <= 8) {
+- memcpy(dest, src, len);
+- return;
+- }
+-
+- *((u64 *)dest) = *((u64 *)src);
+- len -= 8;
+- src += 8;
+- dest += 24; /* point to additional 32 byte quanta */
+-
+- while (len) {
+- copy_size = len < 31 ? len : 31;
+- memcpy(dest, src, copy_size);
+- *(dest + 31) = inline_valid;
+- len -= copy_size;
+- dest += 32;
+- src += copy_size;
++ u32 quanta_bytes_remaining = 8;
++ bool first_quanta = true;
++ int i;
++
++ wqe += 8;
++
++ for (i = 0; i < num_sges; i++) {
++ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
++ u32 sge_len = sge_list[i].length;
++
++ while (sge_len) {
++ u32 bytes_copied;
++
++ bytes_copied = min(sge_len, quanta_bytes_remaining);
++ memcpy(wqe, cur_sge, bytes_copied);
++ wqe += bytes_copied;
++ cur_sge += bytes_copied;
++ quanta_bytes_remaining -= bytes_copied;
++ sge_len -= bytes_copied;
++
++ if (!quanta_bytes_remaining) {
++ quanta_bytes_remaining = 31;
++
++ /* Remaining inline bytes reside after hdr */
++ if (first_quanta) {
++ first_quanta = false;
++ wqe += 16;
++ } else {
++ *wqe = inline_valid;
++ wqe++;
++ }
++ }
++ }
+ }
++ if (!first_quanta && quanta_bytes_remaining < 31)
++ *(wqe + quanta_bytes_remaining) = inline_valid;
+ }
+
+ /**
+@@ -679,20 +711,27 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+ {
+ __le64 *wqe;
+- struct irdma_inline_rdma_write *op_info;
++ struct irdma_rdma_write *op_info;
+ u64 hdr = 0;
+ u32 wqe_idx;
+ bool read_fence = false;
++ u32 i, total_size = 0;
+ u16 quanta;
+
+ info->push_wqe = qp->push_db ? true : false;
+- op_info = &info->op.inline_rdma_write;
++ op_info = &info->op.rdma_write;
++
++ if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
++ return -EINVAL;
++
++ for (i = 0; i < op_info->num_lo_sges; i++)
++ total_size += op_info->lo_sg_list[i].length;
+
+- if (op_info->len > qp->max_inline_data)
++ if (unlikely(total_size > qp->max_inline_data))
+ return -EINVAL;
+
+- quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
+- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
++ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
++ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+@@ -705,7 +744,7 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+- FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
++ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
+@@ -719,7 +758,8 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+
+- qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
++ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
++ op_info->num_lo_sges,
+ qp->swqe_polarity);
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+@@ -745,20 +785,27 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+ {
+ __le64 *wqe;
+- struct irdma_post_inline_send *op_info;
++ struct irdma_post_send *op_info;
+ u64 hdr;
+ u32 wqe_idx;
+ bool read_fence = false;
++ u32 i, total_size = 0;
+ u16 quanta;
+
+ info->push_wqe = qp->push_db ? true : false;
+- op_info = &info->op.inline_send;
++ op_info = &info->op.send;
++
++ if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
++ return -EINVAL;
+
+- if (op_info->len > qp->max_inline_data)
++ for (i = 0; i < op_info->num_sges; i++)
++ total_size += op_info->sg_list[i].length;
++
++ if (unlikely(total_size > qp->max_inline_data))
+ return -EINVAL;
+
+- quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
+- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
++ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
++ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+@@ -773,7 +820,7 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
+ FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+- FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
++ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
+ (info->imm_data_valid ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+@@ -789,8 +836,8 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ if (info->imm_data_valid)
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+- qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
+- qp->swqe_polarity);
++ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
++ op_info->num_sges, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+@@ -1002,11 +1049,10 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ __le64 *cqe;
+ struct irdma_qp_uk *qp;
+ struct irdma_ring *pring = NULL;
+- u32 wqe_idx, q_type;
++ u32 wqe_idx;
+ int ret_code;
+ bool move_cq_head = true;
+ u8 polarity;
+- u8 op_type;
+ bool ext_valid;
+ __le64 *ext_cqe;
+
+@@ -1074,7 +1120,7 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ info->ud_vlan_valid = false;
+ }
+
+- q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
++ info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
+ info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
+ info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
+@@ -1113,8 +1159,9 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ }
+ wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
+ info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
++ info->op_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+
+- if (q_type == IRDMA_CQE_QTYPE_RQ) {
++ if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
+ u32 array_idx;
+
+ array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
+@@ -1134,10 +1181,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+
+ info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+
+- if (info->imm_valid)
+- info->op_type = IRDMA_OP_TYPE_REC_IMM;
+- else
+- info->op_type = IRDMA_OP_TYPE_REC;
+ if (qword3 & IRDMACQ_STAG) {
+ info->stag_invalid_set = true;
+ info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
+@@ -1195,17 +1238,18 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ sw_wqe = qp->sq_base[tail].elem;
+ get_64bit_val(sw_wqe, 24,
+ &wqe_qword);
+- op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
+- info->op_type = op_type;
++ info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
++ wqe_qword);
+ IRDMA_RING_SET_TAIL(qp->sq_ring,
+ tail + qp->sq_wrtrk_array[tail].quanta);
+- if (op_type != IRDMAQP_OP_NOP) {
++ if (info->op_type != IRDMAQP_OP_NOP) {
+ info->wr_id = qp->sq_wrtrk_array[tail].wrid;
+ info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
+ break;
+ }
+ } while (1);
+- if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
++ if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
++ info->minor_err == FLUSH_PROT_ERR)
+ info->minor_err = FLUSH_MW_BIND_ERR;
+ qp->sq_flush_seen = true;
+ if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
+diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
+index 2ef61923c9268..d0cdf609f5e06 100644
+--- a/drivers/infiniband/hw/irdma/user.h
++++ b/drivers/infiniband/hw/irdma/user.h
+@@ -173,14 +173,6 @@ struct irdma_post_send {
+ u32 ah_id;
+ };
+
+-struct irdma_post_inline_send {
+- void *data;
+- u32 len;
+- u32 qkey;
+- u32 dest_qp;
+- u32 ah_id;
+-};
+-
+ struct irdma_post_rq_info {
+ u64 wr_id;
+ struct ib_sge *sg_list;
+@@ -193,12 +185,6 @@ struct irdma_rdma_write {
+ struct ib_sge rem_addr;
+ };
+
+-struct irdma_inline_rdma_write {
+- void *data;
+- u32 len;
+- struct ib_sge rem_addr;
+-};
+-
+ struct irdma_rdma_read {
+ struct ib_sge *lo_sg_list;
+ u32 num_lo_sges;
+@@ -241,8 +227,6 @@ struct irdma_post_sq_info {
+ struct irdma_rdma_read rdma_read;
+ struct irdma_bind_window bind_window;
+ struct irdma_inv_local_stag inv_local_stag;
+- struct irdma_inline_rdma_write inline_rdma_write;
+- struct irdma_post_inline_send inline_send;
+ } op;
+ };
+
+@@ -261,6 +245,7 @@ struct irdma_cq_poll_info {
+ u16 ud_vlan;
+ u8 ud_smac[6];
+ u8 op_type;
++ u8 q_type;
+ bool stag_invalid_set:1; /* or L_R_Key set */
+ bool push_dropped:1;
+ bool error:1;
+@@ -291,7 +276,8 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
+ bool post_sq);
+
+ struct irdma_wqe_uk_ops {
+- void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
++ void (*iw_copy_inline_data)(u8 *dest, struct ib_sge *sge_list,
++ u32 num_sges, u8 polarity);
+ u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
+ void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
+ u8 valid);
+diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
+index 8dfc9e154d733..445e69e864097 100644
+--- a/drivers/infiniband/hw/irdma/utils.c
++++ b/drivers/infiniband/hw/irdma/utils.c
+@@ -2591,6 +2591,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
+ sw_wqe = qp->sq_base[wqe_idx].elem;
+ get_64bit_val(sw_wqe, 24, &wqe_qword);
+ cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, IRDMAQPSQ_OPCODE);
++ cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ;
+ /* remove the SQ WR by moving SQ tail*/
+ IRDMA_RING_SET_TAIL(*sq_ring,
+ sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
+@@ -2629,6 +2630,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
+
+ cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
+ cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
++ cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
+ /* remove the RQ WR by moving RQ tail */
+ IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
+ ibdev_dbg(iwqp->iwrcq->ibcq.device,
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index a22afbb25bc58..f6973ea55eda7 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -63,36 +63,6 @@ static int irdma_query_device(struct ib_device *ibdev,
+ return 0;
+ }
+
+-/**
+- * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed
+- * @link_speed: netdev phy link speed
+- * @active_speed: IB port speed
+- * @active_width: IB port width
+- */
+-static void irdma_get_eth_speed_and_width(u32 link_speed, u16 *active_speed,
+- u8 *active_width)
+-{
+- if (link_speed <= SPEED_1000) {
+- *active_width = IB_WIDTH_1X;
+- *active_speed = IB_SPEED_SDR;
+- } else if (link_speed <= SPEED_10000) {
+- *active_width = IB_WIDTH_1X;
+- *active_speed = IB_SPEED_FDR10;
+- } else if (link_speed <= SPEED_20000) {
+- *active_width = IB_WIDTH_4X;
+- *active_speed = IB_SPEED_DDR;
+- } else if (link_speed <= SPEED_25000) {
+- *active_width = IB_WIDTH_1X;
+- *active_speed = IB_SPEED_EDR;
+- } else if (link_speed <= SPEED_40000) {
+- *active_width = IB_WIDTH_4X;
+- *active_speed = IB_SPEED_FDR10;
+- } else {
+- *active_width = IB_WIDTH_4X;
+- *active_speed = IB_SPEED_EDR;
+- }
+-}
+-
+ /**
+ * irdma_query_port - get port attributes
+ * @ibdev: device pointer from stack
+@@ -120,8 +90,9 @@ static int irdma_query_port(struct ib_device *ibdev, u32 port,
+ props->state = IB_PORT_DOWN;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ }
+- irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed,
+- &props->active_width);
++
++ ib_get_eth_speed(ibdev, port, &props->active_speed,
++ &props->active_width);
+
+ if (rdma_protocol_roce(ibdev, 1)) {
+ props->gid_tbl_len = 32;
+@@ -1242,6 +1213,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ av->attrs = attr->ah_attr;
+ rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
+ rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
++ av->net_type = rdma_gid_attr_network_type(sgid_attr);
+ if (av->net_type == RDMA_NETWORK_IPV6) {
+ __be32 *daddr =
+ av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
+@@ -2358,9 +2330,10 @@ static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
+ * @rf: RDMA PCI function
+ * @iwmr: mr pointer for this memory registration
+ * @use_pbles: flag if to use pble's
++ * @lvl_1_only: request only level 1 pble if true
+ */
+ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
+- bool use_pbles)
++ bool use_pbles, bool lvl_1_only)
+ {
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+@@ -2371,7 +2344,7 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
+
+ if (use_pbles) {
+ status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
+- false);
++ lvl_1_only);
+ if (status)
+ return status;
+
+@@ -2414,16 +2387,10 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
+ bool ret = true;
+
+ pg_size = iwmr->page_size;
+- err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
++ err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, true);
+ if (err)
+ return err;
+
+- if (use_pbles && palloc->level != PBLE_LEVEL_1) {
+- irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+- iwpbl->pbl_allocated = false;
+- return -ENOMEM;
+- }
+-
+ if (use_pbles)
+ arr = palloc->level1.addr;
+
+@@ -2899,7 +2866,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
+ case IRDMA_MEMREG_TYPE_MEM:
+ use_pbles = (iwmr->page_cnt != 1);
+
+- err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
++ err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
+ if (err)
+ goto error;
+
+@@ -3165,30 +3132,20 @@ static int irdma_post_send(struct ib_qp *ibqp,
+ info.stag_to_inv = ib_wr->ex.invalidate_rkey;
+ }
+
+- if (ib_wr->send_flags & IB_SEND_INLINE) {
+- info.op.inline_send.data = (void *)(unsigned long)
+- ib_wr->sg_list[0].addr;
+- info.op.inline_send.len = ib_wr->sg_list[0].length;
+- if (iwqp->ibqp.qp_type == IB_QPT_UD ||
+- iwqp->ibqp.qp_type == IB_QPT_GSI) {
+- ah = to_iwah(ud_wr(ib_wr)->ah);
+- info.op.inline_send.ah_id = ah->sc_ah.ah_info.ah_idx;
+- info.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey;
+- info.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn;
+- }
++ info.op.send.num_sges = ib_wr->num_sge;
++ info.op.send.sg_list = ib_wr->sg_list;
++ if (iwqp->ibqp.qp_type == IB_QPT_UD ||
++ iwqp->ibqp.qp_type == IB_QPT_GSI) {
++ ah = to_iwah(ud_wr(ib_wr)->ah);
++ info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
++ info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
++ info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
++ }
++
++ if (ib_wr->send_flags & IB_SEND_INLINE)
+ err = irdma_uk_inline_send(ukqp, &info, false);
+- } else {
+- info.op.send.num_sges = ib_wr->num_sge;
+- info.op.send.sg_list = ib_wr->sg_list;
+- if (iwqp->ibqp.qp_type == IB_QPT_UD ||
+- iwqp->ibqp.qp_type == IB_QPT_GSI) {
+- ah = to_iwah(ud_wr(ib_wr)->ah);
+- info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
+- info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
+- info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
+- }
++ else
+ err = irdma_uk_send(ukqp, &info, false);
+- }
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
+@@ -3205,22 +3162,15 @@ static int irdma_post_send(struct ib_qp *ibqp,
+ else
+ info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
+
+- if (ib_wr->send_flags & IB_SEND_INLINE) {
+- info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
+- info.op.inline_rdma_write.len =
+- ib_wr->sg_list[0].length;
+- info.op.inline_rdma_write.rem_addr.addr =
+- rdma_wr(ib_wr)->remote_addr;
+- info.op.inline_rdma_write.rem_addr.lkey =
+- rdma_wr(ib_wr)->rkey;
++ info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
++ info.op.rdma_write.lo_sg_list = ib_wr->sg_list;
++ info.op.rdma_write.rem_addr.addr =
++ rdma_wr(ib_wr)->remote_addr;
++ info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
++ if (ib_wr->send_flags & IB_SEND_INLINE)
+ err = irdma_uk_inline_rdma_write(ukqp, &info, false);
+- } else {
+- info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
+- info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
+- info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
+- info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
++ else
+ err = irdma_uk_rdma_write(ukqp, &info, false);
+- }
+ break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ inv_stag = true;
+@@ -3380,7 +3330,6 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
+ static void irdma_process_cqe(struct ib_wc *entry,
+ struct irdma_cq_poll_info *cq_poll_info)
+ {
+- struct irdma_qp *iwqp;
+ struct irdma_sc_qp *qp;
+
+ entry->wc_flags = 0;
+@@ -3388,7 +3337,6 @@ static void irdma_process_cqe(struct ib_wc *entry,
+ entry->wr_id = cq_poll_info->wr_id;
+
+ qp = cq_poll_info->qp_handle;
+- iwqp = qp->qp_uk.back_qp;
+ entry->qp = qp->qp_uk.back_qp;
+
+ if (cq_poll_info->error) {
+@@ -3421,42 +3369,17 @@ static void irdma_process_cqe(struct ib_wc *entry,
+ }
+ }
+
+- switch (cq_poll_info->op_type) {
+- case IRDMA_OP_TYPE_RDMA_WRITE:
+- case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+- entry->opcode = IB_WC_RDMA_WRITE;
+- break;
+- case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
+- case IRDMA_OP_TYPE_RDMA_READ:
+- entry->opcode = IB_WC_RDMA_READ;
+- break;
+- case IRDMA_OP_TYPE_SEND_INV:
+- case IRDMA_OP_TYPE_SEND_SOL:
+- case IRDMA_OP_TYPE_SEND_SOL_INV:
+- case IRDMA_OP_TYPE_SEND:
+- entry->opcode = IB_WC_SEND;
+- break;
+- case IRDMA_OP_TYPE_FAST_REG_NSMR:
+- entry->opcode = IB_WC_REG_MR;
+- break;
+- case IRDMA_OP_TYPE_INV_STAG:
+- entry->opcode = IB_WC_LOCAL_INV;
+- break;
+- case IRDMA_OP_TYPE_REC_IMM:
+- case IRDMA_OP_TYPE_REC:
+- entry->opcode = cq_poll_info->op_type == IRDMA_OP_TYPE_REC_IMM ?
+- IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
++ if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
++ set_ib_wc_op_sq(cq_poll_info, entry);
++ } else {
++ set_ib_wc_op_rq(cq_poll_info, entry,
++ qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
++ true : false);
+ if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
+ cq_poll_info->stag_invalid_set) {
+ entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
+ entry->wc_flags |= IB_WC_WITH_INVALIDATE;
+ }
+- break;
+- default:
+- ibdev_err(&iwqp->iwdev->ibdev,
+- "Invalid opcode = %d in CQE\n", cq_poll_info->op_type);
+- entry->status = IB_WC_GENERAL_ERR;
+- return;
+ }
+
+ if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index 4309b7159f42c..a536e9fa85ebf 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -232,6 +232,59 @@ static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
+ return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
+ }
+
++static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
++ struct ib_wc *entry)
++{
++ switch (cq_poll_info->op_type) {
++ case IRDMA_OP_TYPE_RDMA_WRITE:
++ case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
++ entry->opcode = IB_WC_RDMA_WRITE;
++ break;
++ case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
++ case IRDMA_OP_TYPE_RDMA_READ:
++ entry->opcode = IB_WC_RDMA_READ;
++ break;
++ case IRDMA_OP_TYPE_SEND_SOL:
++ case IRDMA_OP_TYPE_SEND_SOL_INV:
++ case IRDMA_OP_TYPE_SEND_INV:
++ case IRDMA_OP_TYPE_SEND:
++ entry->opcode = IB_WC_SEND;
++ break;
++ case IRDMA_OP_TYPE_FAST_REG_NSMR:
++ entry->opcode = IB_WC_REG_MR;
++ break;
++ case IRDMA_OP_TYPE_INV_STAG:
++ entry->opcode = IB_WC_LOCAL_INV;
++ break;
++ default:
++ entry->status = IB_WC_GENERAL_ERR;
++ }
++}
++
++static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
++ struct ib_wc *entry, bool send_imm_support)
++{
++ /**
++ * iWARP does not support sendImm, so the presence of Imm data
++ * must be WriteImm.
++ */
++ if (!send_imm_support) {
++ entry->opcode = cq_poll_info->imm_valid ?
++ IB_WC_RECV_RDMA_WITH_IMM :
++ IB_WC_RECV;
++ return;
++ }
++
++ switch (cq_poll_info->op_type) {
++ case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
++ case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
++ entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
++ break;
++ default:
++ entry->opcode = IB_WC_RECV;
++ }
++}
++
+ void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
+ int irdma_ib_register_device(struct irdma_device *iwdev);
+ void irdma_ib_unregister_device(struct irdma_device *iwdev);
+diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
+index 502e9ada99b30..80e2d631fdb24 100644
+--- a/drivers/infiniband/sw/rxe/rxe_mr.c
++++ b/drivers/infiniband/sw/rxe/rxe_mr.c
+@@ -99,6 +99,7 @@ err2:
+ kfree(mr->map[i]);
+
+ kfree(mr->map);
++ mr->map = NULL;
+ err1:
+ return -ENOMEM;
+ }
+@@ -122,7 +123,6 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+ int num_buf;
+ void *vaddr;
+ int err;
+- int i;
+
+ umem = ib_umem_get(&rxe->ib_dev, start, length, access);
+ if (IS_ERR(umem)) {
+@@ -163,9 +163,8 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+ pr_warn("%s: Unable to get virtual address\n",
+ __func__);
+ err = -ENOMEM;
+- goto err_cleanup_map;
++ goto err_release_umem;
+ }
+-
+ buf->addr = (uintptr_t)vaddr;
+ buf->size = PAGE_SIZE;
+ num_buf++;
+@@ -182,10 +181,6 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+
+ return 0;
+
+-err_cleanup_map:
+- for (i = 0; i < mr->num_map; i++)
+- kfree(mr->map[i]);
+- kfree(mr->map);
+ err_release_umem:
+ ib_umem_release(umem);
+ err_out:
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index a62bab88415cb..e459fb542b83a 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -829,12 +829,12 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
+ if (qp->resp.mr)
+ rxe_put(qp->resp.mr);
+
+- if (qp_type(qp) == IB_QPT_RC)
+- sk_dst_reset(qp->sk->sk);
+-
+ free_rd_atomic_resources(qp);
+
+ if (qp->sk) {
++ if (qp_type(qp) == IB_QPT_RC)
++ sk_dst_reset(qp->sk->sk);
++
+ kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+ sock_release(qp->sk);
+ }
+diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
+index d68e37859e73b..403029de6b92d 100644
+--- a/drivers/infiniband/sw/siw/siw_cq.c
++++ b/drivers/infiniband/sw/siw/siw_cq.c
+@@ -56,8 +56,6 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
+ if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {
+ memset(wc, 0, sizeof(*wc));
+ wc->wr_id = cqe->id;
+- wc->status = map_cqe_status[cqe->status].ib;
+- wc->opcode = map_wc_opcode[cqe->opcode];
+ wc->byte_len = cqe->bytes;
+
+ /*
+@@ -71,10 +69,32 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
+ wc->wc_flags = IB_WC_WITH_INVALIDATE;
+ }
+ wc->qp = cqe->base_qp;
++ wc->opcode = map_wc_opcode[cqe->opcode];
++ wc->status = map_cqe_status[cqe->status].ib;
+ siw_dbg_cq(cq,
+ "idx %u, type %d, flags %2x, id 0x%pK\n",
+ cq->cq_get % cq->num_cqe, cqe->opcode,
+ cqe->flags, (void *)(uintptr_t)cqe->id);
++ } else {
++ /*
++ * A malicious user may set invalid opcode or
++ * status in the user mmapped CQE array.
++ * Sanity check and correct values in that case
++ * to avoid out-of-bounds access to global arrays
++ * for opcode and status mapping.
++ */
++ u8 opcode = cqe->opcode;
++ u16 status = cqe->status;
++
++ if (opcode >= SIW_NUM_OPCODES) {
++ opcode = 0;
++ status = SIW_WC_GENERAL_ERR;
++ } else if (status >= SIW_NUM_WC_STATUS) {
++ status = SIW_WC_GENERAL_ERR;
++ }
++ wc->opcode = map_wc_opcode[opcode];
++ wc->status = map_cqe_status[status].ib;
++
+ }
+ WRITE_ONCE(cqe->flags, 0);
+ cq->cq_get++;
+diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
+index 7d47b521070b1..05052b49107f2 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
+@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
+ dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
+
+ if (paddr)
+- return virt_to_page((void *)paddr);
++ return virt_to_page((void *)(uintptr_t)paddr);
+
+ return NULL;
+ }
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index 3e814cfb298cf..906fde1a2a0de 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -676,13 +676,45 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
+ static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+ {
+- struct siw_sqe sqe = {};
+ int rv = 0;
+
+ while (wr) {
+- sqe.id = wr->wr_id;
+- sqe.opcode = wr->opcode;
+- rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
++ struct siw_sqe sqe = {};
++
++ switch (wr->opcode) {
++ case IB_WR_RDMA_WRITE:
++ sqe.opcode = SIW_OP_WRITE;
++ break;
++ case IB_WR_RDMA_READ:
++ sqe.opcode = SIW_OP_READ;
++ break;
++ case IB_WR_RDMA_READ_WITH_INV:
++ sqe.opcode = SIW_OP_READ_LOCAL_INV;
++ break;
++ case IB_WR_SEND:
++ sqe.opcode = SIW_OP_SEND;
++ break;
++ case IB_WR_SEND_WITH_IMM:
++ sqe.opcode = SIW_OP_SEND_WITH_IMM;
++ break;
++ case IB_WR_SEND_WITH_INV:
++ sqe.opcode = SIW_OP_SEND_REMOTE_INV;
++ break;
++ case IB_WR_LOCAL_INV:
++ sqe.opcode = SIW_OP_INVAL_STAG;
++ break;
++ case IB_WR_REG_MR:
++ sqe.opcode = SIW_OP_REG_MR;
++ break;
++ default:
++ rv = -EINVAL;
++ break;
++ }
++ if (!rv) {
++ sqe.id = wr->wr_id;
++ rv = siw_sqe_complete(qp, &sqe, 0,
++ SIW_WC_WR_FLUSH_ERR);
++ }
+ if (rv) {
+ if (bad_wr)
+ *bad_wr = wr;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+index ea16ba5d8da6c..9ad8d98562752 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+@@ -41,6 +41,11 @@ static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = {
+ [IFLA_IPOIB_UMCAST] = { .type = NLA_U16 },
+ };
+
++static unsigned int ipoib_get_max_num_queues(void)
++{
++ return min_t(unsigned int, num_possible_cpus(), 128);
++}
++
+ static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev)
+ {
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+@@ -172,6 +177,8 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
+ .changelink = ipoib_changelink,
+ .get_size = ipoib_get_size,
+ .fill_info = ipoib_fill_info,
++ .get_num_rx_queues = ipoib_get_max_num_queues,
++ .get_num_tx_queues = ipoib_get_max_num_queues,
+ };
+
+ struct rtnl_link_ops *ipoib_get_link_ops(void)
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 1075c2ac8fe20..b4d6a4a5ae81e 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -3410,7 +3410,8 @@ static int srp_parse_options(struct net *net, const char *buf,
+ break;
+
+ case SRP_OPT_PKEY:
+- if (match_hex(args, &token)) {
++ ret = match_hex(args, &token);
++ if (ret) {
+ pr_warn("bad P_Key parameter '%s'\n", p);
+ goto out;
+ }
+@@ -3470,7 +3471,8 @@ static int srp_parse_options(struct net *net, const char *buf,
+ break;
+
+ case SRP_OPT_MAX_SECT:
+- if (match_int(args, &token)) {
++ ret = match_int(args, &token);
++ if (ret) {
+ pr_warn("bad max sect parameter '%s'\n", p);
+ goto out;
+ }
+@@ -3478,8 +3480,15 @@ static int srp_parse_options(struct net *net, const char *buf,
+ break;
+
+ case SRP_OPT_QUEUE_SIZE:
+- if (match_int(args, &token) || token < 1) {
++ ret = match_int(args, &token);
++ if (ret) {
++ pr_warn("match_int() failed for queue_size parameter '%s', Error %d\n",
++ p, ret);
++ goto out;
++ }
++ if (token < 1) {
+ pr_warn("bad queue_size parameter '%s'\n", p);
++ ret = -EINVAL;
+ goto out;
+ }
+ target->scsi_host->can_queue = token;
+@@ -3490,25 +3499,40 @@ static int srp_parse_options(struct net *net, const char *buf,
+ break;
+
+ case SRP_OPT_MAX_CMD_PER_LUN:
+- if (match_int(args, &token) || token < 1) {
++ ret = match_int(args, &token);
++ if (ret) {
++ pr_warn("match_int() failed for max cmd_per_lun parameter '%s', Error %d\n",
++ p, ret);
++ goto out;
++ }
++ if (token < 1) {
+ pr_warn("bad max cmd_per_lun parameter '%s'\n",
+ p);
++ ret = -EINVAL;
+ goto out;
+ }
+ target->scsi_host->cmd_per_lun = token;
+ break;
+
+ case SRP_OPT_TARGET_CAN_QUEUE:
+- if (match_int(args, &token) || token < 1) {
++ ret = match_int(args, &token);
++ if (ret) {
++ pr_warn("match_int() failed for max target_can_queue parameter '%s', Error %d\n",
++ p, ret);
++ goto out;
++ }
++ if (token < 1) {
+ pr_warn("bad max target_can_queue parameter '%s'\n",
+ p);
++ ret = -EINVAL;
+ goto out;
+ }
+ target->target_can_queue = token;
+ break;
+
+ case SRP_OPT_IO_CLASS:
+- if (match_hex(args, &token)) {
++ ret = match_hex(args, &token);
++ if (ret) {
+ pr_warn("bad IO class parameter '%s'\n", p);
+ goto out;
+ }
+@@ -3517,6 +3541,7 @@ static int srp_parse_options(struct net *net, const char *buf,
+ pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
+ token, SRP_REV10_IB_IO_CLASS,
+ SRP_REV16A_IB_IO_CLASS);
++ ret = -EINVAL;
+ goto out;
+ }
+ target->io_class = token;
+@@ -3539,16 +3564,24 @@ static int srp_parse_options(struct net *net, const char *buf,
+ break;
+
+ case SRP_OPT_CMD_SG_ENTRIES:
+- if (match_int(args, &token) || token < 1 || token > 255) {
++ ret = match_int(args, &token);
++ if (ret) {
++ pr_warn("match_int() failed for max cmd_sg_entries parameter '%s', Error %d\n",
++ p, ret);
++ goto out;
++ }
++ if (token < 1 || token > 255) {
+ pr_warn("bad max cmd_sg_entries parameter '%s'\n",
+ p);
++ ret = -EINVAL;
+ goto out;
+ }
+ target->cmd_sg_cnt = token;
+ break;
+
+ case SRP_OPT_ALLOW_EXT_SG:
+- if (match_int(args, &token)) {
++ ret = match_int(args, &token);
++ if (ret) {
+ pr_warn("bad allow_ext_sg parameter '%s'\n", p);
+ goto out;
+ }
+@@ -3556,43 +3589,77 @@ static int srp_parse_options(struct net *net, const char *buf,
+ break;
+
+ case SRP_OPT_SG_TABLESIZE:
+- if (match_int(args, &token) || token < 1 ||
+- token > SG_MAX_SEGMENTS) {
++ ret = match_int(args, &token);
++ if (ret) {
++ pr_warn("match_int() failed for max sg_tablesize parameter '%s', Error %d\n",
++ p, ret);
++ goto out;
++ }
++ if (token < 1 || token > SG_MAX_SEGMENTS) {
+ pr_warn("bad max sg_tablesize parameter '%s'\n",
+ p);
++ ret = -EINVAL;
+ goto out;
+ }
+ target->sg_tablesize = token;
+ break;
+
+ case SRP_OPT_COMP_VECTOR:
+- if (match_int(args, &token) || token < 0) {
++ ret = match_int(args, &token);
++ if (ret) {
++ pr_warn("match_int() failed for comp_vector parameter '%s', Error %d\n",
++ p, ret);
++ goto out;
++ }
++ if (token < 0) {
+ pr_warn("bad comp_vector parameter '%s'\n", p);
++ ret = -EINVAL;
+ goto out;
+ }
+ target->comp_vector = token;
+ break;
+
+ case SRP_OPT_TL_RETRY_COUNT:
+- if (match_int(args, &token) || token < 2 || token > 7) {
++ ret = match_int(args, &token);
++ if (ret) {
++ pr_warn("match_int() failed for tl_retry_count parameter '%s', Error %d\n",
++ p, ret);
++ goto out;
++ }
++ if (token < 2 || token > 7) {
+ pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
+ p);
++ ret = -EINVAL;
+ goto out;
+ }
+ target->tl_retry_count = token;
+ break;
+
+ case SRP_OPT_MAX_IT_IU_SIZE:
+- if (match_int(args, &token) || token < 0) {
++ ret = match_int(args, &token);
++ if (ret) {
++ pr_warn("match_int() failed for max it_iu_size parameter '%s', Error %d\n",
++ p, ret);
++ goto out;
++ }
++ if (token < 0) {
+ pr_warn("bad maximum initiator to target IU size '%s'\n", p);
++ ret = -EINVAL;
+ goto out;
+ }
+ target->max_it_iu_size = token;
+ break;
+
+ case SRP_OPT_CH_COUNT:
+- if (match_int(args, &token) || token < 1) {
++ ret = match_int(args, &token);
++ if (ret) {
++ pr_warn("match_int() failed for channel count parameter '%s', Error %d\n",
++ p, ret);
++ goto out;
++ }
++ if (token < 1) {
+ pr_warn("bad channel count %s\n", p);
++ ret = -EINVAL;
+ goto out;
+ }
+ target->ch_count = token;
+@@ -3601,6 +3668,7 @@ static int srp_parse_options(struct net *net, const char *buf,
+ default:
+ pr_warn("unknown parameter or missing value '%s' in target creation request\n",
+ p);
++ ret = -EINVAL;
+ goto out;
+ }
+ }
+diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
+index 9dcf3f51f2dd9..04ca3d1c28162 100644
+--- a/drivers/input/joystick/Kconfig
++++ b/drivers/input/joystick/Kconfig
+@@ -46,6 +46,7 @@ config JOYSTICK_A3D
+ config JOYSTICK_ADC
+ tristate "Simple joystick connected over ADC"
+ depends on IIO
++ select IIO_BUFFER
+ select IIO_BUFFER_CB
+ help
+ Say Y here if you have a simple joystick connected over ADC.
+diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
+index 9f088900f863b..fa942651619d2 100644
+--- a/drivers/input/misc/Kconfig
++++ b/drivers/input/misc/Kconfig
+@@ -330,7 +330,7 @@ config INPUT_CPCAP_PWRBUTTON
+
+ config INPUT_WISTRON_BTNS
+ tristate "x86 Wistron laptop button interface"
+- depends on X86_32
++ depends on X86_32 && !UML
+ select INPUT_SPARSEKMAP
+ select NEW_LEDS
+ select LEDS_CLASS
+diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
+index ddb863bf63eec..e47ab6c1177f5 100644
+--- a/drivers/input/misc/iqs7222.c
++++ b/drivers/input/misc/iqs7222.c
+@@ -86,7 +86,9 @@ enum iqs7222_reg_key_id {
+ IQS7222_REG_KEY_TOUCH,
+ IQS7222_REG_KEY_DEBOUNCE,
+ IQS7222_REG_KEY_TAP,
++ IQS7222_REG_KEY_TAP_LEGACY,
+ IQS7222_REG_KEY_AXIAL,
++ IQS7222_REG_KEY_AXIAL_LEGACY,
+ IQS7222_REG_KEY_WHEEL,
+ IQS7222_REG_KEY_NO_WHEEL,
+ IQS7222_REG_KEY_RESERVED
+@@ -105,14 +107,14 @@ enum iqs7222_reg_grp_id {
+ IQS7222_NUM_REG_GRPS
+ };
+
+-static const char * const iqs7222_reg_grp_names[] = {
++static const char * const iqs7222_reg_grp_names[IQS7222_NUM_REG_GRPS] = {
+ [IQS7222_REG_GRP_CYCLE] = "cycle",
+ [IQS7222_REG_GRP_CHAN] = "channel",
+ [IQS7222_REG_GRP_SLDR] = "slider",
+ [IQS7222_REG_GRP_GPIO] = "gpio",
+ };
+
+-static const unsigned int iqs7222_max_cols[] = {
++static const unsigned int iqs7222_max_cols[IQS7222_NUM_REG_GRPS] = {
+ [IQS7222_REG_GRP_STAT] = IQS7222_MAX_COLS_STAT,
+ [IQS7222_REG_GRP_CYCLE] = IQS7222_MAX_COLS_CYCLE,
+ [IQS7222_REG_GRP_GLBL] = IQS7222_MAX_COLS_GLBL,
+@@ -202,10 +204,68 @@ struct iqs7222_dev_desc {
+ int allow_offset;
+ int event_offset;
+ int comms_offset;
++ bool legacy_gesture;
+ struct iqs7222_reg_grp_desc reg_grps[IQS7222_NUM_REG_GRPS];
+ };
+
+ static const struct iqs7222_dev_desc iqs7222_devs[] = {
++ {
++ .prod_num = IQS7222_PROD_NUM_A,
++ .fw_major = 1,
++ .fw_minor = 13,
++ .sldr_res = U8_MAX * 16,
++ .touch_link = 1768,
++ .allow_offset = 9,
++ .event_offset = 10,
++ .comms_offset = 12,
++ .reg_grps = {
++ [IQS7222_REG_GRP_STAT] = {
++ .base = IQS7222_SYS_STATUS,
++ .num_row = 1,
++ .num_col = 8,
++ },
++ [IQS7222_REG_GRP_CYCLE] = {
++ .base = 0x8000,
++ .num_row = 7,
++ .num_col = 3,
++ },
++ [IQS7222_REG_GRP_GLBL] = {
++ .base = 0x8700,
++ .num_row = 1,
++ .num_col = 3,
++ },
++ [IQS7222_REG_GRP_BTN] = {
++ .base = 0x9000,
++ .num_row = 12,
++ .num_col = 3,
++ },
++ [IQS7222_REG_GRP_CHAN] = {
++ .base = 0xA000,
++ .num_row = 12,
++ .num_col = 6,
++ },
++ [IQS7222_REG_GRP_FILT] = {
++ .base = 0xAC00,
++ .num_row = 1,
++ .num_col = 2,
++ },
++ [IQS7222_REG_GRP_SLDR] = {
++ .base = 0xB000,
++ .num_row = 2,
++ .num_col = 11,
++ },
++ [IQS7222_REG_GRP_GPIO] = {
++ .base = 0xC000,
++ .num_row = 1,
++ .num_col = 3,
++ },
++ [IQS7222_REG_GRP_SYS] = {
++ .base = IQS7222_SYS_SETUP,
++ .num_row = 1,
++ .num_col = 13,
++ },
++ },
++ },
+ {
+ .prod_num = IQS7222_PROD_NUM_A,
+ .fw_major = 1,
+@@ -215,6 +275,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ .allow_offset = 9,
+ .event_offset = 10,
+ .comms_offset = 12,
++ .legacy_gesture = true,
+ .reg_grps = {
+ [IQS7222_REG_GRP_STAT] = {
+ .base = IQS7222_SYS_STATUS,
+@@ -874,6 +935,16 @@ static const struct iqs7222_prop_desc iqs7222_props[] = {
+ .reg_offset = 9,
+ .reg_shift = 8,
+ .reg_width = 8,
++ .val_pitch = 16,
++ .label = "maximum gesture time",
++ },
++ {
++ .name = "azoteq,gesture-max-ms",
++ .reg_grp = IQS7222_REG_GRP_SLDR,
++ .reg_key = IQS7222_REG_KEY_TAP_LEGACY,
++ .reg_offset = 9,
++ .reg_shift = 8,
++ .reg_width = 8,
+ .val_pitch = 4,
+ .label = "maximum gesture time",
+ },
+@@ -884,6 +955,16 @@ static const struct iqs7222_prop_desc iqs7222_props[] = {
+ .reg_offset = 9,
+ .reg_shift = 3,
+ .reg_width = 5,
++ .val_pitch = 16,
++ .label = "minimum gesture time",
++ },
++ {
++ .name = "azoteq,gesture-min-ms",
++ .reg_grp = IQS7222_REG_GRP_SLDR,
++ .reg_key = IQS7222_REG_KEY_TAP_LEGACY,
++ .reg_offset = 9,
++ .reg_shift = 3,
++ .reg_width = 5,
+ .val_pitch = 4,
+ .label = "minimum gesture time",
+ },
+@@ -897,6 +978,16 @@ static const struct iqs7222_prop_desc iqs7222_props[] = {
+ .val_pitch = 16,
+ .label = "gesture distance",
+ },
++ {
++ .name = "azoteq,gesture-dist",
++ .reg_grp = IQS7222_REG_GRP_SLDR,
++ .reg_key = IQS7222_REG_KEY_AXIAL_LEGACY,
++ .reg_offset = 10,
++ .reg_shift = 8,
++ .reg_width = 8,
++ .val_pitch = 16,
++ .label = "gesture distance",
++ },
+ {
+ .name = "azoteq,gesture-max-ms",
+ .reg_grp = IQS7222_REG_GRP_SLDR,
+@@ -904,6 +995,16 @@ static const struct iqs7222_prop_desc iqs7222_props[] = {
+ .reg_offset = 10,
+ .reg_shift = 0,
+ .reg_width = 8,
++ .val_pitch = 16,
++ .label = "maximum gesture time",
++ },
++ {
++ .name = "azoteq,gesture-max-ms",
++ .reg_grp = IQS7222_REG_GRP_SLDR,
++ .reg_key = IQS7222_REG_KEY_AXIAL_LEGACY,
++ .reg_offset = 10,
++ .reg_shift = 0,
++ .reg_width = 8,
+ .val_pitch = 4,
+ .label = "maximum gesture time",
+ },
+@@ -1567,56 +1668,17 @@ static int iqs7222_gpio_select(struct iqs7222_private *iqs7222,
+ }
+
+ static int iqs7222_parse_props(struct iqs7222_private *iqs7222,
+- struct fwnode_handle **child_node,
+- int child_index,
++ struct fwnode_handle *reg_grp_node,
++ int reg_grp_index,
+ enum iqs7222_reg_grp_id reg_grp,
+ enum iqs7222_reg_key_id reg_key)
+ {
+- u16 *setup = iqs7222_setup(iqs7222, reg_grp, child_index);
++ u16 *setup = iqs7222_setup(iqs7222, reg_grp, reg_grp_index);
+ struct i2c_client *client = iqs7222->client;
+- struct fwnode_handle *reg_grp_node;
+- char reg_grp_name[16];
+ int i;
+
+- switch (reg_grp) {
+- case IQS7222_REG_GRP_CYCLE:
+- case IQS7222_REG_GRP_CHAN:
+- case IQS7222_REG_GRP_SLDR:
+- case IQS7222_REG_GRP_GPIO:
+- case IQS7222_REG_GRP_BTN:
+- /*
+- * These groups derive a child node and return it to the caller
+- * for additional group-specific processing. In some cases, the
+- * child node may have already been derived.
+- */
+- reg_grp_node = *child_node;
+- if (reg_grp_node)
+- break;
+-
+- snprintf(reg_grp_name, sizeof(reg_grp_name), "%s-%d",
+- iqs7222_reg_grp_names[reg_grp], child_index);
+-
+- reg_grp_node = device_get_named_child_node(&client->dev,
+- reg_grp_name);
+- if (!reg_grp_node)
+- return 0;
+-
+- *child_node = reg_grp_node;
+- break;
+-
+- case IQS7222_REG_GRP_GLBL:
+- case IQS7222_REG_GRP_FILT:
+- case IQS7222_REG_GRP_SYS:
+- /*
+- * These groups are not organized beneath a child node, nor are
+- * they subject to any additional processing by the caller.
+- */
+- reg_grp_node = dev_fwnode(&client->dev);
+- break;
+-
+- default:
+- return -EINVAL;
+- }
++ if (!setup)
++ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(iqs7222_props); i++) {
+ const char *name = iqs7222_props[i].name;
+@@ -1686,11 +1748,66 @@ static int iqs7222_parse_props(struct iqs7222_private *iqs7222,
+ return 0;
+ }
+
+-static int iqs7222_parse_cycle(struct iqs7222_private *iqs7222, int cycle_index)
++static int iqs7222_parse_event(struct iqs7222_private *iqs7222,
++ struct fwnode_handle *event_node,
++ int reg_grp_index,
++ enum iqs7222_reg_grp_id reg_grp,
++ enum iqs7222_reg_key_id reg_key,
++ u16 event_enable, u16 event_link,
++ unsigned int *event_type,
++ unsigned int *event_code)
++{
++ struct i2c_client *client = iqs7222->client;
++ int error;
++
++ error = iqs7222_parse_props(iqs7222, event_node, reg_grp_index,
++ reg_grp, reg_key);
++ if (error)
++ return error;
++
++ error = iqs7222_gpio_select(iqs7222, event_node, event_enable,
++ event_link);
++ if (error)
++ return error;
++
++ error = fwnode_property_read_u32(event_node, "linux,code", event_code);
++ if (error == -EINVAL) {
++ return 0;
++ } else if (error) {
++ dev_err(&client->dev, "Failed to read %s code: %d\n",
++ fwnode_get_name(event_node), error);
++ return error;
++ }
++
++ if (!event_type) {
++ input_set_capability(iqs7222->keypad, EV_KEY, *event_code);
++ return 0;
++ }
++
++ error = fwnode_property_read_u32(event_node, "linux,input-type",
++ event_type);
++ if (error == -EINVAL) {
++ *event_type = EV_KEY;
++ } else if (error) {
++ dev_err(&client->dev, "Failed to read %s input type: %d\n",
++ fwnode_get_name(event_node), error);
++ return error;
++ } else if (*event_type != EV_KEY && *event_type != EV_SW) {
++ dev_err(&client->dev, "Invalid %s input type: %d\n",
++ fwnode_get_name(event_node), *event_type);
++ return -EINVAL;
++ }
++
++ input_set_capability(iqs7222->keypad, *event_type, *event_code);
++
++ return 0;
++}
++
++static int iqs7222_parse_cycle(struct iqs7222_private *iqs7222,
++ struct fwnode_handle *cycle_node, int cycle_index)
+ {
+ u16 *cycle_setup = iqs7222->cycle_setup[cycle_index];
+ struct i2c_client *client = iqs7222->client;
+- struct fwnode_handle *cycle_node = NULL;
+ unsigned int pins[9];
+ int error, count, i;
+
+@@ -1698,17 +1815,7 @@ static int iqs7222_parse_cycle(struct iqs7222_private *iqs7222, int cycle_index)
+ * Each channel shares a cycle with one other channel; the mapping of
+ * channels to cycles is fixed. Properties defined for a cycle impact
+ * both channels tied to the cycle.
+- */
+- error = iqs7222_parse_props(iqs7222, &cycle_node, cycle_index,
+- IQS7222_REG_GRP_CYCLE,
+- IQS7222_REG_KEY_NONE);
+- if (error)
+- return error;
+-
+- if (!cycle_node)
+- return 0;
+-
+- /*
++ *
+ * Unlike channels which are restricted to a select range of CRx pins
+ * based on channel number, any cycle can claim any of the device's 9
+ * CTx pins (CTx0-8).
+@@ -1750,11 +1857,11 @@ static int iqs7222_parse_cycle(struct iqs7222_private *iqs7222, int cycle_index)
+ return 0;
+ }
+
+-static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
++static int iqs7222_parse_chan(struct iqs7222_private *iqs7222,
++ struct fwnode_handle *chan_node, int chan_index)
+ {
+ const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ struct i2c_client *client = iqs7222->client;
+- struct fwnode_handle *chan_node = NULL;
+ int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
+ int ext_chan = rounddown(num_chan, 10);
+ int error, i;
+@@ -1762,15 +1869,6 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
+ u16 *sys_setup = iqs7222->sys_setup;
+ unsigned int val;
+
+- error = iqs7222_parse_props(iqs7222, &chan_node, chan_index,
+- IQS7222_REG_GRP_CHAN,
+- IQS7222_REG_KEY_NONE);
+- if (error)
+- return error;
+-
+- if (!chan_node)
+- return 0;
+-
+ if (dev_desc->allow_offset &&
+ fwnode_property_present(chan_node, "azoteq,ulp-allow"))
+ sys_setup[dev_desc->allow_offset] &= ~BIT(chan_index);
+@@ -1810,8 +1908,9 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
+ chan_setup[0] |= IQS7222_CHAN_SETUP_0_REF_MODE_FOLLOW;
+ chan_setup[4] = val * 42 + 1048;
+
+- if (!fwnode_property_read_u32(chan_node, "azoteq,ref-weight",
+- &val)) {
++ error = fwnode_property_read_u32(chan_node, "azoteq,ref-weight",
++ &val);
++ if (!error) {
+ if (val > U16_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s reference weight: %u\n",
+@@ -1820,6 +1919,11 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
+ }
+
+ chan_setup[5] = val;
++ } else if (error != -EINVAL) {
++ dev_err(&client->dev,
++ "Failed to read %s reference weight: %d\n",
++ fwnode_get_name(chan_node), error);
++ return error;
+ }
+
+ /*
+@@ -1892,21 +1996,10 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
+ if (!event_node)
+ continue;
+
+- error = iqs7222_parse_props(iqs7222, &event_node, chan_index,
+- IQS7222_REG_GRP_BTN,
+- iqs7222_kp_events[i].reg_key);
+- if (error)
+- return error;
+-
+- error = iqs7222_gpio_select(iqs7222, event_node,
+- BIT(chan_index),
+- dev_desc->touch_link - (i ? 0 : 2));
+- if (error)
+- return error;
+-
+- if (!fwnode_property_read_u32(event_node,
+- "azoteq,timeout-press-ms",
+- &val)) {
++ error = fwnode_property_read_u32(event_node,
++ "azoteq,timeout-press-ms",
++ &val);
++ if (!error) {
+ /*
+ * The IQS7222B employs a global pair of press timeout
+ * registers as opposed to channel-specific registers.
+@@ -1919,57 +2012,31 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
+ if (val > U8_MAX * 500) {
+ dev_err(&client->dev,
+ "Invalid %s press timeout: %u\n",
+- fwnode_get_name(chan_node), val);
++ fwnode_get_name(event_node), val);
++ fwnode_handle_put(event_node);
+ return -EINVAL;
+ }
+
+ *setup &= ~(U8_MAX << i * 8);
+ *setup |= (val / 500 << i * 8);
+- }
+-
+- error = fwnode_property_read_u32(event_node, "linux,code",
+- &val);
+- if (error) {
+- dev_err(&client->dev, "Failed to read %s code: %d\n",
+- fwnode_get_name(chan_node), error);
++ } else if (error != -EINVAL) {
++ dev_err(&client->dev,
++ "Failed to read %s press timeout: %d\n",
++ fwnode_get_name(event_node), error);
++ fwnode_handle_put(event_node);
+ return error;
+ }
+
+- iqs7222->kp_code[chan_index][i] = val;
+- iqs7222->kp_type[chan_index][i] = EV_KEY;
+-
+- if (fwnode_property_present(event_node, "linux,input-type")) {
+- error = fwnode_property_read_u32(event_node,
+- "linux,input-type",
+- &val);
+- if (error) {
+- dev_err(&client->dev,
+- "Failed to read %s input type: %d\n",
+- fwnode_get_name(chan_node), error);
+- return error;
+- }
+-
+- if (val != EV_KEY && val != EV_SW) {
+- dev_err(&client->dev,
+- "Invalid %s input type: %u\n",
+- fwnode_get_name(chan_node), val);
+- return -EINVAL;
+- }
+-
+- iqs7222->kp_type[chan_index][i] = val;
+- }
+-
+- /*
+- * Reference channels can opt out of event reporting by using
+- * KEY_RESERVED in place of a true key or switch code.
+- */
+- if (iqs7222->kp_type[chan_index][i] == EV_KEY &&
+- iqs7222->kp_code[chan_index][i] == KEY_RESERVED)
+- continue;
+-
+- input_set_capability(iqs7222->keypad,
+- iqs7222->kp_type[chan_index][i],
+- iqs7222->kp_code[chan_index][i]);
++ error = iqs7222_parse_event(iqs7222, event_node, chan_index,
++ IQS7222_REG_GRP_BTN,
++ iqs7222_kp_events[i].reg_key,
++ BIT(chan_index),
++ dev_desc->touch_link - (i ? 0 : 2),
++ &iqs7222->kp_type[chan_index][i],
++ &iqs7222->kp_code[chan_index][i]);
++ fwnode_handle_put(event_node);
++ if (error)
++ return error;
+
+ if (!dev_desc->event_offset)
+ continue;
+@@ -1981,16 +2048,16 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
+ * The following call handles a special pair of properties that apply
+ * to a channel node, but reside within the button (event) group.
+ */
+- return iqs7222_parse_props(iqs7222, &chan_node, chan_index,
++ return iqs7222_parse_props(iqs7222, chan_node, chan_index,
+ IQS7222_REG_GRP_BTN,
+ IQS7222_REG_KEY_DEBOUNCE);
+ }
+
+-static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
++static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222,
++ struct fwnode_handle *sldr_node, int sldr_index)
+ {
+ const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ struct i2c_client *client = iqs7222->client;
+- struct fwnode_handle *sldr_node = NULL;
+ int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
+ int ext_chan = rounddown(num_chan, 10);
+ int count, error, reg_offset, i;
+@@ -1998,15 +2065,6 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+ u16 *sldr_setup = iqs7222->sldr_setup[sldr_index];
+ unsigned int chan_sel[4], val;
+
+- error = iqs7222_parse_props(iqs7222, &sldr_node, sldr_index,
+- IQS7222_REG_GRP_SLDR,
+- IQS7222_REG_KEY_NONE);
+- if (error)
+- return error;
+-
+- if (!sldr_node)
+- return 0;
+-
+ /*
+ * Each slider can be spread across 3 to 4 channels. It is possible to
+ * select only 2 channels, but doing so prevents the slider from using
+@@ -2065,8 +2123,9 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+ if (fwnode_property_present(sldr_node, "azoteq,use-prox"))
+ sldr_setup[4 + reg_offset] -= 2;
+
+- if (!fwnode_property_read_u32(sldr_node, "azoteq,slider-size", &val)) {
+- if (!val || val > dev_desc->sldr_res) {
++ error = fwnode_property_read_u32(sldr_node, "azoteq,slider-size", &val);
++ if (!error) {
++ if (val > dev_desc->sldr_res) {
+ dev_err(&client->dev, "Invalid %s size: %u\n",
+ fwnode_get_name(sldr_node), val);
+ return -EINVAL;
+@@ -2079,9 +2138,21 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+ sldr_setup[2] |= (val / 16 <<
+ IQS7222_SLDR_SETUP_2_RES_SHIFT);
+ }
++ } else if (error != -EINVAL) {
++ dev_err(&client->dev, "Failed to read %s size: %d\n",
++ fwnode_get_name(sldr_node), error);
++ return error;
+ }
+
+- if (!fwnode_property_read_u32(sldr_node, "azoteq,top-speed", &val)) {
++ if (!(reg_offset ? sldr_setup[3]
++ : sldr_setup[2] & IQS7222_SLDR_SETUP_2_RES_MASK)) {
++ dev_err(&client->dev, "Undefined %s size\n",
++ fwnode_get_name(sldr_node));
++ return -EINVAL;
++ }
++
++ error = fwnode_property_read_u32(sldr_node, "azoteq,top-speed", &val);
++ if (!error) {
+ if (val > (reg_offset ? U16_MAX : U8_MAX * 4)) {
+ dev_err(&client->dev, "Invalid %s top speed: %u\n",
+ fwnode_get_name(sldr_node), val);
+@@ -2094,9 +2165,14 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+ sldr_setup[2] &= ~IQS7222_SLDR_SETUP_2_TOP_SPEED_MASK;
+ sldr_setup[2] |= (val / 4);
+ }
++ } else if (error != -EINVAL) {
++ dev_err(&client->dev, "Failed to read %s top speed: %d\n",
++ fwnode_get_name(sldr_node), error);
++ return error;
+ }
+
+- if (!fwnode_property_read_u32(sldr_node, "linux,axis", &val)) {
++ error = fwnode_property_read_u32(sldr_node, "linux,axis", &val);
++ if (!error) {
+ u16 sldr_max = sldr_setup[3] - 1;
+
+ if (!reg_offset) {
+@@ -2110,6 +2186,10 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+
+ input_set_abs_params(iqs7222->keypad, val, 0, sldr_max, 0, 0);
+ iqs7222->sl_axis[sldr_index] = val;
++ } else if (error != -EINVAL) {
++ dev_err(&client->dev, "Failed to read %s axis: %d\n",
++ fwnode_get_name(sldr_node), error);
++ return error;
+ }
+
+ if (dev_desc->wheel_enable) {
+@@ -2130,46 +2210,47 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+ for (i = 0; i < ARRAY_SIZE(iqs7222_sl_events); i++) {
+ const char *event_name = iqs7222_sl_events[i].name;
+ struct fwnode_handle *event_node;
++ enum iqs7222_reg_key_id reg_key;
+
+ event_node = fwnode_get_named_child_node(sldr_node, event_name);
+ if (!event_node)
+ continue;
+
+- error = iqs7222_parse_props(iqs7222, &event_node, sldr_index,
+- IQS7222_REG_GRP_SLDR,
+- reg_offset ?
+- IQS7222_REG_KEY_RESERVED :
+- iqs7222_sl_events[i].reg_key);
+- if (error)
+- return error;
++ /*
++ * Depending on the device, gestures are either offered using
++ * one of two timing resolutions, or are not supported at all.
++ */
++ if (reg_offset)
++ reg_key = IQS7222_REG_KEY_RESERVED;
++ else if (dev_desc->legacy_gesture &&
++ iqs7222_sl_events[i].reg_key == IQS7222_REG_KEY_TAP)
++ reg_key = IQS7222_REG_KEY_TAP_LEGACY;
++ else if (dev_desc->legacy_gesture &&
++ iqs7222_sl_events[i].reg_key == IQS7222_REG_KEY_AXIAL)
++ reg_key = IQS7222_REG_KEY_AXIAL_LEGACY;
++ else
++ reg_key = iqs7222_sl_events[i].reg_key;
+
+ /*
+ * The press/release event does not expose a direct GPIO link,
+ * but one can be emulated by tying each of the participating
+ * channels to the same GPIO.
+ */
+- error = iqs7222_gpio_select(iqs7222, event_node,
++ error = iqs7222_parse_event(iqs7222, event_node, sldr_index,
++ IQS7222_REG_GRP_SLDR, reg_key,
+ i ? iqs7222_sl_events[i].enable
+ : sldr_setup[3 + reg_offset],
+ i ? 1568 + sldr_index * 30
+- : sldr_setup[4 + reg_offset]);
++ : sldr_setup[4 + reg_offset],
++ NULL,
++ &iqs7222->sl_code[sldr_index][i]);
++ fwnode_handle_put(event_node);
+ if (error)
+ return error;
+
+ if (!reg_offset)
+ sldr_setup[9] |= iqs7222_sl_events[i].enable;
+
+- error = fwnode_property_read_u32(event_node, "linux,code",
+- &val);
+- if (error) {
+- dev_err(&client->dev, "Failed to read %s code: %d\n",
+- fwnode_get_name(sldr_node), error);
+- return error;
+- }
+-
+- iqs7222->sl_code[sldr_index][i] = val;
+- input_set_capability(iqs7222->keypad, EV_KEY, val);
+-
+ if (!dev_desc->event_offset)
+ continue;
+
+@@ -2190,19 +2271,63 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+ * The following call handles a special pair of properties that shift
+ * to make room for a wheel enable control in the case of IQS7222C.
+ */
+- return iqs7222_parse_props(iqs7222, &sldr_node, sldr_index,
++ return iqs7222_parse_props(iqs7222, sldr_node, sldr_index,
+ IQS7222_REG_GRP_SLDR,
+ dev_desc->wheel_enable ?
+ IQS7222_REG_KEY_WHEEL :
+ IQS7222_REG_KEY_NO_WHEEL);
+ }
+
++static int (*iqs7222_parse_extra[IQS7222_NUM_REG_GRPS])
++ (struct iqs7222_private *iqs7222,
++ struct fwnode_handle *reg_grp_node,
++ int reg_grp_index) = {
++ [IQS7222_REG_GRP_CYCLE] = iqs7222_parse_cycle,
++ [IQS7222_REG_GRP_CHAN] = iqs7222_parse_chan,
++ [IQS7222_REG_GRP_SLDR] = iqs7222_parse_sldr,
++};
++
++static int iqs7222_parse_reg_grp(struct iqs7222_private *iqs7222,
++ enum iqs7222_reg_grp_id reg_grp,
++ int reg_grp_index)
++{
++ struct i2c_client *client = iqs7222->client;
++ struct fwnode_handle *reg_grp_node;
++ int error;
++
++ if (iqs7222_reg_grp_names[reg_grp]) {
++ char reg_grp_name[16];
++
++ snprintf(reg_grp_name, sizeof(reg_grp_name), "%s-%d",
++ iqs7222_reg_grp_names[reg_grp], reg_grp_index);
++
++ reg_grp_node = device_get_named_child_node(&client->dev,
++ reg_grp_name);
++ } else {
++ reg_grp_node = fwnode_handle_get(dev_fwnode(&client->dev));
++ }
++
++ if (!reg_grp_node)
++ return 0;
++
++ error = iqs7222_parse_props(iqs7222, reg_grp_node, reg_grp_index,
++ reg_grp, IQS7222_REG_KEY_NONE);
++
++ if (!error && iqs7222_parse_extra[reg_grp])
++ error = iqs7222_parse_extra[reg_grp](iqs7222, reg_grp_node,
++ reg_grp_index);
++
++ fwnode_handle_put(reg_grp_node);
++
++ return error;
++}
++
+ static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
+ {
+ const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ const struct iqs7222_reg_grp_desc *reg_grps = dev_desc->reg_grps;
+ u16 *sys_setup = iqs7222->sys_setup;
+- int error, i;
++ int error, i, j;
+
+ if (dev_desc->allow_offset)
+ sys_setup[dev_desc->allow_offset] = U16_MAX;
+@@ -2210,32 +2335,13 @@ static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
+ if (dev_desc->event_offset)
+ sys_setup[dev_desc->event_offset] = IQS7222_EVENT_MASK_ATI;
+
+- for (i = 0; i < reg_grps[IQS7222_REG_GRP_CYCLE].num_row; i++) {
+- error = iqs7222_parse_cycle(iqs7222, i);
+- if (error)
+- return error;
+- }
+-
+- error = iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_GLBL,
+- IQS7222_REG_KEY_NONE);
+- if (error)
+- return error;
+-
+ for (i = 0; i < reg_grps[IQS7222_REG_GRP_GPIO].num_row; i++) {
+- struct fwnode_handle *gpio_node = NULL;
+ u16 *gpio_setup = iqs7222->gpio_setup[i];
+- int j;
+
+ gpio_setup[0] &= ~IQS7222_GPIO_SETUP_0_GPIO_EN;
+ gpio_setup[1] = 0;
+ gpio_setup[2] = 0;
+
+- error = iqs7222_parse_props(iqs7222, &gpio_node, i,
+- IQS7222_REG_GRP_GPIO,
+- IQS7222_REG_KEY_NONE);
+- if (error)
+- return error;
+-
+ if (reg_grps[IQS7222_REG_GRP_GPIO].num_row == 1)
+ continue;
+
+@@ -2258,29 +2364,21 @@ static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
+ chan_setup[5] = 0;
+ }
+
+- for (i = 0; i < reg_grps[IQS7222_REG_GRP_CHAN].num_row; i++) {
+- error = iqs7222_parse_chan(iqs7222, i);
+- if (error)
+- return error;
+- }
+-
+- error = iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_FILT,
+- IQS7222_REG_KEY_NONE);
+- if (error)
+- return error;
+-
+ for (i = 0; i < reg_grps[IQS7222_REG_GRP_SLDR].num_row; i++) {
+ u16 *sldr_setup = iqs7222->sldr_setup[i];
+
+ sldr_setup[0] &= ~IQS7222_SLDR_SETUP_0_CHAN_CNT_MASK;
++ }
+
+- error = iqs7222_parse_sldr(iqs7222, i);
+- if (error)
+- return error;
++ for (i = 0; i < IQS7222_NUM_REG_GRPS; i++) {
++ for (j = 0; j < reg_grps[i].num_row; j++) {
++ error = iqs7222_parse_reg_grp(iqs7222, i, j);
++ if (error)
++ return error;
++ }
+ }
+
+- return iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_SYS,
+- IQS7222_REG_KEY_NONE);
++ return 0;
+ }
+
+ static int iqs7222_report(struct iqs7222_private *iqs7222)
+diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
+index 879a4d984c907..e1308e179dd6f 100644
+--- a/drivers/input/touchscreen/elants_i2c.c
++++ b/drivers/input/touchscreen/elants_i2c.c
+@@ -1329,14 +1329,12 @@ static int elants_i2c_power_on(struct elants_data *ts)
+ if (IS_ERR_OR_NULL(ts->reset_gpio))
+ return 0;
+
+- gpiod_set_value_cansleep(ts->reset_gpio, 1);
+-
+ error = regulator_enable(ts->vcc33);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "failed to enable vcc33 regulator: %d\n",
+ error);
+- goto release_reset_gpio;
++ return error;
+ }
+
+ error = regulator_enable(ts->vccio);
+@@ -1345,7 +1343,7 @@ static int elants_i2c_power_on(struct elants_data *ts)
+ "failed to enable vccio regulator: %d\n",
+ error);
+ regulator_disable(ts->vcc33);
+- goto release_reset_gpio;
++ return error;
+ }
+
+ /*
+@@ -1354,7 +1352,6 @@ static int elants_i2c_power_on(struct elants_data *ts)
+ */
+ udelay(ELAN_POWERON_DELAY_USEC);
+
+-release_reset_gpio:
+ gpiod_set_value_cansleep(ts->reset_gpio, 0);
+ if (error)
+ return error;
+@@ -1462,7 +1459,7 @@ static int elants_i2c_probe(struct i2c_client *client)
+ return error;
+ }
+
+- ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_LOW);
++ ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts->reset_gpio)) {
+ error = PTR_ERR(ts->reset_gpio);
+
+diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
+index 35cd448efdfbe..82d5e8a8c19ea 100644
+--- a/drivers/interconnect/qcom/sc7180.c
++++ b/drivers/interconnect/qcom/sc7180.c
+@@ -369,7 +369,7 @@ static const struct qcom_icc_desc sc7180_gem_noc = {
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+ };
+
+-static struct qcom_icc_bcm *mc_virt_bcms[] = {
++static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+ };
+diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
+index 6a1f02c62dffc..9f7fab49a5a90 100644
+--- a/drivers/iommu/amd/iommu_v2.c
++++ b/drivers/iommu/amd/iommu_v2.c
+@@ -587,6 +587,7 @@ out_drop_state:
+ put_device_state(dev_state);
+
+ out:
++ pci_dev_put(pdev);
+ return ret;
+ }
+
+diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
+index 0d03f837a5d4e..7a1a413f75ab2 100644
+--- a/drivers/iommu/fsl_pamu.c
++++ b/drivers/iommu/fsl_pamu.c
+@@ -868,7 +868,7 @@ static int fsl_pamu_probe(struct platform_device *pdev)
+ ret = create_csd(ppaact_phys, mem_size, csd_port_id);
+ if (ret) {
+ dev_err(dev, "could not create coherence subdomain\n");
+- return ret;
++ goto error;
+ }
+ }
+
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 65a3b3d886dc0..959d895fc1dff 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -283,13 +283,23 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+ struct iommu_device *iommu_dev;
+ struct iommu_group *group;
++ static DEFINE_MUTEX(iommu_probe_device_lock);
+ int ret;
+
+ if (!ops)
+ return -ENODEV;
+-
+- if (!dev_iommu_get(dev))
+- return -ENOMEM;
++ /*
++ * Serialise to avoid races between IOMMU drivers registering in
++ * parallel and/or the "replay" calls from ACPI/OF code via client
++ * driver probe. Once the latter have been cleaned up we should
++ * probably be able to use device_lock() here to minimise the scope,
++ * but for now enforcing a simple global ordering is fine.
++ */
++ mutex_lock(&iommu_probe_device_lock);
++ if (!dev_iommu_get(dev)) {
++ ret = -ENOMEM;
++ goto err_unlock;
++ }
+
+ if (!try_module_get(ops->owner)) {
+ ret = -EINVAL;
+@@ -309,11 +319,14 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
+ ret = PTR_ERR(group);
+ goto out_release;
+ }
+- iommu_group_put(group);
+
++ mutex_lock(&group->mutex);
+ if (group_list && !group->default_domain && list_empty(&group->entry))
+ list_add_tail(&group->entry, group_list);
++ mutex_unlock(&group->mutex);
++ iommu_group_put(group);
+
++ mutex_unlock(&iommu_probe_device_lock);
+ iommu_device_link(iommu_dev, dev);
+
+ return 0;
+@@ -328,6 +341,9 @@ out_module_put:
+ err_free:
+ dev_iommu_free(dev);
+
++err_unlock:
++ mutex_unlock(&iommu_probe_device_lock);
++
+ return ret;
+ }
+
+@@ -1799,11 +1815,11 @@ int bus_iommu_probe(struct bus_type *bus)
+ return ret;
+
+ list_for_each_entry_safe(group, next, &group_list, entry) {
++ mutex_lock(&group->mutex);
++
+ /* Remove item from the list */
+ list_del_init(&group->entry);
+
+- mutex_lock(&group->mutex);
+-
+ /* Try to allocate default domain */
+ probe_alloc_default_domain(bus, group);
+
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index 2ab2ecfe01f80..56d007582b6fa 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -454,7 +454,7 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
+ fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
+ }
+
+- if (report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova,
++ if (!dom || report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova,
+ write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
+ dev_err_ratelimited(
+ bank->parent_dev,
+@@ -1044,20 +1044,24 @@ static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **m
+ struct mtk_iommu_data *data)
+ {
+ struct device_node *larbnode, *smicomm_node, *smi_subcomm_node;
+- struct platform_device *plarbdev;
++ struct platform_device *plarbdev, *pcommdev;
+ struct device_link *link;
+ int i, larb_nr, ret;
+
+ larb_nr = of_count_phandle_with_args(dev->of_node, "mediatek,larbs", NULL);
+ if (larb_nr < 0)
+ return larb_nr;
++ if (larb_nr == 0 || larb_nr > MTK_LARB_NR_MAX)
++ return -EINVAL;
+
+ for (i = 0; i < larb_nr; i++) {
+ u32 id;
+
+ larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
+- if (!larbnode)
+- return -EINVAL;
++ if (!larbnode) {
++ ret = -EINVAL;
++ goto err_larbdev_put;
++ }
+
+ if (!of_device_is_available(larbnode)) {
+ of_node_put(larbnode);
+@@ -1067,20 +1071,32 @@ static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **m
+ ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
+ if (ret)/* The id is consecutive if there is no this property */
+ id = i;
++ if (id >= MTK_LARB_NR_MAX) {
++ of_node_put(larbnode);
++ ret = -EINVAL;
++ goto err_larbdev_put;
++ }
+
+ plarbdev = of_find_device_by_node(larbnode);
++ of_node_put(larbnode);
+ if (!plarbdev) {
+- of_node_put(larbnode);
+- return -ENODEV;
++ ret = -ENODEV;
++ goto err_larbdev_put;
+ }
+- if (!plarbdev->dev.driver) {
+- of_node_put(larbnode);
+- return -EPROBE_DEFER;
++ if (data->larb_imu[id].dev) {
++ platform_device_put(plarbdev);
++ ret = -EEXIST;
++ goto err_larbdev_put;
+ }
+ data->larb_imu[id].dev = &plarbdev->dev;
+
+- component_match_add_release(dev, match, component_release_of,
+- component_compare_of, larbnode);
++ if (!plarbdev->dev.driver) {
++ ret = -EPROBE_DEFER;
++ goto err_larbdev_put;
++ }
++
++ component_match_add(dev, match, component_compare_dev, &plarbdev->dev);
++ platform_device_put(plarbdev);
+ }
+
+ /* Get smi-(sub)-common dev from the last larb. */
+@@ -1098,17 +1114,28 @@ static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **m
+ else
+ smicomm_node = smi_subcomm_node;
+
+- plarbdev = of_find_device_by_node(smicomm_node);
++ pcommdev = of_find_device_by_node(smicomm_node);
+ of_node_put(smicomm_node);
+- data->smicomm_dev = &plarbdev->dev;
++ if (!pcommdev)
++ return -ENODEV;
++ data->smicomm_dev = &pcommdev->dev;
+
+ link = device_link_add(data->smicomm_dev, dev,
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
++ platform_device_put(pcommdev);
+ if (!link) {
+ dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
+ return -EINVAL;
+ }
+ return 0;
++
++err_larbdev_put:
++ for (i = MTK_LARB_NR_MAX - 1; i >= 0; i--) {
++ if (!data->larb_imu[i].dev)
++ continue;
++ put_device(data->larb_imu[i].dev);
++ }
++ return ret;
+ }
+
+ static int mtk_iommu_probe(struct platform_device *pdev)
+@@ -1173,6 +1200,8 @@ static int mtk_iommu_probe(struct platform_device *pdev)
+
+ banks_num = data->plat_data->banks_num;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res)
++ return -EINVAL;
+ if (resource_size(res) < banks_num * MTK_IOMMU_BANK_SZ) {
+ dev_err(dev, "banknr %d. res %pR is not enough.\n", banks_num, res);
+ return -EINVAL;
+diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
+index a3fc59b814ab5..a68eadd64f38d 100644
+--- a/drivers/iommu/rockchip-iommu.c
++++ b/drivers/iommu/rockchip-iommu.c
+@@ -280,19 +280,17 @@ static u32 rk_mk_pte(phys_addr_t page, int prot)
+ * 11:9 - Page address bit 34:32
+ * 8:4 - Page address bit 39:35
+ * 3 - Security
+- * 2 - Readable
+- * 1 - Writable
++ * 2 - Writable
++ * 1 - Readable
+ * 0 - 1 if Page @ Page address is valid
+ */
+-#define RK_PTE_PAGE_READABLE_V2 BIT(2)
+-#define RK_PTE_PAGE_WRITABLE_V2 BIT(1)
+
+ static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
+ {
+ u32 flags = 0;
+
+- flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
+- flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;
++ flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
++ flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
+
+ return rk_mk_dte_v2(page) | flags;
+ }
+diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
+index 3c071782f6f16..c2e5e81d609e1 100644
+--- a/drivers/iommu/s390-iommu.c
++++ b/drivers/iommu/s390-iommu.c
+@@ -79,10 +79,36 @@ static void s390_domain_free(struct iommu_domain *domain)
+ {
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+
++ WARN_ON(!list_empty(&s390_domain->devices));
+ dma_cleanup_tables(s390_domain->dma_table);
+ kfree(s390_domain);
+ }
+
++static void __s390_iommu_detach_device(struct zpci_dev *zdev)
++{
++ struct s390_domain *s390_domain = zdev->s390_domain;
++ struct s390_domain_device *domain_device, *tmp;
++ unsigned long flags;
++
++ if (!s390_domain)
++ return;
++
++ spin_lock_irqsave(&s390_domain->list_lock, flags);
++ list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices,
++ list) {
++ if (domain_device->zdev == zdev) {
++ list_del(&domain_device->list);
++ kfree(domain_device);
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&s390_domain->list_lock, flags);
++
++ zpci_unregister_ioat(zdev, 0);
++ zdev->s390_domain = NULL;
++ zdev->dma_table = NULL;
++}
++
+ static int s390_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+ {
+@@ -90,7 +116,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
+ struct zpci_dev *zdev = to_zpci_dev(dev);
+ struct s390_domain_device *domain_device;
+ unsigned long flags;
+- int cc, rc;
++ int cc, rc = 0;
+
+ if (!zdev)
+ return -ENODEV;
+@@ -99,24 +125,18 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
+ if (!domain_device)
+ return -ENOMEM;
+
+- if (zdev->dma_table && !zdev->s390_domain) {
+- cc = zpci_dma_exit_device(zdev);
+- if (cc) {
+- rc = -EIO;
+- goto out_free;
+- }
+- }
+-
+ if (zdev->s390_domain)
+- zpci_unregister_ioat(zdev, 0);
++ __s390_iommu_detach_device(zdev);
++ else if (zdev->dma_table)
++ zpci_dma_exit_device(zdev);
+
+- zdev->dma_table = s390_domain->dma_table;
+ cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+- virt_to_phys(zdev->dma_table));
++ virt_to_phys(s390_domain->dma_table));
+ if (cc) {
+ rc = -EIO;
+- goto out_restore;
++ goto out_free;
+ }
++ zdev->dma_table = s390_domain->dma_table;
+
+ spin_lock_irqsave(&s390_domain->list_lock, flags);
+ /* First device defines the DMA range limits */
+@@ -127,9 +147,9 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
+ /* Allow only devices with identical DMA range limits */
+ } else if (domain->geometry.aperture_start != zdev->start_dma ||
+ domain->geometry.aperture_end != zdev->end_dma) {
+- rc = -EINVAL;
+ spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+- goto out_restore;
++ rc = -EINVAL;
++ goto out_unregister;
+ }
+ domain_device->zdev = zdev;
+ zdev->s390_domain = s390_domain;
+@@ -138,14 +158,9 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
+
+ return 0;
+
+-out_restore:
+- if (!zdev->s390_domain) {
+- zpci_dma_init_device(zdev);
+- } else {
+- zdev->dma_table = zdev->s390_domain->dma_table;
+- zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+- virt_to_phys(zdev->dma_table));
+- }
++out_unregister:
++ zpci_unregister_ioat(zdev, 0);
++ zdev->dma_table = NULL;
+ out_free:
+ kfree(domain_device);
+
+@@ -155,32 +170,12 @@ out_free:
+ static void s390_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+ {
+- struct s390_domain *s390_domain = to_s390_domain(domain);
+ struct zpci_dev *zdev = to_zpci_dev(dev);
+- struct s390_domain_device *domain_device, *tmp;
+- unsigned long flags;
+- int found = 0;
+
+- if (!zdev)
+- return;
++ WARN_ON(zdev->s390_domain != to_s390_domain(domain));
+
+- spin_lock_irqsave(&s390_domain->list_lock, flags);
+- list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices,
+- list) {
+- if (domain_device->zdev == zdev) {
+- list_del(&domain_device->list);
+- kfree(domain_device);
+- found = 1;
+- break;
+- }
+- }
+- spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+-
+- if (found && (zdev->s390_domain == s390_domain)) {
+- zdev->s390_domain = NULL;
+- zpci_unregister_ioat(zdev, 0);
+- zpci_dma_init_device(zdev);
+- }
++ __s390_iommu_detach_device(zdev);
++ zpci_dma_init_device(zdev);
+ }
+
+ static struct iommu_device *s390_iommu_probe_device(struct device *dev)
+@@ -198,24 +193,13 @@ static struct iommu_device *s390_iommu_probe_device(struct device *dev)
+ static void s390_iommu_release_device(struct device *dev)
+ {
+ struct zpci_dev *zdev = to_zpci_dev(dev);
+- struct iommu_domain *domain;
+
+ /*
+- * This is a workaround for a scenario where the IOMMU API common code
+- * "forgets" to call the detach_dev callback: After binding a device
+- * to vfio-pci and completing the VFIO_SET_IOMMU ioctl (which triggers
+- * the attach_dev), removing the device via
+- * "echo 1 > /sys/bus/pci/devices/.../remove" won't trigger detach_dev,
+- * only release_device will be called via the BUS_NOTIFY_REMOVED_DEVICE
+- * notifier.
+- *
+- * So let's call detach_dev from here if it hasn't been called before.
++ * release_device is expected to detach any domain currently attached
++ * to the device, but keep it attached to other devices in the group.
+ */
+- if (zdev && zdev->s390_domain) {
+- domain = iommu_get_domain_for_dev(dev);
+- if (domain)
+- s390_iommu_detach_device(domain, dev);
+- }
++ if (zdev)
++ __s390_iommu_detach_device(zdev);
+ }
+
+ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
+diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
+index cd9b74ee24def..5b585eace3d46 100644
+--- a/drivers/iommu/sun50i-iommu.c
++++ b/drivers/iommu/sun50i-iommu.c
+@@ -27,6 +27,7 @@
+ #include <linux/types.h>
+
+ #define IOMMU_RESET_REG 0x010
++#define IOMMU_RESET_RELEASE_ALL 0xffffffff
+ #define IOMMU_ENABLE_REG 0x020
+ #define IOMMU_ENABLE_ENABLE BIT(0)
+
+@@ -92,6 +93,8 @@
+ #define NUM_PT_ENTRIES 256
+ #define PT_SIZE (NUM_PT_ENTRIES * PT_ENTRY_SIZE)
+
++#define SPAGE_SIZE 4096
++
+ struct sun50i_iommu {
+ struct iommu_device iommu;
+
+@@ -270,7 +273,7 @@ static u32 sun50i_mk_pte(phys_addr_t page, int prot)
+ enum sun50i_iommu_aci aci;
+ u32 flags = 0;
+
+- if (prot & (IOMMU_READ | IOMMU_WRITE))
++ if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE))
+ aci = SUN50I_IOMMU_ACI_RD_WR;
+ else if (prot & IOMMU_READ)
+ aci = SUN50I_IOMMU_ACI_RD;
+@@ -294,6 +297,62 @@ static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
+ dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
+ }
+
++static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu,
++ unsigned long iova)
++{
++ u32 reg;
++ int ret;
++
++ iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
++ iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(31, 12));
++ iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG,
++ IOMMU_TLB_IVLD_ENABLE_ENABLE);
++
++ ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
++ reg, !reg, 1, 2000);
++ if (ret)
++ dev_warn(iommu->dev, "TLB invalidation timed out!\n");
++}
++
++static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu,
++ unsigned long iova)
++{
++ u32 reg;
++ int ret;
++
++ iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
++ iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG,
++ IOMMU_PC_IVLD_ENABLE_ENABLE);
++
++ ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG,
++ reg, !reg, 1, 2000);
++ if (ret)
++ dev_warn(iommu->dev, "PTW cache invalidation timed out!\n");
++}
++
++static void sun50i_iommu_zap_range(struct sun50i_iommu *iommu,
++ unsigned long iova, size_t size)
++{
++ assert_spin_locked(&iommu->iommu_lock);
++
++ iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);
++
++ sun50i_iommu_zap_iova(iommu, iova);
++ sun50i_iommu_zap_iova(iommu, iova + SPAGE_SIZE);
++ if (size > SPAGE_SIZE) {
++ sun50i_iommu_zap_iova(iommu, iova + size);
++ sun50i_iommu_zap_iova(iommu, iova + size + SPAGE_SIZE);
++ }
++ sun50i_iommu_zap_ptw_cache(iommu, iova);
++ sun50i_iommu_zap_ptw_cache(iommu, iova + SZ_1M);
++ if (size > SZ_1M) {
++ sun50i_iommu_zap_ptw_cache(iommu, iova + size);
++ sun50i_iommu_zap_ptw_cache(iommu, iova + size + SZ_1M);
++ }
++
++ iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
++}
++
+ static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
+ {
+ u32 reg;
+@@ -343,6 +402,18 @@ static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+ }
+
++static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
++ unsigned long iova, size_t size)
++{
++ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
++ struct sun50i_iommu *iommu = sun50i_domain->iommu;
++ unsigned long flags;
++
++ spin_lock_irqsave(&iommu->iommu_lock, flags);
++ sun50i_iommu_zap_range(iommu, iova, size);
++ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
++}
++
+ static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
+ {
+@@ -511,7 +582,7 @@ static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
+ sun50i_iommu_free_page_table(iommu, drop_pt);
+ }
+
+- sun50i_table_flush(sun50i_domain, page_table, PT_SIZE);
++ sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES);
+ sun50i_table_flush(sun50i_domain, dte_addr, 1);
+
+ return page_table;
+@@ -601,7 +672,6 @@ static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
+ struct sun50i_iommu_domain *sun50i_domain;
+
+ if (type != IOMMU_DOMAIN_DMA &&
+- type != IOMMU_DOMAIN_IDENTITY &&
+ type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+@@ -766,6 +836,7 @@ static const struct iommu_ops sun50i_iommu_ops = {
+ .attach_dev = sun50i_iommu_attach_device,
+ .detach_dev = sun50i_iommu_detach_device,
+ .flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
++ .iotlb_sync_map = sun50i_iommu_iotlb_sync_map,
+ .iotlb_sync = sun50i_iommu_iotlb_sync,
+ .iova_to_phys = sun50i_iommu_iova_to_phys,
+ .map = sun50i_iommu_map,
+@@ -785,6 +856,8 @@ static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
+ report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
+ else
+ dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
++
++ sun50i_iommu_zap_range(iommu, iova, SPAGE_SIZE);
+ }
+
+ static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
+@@ -868,8 +941,8 @@ static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
+
+ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
+ {
++ u32 status, l1_status, l2_status, resets;
+ struct sun50i_iommu *iommu = dev_id;
+- u32 status;
+
+ spin_lock(&iommu->iommu_lock);
+
+@@ -879,6 +952,9 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
+ return IRQ_NONE;
+ }
+
++ l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG);
++ l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG);
++
+ if (status & IOMMU_INT_INVALID_L2PG)
+ sun50i_iommu_handle_pt_irq(iommu,
+ IOMMU_INT_ERR_ADDR_L2_REG,
+@@ -892,8 +968,9 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
+
+ iommu_write(iommu, IOMMU_INT_CLR_REG, status);
+
+- iommu_write(iommu, IOMMU_RESET_REG, ~status);
+- iommu_write(iommu, IOMMU_RESET_REG, status);
++ resets = (status | l1_status | l2_status) & IOMMU_INT_MASTER_MASK;
++ iommu_write(iommu, IOMMU_RESET_REG, ~resets);
++ iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL);
+
+ spin_unlock(&iommu->iommu_lock);
+
+diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c
+index b60e1853593f4..3989d16f997b3 100644
+--- a/drivers/irqchip/irq-gic-pm.c
++++ b/drivers/irqchip/irq-gic-pm.c
+@@ -102,7 +102,7 @@ static int gic_probe(struct platform_device *pdev)
+
+ pm_runtime_enable(dev);
+
+- ret = pm_runtime_get_sync(dev);
++ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto rpm_disable;
+
+diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
+index 0da8716f8f24b..c4584e2f0ad3d 100644
+--- a/drivers/irqchip/irq-loongson-liointc.c
++++ b/drivers/irqchip/irq-loongson-liointc.c
+@@ -207,10 +207,13 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
+ "reg-names", core_reg_names[i]);
+
+ if (index < 0)
+- goto out_iounmap;
++ continue;
+
+ priv->core_isr[i] = of_iomap(node, index);
+ }
++
++ if (!priv->core_isr[0])
++ goto out_iounmap;
+ }
+
+ /* Setup IRQ domain */
+diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
+index c01b9c2570053..03493cda65a37 100644
+--- a/drivers/irqchip/irq-loongson-pch-pic.c
++++ b/drivers/irqchip/irq-loongson-pch-pic.c
+@@ -159,6 +159,9 @@ static int pch_pic_domain_translate(struct irq_domain *d,
+ return -EINVAL;
+
+ if (of_node) {
++ if (fwspec->param_count < 2)
++ return -EINVAL;
++
+ *hwirq = fwspec->param[0] + priv->ht_vec_base;
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+ } else {
+diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
+index d8d48b1f7c29d..139f26b0a6ef8 100644
+--- a/drivers/irqchip/irq-ls-extirq.c
++++ b/drivers/irqchip/irq-ls-extirq.c
+@@ -203,7 +203,7 @@ ls_extirq_of_init(struct device_node *node, struct device_node *parent)
+ if (ret)
+ goto err_parse_map;
+
+- priv->big_endian = of_device_is_big_endian(parent);
++ priv->big_endian = of_device_is_big_endian(node->parent);
+ priv->is_ls1021a_or_ls1043a = of_device_is_compatible(node, "fsl,ls1021a-extirq") ||
+ of_device_is_compatible(node, "fsl,ls1043a-extirq");
+ raw_spin_lock_init(&priv->lock);
+diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c
+index 0dcbeb1a05a1f..91df62a64cd91 100644
+--- a/drivers/irqchip/irq-wpcm450-aic.c
++++ b/drivers/irqchip/irq-wpcm450-aic.c
+@@ -146,6 +146,7 @@ static int __init wpcm450_aic_of_init(struct device_node *node,
+ aic->regs = of_iomap(node, 0);
+ if (!aic->regs) {
+ pr_err("Failed to map WPCM450 AIC registers\n");
++ kfree(aic);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
+index 4f7eaa17fb274..e840609c50eb7 100644
+--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
++++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
+@@ -3217,6 +3217,7 @@ static int
+ hfcm_l1callback(struct dchannel *dch, u_int cmd)
+ {
+ struct hfc_multi *hc = dch->hw;
++ struct sk_buff_head free_queue;
+ u_long flags;
+
+ switch (cmd) {
+@@ -3245,6 +3246,7 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd)
+ l1_event(dch->l1, HW_POWERUP_IND);
+ break;
+ case HW_DEACT_REQ:
++ __skb_queue_head_init(&free_queue);
+ /* start deactivation */
+ spin_lock_irqsave(&hc->lock, flags);
+ if (hc->ctype == HFC_TYPE_E1) {
+@@ -3264,20 +3266,21 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd)
+ plxsd_checksync(hc, 0);
+ }
+ }
+- skb_queue_purge(&dch->squeue);
++ skb_queue_splice_init(&dch->squeue, &free_queue);
+ if (dch->tx_skb) {
+- dev_kfree_skb(dch->tx_skb);
++ __skb_queue_tail(&free_queue, dch->tx_skb);
+ dch->tx_skb = NULL;
+ }
+ dch->tx_idx = 0;
+ if (dch->rx_skb) {
+- dev_kfree_skb(dch->rx_skb);
++ __skb_queue_tail(&free_queue, dch->rx_skb);
+ dch->rx_skb = NULL;
+ }
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+ if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
+ del_timer(&dch->timer);
+ spin_unlock_irqrestore(&hc->lock, flags);
++ __skb_queue_purge(&free_queue);
+ break;
+ case HW_POWERUP_REQ:
+ spin_lock_irqsave(&hc->lock, flags);
+@@ -3384,6 +3387,9 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
+ case PH_DEACTIVATE_REQ:
+ test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+ if (dch->dev.D.protocol != ISDN_P_TE_S0) {
++ struct sk_buff_head free_queue;
++
++ __skb_queue_head_init(&free_queue);
+ spin_lock_irqsave(&hc->lock, flags);
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG
+@@ -3405,14 +3411,14 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
+ /* deactivate */
+ dch->state = 1;
+ }
+- skb_queue_purge(&dch->squeue);
++ skb_queue_splice_init(&dch->squeue, &free_queue);
+ if (dch->tx_skb) {
+- dev_kfree_skb(dch->tx_skb);
++ __skb_queue_tail(&free_queue, dch->tx_skb);
+ dch->tx_skb = NULL;
+ }
+ dch->tx_idx = 0;
+ if (dch->rx_skb) {
+- dev_kfree_skb(dch->rx_skb);
++ __skb_queue_tail(&free_queue, dch->rx_skb);
+ dch->rx_skb = NULL;
+ }
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+@@ -3424,6 +3430,7 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
+ #endif
+ ret = 0;
+ spin_unlock_irqrestore(&hc->lock, flags);
++ __skb_queue_purge(&free_queue);
+ } else
+ ret = l1_event(dch->l1, hh->prim);
+ break;
+diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
+index e964a8dd8512a..c0331b2680108 100644
+--- a/drivers/isdn/hardware/mISDN/hfcpci.c
++++ b/drivers/isdn/hardware/mISDN/hfcpci.c
+@@ -1617,16 +1617,19 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
+ test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+ spin_lock_irqsave(&hc->lock, flags);
+ if (hc->hw.protocol == ISDN_P_NT_S0) {
++ struct sk_buff_head free_queue;
++
++ __skb_queue_head_init(&free_queue);
+ /* prepare deactivation */
+ Write_hfc(hc, HFCPCI_STATES, 0x40);
+- skb_queue_purge(&dch->squeue);
++ skb_queue_splice_init(&dch->squeue, &free_queue);
+ if (dch->tx_skb) {
+- dev_kfree_skb(dch->tx_skb);
++ __skb_queue_tail(&free_queue, dch->tx_skb);
+ dch->tx_skb = NULL;
+ }
+ dch->tx_idx = 0;
+ if (dch->rx_skb) {
+- dev_kfree_skb(dch->rx_skb);
++ __skb_queue_tail(&free_queue, dch->rx_skb);
+ dch->rx_skb = NULL;
+ }
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+@@ -1639,10 +1642,12 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
+ hc->hw.mst_m &= ~HFCPCI_MASTER;
+ Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
+ ret = 0;
++ spin_unlock_irqrestore(&hc->lock, flags);
++ __skb_queue_purge(&free_queue);
+ } else {
+ ret = l1_event(dch->l1, hh->prim);
++ spin_unlock_irqrestore(&hc->lock, flags);
+ }
+- spin_unlock_irqrestore(&hc->lock, flags);
+ break;
+ }
+ if (!ret)
+diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
+index 651f2f8f685b7..1efd17979f240 100644
+--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
+@@ -326,20 +326,24 @@ hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
+ test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+
+ if (hw->protocol == ISDN_P_NT_S0) {
++ struct sk_buff_head free_queue;
++
++ __skb_queue_head_init(&free_queue);
+ hfcsusb_ph_command(hw, HFC_L1_DEACTIVATE_NT);
+ spin_lock_irqsave(&hw->lock, flags);
+- skb_queue_purge(&dch->squeue);
++ skb_queue_splice_init(&dch->squeue, &free_queue);
+ if (dch->tx_skb) {
+- dev_kfree_skb(dch->tx_skb);
++ __skb_queue_tail(&free_queue, dch->tx_skb);
+ dch->tx_skb = NULL;
+ }
+ dch->tx_idx = 0;
+ if (dch->rx_skb) {
+- dev_kfree_skb(dch->rx_skb);
++ __skb_queue_tail(&free_queue, dch->rx_skb);
+ dch->rx_skb = NULL;
+ }
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+ spin_unlock_irqrestore(&hw->lock, flags);
++ __skb_queue_purge(&free_queue);
+ #ifdef FIXME
+ if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
+ dchannel_sched_event(&hc->dch, D_CLEARBUSY);
+@@ -1330,7 +1334,7 @@ tx_iso_complete(struct urb *urb)
+ printk("\n");
+ }
+
+- dev_kfree_skb(tx_skb);
++ dev_consume_skb_irq(tx_skb);
+ tx_skb = NULL;
+ if (fifo->dch && get_next_dframe(fifo->dch))
+ tx_skb = fifo->dch->tx_skb;
+diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
+index 52b59b62f437c..b2f4c4ec7c567 100644
+--- a/drivers/leds/leds-is31fl319x.c
++++ b/drivers/leds/leds-is31fl319x.c
+@@ -38,6 +38,7 @@
+ #define IS31FL3190_CURRENT_uA_MIN 5000
+ #define IS31FL3190_CURRENT_uA_DEFAULT 42000
+ #define IS31FL3190_CURRENT_uA_MAX 42000
++#define IS31FL3190_CURRENT_SHIFT 2
+ #define IS31FL3190_CURRENT_MASK GENMASK(4, 2)
+ #define IS31FL3190_CURRENT_5_mA 0x02
+ #define IS31FL3190_CURRENT_10_mA 0x01
+@@ -553,7 +554,7 @@ static int is31fl319x_probe(struct i2c_client *client)
+ is31fl3196_db_to_gain(is31->audio_gain_db));
+ else
+ regmap_update_bits(is31->regmap, IS31FL3190_CURRENT, IS31FL3190_CURRENT_MASK,
+- is31fl3190_microamp_to_cs(dev, aggregated_led_microamp));
++ is31fl3190_microamp_to_cs(dev, aggregated_led_microamp) << IS31FL3190_CURRENT_SHIFT);
+
+ for (i = 0; i < is31->cdef->num_leds; i++) {
+ struct is31fl319x_led *led = &is31->leds[i];
+diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
+index 02f51cc618376..c1a56259226fb 100644
+--- a/drivers/leds/rgb/leds-qcom-lpg.c
++++ b/drivers/leds/rgb/leds-qcom-lpg.c
+@@ -602,8 +602,8 @@ static void lpg_brightness_set(struct lpg_led *led, struct led_classdev *cdev,
+ lpg_lut_sync(lpg, lut_mask);
+ }
+
+-static void lpg_brightness_single_set(struct led_classdev *cdev,
+- enum led_brightness value)
++static int lpg_brightness_single_set(struct led_classdev *cdev,
++ enum led_brightness value)
+ {
+ struct lpg_led *led = container_of(cdev, struct lpg_led, cdev);
+ struct mc_subled info;
+@@ -614,10 +614,12 @@ static void lpg_brightness_single_set(struct led_classdev *cdev,
+ lpg_brightness_set(led, cdev, &info);
+
+ mutex_unlock(&led->lpg->lock);
++
++ return 0;
+ }
+
+-static void lpg_brightness_mc_set(struct led_classdev *cdev,
+- enum led_brightness value)
++static int lpg_brightness_mc_set(struct led_classdev *cdev,
++ enum led_brightness value)
+ {
+ struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);
+ struct lpg_led *led = container_of(mc, struct lpg_led, mcdev);
+@@ -628,6 +630,8 @@ static void lpg_brightness_mc_set(struct led_classdev *cdev,
+ lpg_brightness_set(led, cdev, mc->subled_info);
+
+ mutex_unlock(&led->lpg->lock);
++
++ return 0;
+ }
+
+ static int lpg_blink_set(struct lpg_led *led,
+@@ -1118,7 +1122,7 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
+ led->mcdev.num_colors = num_channels;
+
+ cdev = &led->mcdev.led_cdev;
+- cdev->brightness_set = lpg_brightness_mc_set;
++ cdev->brightness_set_blocking = lpg_brightness_mc_set;
+ cdev->blink_set = lpg_blink_mc_set;
+
+ /* Register pattern accessors only if we have a LUT block */
+@@ -1132,7 +1136,7 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
+ return ret;
+
+ cdev = &led->cdev;
+- cdev->brightness_set = lpg_brightness_single_set;
++ cdev->brightness_set_blocking = lpg_brightness_single_set;
+ cdev->blink_set = lpg_blink_single_set;
+
+ /* Register pattern accessors only if we have a LUT block */
+@@ -1151,7 +1155,7 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
+ else
+ cdev->brightness = LED_OFF;
+
+- cdev->brightness_set(cdev, cdev->brightness);
++ cdev->brightness_set_blocking(cdev, cdev->brightness);
+
+ init_data.fwnode = of_fwnode_handle(np);
+
+diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
+index 9b63bd2551c63..cd4e34d15c26b 100644
+--- a/drivers/macintosh/macio-adb.c
++++ b/drivers/macintosh/macio-adb.c
+@@ -108,6 +108,10 @@ int macio_init(void)
+ return -ENXIO;
+ }
+ adb = ioremap(r.start, sizeof(struct adb_regs));
++ if (!adb) {
++ of_node_put(adbs);
++ return -ENOMEM;
++ }
+
+ out_8(&adb->ctrl.r, 0);
+ out_8(&adb->intr.r, 0);
+diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
+index 1ec1e5984563f..3bc1f374e6577 100644
+--- a/drivers/macintosh/macio_asic.c
++++ b/drivers/macintosh/macio_asic.c
+@@ -424,7 +424,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
+ if (of_device_register(&dev->ofdev) != 0) {
+ printk(KERN_DEBUG"macio: device registration error for %s!\n",
+ dev_name(&dev->ofdev.dev));
+- kfree(dev);
++ put_device(&dev->ofdev.dev);
+ return NULL;
+ }
+
+diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c
+index a47aef8df52fd..c6d4957c4da83 100644
+--- a/drivers/mailbox/arm_mhuv2.c
++++ b/drivers/mailbox/arm_mhuv2.c
+@@ -1062,8 +1062,8 @@ static int mhuv2_probe(struct amba_device *adev, const struct amba_id *id)
+ int ret = -EINVAL;
+
+ reg = devm_of_iomap(dev, dev->of_node, 0, NULL);
+- if (!reg)
+- return -ENOMEM;
++ if (IS_ERR(reg))
++ return PTR_ERR(reg);
+
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
+index cfacb3f320a64..853901acaeec2 100644
+--- a/drivers/mailbox/mailbox-mpfs.c
++++ b/drivers/mailbox/mailbox-mpfs.c
+@@ -2,7 +2,7 @@
+ /*
+ * Microchip PolarFire SoC (MPFS) system controller/mailbox controller driver
+ *
+- * Copyright (c) 2020 Microchip Corporation. All rights reserved.
++ * Copyright (c) 2020-2022 Microchip Corporation. All rights reserved.
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ *
+@@ -56,7 +56,7 @@
+ #define SCB_STATUS_NOTIFY_MASK BIT(SCB_STATUS_NOTIFY)
+
+ #define SCB_STATUS_POS (16)
+-#define SCB_STATUS_MASK GENMASK_ULL(SCB_STATUS_POS + SCB_MASK_WIDTH, SCB_STATUS_POS)
++#define SCB_STATUS_MASK GENMASK(SCB_STATUS_POS + SCB_MASK_WIDTH - 1, SCB_STATUS_POS)
+
+ struct mpfs_mbox {
+ struct mbox_controller controller;
+@@ -130,13 +130,38 @@ static void mpfs_mbox_rx_data(struct mbox_chan *chan)
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+ struct mpfs_mss_response *response = mbox->response;
+ u16 num_words = ALIGN((response->resp_size), (4)) / 4U;
+- u32 i;
++ u32 i, status;
+
+ if (!response->resp_msg) {
+ dev_err(mbox->dev, "failed to assign memory for response %d\n", -ENOMEM);
+ return;
+ }
+
++ /*
++ * The status is stored in bits 31:16 of the SERVICES_SR register.
++ * It is only valid when BUSY == 0.
++ * We should *never* get an interrupt while the controller is
++ * still in the busy state. If we do, something has gone badly
++ * wrong & the content of the mailbox would not be valid.
++ */
++ if (mpfs_mbox_busy(mbox)) {
++ dev_err(mbox->dev, "got an interrupt but system controller is busy\n");
++ response->resp_status = 0xDEAD;
++ return;
++ }
++
++ status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET);
++
++ /*
++ * If the status of the individual servers is non-zero, the service has
++ * failed. The contents of the mailbox at this point are not be valid,
++ * so don't bother reading them. Set the status so that the driver
++ * implementing the service can handle the result.
++ */
++ response->resp_status = (status & SCB_STATUS_MASK) >> SCB_STATUS_POS;
++ if (response->resp_status)
++ return;
++
+ if (!mpfs_mbox_busy(mbox)) {
+ for (i = 0; i < num_words; i++) {
+ response->resp_msg[i] =
+diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
+index 3c2bc0ca454cf..105d46c9801ba 100644
+--- a/drivers/mailbox/pcc.c
++++ b/drivers/mailbox/pcc.c
+@@ -743,6 +743,7 @@ static int __init pcc_init(void)
+
+ if (IS_ERR(pcc_pdev)) {
+ pr_debug("Err creating PCC platform bundle\n");
++ pcc_chan_count = 0;
+ return PTR_ERR(pcc_pdev);
+ }
+
+diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
+index 31a0fa9142744..12e004ff1a147 100644
+--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
++++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
+@@ -493,6 +493,7 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
+ ret = device_register(&ipi_mbox->dev);
+ if (ret) {
+ dev_err(dev, "Failed to register ipi mbox dev.\n");
++ put_device(&ipi_mbox->dev);
+ return ret;
+ }
+ mdev = &ipi_mbox->dev;
+@@ -619,7 +620,8 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
+ ipi_mbox = &pdata->ipi_mboxes[i];
+ if (ipi_mbox->dev.parent) {
+ mbox_controller_unregister(&ipi_mbox->mbox);
+- device_unregister(&ipi_mbox->dev);
++ if (device_is_registered(&ipi_mbox->dev))
++ device_unregister(&ipi_mbox->dev);
+ }
+ }
+ }
+diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
+index 338fc889b357a..b8ad4f16b4acd 100644
+--- a/drivers/mcb/mcb-core.c
++++ b/drivers/mcb/mcb-core.c
+@@ -71,8 +71,10 @@ static int mcb_probe(struct device *dev)
+
+ get_device(dev);
+ ret = mdrv->probe(mdev, found_id);
+- if (ret)
++ if (ret) {
+ module_put(carrier_mod);
++ put_device(dev);
++ }
+
+ return ret;
+ }
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index 0266bfddfbe27..aa6938da0db85 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -108,7 +108,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
+ return 0;
+
+ err:
+- mcb_free_dev(mdev);
++ put_device(&mdev->dev);
+
+ return ret;
+ }
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 95a1ee3d314eb..e30c2d2bc9c78 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -732,28 +732,48 @@ static char *_dm_claim_ptr = "I belong to device-mapper";
+ /*
+ * Open a table device so we can use it as a map destination.
+ */
+-static int open_table_device(struct table_device *td, dev_t dev,
+- struct mapped_device *md)
++static struct table_device *open_table_device(struct mapped_device *md,
++ dev_t dev, fmode_t mode)
+ {
++ struct table_device *td;
+ struct block_device *bdev;
+ u64 part_off;
+ int r;
+
+- BUG_ON(td->dm_dev.bdev);
++ td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
++ if (!td)
++ return ERR_PTR(-ENOMEM);
++ refcount_set(&td->count, 1);
+
+- bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
+- if (IS_ERR(bdev))
+- return PTR_ERR(bdev);
++ bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, _dm_claim_ptr);
++ if (IS_ERR(bdev)) {
++ r = PTR_ERR(bdev);
++ goto out_free_td;
++ }
+
+- r = bd_link_disk_holder(bdev, dm_disk(md));
+- if (r) {
+- blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
+- return r;
++ /*
++ * We can be called before the dm disk is added. In that case we can't
++	 * register the holder relation here. It will be done once add_disk()
++	 * has been called.
++ */
++ if (md->disk->slave_dir) {
++ r = bd_link_disk_holder(bdev, md->disk);
++ if (r)
++ goto out_blkdev_put;
+ }
+
++ td->dm_dev.mode = mode;
+ td->dm_dev.bdev = bdev;
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
+- return 0;
++ format_dev_t(td->dm_dev.name, dev);
++ list_add(&td->list, &md->table_devices);
++ return td;
++
++out_blkdev_put:
++ blkdev_put(bdev, mode | FMODE_EXCL);
++out_free_td:
++ kfree(td);
++ return ERR_PTR(r);
+ }
+
+ /*
+@@ -761,14 +781,12 @@ static int open_table_device(struct table_device *td, dev_t dev,
+ */
+ static void close_table_device(struct table_device *td, struct mapped_device *md)
+ {
+- if (!td->dm_dev.bdev)
+- return;
+-
+- bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
++ if (md->disk->slave_dir)
++ bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
+ blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
+ put_dax(td->dm_dev.dax_dev);
+- td->dm_dev.bdev = NULL;
+- td->dm_dev.dax_dev = NULL;
++ list_del(&td->list);
++ kfree(td);
+ }
+
+ static struct table_device *find_table_device(struct list_head *l, dev_t dev,
+@@ -786,31 +804,16 @@ static struct table_device *find_table_device(struct list_head *l, dev_t dev,
+ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
+ struct dm_dev **result)
+ {
+- int r;
+ struct table_device *td;
+
+ mutex_lock(&md->table_devices_lock);
+ td = find_table_device(&md->table_devices, dev, mode);
+ if (!td) {
+- td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
+- if (!td) {
++ td = open_table_device(md, dev, mode);
++ if (IS_ERR(td)) {
+ mutex_unlock(&md->table_devices_lock);
+- return -ENOMEM;
++ return PTR_ERR(td);
+ }
+-
+- td->dm_dev.mode = mode;
+- td->dm_dev.bdev = NULL;
+-
+- if ((r = open_table_device(td, dev, md))) {
+- mutex_unlock(&md->table_devices_lock);
+- kfree(td);
+- return r;
+- }
+-
+- format_dev_t(td->dm_dev.name, dev);
+-
+- refcount_set(&td->count, 1);
+- list_add(&td->list, &md->table_devices);
+ } else {
+ refcount_inc(&td->count);
+ }
+@@ -825,11 +828,8 @@ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
+ struct table_device *td = container_of(d, struct table_device, dm_dev);
+
+ mutex_lock(&md->table_devices_lock);
+- if (refcount_dec_and_test(&td->count)) {
++ if (refcount_dec_and_test(&td->count))
+ close_table_device(td, md);
+- list_del(&td->list);
+- kfree(td);
+- }
+ mutex_unlock(&md->table_devices_lock);
+ }
+
+@@ -1972,8 +1972,21 @@ static void cleanup_mapped_device(struct mapped_device *md)
+ md->disk->private_data = NULL;
+ spin_unlock(&_minor_lock);
+ if (dm_get_md_type(md) != DM_TYPE_NONE) {
++ struct table_device *td;
++
+ dm_sysfs_exit(md);
++ list_for_each_entry(td, &md->table_devices, list) {
++ bd_unlink_disk_holder(td->dm_dev.bdev,
++ md->disk);
++ }
++
++ /*
++	 * Hold the lock to make sure del_gendisk() won't run concurrently
++ * with open/close_table_device().
++ */
++ mutex_lock(&md->table_devices_lock);
+ del_gendisk(md->disk);
++ mutex_unlock(&md->table_devices_lock);
+ }
+ dm_queue_destroy_crypto_profile(md->queue);
+ put_disk(md->disk);
+@@ -2305,6 +2318,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
+ {
+ enum dm_queue_mode type = dm_table_get_type(t);
+ struct queue_limits limits;
++ struct table_device *td;
+ int r;
+
+ switch (type) {
+@@ -2333,17 +2347,40 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
+ if (r)
+ return r;
+
++ /*
++	 * Hold the lock to make sure add_disk() and del_gendisk() won't run concurrently
++ * with open_table_device() and close_table_device().
++ */
++ mutex_lock(&md->table_devices_lock);
+ r = add_disk(md->disk);
++ mutex_unlock(&md->table_devices_lock);
+ if (r)
+ return r;
+
+- r = dm_sysfs_init(md);
+- if (r) {
+- del_gendisk(md->disk);
+- return r;
++ /*
++ * Register the holder relationship for devices added before the disk
++ * was live.
++ */
++ list_for_each_entry(td, &md->table_devices, list) {
++ r = bd_link_disk_holder(td->dm_dev.bdev, md->disk);
++ if (r)
++ goto out_undo_holders;
+ }
++
++ r = dm_sysfs_init(md);
++ if (r)
++ goto out_undo_holders;
++
+ md->type = type;
+ return 0;
++
++out_undo_holders:
++ list_for_each_entry_continue_reverse(td, &md->table_devices, list)
++ bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
++ mutex_lock(&md->table_devices_lock);
++ del_gendisk(md->disk);
++ mutex_unlock(&md->table_devices_lock);
++ return r;
+ }
+
+ struct mapped_device *dm_get_md(dev_t dev)
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index bf6dffadbe6f6..63ece30114e53 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -2195,20 +2195,23 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+
+ if (set) {
+ bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
+- if (*bmc_new == 0) {
+- /* need to set on-disk bits too. */
+- sector_t end = block + new_blocks;
+- sector_t start = block >> chunkshift;
+- start <<= chunkshift;
+- while (start < end) {
+- md_bitmap_file_set_bit(bitmap, block);
+- start += 1 << chunkshift;
++ if (bmc_new) {
++ if (*bmc_new == 0) {
++ /* need to set on-disk bits too. */
++ sector_t end = block + new_blocks;
++ sector_t start = block >> chunkshift;
++
++ start <<= chunkshift;
++ while (start < end) {
++ md_bitmap_file_set_bit(bitmap, block);
++ start += 1 << chunkshift;
++ }
++ *bmc_new = 2;
++ md_bitmap_count_page(&bitmap->counts, block, 1);
++ md_bitmap_set_pending(&bitmap->counts, block);
+ }
+- *bmc_new = 2;
+- md_bitmap_count_page(&bitmap->counts, block, 1);
+- md_bitmap_set_pending(&bitmap->counts, block);
++ *bmc_new |= NEEDED_MASK;
+ }
+- *bmc_new |= NEEDED_MASK;
+ if (new_blocks < old_blocks)
+ old_blocks = new_blocks;
+ }
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index a467b492d4ad3..fd82881761d34 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -509,13 +509,14 @@ static void md_end_flush(struct bio *bio)
+ struct md_rdev *rdev = bio->bi_private;
+ struct mddev *mddev = rdev->mddev;
+
++ bio_put(bio);
++
+ rdev_dec_pending(rdev, mddev);
+
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+ /* The pre-request flush has finished */
+ queue_work(md_wq, &mddev->flush_work);
+ }
+- bio_put(bio);
+ }
+
+ static void md_submit_flush_data(struct work_struct *ws);
+@@ -913,10 +914,12 @@ static void super_written(struct bio *bio)
+ } else
+ clear_bit(LastDev, &rdev->flags);
+
++ bio_put(bio);
++
++ rdev_dec_pending(rdev, mddev);
++
+ if (atomic_dec_and_test(&mddev->pending_writes))
+ wake_up(&mddev->sb_wait);
+- rdev_dec_pending(rdev, mddev);
+- bio_put(bio);
+ }
+
+ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 857c49399c28e..b536befd88988 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -398,7 +398,6 @@ static int raid0_run(struct mddev *mddev)
+
+ blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
+- blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);
+
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ blk_queue_io_opt(mddev->queue,
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 05d8438cfec88..58f705f429480 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -3159,6 +3159,7 @@ static int raid1_run(struct mddev *mddev)
+ * RAID1 needs at least one disk in active
+ */
+ if (conf->raid_disks - mddev->degraded < 1) {
++ md_unregister_thread(&conf->thread);
+ ret = -EINVAL;
+ goto abort;
+ }
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 3aa8b6e11d585..9a6503f5cb982 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -4145,8 +4145,6 @@ static int raid10_run(struct mddev *mddev)
+ conf->thread = NULL;
+
+ if (mddev->queue) {
+- blk_queue_max_discard_sectors(mddev->queue,
+- UINT_MAX);
+ blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ raid10_set_io_opt(conf);
+diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
+index 15a08d8c69ef8..c2d2792227f86 100644
+--- a/drivers/media/dvb-core/dvb_ca_en50221.c
++++ b/drivers/media/dvb-core/dvb_ca_en50221.c
+@@ -157,7 +157,7 @@ static void dvb_ca_private_free(struct dvb_ca_private *ca)
+ {
+ unsigned int i;
+
+- dvb_free_device(ca->dvbdev);
++ dvb_device_put(ca->dvbdev);
+ for (i = 0; i < ca->slot_count; i++)
+ vfree(ca->slot_info[i].rx_buffer.data);
+
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index 48e735cdbe6bb..c41a7e5c2b928 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -136,7 +136,7 @@ static void __dvb_frontend_free(struct dvb_frontend *fe)
+ struct dvb_frontend_private *fepriv = fe->frontend_priv;
+
+ if (fepriv)
+- dvb_free_device(fepriv->dvbdev);
++ dvb_device_put(fepriv->dvbdev);
+
+ dvb_frontend_invoke_release(fe, fe->ops.release);
+
+@@ -2986,6 +2986,7 @@ int dvb_register_frontend(struct dvb_adapter *dvb,
+ .name = fe->ops.info.name,
+ #endif
+ };
++ int ret;
+
+ dev_dbg(dvb->device, "%s:\n", __func__);
+
+@@ -3019,8 +3020,13 @@ int dvb_register_frontend(struct dvb_adapter *dvb,
+ "DVB: registering adapter %i frontend %i (%s)...\n",
+ fe->dvb->num, fe->id, fe->ops.info.name);
+
+- dvb_register_device(fe->dvb, &fepriv->dvbdev, &dvbdev_template,
++ ret = dvb_register_device(fe->dvb, &fepriv->dvbdev, &dvbdev_template,
+ fe, DVB_DEVICE_FRONTEND, 0);
++ if (ret) {
++ dvb_frontend_put(fe);
++ mutex_unlock(&frontend_mutex);
++ return ret;
++ }
+
+ /*
+ * Initialize the cache to the proper values according with the
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index 675d877a67b25..9934728734af9 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -97,7 +97,7 @@ static int dvb_device_open(struct inode *inode, struct file *file)
+ new_fops = fops_get(dvbdev->fops);
+ if (!new_fops)
+ goto fail;
+- file->private_data = dvbdev;
++ file->private_data = dvb_device_get(dvbdev);
+ replace_fops(file, new_fops);
+ if (file->f_op->open)
+ err = file->f_op->open(inode, file);
+@@ -161,6 +161,9 @@ int dvb_generic_release(struct inode *inode, struct file *file)
+ }
+
+ dvbdev->users++;
++
++ dvb_device_put(dvbdev);
++
+ return 0;
+ }
+ EXPORT_SYMBOL(dvb_generic_release);
+@@ -478,6 +481,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ }
+
+ memcpy(dvbdev, template, sizeof(struct dvb_device));
++ kref_init(&dvbdev->ref);
+ dvbdev->type = type;
+ dvbdev->id = id;
+ dvbdev->adapter = adap;
+@@ -508,7 +512,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ #endif
+
+ dvbdev->minor = minor;
+- dvb_minors[minor] = dvbdev;
++ dvb_minors[minor] = dvb_device_get(dvbdev);
+ up_write(&minor_rwsem);
+
+ ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
+@@ -553,6 +557,7 @@ void dvb_remove_device(struct dvb_device *dvbdev)
+
+ down_write(&minor_rwsem);
+ dvb_minors[dvbdev->minor] = NULL;
++ dvb_device_put(dvbdev);
+ up_write(&minor_rwsem);
+
+ dvb_media_device_free(dvbdev);
+@@ -564,21 +569,34 @@ void dvb_remove_device(struct dvb_device *dvbdev)
+ EXPORT_SYMBOL(dvb_remove_device);
+
+
+-void dvb_free_device(struct dvb_device *dvbdev)
++static void dvb_free_device(struct kref *ref)
+ {
+- if (!dvbdev)
+- return;
++ struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref);
+
+ kfree (dvbdev->fops);
+ kfree (dvbdev);
+ }
+-EXPORT_SYMBOL(dvb_free_device);
++
++
++struct dvb_device *dvb_device_get(struct dvb_device *dvbdev)
++{
++ kref_get(&dvbdev->ref);
++ return dvbdev;
++}
++EXPORT_SYMBOL(dvb_device_get);
++
++
++void dvb_device_put(struct dvb_device *dvbdev)
++{
++ if (dvbdev)
++ kref_put(&dvbdev->ref, dvb_free_device);
++}
+
+
+ void dvb_unregister_device(struct dvb_device *dvbdev)
+ {
+ dvb_remove_device(dvbdev);
+- dvb_free_device(dvbdev);
++ dvb_device_put(dvbdev);
+ }
+ EXPORT_SYMBOL(dvb_unregister_device);
+
+diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
+index da0ff7b44da41..68b92b4419cff 100644
+--- a/drivers/media/dvb-frontends/bcm3510.c
++++ b/drivers/media/dvb-frontends/bcm3510.c
+@@ -649,6 +649,7 @@ static int bcm3510_download_firmware(struct dvb_frontend* fe)
+ deb_info("firmware chunk, addr: 0x%04x, len: 0x%04x, total length: 0x%04zx\n",addr,len,fw->size);
+ if ((ret = bcm3510_write_ram(st,addr,&b[i+4],len)) < 0) {
+ err("firmware download failed: %d\n",ret);
++ release_firmware(fw);
+ return ret;
+ }
+ i += 4 + len;
+diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
+index 516de278cc493..a12fedcc3a1ce 100644
+--- a/drivers/media/i2c/ad5820.c
++++ b/drivers/media/i2c/ad5820.c
+@@ -327,18 +327,18 @@ static int ad5820_probe(struct i2c_client *client,
+
+ ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL);
+ if (ret < 0)
+- goto cleanup2;
++ goto clean_mutex;
+
+ ret = v4l2_async_register_subdev(&coil->subdev);
+ if (ret < 0)
+- goto cleanup;
++ goto clean_entity;
+
+ return ret;
+
+-cleanup2:
+- mutex_destroy(&coil->power_lock);
+-cleanup:
++clean_entity:
+ media_entity_cleanup(&coil->subdev.entity);
++clean_mutex:
++ mutex_destroy(&coil->power_lock);
+ return ret;
+ }
+
+diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c
+index 02eabe10ab970..00095c7762c24 100644
+--- a/drivers/media/i2c/adv748x/adv748x-afe.c
++++ b/drivers/media/i2c/adv748x/adv748x-afe.c
+@@ -521,6 +521,10 @@ int adv748x_afe_init(struct adv748x_afe *afe)
+ }
+ }
+
++ adv748x_afe_s_input(afe, afe->input);
++
++ adv_dbg(state, "AFE Default input set to %d\n", afe->input);
++
+ /* Entity pads and sinks are 0-indexed to match the pads */
+ for (i = ADV748X_AFE_SINK_AIN0; i <= ADV748X_AFE_SINK_AIN7; i++)
+ afe->pads[i].flags = MEDIA_PAD_FL_SINK;
+diff --git a/drivers/media/i2c/dw9768.c b/drivers/media/i2c/dw9768.c
+index 0f47ef015a1d3..83a3ee275bbe8 100644
+--- a/drivers/media/i2c/dw9768.c
++++ b/drivers/media/i2c/dw9768.c
+@@ -414,6 +414,7 @@ static int dw9768_probe(struct i2c_client *client)
+ {
+ struct device *dev = &client->dev;
+ struct dw9768 *dw9768;
++ bool full_power;
+ unsigned int i;
+ int ret;
+
+@@ -469,13 +470,23 @@ static int dw9768_probe(struct i2c_client *client)
+
+ dw9768->sd.entity.function = MEDIA_ENT_F_LENS;
+
++ /*
++ * Figure out whether we're going to power up the device here. Generally
++ * this is done if CONFIG_PM is disabled in a DT system or the device is
++ * to be powered on in an ACPI system. Similarly for power off in
++ * remove.
++ */
+ pm_runtime_enable(dev);
+- if (!pm_runtime_enabled(dev)) {
++ full_power = (is_acpi_node(dev_fwnode(dev)) &&
++ acpi_dev_state_d0(dev)) ||
++ (is_of_node(dev_fwnode(dev)) && !pm_runtime_enabled(dev));
++ if (full_power) {
+ ret = dw9768_runtime_resume(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to power on: %d\n", ret);
+ goto err_clean_entity;
+ }
++ pm_runtime_set_active(dev);
+ }
+
+ ret = v4l2_async_register_subdev(&dw9768->sd);
+@@ -484,14 +495,17 @@ static int dw9768_probe(struct i2c_client *client)
+ goto err_power_off;
+ }
+
++ pm_runtime_idle(dev);
++
+ return 0;
+
+ err_power_off:
+- if (pm_runtime_enabled(dev))
+- pm_runtime_disable(dev);
+- else
++ if (full_power) {
+ dw9768_runtime_suspend(dev);
++ pm_runtime_set_suspended(dev);
++ }
+ err_clean_entity:
++ pm_runtime_disable(dev);
+ media_entity_cleanup(&dw9768->sd.entity);
+ err_free_handler:
+ v4l2_ctrl_handler_free(&dw9768->ctrls);
+@@ -503,14 +517,17 @@ static void dw9768_remove(struct i2c_client *client)
+ {
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct dw9768 *dw9768 = sd_to_dw9768(sd);
++ struct device *dev = &client->dev;
+
+ v4l2_async_unregister_subdev(&dw9768->sd);
+ v4l2_ctrl_handler_free(&dw9768->ctrls);
+ media_entity_cleanup(&dw9768->sd.entity);
+- pm_runtime_disable(&client->dev);
+- if (!pm_runtime_status_suspended(&client->dev))
+- dw9768_runtime_suspend(&client->dev);
+- pm_runtime_set_suspended(&client->dev);
++ if ((is_acpi_node(dev_fwnode(dev)) && acpi_dev_state_d0(dev)) ||
++ (is_of_node(dev_fwnode(dev)) && !pm_runtime_enabled(dev))) {
++ dw9768_runtime_suspend(dev);
++ pm_runtime_set_suspended(dev);
++ }
++ pm_runtime_disable(dev);
+ }
+
+ static const struct of_device_id dw9768_of_table[] = {
+diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
+index c5b69823f257e..7c61873b71981 100644
+--- a/drivers/media/i2c/hi846.c
++++ b/drivers/media/i2c/hi846.c
+@@ -2008,22 +2008,24 @@ static int hi846_parse_dt(struct hi846 *hi846, struct device *dev)
+ bus_cfg.bus.mipi_csi2.num_data_lanes != 4) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+- v4l2_fwnode_endpoint_free(&bus_cfg);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto check_hwcfg_error;
+ }
+
+ hi846->nr_lanes = bus_cfg.bus.mipi_csi2.num_data_lanes;
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "link-frequency property not found in DT\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto check_hwcfg_error;
+ }
+
+ /* Check that link frequences for all the modes are in device tree */
+ fq = hi846_check_link_freqs(hi846, &bus_cfg);
+ if (fq) {
+ dev_err(dev, "Link frequency of %lld is not supported\n", fq);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto check_hwcfg_error;
+ }
+
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+@@ -2044,6 +2046,10 @@ static int hi846_parse_dt(struct hi846 *hi846, struct device *dev)
+ }
+
+ return 0;
++
++check_hwcfg_error:
++ v4l2_fwnode_endpoint_free(&bus_cfg);
++ return ret;
+ }
+
+ static int hi846_probe(struct i2c_client *client)
+diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
+index 45f7b5e52bc39..b69db6fc82618 100644
+--- a/drivers/media/i2c/mt9p031.c
++++ b/drivers/media/i2c/mt9p031.c
+@@ -702,7 +702,6 @@ static int mt9p031_init_cfg(struct v4l2_subdev *subdev,
+ V4L2_SUBDEV_FORMAT_TRY;
+
+ crop = __mt9p031_get_pad_crop(mt9p031, sd_state, 0, which);
+- v4l2_subdev_get_try_crop(subdev, sd_state, 0);
+ crop->left = MT9P031_COLUMN_START_DEF;
+ crop->top = MT9P031_ROW_START_DEF;
+ crop->width = MT9P031_WINDOW_WIDTH_DEF;
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 2d740397a5d4d..3f6d715efa823 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -3817,7 +3817,8 @@ static int ov5640_probe(struct i2c_client *client)
+ sensor->current_mode =
+ &ov5640_mode_data[OV5640_MODE_VGA_640_480];
+ sensor->last_mode = sensor->current_mode;
+- sensor->current_link_freq = OV5640_DEFAULT_LINK_FREQ;
++ sensor->current_link_freq =
++ ov5640_csi2_link_freqs[OV5640_DEFAULT_LINK_FREQ];
+
+ sensor->ae_target = 52;
+
+diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c
+index 84604ea7bdf9e..17465fcf28e33 100644
+--- a/drivers/media/i2c/ov5648.c
++++ b/drivers/media/i2c/ov5648.c
+@@ -2597,6 +2597,7 @@ static void ov5648_remove(struct i2c_client *client)
+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ mutex_destroy(&sensor->mutex);
+ media_entity_cleanup(&subdev->entity);
++ v4l2_fwnode_endpoint_free(&sensor->endpoint);
+ }
+
+ static const struct dev_pm_ops ov5648_pm_ops = {
+diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
+index d5f32e3ff5441..754c8be1b6d8b 100644
+--- a/drivers/media/pci/saa7164/saa7164-core.c
++++ b/drivers/media/pci/saa7164/saa7164-core.c
+@@ -1259,7 +1259,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
+
+ if (saa7164_dev_setup(dev) < 0) {
+ err = -EINVAL;
+- goto fail_free;
++ goto fail_dev;
+ }
+
+ /* print pci info */
+@@ -1427,6 +1427,8 @@ fail_fw:
+
+ fail_irq:
+ saa7164_dev_unregister(dev);
++fail_dev:
++ pci_disable_device(pci_dev);
+ fail_free:
+ v4l2_device_unregister(&dev->v4l2_dev);
+ kfree(dev);
+diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
+index 4a546eeefe38f..6d87fbb0ee04a 100644
+--- a/drivers/media/pci/solo6x10/solo6x10-core.c
++++ b/drivers/media/pci/solo6x10/solo6x10-core.c
+@@ -420,6 +420,7 @@ static int solo_sysfs_init(struct solo_dev *solo_dev)
+ solo_dev->nr_chans);
+
+ if (device_register(dev)) {
++ put_device(dev);
+ dev->parent = NULL;
+ return -ENOMEM;
+ }
+diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
+index feb75dc204de8..b27e6bed85f0f 100644
+--- a/drivers/media/platform/amphion/vdec.c
++++ b/drivers/media/platform/amphion/vdec.c
+@@ -286,6 +286,7 @@ static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+ struct vpu_format *cur_fmt;
+ int i;
+
++ vpu_inst_lock(inst);
+ cur_fmt = vpu_get_format(inst, f->type);
+
+ pixmp->pixelformat = cur_fmt->pixfmt;
+@@ -303,6 +304,7 @@ static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+ f->fmt.pix_mp.xfer_func = vdec->codec_info.transfer_chars;
+ f->fmt.pix_mp.ycbcr_enc = vdec->codec_info.matrix_coeffs;
+ f->fmt.pix_mp.quantization = vdec->codec_info.full_range;
++ vpu_inst_unlock(inst);
+
+ return 0;
+ }
+@@ -753,6 +755,9 @@ static bool vdec_check_source_change(struct vpu_inst *inst)
+ if (!inst->fh.m2m_ctx)
+ return false;
+
++ if (vdec->reset_codec)
++ return false;
++
+ if (!vb2_is_streaming(v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx)))
+ return true;
+ fmt = vpu_helper_find_format(inst, inst->cap_format.type, vdec->codec_info.pixfmt);
+@@ -1088,7 +1093,8 @@ static void vdec_event_seq_hdr(struct vpu_inst *inst, struct vpu_dec_codec_info
+ vdec->seq_tag = vdec->codec_info.tag;
+ if (vdec->is_source_changed) {
+ vdec_update_state(inst, VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE, 0);
+- vpu_notify_source_change(inst);
++ vdec->source_change++;
++ vdec_handle_resolution_change(inst);
+ vdec->is_source_changed = false;
+ }
+ }
+@@ -1335,6 +1341,8 @@ static void vdec_abort(struct vpu_inst *inst)
+ vdec->decoded_frame_count,
+ vdec->display_frame_count,
+ vdec->sequence);
++ if (!vdec->seq_hdr_found)
++ vdec->reset_codec = true;
+ vdec->params.end_flag = 0;
+ vdec->drain = 0;
+ vdec->params.frame_count = 0;
+@@ -1342,6 +1350,7 @@ static void vdec_abort(struct vpu_inst *inst)
+ vdec->display_frame_count = 0;
+ vdec->sequence = 0;
+ vdec->aborting = false;
++ inst->extra_size = 0;
+ }
+
+ static void vdec_stop(struct vpu_inst *inst, bool free)
+@@ -1464,8 +1473,7 @@ static int vdec_start_session(struct vpu_inst *inst, u32 type)
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+- if (inst->state == VPU_CODEC_STATE_SEEK)
+- vdec_update_state(inst, vdec->state, 1);
++ vdec_update_state(inst, vdec->state, 1);
+ vdec->eos_received = 0;
+ vpu_process_output_buffer(inst);
+ } else {
+@@ -1629,6 +1637,7 @@ static int vdec_open(struct file *file)
+ return ret;
+
+ vdec->fixed_fmt = false;
++ vdec->state = VPU_CODEC_STATE_ACTIVE;
+ inst->min_buffer_cap = VDEC_MIN_BUFFER_CAP;
+ inst->min_buffer_out = VDEC_MIN_BUFFER_OUT;
+ vdec_init(file);
+diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
+index beac0309ca8d9..048c23c2bf4db 100644
+--- a/drivers/media/platform/amphion/vpu.h
++++ b/drivers/media/platform/amphion/vpu.h
+@@ -13,6 +13,7 @@
+ #include <linux/mailbox_controller.h>
+ #include <linux/kfifo.h>
+
++#define VPU_TIMEOUT_WAKEUP msecs_to_jiffies(200)
+ #define VPU_TIMEOUT msecs_to_jiffies(1000)
+ #define VPU_INST_NULL_ID (-1L)
+ #define VPU_MSG_BUFFER_SIZE (8192)
+diff --git a/drivers/media/platform/amphion/vpu_cmds.c b/drivers/media/platform/amphion/vpu_cmds.c
+index f4d7ca78a6212..fa581ba6bab2d 100644
+--- a/drivers/media/platform/amphion/vpu_cmds.c
++++ b/drivers/media/platform/amphion/vpu_cmds.c
+@@ -269,7 +269,7 @@ exit:
+ return flag;
+ }
+
+-static int sync_session_response(struct vpu_inst *inst, unsigned long key)
++static int sync_session_response(struct vpu_inst *inst, unsigned long key, long timeout, int try)
+ {
+ struct vpu_core *core;
+
+@@ -279,10 +279,12 @@ static int sync_session_response(struct vpu_inst *inst, unsigned long key)
+ core = inst->core;
+
+ call_void_vop(inst, wait_prepare);
+- wait_event_timeout(core->ack_wq, check_is_responsed(inst, key), VPU_TIMEOUT);
++ wait_event_timeout(core->ack_wq, check_is_responsed(inst, key), timeout);
+ call_void_vop(inst, wait_finish);
+
+ if (!check_is_responsed(inst, key)) {
++ if (try)
++ return -EINVAL;
+ dev_err(inst->dev, "[%d] sync session timeout\n", inst->id);
+ set_bit(inst->id, &core->hang_mask);
+ mutex_lock(&inst->core->cmd_lock);
+@@ -294,6 +296,19 @@ static int sync_session_response(struct vpu_inst *inst, unsigned long key)
+ return 0;
+ }
+
++static void vpu_core_keep_active(struct vpu_core *core)
++{
++ struct vpu_rpc_event pkt;
++
++ memset(&pkt, 0, sizeof(pkt));
++ vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_NOOP, NULL);
++
++ dev_dbg(core->dev, "try to wake up\n");
++ mutex_lock(&core->cmd_lock);
++ vpu_cmd_send(core, &pkt);
++ mutex_unlock(&core->cmd_lock);
++}
++
+ static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
+ {
+ unsigned long key;
+@@ -304,9 +319,25 @@ static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
+ return -EINVAL;
+
+ ret = vpu_request_cmd(inst, id, data, &key, &sync);
+- if (!ret && sync)
+- ret = sync_session_response(inst, key);
++ if (ret)
++ goto exit;
++
++	/* Work around a firmware issue:
++	 * the firmware should be woken up by a start or configure command,
++	 * but there is a very small chance that it fails to wake up.
++	 * In that case, try to wake the firmware again by sending a noop command.
++	 */
++ if (sync && (id == VPU_CMD_ID_CONFIGURE_CODEC || id == VPU_CMD_ID_START)) {
++ if (sync_session_response(inst, key, VPU_TIMEOUT_WAKEUP, 1))
++ vpu_core_keep_active(inst->core);
++ else
++ goto exit;
++ }
++
++ if (sync)
++ ret = sync_session_response(inst, key, VPU_TIMEOUT, 0);
+
++exit:
+ if (ret)
+ dev_err(inst->dev, "[%d] send cmd(0x%x) fail\n", inst->id, id);
+
+diff --git a/drivers/media/platform/amphion/vpu_drv.c b/drivers/media/platform/amphion/vpu_drv.c
+index 9d5a5075343d3..f01ce49d27e80 100644
+--- a/drivers/media/platform/amphion/vpu_drv.c
++++ b/drivers/media/platform/amphion/vpu_drv.c
+@@ -245,7 +245,11 @@ static int __init vpu_driver_init(void)
+ if (ret)
+ return ret;
+
+- return vpu_core_driver_init();
++ ret = vpu_core_driver_init();
++ if (ret)
++ platform_driver_unregister(&amphion_vpu_driver);
++
++ return ret;
+ }
+
+ static void __exit vpu_driver_exit(void)
+diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
+index 51e0702f9ae17..9f2890730fd70 100644
+--- a/drivers/media/platform/amphion/vpu_malone.c
++++ b/drivers/media/platform/amphion/vpu_malone.c
+@@ -692,6 +692,7 @@ int vpu_malone_set_decode_params(struct vpu_shared_addr *shared,
+ }
+
+ static struct vpu_pair malone_cmds[] = {
++ {VPU_CMD_ID_NOOP, VID_API_CMD_NULL},
+ {VPU_CMD_ID_START, VID_API_CMD_START},
+ {VPU_CMD_ID_STOP, VID_API_CMD_STOP},
+ {VPU_CMD_ID_ABORT, VID_API_CMD_ABORT},
+diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c
+index d8247f36d84ba..92672a802b492 100644
+--- a/drivers/media/platform/amphion/vpu_msgs.c
++++ b/drivers/media/platform/amphion/vpu_msgs.c
+@@ -43,6 +43,7 @@ static void vpu_session_handle_mem_request(struct vpu_inst *inst, struct vpu_rpc
+ req_data.ref_frame_num,
+ req_data.act_buf_size,
+ req_data.act_buf_num);
++ vpu_inst_lock(inst);
+ call_void_vop(inst, mem_request,
+ req_data.enc_frame_size,
+ req_data.enc_frame_num,
+@@ -50,6 +51,7 @@ static void vpu_session_handle_mem_request(struct vpu_inst *inst, struct vpu_rpc
+ req_data.ref_frame_num,
+ req_data.act_buf_size,
+ req_data.act_buf_num);
++ vpu_inst_unlock(inst);
+ }
+
+ static void vpu_session_handle_stop_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
+index b779e0ba916ca..590d1084e5a5d 100644
+--- a/drivers/media/platform/amphion/vpu_v4l2.c
++++ b/drivers/media/platform/amphion/vpu_v4l2.c
+@@ -65,18 +65,11 @@ unsigned int vpu_get_buffer_state(struct vb2_v4l2_buffer *vbuf)
+
+ void vpu_v4l2_set_error(struct vpu_inst *inst)
+ {
+- struct vb2_queue *src_q;
+- struct vb2_queue *dst_q;
+-
+ vpu_inst_lock(inst);
+ dev_err(inst->dev, "some error occurs in codec\n");
+ if (inst->fh.m2m_ctx) {
+- src_q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
+- dst_q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
+- src_q->error = 1;
+- dst_q->error = 1;
+- wake_up(&src_q->done_wq);
+- wake_up(&dst_q->done_wq);
++ vb2_queue_error(v4l2_m2m_get_src_vq(inst->fh.m2m_ctx));
++ vb2_queue_error(v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx));
+ }
+ vpu_inst_unlock(inst);
+ }
+@@ -249,8 +242,12 @@ int vpu_process_capture_buffer(struct vpu_inst *inst)
+
+ struct vb2_v4l2_buffer *vpu_next_src_buf(struct vpu_inst *inst)
+ {
+- struct vb2_v4l2_buffer *src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
++ struct vb2_v4l2_buffer *src_buf = NULL;
++
++ if (!inst->fh.m2m_ctx)
++ return NULL;
+
++ src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
+ if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
+ return NULL;
+
+@@ -273,7 +270,7 @@ void vpu_skip_frame(struct vpu_inst *inst, int count)
+ enum vb2_buffer_state state;
+ int i = 0;
+
+- if (count <= 0)
++ if (count <= 0 || !inst->fh.m2m_ctx)
+ return;
+
+ while (i < count) {
+@@ -603,10 +600,6 @@ static int vpu_v4l2_release(struct vpu_inst *inst)
+ inst->workqueue = NULL;
+ }
+
+- if (inst->fh.m2m_ctx) {
+- v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
+- inst->fh.m2m_ctx = NULL;
+- }
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+ mutex_destroy(&inst->lock);
+ v4l2_fh_del(&inst->fh);
+@@ -689,6 +682,13 @@ int vpu_v4l2_close(struct file *file)
+
+ vpu_trace(vpu->dev, "tgid = %d, pid = %d, inst = %p\n", inst->tgid, inst->pid, inst);
+
++ vpu_inst_lock(inst);
++ if (inst->fh.m2m_ctx) {
++ v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
++ inst->fh.m2m_ctx = NULL;
++ }
++ vpu_inst_unlock(inst);
++
+ call_void_vop(inst, release);
+ vpu_inst_unregister(inst);
+ vpu_inst_put(inst);
+diff --git a/drivers/media/platform/amphion/vpu_windsor.c b/drivers/media/platform/amphion/vpu_windsor.c
+index 1526af2ef9da4..b93c8cfdee7f5 100644
+--- a/drivers/media/platform/amphion/vpu_windsor.c
++++ b/drivers/media/platform/amphion/vpu_windsor.c
+@@ -658,6 +658,7 @@ int vpu_windsor_get_stream_buffer_size(struct vpu_shared_addr *shared)
+ }
+
+ static struct vpu_pair windsor_cmds[] = {
++ {VPU_CMD_ID_NOOP, GTB_ENC_CMD_NOOP},
+ {VPU_CMD_ID_CONFIGURE_CODEC, GTB_ENC_CMD_CONFIGURE_CODEC},
+ {VPU_CMD_ID_START, GTB_ENC_CMD_STREAM_START},
+ {VPU_CMD_ID_STOP, GTB_ENC_CMD_STREAM_STOP},
+diff --git a/drivers/media/platform/chips-media/coda-bit.c b/drivers/media/platform/chips-media/coda-bit.c
+index 2736a902e3df3..ed47d5bd8d61e 100644
+--- a/drivers/media/platform/chips-media/coda-bit.c
++++ b/drivers/media/platform/chips-media/coda-bit.c
+@@ -854,7 +854,7 @@ static void coda_setup_iram(struct coda_ctx *ctx)
+ /* Only H.264BP and H.263P3 are considered */
+ iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w64);
+ iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w64);
+- if (!iram_info->buf_dbk_c_use)
++ if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use)
+ goto out;
+ iram_info->axi_sram_use |= dbk_bits;
+
+@@ -878,7 +878,7 @@ static void coda_setup_iram(struct coda_ctx *ctx)
+
+ iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w128);
+ iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w128);
+- if (!iram_info->buf_dbk_c_use)
++ if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use)
+ goto out;
+ iram_info->axi_sram_use |= dbk_bits;
+
+@@ -1084,10 +1084,16 @@ static int coda_start_encoding(struct coda_ctx *ctx)
+ }
+
+ if (dst_fourcc == V4L2_PIX_FMT_JPEG) {
+- if (!ctx->params.jpeg_qmat_tab[0])
++ if (!ctx->params.jpeg_qmat_tab[0]) {
+ ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL);
+- if (!ctx->params.jpeg_qmat_tab[1])
++ if (!ctx->params.jpeg_qmat_tab[0])
++ return -ENOMEM;
++ }
++ if (!ctx->params.jpeg_qmat_tab[1]) {
+ ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL);
++ if (!ctx->params.jpeg_qmat_tab[1])
++ return -ENOMEM;
++ }
+ coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality);
+ }
+
+diff --git a/drivers/media/platform/chips-media/coda-jpeg.c b/drivers/media/platform/chips-media/coda-jpeg.c
+index 435e7030fc2a8..ba8f410029172 100644
+--- a/drivers/media/platform/chips-media/coda-jpeg.c
++++ b/drivers/media/platform/chips-media/coda-jpeg.c
+@@ -1052,10 +1052,16 @@ static int coda9_jpeg_start_encoding(struct coda_ctx *ctx)
+ v4l2_err(&dev->v4l2_dev, "error loading Huffman tables\n");
+ return ret;
+ }
+- if (!ctx->params.jpeg_qmat_tab[0])
++ if (!ctx->params.jpeg_qmat_tab[0]) {
+ ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL);
+- if (!ctx->params.jpeg_qmat_tab[1])
++ if (!ctx->params.jpeg_qmat_tab[0])
++ return -ENOMEM;
++ }
++ if (!ctx->params.jpeg_qmat_tab[1]) {
+ ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL);
++ if (!ctx->params.jpeg_qmat_tab[1])
++ return -ENOMEM;
++ }
+ coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality);
+
+ return 0;
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+index 86c054600a08c..124c1b96e96bd 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+@@ -252,10 +252,9 @@ static int mdp_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
+ dma_addr_t dma_addr;
+
+ pkt->va_base = kzalloc(size, GFP_KERNEL);
+- if (!pkt->va_base) {
+- kfree(pkt);
++ if (!pkt->va_base)
+ return -ENOMEM;
+- }
++
+ pkt->buf_size = size;
+ pkt->cl = (void *)client;
+
+@@ -368,25 +367,30 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+- goto err_cmdq_data;
++ goto err_cancel_job;
+ }
+
+- if (mdp_cmdq_pkt_create(mdp->cmdq_clt, &cmd->pkt, SZ_16K)) {
+- ret = -ENOMEM;
+- goto err_cmdq_data;
+- }
++ ret = mdp_cmdq_pkt_create(mdp->cmdq_clt, &cmd->pkt, SZ_16K);
++ if (ret)
++ goto err_free_cmd;
+
+ comps = kcalloc(param->config->num_components, sizeof(*comps),
+ GFP_KERNEL);
+ if (!comps) {
+ ret = -ENOMEM;
+- goto err_cmdq_data;
++ goto err_destroy_pkt;
+ }
+
+ path = kzalloc(sizeof(*path), GFP_KERNEL);
+ if (!path) {
+ ret = -ENOMEM;
+- goto err_cmdq_data;
++ goto err_free_comps;
++ }
++
++ ret = mtk_mutex_prepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
++ if (ret) {
++ dev_err(dev, "Fail to enable mutex clk\n");
++ goto err_free_path;
+ }
+
+ path->mdp_dev = mdp;
+@@ -406,15 +410,13 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
+ ret = mdp_path_ctx_init(mdp, path);
+ if (ret) {
+ dev_err(dev, "mdp_path_ctx_init error\n");
+- goto err_cmdq_data;
++ goto err_free_path;
+ }
+
+- mtk_mutex_prepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
+-
+ ret = mdp_path_config(mdp, cmd, path);
+ if (ret) {
+ dev_err(dev, "mdp_path_config error\n");
+- goto err_cmdq_data;
++ goto err_free_path;
+ }
+ cmdq_pkt_finalize(&cmd->pkt);
+
+@@ -431,10 +433,8 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
+ cmd->mdp_ctx = param->mdp_ctx;
+
+ ret = mdp_comp_clocks_on(&mdp->pdev->dev, cmd->comps, cmd->num_comps);
+- if (ret) {
+- dev_err(dev, "comp %d failed to enable clock!\n", ret);
+- goto err_clock_off;
+- }
++ if (ret)
++ goto err_free_path;
+
+ dma_sync_single_for_device(mdp->cmdq_clt->chan->mbox->dev,
+ cmd->pkt.pa_base, cmd->pkt.cmd_buf_size,
+@@ -450,17 +450,20 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
+ return 0;
+
+ err_clock_off:
+- mtk_mutex_unprepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
+ mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
+ cmd->num_comps);
+-err_cmdq_data:
++err_free_path:
++ mtk_mutex_unprepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
+ kfree(path);
+- atomic_dec(&mdp->job_count);
+- wake_up(&mdp->callback_wq);
+- if (cmd && cmd->pkt.buf_size > 0)
+- mdp_cmdq_pkt_destroy(&cmd->pkt);
++err_free_comps:
+ kfree(comps);
++err_destroy_pkt:
++ mdp_cmdq_pkt_destroy(&cmd->pkt);
++err_free_cmd:
+ kfree(cmd);
++err_cancel_job:
++ atomic_dec(&mdp->job_count);
++
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(mdp_cmdq_send);
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+index d3eaf8884412d..7bc05f42a23c1 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+@@ -699,12 +699,22 @@ int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp)
+ dev_err(dev,
+ "Failed to enable clk %d. type:%d id:%d\n",
+ i, comp->type, comp->id);
+- pm_runtime_put(comp->comp_dev);
+- return ret;
++ goto err_revert;
+ }
+ }
+
+ return 0;
++
++err_revert:
++ while (--i >= 0) {
++ if (IS_ERR_OR_NULL(comp->clks[i]))
++ continue;
++ clk_disable_unprepare(comp->clks[i]);
++ }
++ if (comp->comp_dev)
++ pm_runtime_put_sync(comp->comp_dev);
++
++ return ret;
+ }
+
+ void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp)
+@@ -723,11 +733,13 @@ void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp)
+
+ int mdp_comp_clocks_on(struct device *dev, struct mdp_comp *comps, int num)
+ {
+- int i;
++ int i, ret;
+
+- for (i = 0; i < num; i++)
+- if (mdp_comp_clock_on(dev, &comps[i]) != 0)
+- return ++i;
++ for (i = 0; i < num; i++) {
++ ret = mdp_comp_clock_on(dev, &comps[i]);
++ if (ret)
++ return ret;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+index c413e59d42860..2d1f6ae9f0802 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+@@ -196,27 +196,27 @@ static int mdp_probe(struct platform_device *pdev)
+ mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_MMSYS);
+ if (!mm_pdev) {
+ ret = -ENODEV;
+- goto err_return;
++ goto err_destroy_device;
+ }
+ mdp->mdp_mmsys = &mm_pdev->dev;
+
+ mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_MUTEX);
+ if (WARN_ON(!mm_pdev)) {
+ ret = -ENODEV;
+- goto err_return;
++ goto err_destroy_device;
+ }
+ for (i = 0; i < MDP_PIPE_MAX; i++) {
+ mdp->mdp_mutex[i] = mtk_mutex_get(&mm_pdev->dev);
+ if (!mdp->mdp_mutex[i]) {
+ ret = -ENODEV;
+- goto err_return;
++ goto err_free_mutex;
+ }
+ }
+
+ ret = mdp_comp_config(mdp);
+ if (ret) {
+ dev_err(dev, "Failed to config mdp components\n");
+- goto err_return;
++ goto err_free_mutex;
+ }
+
+ mdp->job_wq = alloc_workqueue(MDP_MODULE_NAME, WQ_FREEZABLE, 0);
+@@ -287,11 +287,12 @@ err_destroy_job_wq:
+ destroy_workqueue(mdp->job_wq);
+ err_deinit_comp:
+ mdp_comp_destroy(mdp);
+-err_return:
++err_free_mutex:
+ for (i = 0; i < MDP_PIPE_MAX; i++)
+- if (mdp)
+- mtk_mutex_put(mdp->mdp_mutex[i]);
++ mtk_mutex_put(mdp->mdp_mutex[i]);
++err_destroy_device:
+ kfree(mdp);
++err_return:
+ dev_dbg(dev, "Errno %d\n", ret);
+ return ret;
+ }
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
+index c45bd2599bb2d..ffbcee04dc26f 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
+@@ -138,10 +138,13 @@ static void mtk_vdec_stateless_cap_to_disp(struct mtk_vcodec_ctx *ctx, int error
+ state = VB2_BUF_STATE_DONE;
+
+ vb2_dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+- v4l2_m2m_buf_done(vb2_dst, state);
+-
+- mtk_v4l2_debug(2, "free frame buffer id:%d to done list",
+- vb2_dst->vb2_buf.index);
++ if (vb2_dst) {
++ v4l2_m2m_buf_done(vb2_dst, state);
++ mtk_v4l2_debug(2, "free frame buffer id:%d to done list",
++ vb2_dst->vb2_buf.index);
++ } else {
++ mtk_v4l2_err("dst buffer is NULL");
++ }
+
+ if (src_buf_req)
+ v4l2_ctrl_request_complete(src_buf_req, &ctx->ctrl_hdl);
+@@ -250,7 +253,7 @@ static void mtk_vdec_worker(struct work_struct *work)
+
+ state = ret ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE;
+ if (!IS_VDEC_LAT_ARCH(dev->vdec_pdata->hw_arch) ||
+- ctx->current_codec == V4L2_PIX_FMT_VP8_FRAME || ret) {
++ ctx->current_codec == V4L2_PIX_FMT_VP8_FRAME) {
+ v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx, state);
+ if (src_buf_req)
+ v4l2_ctrl_request_complete(src_buf_req, &ctx->ctrl_hdl);
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
+index 4cc92700692b3..955b2d0c8f53f 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
+@@ -471,14 +471,19 @@ static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
+ sizeof(share_info->h264_slice_params));
+
+ fb = ctx->dev->vdec_pdata->get_cap_buffer(ctx);
+- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+- vdec_fb_va = (unsigned long)fb;
++ if (!fb) {
++ err = -EBUSY;
++ mtk_vcodec_err(inst, "fb buffer is NULL");
++ goto vdec_dec_end;
++ }
+
++ vdec_fb_va = (unsigned long)fb;
++ y_fb_dma = (u64)fb->base_y.dma_addr;
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
+ c_fb_dma =
+ y_fb_dma + inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h;
+ else
+- c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
++ c_fb_dma = (u64)fb->base_c.dma_addr;
+
+ mtk_vcodec_debug(inst, "[h264-core] y/c addr = 0x%llx 0x%llx", y_fb_dma,
+ c_fb_dma);
+@@ -539,6 +544,29 @@ vdec_dec_end:
+ return 0;
+ }
+
++static void vdec_h264_insert_startcode(struct mtk_vcodec_dev *vcodec_dev, unsigned char *buf,
++ size_t *bs_size, struct mtk_h264_pps_param *pps)
++{
++ struct device *dev = &vcodec_dev->plat_dev->dev;
++
++	/* Pending data needs to be added at the end of the bitstream when its size is
++	 * smaller than 20 bytes for a CAVLC bitstream, or the LAT decode will fail.
++	 * This pending data is only needed on the mt8192 and mt8195 platforms.
++	 *
++	 * A bitstream is CAVLC when entropy_coding_mode_flag is false.
++ */
++ if (pps->entropy_coding_mode_flag || *bs_size > 20 ||
++ !(of_device_is_compatible(dev->of_node, "mediatek,mt8192-vcodec-dec") ||
++ of_device_is_compatible(dev->of_node, "mediatek,mt8195-vcodec-dec")))
++ return;
++
++ buf[*bs_size] = 0;
++ buf[*bs_size + 1] = 0;
++ buf[*bs_size + 2] = 1;
++ buf[*bs_size + 3] = 0xff;
++ (*bs_size) += 4;
++}
++
+ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+ {
+@@ -582,9 +610,6 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ }
+
+ inst->vsi->dec.nal_info = buf[nal_start_idx];
+- inst->vsi->dec.bs_buf_addr = (u64)bs->dma_addr;
+- inst->vsi->dec.bs_buf_size = bs->size;
+-
+ lat_buf->src_buf_req = src_buf_info->m2m_buf.vb.vb2_buf.req_obj.req;
+ v4l2_m2m_buf_copy_metadata(&src_buf_info->m2m_buf.vb, &lat_buf->ts_info, true);
+
+@@ -592,6 +617,12 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ if (err)
+ goto err_free_fb_out;
+
++ vdec_h264_insert_startcode(inst->ctx->dev, buf, &bs->size,
++ &share_info->h264_slice_params.pps);
++
++ inst->vsi->dec.bs_buf_addr = (uint64_t)bs->dma_addr;
++ inst->vsi->dec.bs_buf_size = bs->size;
++
+ *res_chg = inst->resolution_changed;
+ if (inst->resolution_changed) {
+ mtk_vcodec_debug(inst, "- resolution changed -");
+@@ -630,7 +661,7 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ err = vpu_dec_start(vpu, data, 2);
+ if (err) {
+ mtk_vcodec_debug(inst, "lat decode err: %d", err);
+- goto err_scp_decode;
++ goto err_free_fb_out;
+ }
+
+ share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
+@@ -647,12 +678,17 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ /* wait decoder done interrupt */
+ timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS, MTK_VDEC_LAT0);
++ if (timeout)
++ mtk_vcodec_err(inst, "lat decode timeout: pic_%d", inst->slice_dec_num);
+ inst->vsi->dec.timeout = !!timeout;
+
+ err = vpu_dec_end(vpu);
+- if (err == SLICE_HEADER_FULL || timeout || err == TRANS_BUFFER_FULL) {
+- err = -EINVAL;
+- goto err_scp_decode;
++ if (err == SLICE_HEADER_FULL || err == TRANS_BUFFER_FULL) {
++ if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability))
++ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
++ inst->slice_dec_num++;
++ mtk_vcodec_err(inst, "lat dec fail: pic_%d err:%d", inst->slice_dec_num, err);
++ return -EINVAL;
+ }
+
+ share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
+@@ -669,10 +705,6 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+
+ inst->slice_dec_num++;
+ return 0;
+-
+-err_scp_decode:
+- if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability))
+- vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
+ err_free_fb_out:
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
+ mtk_vcodec_err(inst, "slice dec number: %d err: %d", inst->slice_dec_num, err);
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
+index fb1c36a3592d1..cbb6728b8a40b 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
+@@ -2073,21 +2073,23 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ return -EBUSY;
+ }
+ pfc = (struct vdec_vp9_slice_pfc *)lat_buf->private_data;
+- if (!pfc)
+- return -EINVAL;
++ if (!pfc) {
++ ret = -EINVAL;
++ goto err_free_fb_out;
++ }
+ vsi = &pfc->vsi;
+
+ ret = vdec_vp9_slice_setup_lat(instance, bs, lat_buf, pfc);
+ if (ret) {
+ mtk_vcodec_err(instance, "Failed to setup VP9 lat ret %d\n", ret);
+- return ret;
++ goto err_free_fb_out;
+ }
+ vdec_vp9_slice_vsi_to_remote(vsi, instance->vsi);
+
+ ret = vpu_dec_start(&instance->vpu, NULL, 0);
+ if (ret) {
+ mtk_vcodec_err(instance, "Failed to dec VP9 ret %d\n", ret);
+- return ret;
++ goto err_free_fb_out;
+ }
+
+ if (instance->irq) {
+@@ -2107,7 +2109,7 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ /* LAT trans full, no more UBE or decode timeout */
+ if (ret) {
+ mtk_vcodec_err(instance, "VP9 decode error: %d\n", ret);
+- return ret;
++ goto err_free_fb_out;
+ }
+
+ mtk_vcodec_debug(instance, "lat dma addr: 0x%lx 0x%lx\n",
+@@ -2120,6 +2122,9 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ vdec_msg_queue_qbuf(&ctx->dev->msg_queue_core_ctx, lat_buf);
+
+ return 0;
++err_free_fb_out:
++ vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
++ return ret;
+ }
+
+ static int vdec_vp9_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+index ae500980ad45c..dc2004790a472 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+@@ -221,7 +221,7 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
+ mtk_vcodec_dec_disable_hardware(ctx, MTK_VDEC_CORE);
+ vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
+
+- if (!list_empty(&ctx->msg_queue.lat_ctx.ready_queue)) {
++ if (!list_empty(&dev->msg_queue_core_ctx.ready_queue)) {
+ mtk_v4l2_debug(3, "re-schedule to decode for core: %d",
+ dev->msg_queue_core_ctx.ready_num);
+ queue_work(dev->core_workqueue, &msg_queue->core_work);
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c
+index 9418fcf740a82..ef28122a5ed49 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c
+@@ -76,12 +76,14 @@ void print_wrapper_info(struct device *dev, void __iomem *reg)
+
+ void mxc_jpeg_enable_irq(void __iomem *reg, int slot)
+ {
+- writel(0xFFFFFFFF, reg + MXC_SLOT_OFFSET(slot, SLOT_IRQ_EN));
++ writel(0xFFFFFFFF, reg + MXC_SLOT_OFFSET(slot, SLOT_STATUS));
++ writel(0xF0C, reg + MXC_SLOT_OFFSET(slot, SLOT_IRQ_EN));
+ }
+
+ void mxc_jpeg_disable_irq(void __iomem *reg, int slot)
+ {
+ writel(0x0, reg + MXC_SLOT_OFFSET(slot, SLOT_IRQ_EN));
++ writel(0xFFFFFFFF, reg + MXC_SLOT_OFFSET(slot, SLOT_STATUS));
+ }
+
+ void mxc_jpeg_sw_reset(void __iomem *reg)
+diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
+index 81fb3a5bc1d51..41deda232e4a1 100644
+--- a/drivers/media/platform/qcom/camss/camss-video.c
++++ b/drivers/media/platform/qcom/camss/camss-video.c
+@@ -495,7 +495,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count)
+
+ ret = video_device_pipeline_start(vdev, &video->pipe);
+ if (ret < 0)
+- return ret;
++ goto flush_buffers;
+
+ ret = video_check_format(video);
+ if (ret < 0)
+@@ -524,6 +524,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count)
+ error:
+ video_device_pipeline_stop(vdev);
+
++flush_buffers:
+ video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index 1118c40886d52..a157cac72e0ab 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1465,6 +1465,14 @@ static int camss_configure_pd(struct camss *camss)
+ return camss->genpd_num;
+ }
+
++ /*
++	 * If a platform device has just one power domain, it is attached at
++	 * the platform_probe() level, so there is neither a need nor a way to
++	 * attach it again. This is the case for CAMSS on MSM8916.
++ */
++ if (camss->genpd_num == 1)
++ return 0;
++
+ camss->genpd = devm_kmalloc_array(dev, camss->genpd_num,
+ sizeof(*camss->genpd), GFP_KERNEL);
+ if (!camss->genpd)
+@@ -1698,6 +1706,9 @@ void camss_delete(struct camss *camss)
+
+ pm_runtime_disable(camss->dev);
+
++ if (camss->genpd_num == 1)
++ return;
++
+ for (i = 0; i < camss->genpd_num; i++) {
+ device_link_del(camss->genpd_link[i]);
+ dev_pm_domain_detach(camss->genpd[i], true);
+diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
+index c93d2906e4c7d..48c9084bb4dba 100644
+--- a/drivers/media/platform/qcom/venus/pm_helpers.c
++++ b/drivers/media/platform/qcom/venus/pm_helpers.c
+@@ -869,8 +869,8 @@ static int vcodec_domains_get(struct venus_core *core)
+ for (i = 0; i < res->vcodec_pmdomains_num; i++) {
+ pd = dev_pm_domain_attach_by_name(dev,
+ res->vcodec_pmdomains[i]);
+- if (IS_ERR(pd))
+- return PTR_ERR(pd);
++ if (IS_ERR_OR_NULL(pd))
++ return PTR_ERR(pd) ? : -ENODATA;
+ core->pmdomains[i] = pd;
+ }
+
+diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-core.c b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
+index 91cc8d58a663b..1791100b69353 100644
+--- a/drivers/media/platform/samsung/exynos4-is/fimc-core.c
++++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
+@@ -1173,7 +1173,7 @@ int __init fimc_register_driver(void)
+ return platform_driver_register(&fimc_driver);
+ }
+
+-void __exit fimc_unregister_driver(void)
++void fimc_unregister_driver(void)
+ {
+ platform_driver_unregister(&fimc_driver);
+ }
+diff --git a/drivers/media/platform/samsung/exynos4-is/media-dev.c b/drivers/media/platform/samsung/exynos4-is/media-dev.c
+index 52b43ea040302..2f3071acb9c97 100644
+--- a/drivers/media/platform/samsung/exynos4-is/media-dev.c
++++ b/drivers/media/platform/samsung/exynos4-is/media-dev.c
+@@ -1380,9 +1380,7 @@ static int subdev_notifier_bound(struct v4l2_async_notifier *notifier,
+
+ /* Find platform data for this sensor subdev */
+ for (i = 0; i < ARRAY_SIZE(fmd->sensor); i++)
+- if (fmd->sensor[i].asd &&
+- fmd->sensor[i].asd->match.fwnode ==
+- of_fwnode_handle(subdev->dev->of_node))
++ if (fmd->sensor[i].asd == asd)
+ si = &fmd->sensor[i];
+
+ if (si == NULL)
+@@ -1474,7 +1472,7 @@ static int fimc_md_probe(struct platform_device *pdev)
+ pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(pinctrl)) {
+ ret = PTR_ERR(pinctrl);
+- if (ret != EPROBE_DEFER)
++ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get pinctrl: %d\n", ret);
+ goto err_clk;
+ }
+@@ -1586,7 +1584,11 @@ static int __init fimc_md_init(void)
+ if (ret)
+ return ret;
+
+- return platform_driver_register(&fimc_md_driver);
++ ret = platform_driver_register(&fimc_md_driver);
++ if (ret)
++ fimc_unregister_driver();
++
++ return ret;
+ }
+
+ static void __exit fimc_md_exit(void)
+diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
+index fca5c6405eec3..007c7dbee0377 100644
+--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
++++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
+@@ -1576,8 +1576,18 @@ static struct s5p_mfc_variant mfc_drvdata_v7 = {
+ .port_num = MFC_NUM_PORTS_V7,
+ .buf_size = &buf_size_v7,
+ .fw_name[0] = "s5p-mfc-v7.fw",
+- .clk_names = {"mfc", "sclk_mfc"},
+- .num_clocks = 2,
++ .clk_names = {"mfc"},
++ .num_clocks = 1,
++};
++
++static struct s5p_mfc_variant mfc_drvdata_v7_3250 = {
++ .version = MFC_VERSION_V7,
++ .version_bit = MFC_V7_BIT,
++ .port_num = MFC_NUM_PORTS_V7,
++ .buf_size = &buf_size_v7,
++ .fw_name[0] = "s5p-mfc-v7.fw",
++ .clk_names = {"mfc", "sclk_mfc"},
++ .num_clocks = 2,
+ };
+
+ static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
+@@ -1647,6 +1657,9 @@ static const struct of_device_id exynos_mfc_match[] = {
+ }, {
+ .compatible = "samsung,mfc-v7",
+ .data = &mfc_drvdata_v7,
++ }, {
++ .compatible = "samsung,exynos3250-mfc",
++ .data = &mfc_drvdata_v7_3250,
+ }, {
+ .compatible = "samsung,mfc-v8",
+ .data = &mfc_drvdata_v8,
+diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+index cefe6b7bfdc4e..1dbb89f0ddb8c 100644
+--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
++++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+@@ -925,6 +925,7 @@ static int configure_channels(struct c8sectpfei *fei)
+ if (ret) {
+ dev_err(fei->dev,
+ "configure_memdma_and_inputblock failed\n");
++ of_node_put(child);
+ goto err_unmap;
+ }
+ index++;
+diff --git a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
+index 30d6c0c5161f4..484ac5f054d53 100644
+--- a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
++++ b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
+@@ -498,6 +498,7 @@ static int sun6i_mipi_csi2_bridge_setup(struct sun6i_mipi_csi2_device *csi2_dev)
+ struct v4l2_async_notifier *notifier = &bridge->notifier;
+ struct media_pad *pads = bridge->pads;
+ struct device *dev = csi2_dev->dev;
++ bool notifier_registered = false;
+ int ret;
+
+ mutex_init(&bridge->lock);
+@@ -519,8 +520,10 @@ static int sun6i_mipi_csi2_bridge_setup(struct sun6i_mipi_csi2_device *csi2_dev)
+
+ /* Media Pads */
+
+- pads[SUN6I_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+- pads[SUN6I_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
++ pads[SUN6I_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
++ MEDIA_PAD_FL_MUST_CONNECT;
++ pads[SUN6I_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE |
++ MEDIA_PAD_FL_MUST_CONNECT;
+
+ ret = media_entity_pads_init(&subdev->entity, SUN6I_MIPI_CSI2_PAD_COUNT,
+ pads);
+@@ -533,12 +536,17 @@ static int sun6i_mipi_csi2_bridge_setup(struct sun6i_mipi_csi2_device *csi2_dev)
+ notifier->ops = &sun6i_mipi_csi2_notifier_ops;
+
+ ret = sun6i_mipi_csi2_bridge_source_setup(csi2_dev);
+- if (ret)
++ if (ret && ret != -ENODEV)
+ goto error_v4l2_notifier_cleanup;
+
+- ret = v4l2_async_subdev_nf_register(subdev, notifier);
+- if (ret < 0)
+- goto error_v4l2_notifier_cleanup;
++ /* Only register the notifier when a sensor is connected. */
++ if (ret != -ENODEV) {
++ ret = v4l2_async_subdev_nf_register(subdev, notifier);
++ if (ret < 0)
++ goto error_v4l2_notifier_cleanup;
++
++ notifier_registered = true;
++ }
+
+ /* V4L2 Subdev */
+
+@@ -549,7 +557,8 @@ static int sun6i_mipi_csi2_bridge_setup(struct sun6i_mipi_csi2_device *csi2_dev)
+ return 0;
+
+ error_v4l2_notifier_unregister:
+- v4l2_async_nf_unregister(notifier);
++ if (notifier_registered)
++ v4l2_async_nf_unregister(notifier);
+
+ error_v4l2_notifier_cleanup:
+ v4l2_async_nf_cleanup(notifier);
+diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
+index b032ec13a683a..d993c09a48202 100644
+--- a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
++++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
+@@ -536,6 +536,7 @@ sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+ struct v4l2_async_notifier *notifier = &bridge->notifier;
+ struct media_pad *pads = bridge->pads;
+ struct device *dev = csi2_dev->dev;
++ bool notifier_registered = false;
+ int ret;
+
+ mutex_init(&bridge->lock);
+@@ -557,8 +558,10 @@ sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+
+ /* Media Pads */
+
+- pads[SUN8I_A83T_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+- pads[SUN8I_A83T_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
++ pads[SUN8I_A83T_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
++ MEDIA_PAD_FL_MUST_CONNECT;
++ pads[SUN8I_A83T_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE |
++ MEDIA_PAD_FL_MUST_CONNECT;
+
+ ret = media_entity_pads_init(&subdev->entity,
+ SUN8I_A83T_MIPI_CSI2_PAD_COUNT, pads);
+@@ -571,12 +574,17 @@ sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+ notifier->ops = &sun8i_a83t_mipi_csi2_notifier_ops;
+
+ ret = sun8i_a83t_mipi_csi2_bridge_source_setup(csi2_dev);
+- if (ret)
++ if (ret && ret != -ENODEV)
+ goto error_v4l2_notifier_cleanup;
+
+- ret = v4l2_async_subdev_nf_register(subdev, notifier);
+- if (ret < 0)
+- goto error_v4l2_notifier_cleanup;
++ /* Only register the notifier when a sensor is connected. */
++ if (ret != -ENODEV) {
++ ret = v4l2_async_subdev_nf_register(subdev, notifier);
++ if (ret < 0)
++ goto error_v4l2_notifier_cleanup;
++
++ notifier_registered = true;
++ }
+
+ /* V4L2 Subdev */
+
+@@ -587,7 +595,8 @@ sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+ return 0;
+
+ error_v4l2_notifier_unregister:
+- v4l2_async_nf_unregister(notifier);
++ if (notifier_registered)
++ v4l2_async_nf_unregister(notifier);
+
+ error_v4l2_notifier_cleanup:
+ v4l2_async_nf_cleanup(notifier);
+diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
+index 6b2768623c883..aa7a580dbecc0 100644
+--- a/drivers/media/radio/si470x/radio-si470x-usb.c
++++ b/drivers/media/radio/si470x/radio-si470x-usb.c
+@@ -727,8 +727,10 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
+
+ /* start radio */
+ retval = si470x_start_usb(radio);
+- if (retval < 0)
++ if (retval < 0 && !radio->int_in_running)
+ goto err_buf;
++ else if (retval < 0) /* in case of radio->int_in_running == 1 */
++ goto err_all;
+
+ /* set initial frequency */
+ si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 5edfd8a9e8494..74546f7e34691 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -646,15 +646,14 @@ static int send_packet(struct imon_context *ictx)
+ pr_err_ratelimited("error submitting urb(%d)\n", retval);
+ } else {
+ /* Wait for transmission to complete (or abort) */
+- mutex_unlock(&ictx->lock);
+ retval = wait_for_completion_interruptible(
+ &ictx->tx.finished);
+ if (retval) {
+ usb_kill_urb(ictx->tx_urb);
+ pr_err_ratelimited("task interrupted\n");
+ }
+- mutex_lock(&ictx->lock);
+
++ ictx->tx.busy = false;
+ retval = ictx->tx.status;
+ if (retval)
+ pr_err_ratelimited("packet tx failed (%d)\n", retval);
+@@ -953,7 +952,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
+ if (ictx->disconnected)
+ return -ENODEV;
+
+- mutex_lock(&ictx->lock);
++ if (mutex_lock_interruptible(&ictx->lock))
++ return -ERESTARTSYS;
+
+ if (!ictx->dev_present_intf0) {
+ pr_err_ratelimited("no iMON device present\n");
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+index 82620613d56b8..dff7265a42ca2 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+@@ -459,26 +459,20 @@ fail_dmx_conn:
+ for (j = j - 1; j >= 0; --j)
+ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx,
+ &dvb->dmx_fe[j]);
+-fail_dmx_dev:
+ dvb_dmxdev_release(&dvb->dmx_dev);
+-fail_dmx:
++fail_dmx_dev:
+ dvb_dmx_release(&dvb->demux);
++fail_dmx:
++fail_demod_probe:
++ for (i = i - 1; i >= 0; --i) {
++ dvb_unregister_frontend(dvb->fe[i]);
+ fail_fe:
+- for (j = i; j >= 0; --j)
+- dvb_unregister_frontend(dvb->fe[j]);
++ dvb_module_release(dvb->i2c_client_tuner[i]);
+ fail_tuner_probe:
+- for (j = i; j >= 0; --j)
+- if (dvb->i2c_client_tuner[j])
+- dvb_module_release(dvb->i2c_client_tuner[j]);
+-
+-fail_demod_probe:
+- for (j = i; j >= 0; --j)
+- if (dvb->i2c_client_demod[j])
+- dvb_module_release(dvb->i2c_client_demod[j]);
+-
++ dvb_module_release(dvb->i2c_client_demod[i]);
++ }
+ fail_adapter:
+ dvb_unregister_adapter(&dvb->adapter);
+-
+ fail_i2c:
+ i2c_del_adapter(&dvb->i2c_adapter);
+
+diff --git a/drivers/media/test-drivers/vimc/vimc-core.c b/drivers/media/test-drivers/vimc/vimc-core.c
+index 2ae7a0f11ebfc..e82cfa5ffbf47 100644
+--- a/drivers/media/test-drivers/vimc/vimc-core.c
++++ b/drivers/media/test-drivers/vimc/vimc-core.c
+@@ -433,7 +433,7 @@ static int __init vimc_init(void)
+ if (ret) {
+ dev_err(&vimc_pdev.dev,
+ "platform driver registration failed (err=%d)\n", ret);
+- platform_driver_unregister(&vimc_pdrv);
++ platform_device_unregister(&vimc_pdev);
+ return ret;
+ }
+
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+index 11620eaf941e3..c0999581c599b 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+@@ -973,6 +973,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
+ if (dev->has_compose_cap) {
+ v4l2_rect_set_min_size(compose, &min_rect);
+ v4l2_rect_set_max_size(compose, &max_rect);
++ v4l2_rect_map_inside(compose, &fmt);
+ }
+ dev->fmt_cap_rect = fmt;
+ tpg_s_buf_height(&dev->tpg, fmt.height);
+diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
+index cf15988dfb510..7d78ee09be5e1 100644
+--- a/drivers/media/usb/dvb-usb/az6027.c
++++ b/drivers/media/usb/dvb-usb/az6027.c
+@@ -975,6 +975,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
+ if (msg[i].addr == 0x99) {
+ req = 0xBE;
+ index = 0;
++ if (msg[i].len < 1) {
++ i = -EOPNOTSUPP;
++ break;
++ }
+ value = msg[i].buf[0] & 0x00ff;
+ length = 1;
+ az6027_usb_out_op(d, req, value, index, data, length);
+diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+index 61439c8f33cab..58eea8ab54779 100644
+--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
++++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+@@ -81,7 +81,7 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
+
+ ret = dvb_usb_adapter_stream_init(adap);
+ if (ret)
+- return ret;
++ goto stream_init_err;
+
+ ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs);
+ if (ret)
+@@ -114,6 +114,8 @@ frontend_init_err:
+ dvb_usb_adapter_dvb_exit(adap);
+ dvb_init_err:
+ dvb_usb_adapter_stream_exit(adap);
++stream_init_err:
++ kfree(adap->priv);
+ return ret;
+ }
+
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
+index d0a3aa3806fbd..3d3b6dc24ca63 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
+@@ -150,6 +150,7 @@ static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+ * then return an error.
+ */
+ if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last)
++ ctrl->is_new = 1;
+ return -ERANGE;
+ }
+ return ret;
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
+index 0dab1d7b90f0e..29169170880a6 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
+@@ -1827,7 +1827,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
+ else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len);
+
+- if ((!qmenu && !qmenu_int) || (qmenu_int && max > qmenu_int_len)) {
++ if ((!qmenu && !qmenu_int) || (qmenu_int && max >= qmenu_int_len)) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
+index fddba75d90745..6876ec25bc512 100644
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
+@@ -1347,23 +1347,23 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
+ case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break;
+ case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break;
+ case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break;
+- case V4L2_PIX_FMT_NV12: descr = "Y/CbCr 4:2:0"; break;
+- case V4L2_PIX_FMT_NV21: descr = "Y/CrCb 4:2:0"; break;
+- case V4L2_PIX_FMT_NV16: descr = "Y/CbCr 4:2:2"; break;
+- case V4L2_PIX_FMT_NV61: descr = "Y/CrCb 4:2:2"; break;
+- case V4L2_PIX_FMT_NV24: descr = "Y/CbCr 4:4:4"; break;
+- case V4L2_PIX_FMT_NV42: descr = "Y/CrCb 4:4:4"; break;
+- case V4L2_PIX_FMT_P010: descr = "10-bit Y/CbCr 4:2:0"; break;
+- case V4L2_PIX_FMT_NV12_4L4: descr = "Y/CbCr 4:2:0 (4x4 Linear)"; break;
+- case V4L2_PIX_FMT_NV12_16L16: descr = "Y/CbCr 4:2:0 (16x16 Linear)"; break;
+- case V4L2_PIX_FMT_NV12_32L32: descr = "Y/CbCr 4:2:0 (32x32 Linear)"; break;
+- case V4L2_PIX_FMT_P010_4L4: descr = "10-bit Y/CbCr 4:2:0 (4x4 Linear)"; break;
+- case V4L2_PIX_FMT_NV12M: descr = "Y/CbCr 4:2:0 (N-C)"; break;
+- case V4L2_PIX_FMT_NV21M: descr = "Y/CrCb 4:2:0 (N-C)"; break;
+- case V4L2_PIX_FMT_NV16M: descr = "Y/CbCr 4:2:2 (N-C)"; break;
+- case V4L2_PIX_FMT_NV61M: descr = "Y/CrCb 4:2:2 (N-C)"; break;
+- case V4L2_PIX_FMT_NV12MT: descr = "Y/CbCr 4:2:0 (64x32 MB, N-C)"; break;
+- case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/CbCr 4:2:0 (16x16 MB, N-C)"; break;
++ case V4L2_PIX_FMT_NV12: descr = "Y/UV 4:2:0"; break;
++ case V4L2_PIX_FMT_NV21: descr = "Y/VU 4:2:0"; break;
++ case V4L2_PIX_FMT_NV16: descr = "Y/UV 4:2:2"; break;
++ case V4L2_PIX_FMT_NV61: descr = "Y/VU 4:2:2"; break;
++ case V4L2_PIX_FMT_NV24: descr = "Y/UV 4:4:4"; break;
++ case V4L2_PIX_FMT_NV42: descr = "Y/VU 4:4:4"; break;
++ case V4L2_PIX_FMT_P010: descr = "10-bit Y/UV 4:2:0"; break;
++ case V4L2_PIX_FMT_NV12_4L4: descr = "Y/UV 4:2:0 (4x4 Linear)"; break;
++ case V4L2_PIX_FMT_NV12_16L16: descr = "Y/UV 4:2:0 (16x16 Linear)"; break;
++ case V4L2_PIX_FMT_NV12_32L32: descr = "Y/UV 4:2:0 (32x32 Linear)"; break;
++ case V4L2_PIX_FMT_P010_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break;
++ case V4L2_PIX_FMT_NV12M: descr = "Y/UV 4:2:0 (N-C)"; break;
++ case V4L2_PIX_FMT_NV21M: descr = "Y/VU 4:2:0 (N-C)"; break;
++ case V4L2_PIX_FMT_NV16M: descr = "Y/UV 4:2:2 (N-C)"; break;
++ case V4L2_PIX_FMT_NV61M: descr = "Y/VU 4:2:2 (N-C)"; break;
++ case V4L2_PIX_FMT_NV12MT: descr = "Y/UV 4:2:0 (64x32 MB, N-C)"; break;
++ case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/UV 4:2:0 (16x16 MB, N-C)"; break;
+ case V4L2_PIX_FMT_YUV420M: descr = "Planar YUV 4:2:0 (N-C)"; break;
+ case V4L2_PIX_FMT_YVU420M: descr = "Planar YVU 4:2:0 (N-C)"; break;
+ case V4L2_PIX_FMT_YUV422M: descr = "Planar YUV 4:2:2 (N-C)"; break;
+diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
+index 52312ce2ba056..f2c4393595574 100644
+--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
++++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
+@@ -36,12 +36,11 @@ struct videobuf_dma_contig_memory {
+
+ static int __videobuf_dc_alloc(struct device *dev,
+ struct videobuf_dma_contig_memory *mem,
+- unsigned long size, gfp_t flags)
++ unsigned long size)
+ {
+ mem->size = size;
+- mem->vaddr = dma_alloc_coherent(dev, mem->size,
+- &mem->dma_handle, flags);
+-
++ mem->vaddr = dma_alloc_coherent(dev, mem->size, &mem->dma_handle,
++ GFP_KERNEL);
+ if (!mem->vaddr) {
+ dev_err(dev, "memory alloc size %ld failed\n", mem->size);
+ return -ENOMEM;
+@@ -258,8 +257,7 @@ static int __videobuf_iolock(struct videobuf_queue *q,
+ return videobuf_dma_contig_user_get(mem, vb);
+
+ /* allocate memory for the read() method */
+- if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
+- GFP_KERNEL))
++ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size)))
+ return -ENOMEM;
+ break;
+ case V4L2_MEMORY_OVERLAY:
+@@ -295,22 +293,18 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+- if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
+- GFP_KERNEL | __GFP_COMP))
++ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize)))
+ goto error;
+
+- /* Try to remap memory */
+- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-
+ /* the "vm_pgoff" is just used in v4l2 to find the
+ * corresponding buffer data structure which is allocated
+ * earlier and it does not mean the offset from the physical
+ * buffer start address as usual. So set it to 0 to pass
+- * the sanity check in vm_iomap_memory().
++ * the sanity check in dma_mmap_coherent().
+ */
+ vma->vm_pgoff = 0;
+-
+- retval = vm_iomap_memory(vma, mem->dma_handle, mem->size);
++ retval = dma_mmap_coherent(q->dev, vma, mem->vaddr, mem->dma_handle,
++ mem->size);
+ if (retval) {
+ dev_err(q->dev, "mmap: remap failed with error %d. ",
+ retval);
+diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
+index 4316988d791a5..61c288d403750 100644
+--- a/drivers/memory/renesas-rpc-if.c
++++ b/drivers/memory/renesas-rpc-if.c
+@@ -317,6 +317,9 @@ int rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_PHYMEM_MASK,
+ RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0));
+
++ /* DMA Transfer is not supported */
++ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_HS, 0);
++
+ if (rpc->type == RPCIF_RCAR_GEN3)
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
+ RPCIF_PHYCNT_STRTIM(7), RPCIF_PHYCNT_STRTIM(7));
+diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
+index ba84145195158..04115cd92433b 100644
+--- a/drivers/memstick/core/ms_block.c
++++ b/drivers/memstick/core/ms_block.c
+@@ -2116,6 +2116,11 @@ static int msb_init_disk(struct memstick_dev *card)
+ dbg("Set total disk size to %lu sectors", capacity);
+
+ msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
++ if (!msb->io_queue) {
++ rc = -ENOMEM;
++ goto out_cleanup_disk;
++ }
++
+ INIT_WORK(&msb->io_work, msb_io_work);
+ sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
+
+@@ -2125,10 +2130,12 @@ static int msb_init_disk(struct memstick_dev *card)
+ msb_start(card);
+ rc = device_add_disk(&card->dev, msb->disk, NULL);
+ if (rc)
+- goto out_cleanup_disk;
++ goto out_destroy_workqueue;
+ dbg("Disk added");
+ return 0;
+
++out_destroy_workqueue:
++ destroy_workqueue(msb->io_queue);
+ out_cleanup_disk:
+ put_disk(msb->disk);
+ out_free_tag_set:
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index 8b93856de432a..9940e2724c05d 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -2027,6 +2027,7 @@ config MFD_ROHM_BD957XMUF
+ depends on I2C=y
+ depends on OF
+ select REGMAP_I2C
++ select REGMAP_IRQ
+ select MFD_CORE
+ help
+ Select this option to get support for the ROHM BD9576MUF and
+diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
+index 88a212a8168cf..880c41fa7021b 100644
+--- a/drivers/mfd/axp20x.c
++++ b/drivers/mfd/axp20x.c
+@@ -842,7 +842,7 @@ static void axp20x_power_off(void)
+ AXP20X_OFF);
+
+ /* Give capacitors etc. time to drain to avoid kernel panic msg. */
+- msleep(500);
++ mdelay(500);
+ }
+
+ int axp20x_match_device(struct axp20x_dev *axp20x)
+diff --git a/drivers/mfd/mt6360-core.c b/drivers/mfd/mt6360-core.c
+index 6eaa6775b8885..d3b32eb798377 100644
+--- a/drivers/mfd/mt6360-core.c
++++ b/drivers/mfd/mt6360-core.c
+@@ -402,7 +402,7 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size,
+ struct mt6360_ddata *ddata = context;
+ u8 bank = *(u8 *)reg;
+ u8 reg_addr = *(u8 *)(reg + 1);
+- struct i2c_client *i2c = ddata->i2c[bank];
++ struct i2c_client *i2c;
+ bool crc_needed = false;
+ u8 *buf;
+ int buf_len = MT6360_ALLOC_READ_SIZE(val_size);
+@@ -410,6 +410,11 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size,
+ u8 crc;
+ int ret;
+
++ if (bank >= MT6360_SLAVE_MAX)
++ return -EINVAL;
++
++ i2c = ddata->i2c[bank];
++
+ if (bank == MT6360_SLAVE_PMIC || bank == MT6360_SLAVE_LDO) {
+ crc_needed = true;
+ ret = mt6360_xlate_pmicldo_addr(&reg_addr, val_size);
+@@ -453,13 +458,18 @@ static int mt6360_regmap_write(void *context, const void *val, size_t val_size)
+ struct mt6360_ddata *ddata = context;
+ u8 bank = *(u8 *)val;
+ u8 reg_addr = *(u8 *)(val + 1);
+- struct i2c_client *i2c = ddata->i2c[bank];
++ struct i2c_client *i2c;
+ bool crc_needed = false;
+ u8 *buf;
+ int buf_len = MT6360_ALLOC_WRITE_SIZE(val_size);
+ int write_size = val_size - MT6360_REGMAP_REG_BYTE_SIZE;
+ int ret;
+
++ if (bank >= MT6360_SLAVE_MAX)
++ return -EINVAL;
++
++ i2c = ddata->i2c[bank];
++
+ if (bank == MT6360_SLAVE_PMIC || bank == MT6360_SLAVE_LDO) {
+ crc_needed = true;
+ ret = mt6360_xlate_pmicldo_addr(&reg_addr, val_size - MT6360_REGMAP_REG_BYTE_SIZE);
+diff --git a/drivers/mfd/qcom-pm8008.c b/drivers/mfd/qcom-pm8008.c
+index 4b8ff947762f2..9f3c4a01b4c1c 100644
+--- a/drivers/mfd/qcom-pm8008.c
++++ b/drivers/mfd/qcom-pm8008.c
+@@ -215,8 +215,8 @@ static int pm8008_probe(struct i2c_client *client)
+
+ dev = &client->dev;
+ regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg);
+- if (!regmap)
+- return -ENODEV;
++ if (IS_ERR(regmap))
++ return PTR_ERR(regmap);
+
+ i2c_set_clientdata(client, regmap);
+
+diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
+index 71bc34b74bc9c..8fea0e511550a 100644
+--- a/drivers/mfd/qcom_rpm.c
++++ b/drivers/mfd/qcom_rpm.c
+@@ -547,7 +547,7 @@ static int qcom_rpm_probe(struct platform_device *pdev)
+ init_completion(&rpm->ack);
+
+ /* Enable message RAM clock */
+- rpm->ramclk = devm_clk_get(&pdev->dev, "ram");
++ rpm->ramclk = devm_clk_get_enabled(&pdev->dev, "ram");
+ if (IS_ERR(rpm->ramclk)) {
+ ret = PTR_ERR(rpm->ramclk);
+ if (ret == -EPROBE_DEFER)
+@@ -558,7 +558,6 @@ static int qcom_rpm_probe(struct platform_device *pdev)
+ */
+ rpm->ramclk = NULL;
+ }
+- clk_prepare_enable(rpm->ramclk); /* Accepts NULL */
+
+ irq_ack = platform_get_irq_byname(pdev, "ack");
+ if (irq_ack < 0)
+@@ -673,22 +672,11 @@ static int qcom_rpm_probe(struct platform_device *pdev)
+ if (ret)
+ dev_warn(&pdev->dev, "failed to mark wakeup irq as wakeup\n");
+
+- return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+-}
+-
+-static int qcom_rpm_remove(struct platform_device *pdev)
+-{
+- struct qcom_rpm *rpm = dev_get_drvdata(&pdev->dev);
+-
+- of_platform_depopulate(&pdev->dev);
+- clk_disable_unprepare(rpm->ramclk);
+-
+- return 0;
++ return devm_of_platform_populate(&pdev->dev);
+ }
+
+ static struct platform_driver qcom_rpm_driver = {
+ .probe = qcom_rpm_probe,
+- .remove = qcom_rpm_remove,
+ .driver = {
+ .name = "qcom_rpm",
+ .of_match_table = qcom_rpm_of_match,
+diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
+index 375f692ae9d68..fb95a2d5cef48 100644
+--- a/drivers/misc/cxl/guest.c
++++ b/drivers/misc/cxl/guest.c
+@@ -965,10 +965,10 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
+ * if it returns an error!
+ */
+ if ((rc = cxl_register_afu(afu)))
+- goto err_put1;
++ goto err_put_dev;
+
+ if ((rc = cxl_sysfs_afu_add(afu)))
+- goto err_put1;
++ goto err_del_dev;
+
+ /*
+ * pHyp doesn't expose the programming models supported by the
+@@ -984,7 +984,7 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
+ afu->modes_supported = CXL_MODE_DIRECTED;
+
+ if ((rc = cxl_afu_select_best_mode(afu)))
+- goto err_put2;
++ goto err_remove_sysfs;
+
+ adapter->afu[afu->slice] = afu;
+
+@@ -1004,10 +1004,12 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
+
+ return 0;
+
+-err_put2:
++err_remove_sysfs:
+ cxl_sysfs_afu_remove(afu);
+-err_put1:
+- device_unregister(&afu->dev);
++err_del_dev:
++ device_del(&afu->dev);
++err_put_dev:
++ put_device(&afu->dev);
+ free = false;
+ guest_release_serr_irq(afu);
+ err2:
+@@ -1141,18 +1143,20 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic
+ * even if it returns an error!
+ */
+ if ((rc = cxl_register_adapter(adapter)))
+- goto err_put1;
++ goto err_put_dev;
+
+ if ((rc = cxl_sysfs_adapter_add(adapter)))
+- goto err_put1;
++ goto err_del_dev;
+
+ /* release the context lock as the adapter is configured */
+ cxl_adapter_context_unlock(adapter);
+
+ return adapter;
+
+-err_put1:
+- device_unregister(&adapter->dev);
++err_del_dev:
++ device_del(&adapter->dev);
++err_put_dev:
++ put_device(&adapter->dev);
+ free = false;
+ cxl_guest_remove_chardev(adapter);
+ err1:
+diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
+index 3de0aea62ade4..0ff944860dda9 100644
+--- a/drivers/misc/cxl/pci.c
++++ b/drivers/misc/cxl/pci.c
+@@ -387,6 +387,7 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
+ rc = get_phb_index(np, phb_index);
+ if (rc) {
+ pr_err("cxl: invalid phb index\n");
++ of_node_put(np);
+ return rc;
+ }
+
+@@ -1164,10 +1165,10 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+ * if it returns an error!
+ */
+ if ((rc = cxl_register_afu(afu)))
+- goto err_put1;
++ goto err_put_dev;
+
+ if ((rc = cxl_sysfs_afu_add(afu)))
+- goto err_put1;
++ goto err_del_dev;
+
+ adapter->afu[afu->slice] = afu;
+
+@@ -1176,10 +1177,12 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+
+ return 0;
+
+-err_put1:
++err_del_dev:
++ device_del(&afu->dev);
++err_put_dev:
+ pci_deconfigure_afu(afu);
+ cxl_debugfs_afu_remove(afu);
+- device_unregister(&afu->dev);
++ put_device(&afu->dev);
+ return rc;
+
+ err_free_native:
+@@ -1667,23 +1670,25 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
+ * even if it returns an error!
+ */
+ if ((rc = cxl_register_adapter(adapter)))
+- goto err_put1;
++ goto err_put_dev;
+
+ if ((rc = cxl_sysfs_adapter_add(adapter)))
+- goto err_put1;
++ goto err_del_dev;
+
+ /* Release the context lock as adapter is configured */
+ cxl_adapter_context_unlock(adapter);
+
+ return adapter;
+
+-err_put1:
++err_del_dev:
++ device_del(&adapter->dev);
++err_put_dev:
+ /* This should mirror cxl_remove_adapter, except without the
+ * sysfs parts
+ */
+ cxl_debugfs_adapter_remove(adapter);
+ cxl_deconfigure_adapter(adapter);
+- device_unregister(&adapter->dev);
++ put_device(&adapter->dev);
+ return ERR_PTR(rc);
+
+ err_release:
+diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
+index 2de6a9bd564de..f18e53bbba6bb 100644
+--- a/drivers/misc/habanalabs/common/firmware_if.c
++++ b/drivers/misc/habanalabs/common/firmware_if.c
+@@ -2983,7 +2983,7 @@ static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void
+ int rc;
+
+ req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr);
+- if (!data) {
++ if (!req_cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id);
+ return -ENOMEM;
+diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
+index 5245cf6013c95..fc28714ae3a61 100644
+--- a/drivers/misc/lkdtm/cfi.c
++++ b/drivers/misc/lkdtm/cfi.c
+@@ -54,7 +54,11 @@ static void lkdtm_CFI_FORWARD_PROTO(void)
+ # ifdef CONFIG_ARM64_BTI_KERNEL
+ # define __no_pac "branch-protection=bti"
+ # else
+-# define __no_pac "branch-protection=none"
++# ifdef CONFIG_CC_HAS_BRANCH_PROT_PAC_RET
++# define __no_pac "branch-protection=none"
++# else
++# define __no_pac "sign-return-address=none"
++# endif
+ # endif
+ # define __no_ret_protection __noscs __attribute__((__target__(__no_pac)))
+ #else
+diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
+index e401a51596b9c..92ab49705f645 100644
+--- a/drivers/misc/ocxl/config.c
++++ b/drivers/misc/ocxl/config.c
+@@ -193,6 +193,18 @@ static int read_dvsec_vendor(struct pci_dev *dev)
+ return 0;
+ }
+
++/**
++ * get_dvsec_vendor0() - Find a related PCI device (function 0)
++ * @dev: PCI device to match
++ * @dev0: The PCI device (function 0) found
++ * @out_pos: The position of PCI device (function 0)
++ *
++ * Returns 0 on success, negative on failure.
++ *
++ * NOTE: If it's successful, the reference of dev0 is increased,
++ * so after using it, the callers must call pci_dev_put() to give
++ * up the reference.
++ */
+ static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0,
+ int *out_pos)
+ {
+@@ -202,10 +214,14 @@ static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0,
+ dev = get_function_0(dev);
+ if (!dev)
+ return -1;
++ } else {
++ dev = pci_dev_get(dev);
+ }
+ pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID);
+- if (!pos)
++ if (!pos) {
++ pci_dev_put(dev);
+ return -1;
++ }
+ *dev0 = dev;
+ *out_pos = pos;
+ return 0;
+@@ -222,6 +238,7 @@ int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val)
+
+ pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD,
+ &reset_reload);
++ pci_dev_put(dev0);
+ *val = !!(reset_reload & BIT(0));
+ return 0;
+ }
+@@ -243,6 +260,7 @@ int ocxl_config_set_reset_reload(struct pci_dev *dev, int val)
+ reset_reload &= ~BIT(0);
+ pci_write_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD,
+ reset_reload);
++ pci_dev_put(dev0);
+ return 0;
+ }
+
+diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
+index d46dba2df5a10..452d5777a0e4c 100644
+--- a/drivers/misc/ocxl/file.c
++++ b/drivers/misc/ocxl/file.c
+@@ -541,8 +541,11 @@ int ocxl_file_register_afu(struct ocxl_afu *afu)
+ goto err_put;
+
+ rc = device_register(&info->dev);
+- if (rc)
+- goto err_put;
++ if (rc) {
++ free_minor(info);
++ put_device(&info->dev);
++ return rc;
++ }
+
+ rc = ocxl_sysfs_register_afu(info);
+ if (rc)
+diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
+index d7ef61e602ede..b836936e97471 100644
+--- a/drivers/misc/sgi-gru/grufault.c
++++ b/drivers/misc/sgi-gru/grufault.c
+@@ -648,6 +648,7 @@ int gru_handle_user_call_os(unsigned long cb)
+ if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
+ return -EINVAL;
+
++again:
+ gts = gru_find_lock_gts(cb);
+ if (!gts)
+ return -EINVAL;
+@@ -656,7 +657,11 @@ int gru_handle_user_call_os(unsigned long cb)
+ if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
+ goto exit;
+
+- gru_check_context_placement(gts);
++ if (gru_check_context_placement(gts)) {
++ gru_unlock_gts(gts);
++ gru_unload_context(gts, 1);
++ goto again;
++ }
+
+ /*
+ * CCH may contain stale data if ts_force_cch_reload is set.
+@@ -874,7 +879,11 @@ int gru_set_context_option(unsigned long arg)
+ } else {
+ gts->ts_user_blade_id = req.val1;
+ gts->ts_user_chiplet_id = req.val0;
+- gru_check_context_placement(gts);
++ if (gru_check_context_placement(gts)) {
++ gru_unlock_gts(gts);
++ gru_unload_context(gts, 1);
++ return ret;
++ }
+ }
+ break;
+ case sco_gseg_owner:
+diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
+index 6706ef3c59776..4eb4b94551390 100644
+--- a/drivers/misc/sgi-gru/grumain.c
++++ b/drivers/misc/sgi-gru/grumain.c
+@@ -716,9 +716,10 @@ static int gru_check_chiplet_assignment(struct gru_state *gru,
+ * chiplet. Misassignment can occur if the process migrates to a different
+ * blade or if the user changes the selected blade/chiplet.
+ */
+-void gru_check_context_placement(struct gru_thread_state *gts)
++int gru_check_context_placement(struct gru_thread_state *gts)
+ {
+ struct gru_state *gru;
++ int ret = 0;
+
+ /*
+ * If the current task is the context owner, verify that the
+@@ -726,15 +727,23 @@ void gru_check_context_placement(struct gru_thread_state *gts)
+ * references. Pthread apps use non-owner references to the CBRs.
+ */
+ gru = gts->ts_gru;
++ /*
++ * If gru or gts->ts_tgid_owner isn't initialized properly, return
++ * success to indicate that the caller does not need to unload the
++	 * gru context. The caller is responsible for their inspection and
++ * reinitialization if needed.
++ */
+ if (!gru || gts->ts_tgid_owner != current->tgid)
+- return;
++ return ret;
+
+ if (!gru_check_chiplet_assignment(gru, gts)) {
+ STAT(check_context_unload);
+- gru_unload_context(gts, 1);
++ ret = -EINVAL;
+ } else if (gru_retarget_intr(gts)) {
+ STAT(check_context_retarget_intr);
+ }
++
++ return ret;
+ }
+
+
+@@ -934,7 +943,12 @@ again:
+ mutex_lock(&gts->ts_ctxlock);
+ preempt_disable();
+
+- gru_check_context_placement(gts);
++ if (gru_check_context_placement(gts)) {
++ preempt_enable();
++ mutex_unlock(&gts->ts_ctxlock);
++ gru_unload_context(gts, 1);
++ return VM_FAULT_NOPAGE;
++ }
+
+ if (!gts->ts_gru) {
+ STAT(load_user_context);
+diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
+index 8c52776db2341..640daf1994df7 100644
+--- a/drivers/misc/sgi-gru/grutables.h
++++ b/drivers/misc/sgi-gru/grutables.h
+@@ -632,7 +632,7 @@ extern int gru_user_flush_tlb(unsigned long arg);
+ extern int gru_user_unload_context(unsigned long arg);
+ extern int gru_get_exception_detail(unsigned long arg);
+ extern int gru_set_context_option(unsigned long address);
+-extern void gru_check_context_placement(struct gru_thread_state *gts);
++extern int gru_check_context_placement(struct gru_thread_state *gts);
+ extern int gru_cpu_fault_map_id(void);
+ extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
+ extern void gru_flush_all_tlb(struct gru_state *gru);
+diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
+index 017c2f7d62871..7dd86a9858aba 100644
+--- a/drivers/misc/tifm_7xx1.c
++++ b/drivers/misc/tifm_7xx1.c
+@@ -190,7 +190,7 @@ static void tifm_7xx1_switch_media(struct work_struct *work)
+ spin_unlock_irqrestore(&fm->lock, flags);
+ }
+ if (sock)
+- tifm_free_device(&sock->dev);
++ put_device(&sock->dev);
+ }
+ spin_lock_irqsave(&fm->lock, flags);
+ }
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 3662bf5320ce5..72b664ed90cf6 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -1259,7 +1259,7 @@ static int sd_read_ext_regs(struct mmc_card *card)
+ */
+ err = sd_read_ext_reg(card, 0, 0, 0, 512, gen_info_buf);
+ if (err) {
+- pr_warn("%s: error %d reading general info of SD ext reg\n",
++ pr_err("%s: error %d reading general info of SD ext reg\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+@@ -1273,7 +1273,12 @@ static int sd_read_ext_regs(struct mmc_card *card)
+ /* Number of extensions to be find. */
+ num_ext = gen_info_buf[4];
+
+- /* We support revision 0, but limit it to 512 bytes for simplicity. */
++ /*
++ * We only support revision 0 and limit it to 512 bytes for simplicity.
++ * No matter what, let's return zero to allow us to continue using the
++ * card, even if we can't support the features from the SD function
++ * extensions registers.
++ */
+ if (rev != 0 || len > 512) {
+ pr_warn("%s: non-supported SD ext reg layout\n",
+ mmc_hostname(card->host));
+@@ -1288,7 +1293,7 @@ static int sd_read_ext_regs(struct mmc_card *card)
+ for (i = 0; i < num_ext; i++) {
+ err = sd_parse_ext_reg(card, gen_info_buf, &next_ext_addr);
+ if (err) {
+- pr_warn("%s: error %d parsing SD ext reg\n",
++ pr_err("%s: error %d parsing SD ext reg\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
+index bfb8efeb7eb80..d01df01d4b4d1 100644
+--- a/drivers/mmc/host/alcor.c
++++ b/drivers/mmc/host/alcor.c
+@@ -1114,7 +1114,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
+ alcor_hw_init(host);
+
+ dev_set_drvdata(&pdev->dev, host);
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto free_host;
++
+ return 0;
+
+ free_host:
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index 91d52ba7a39fc..bb9bbf1c927b6 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -2222,6 +2222,7 @@ static int atmci_init_slot(struct atmel_mci *host,
+ {
+ struct mmc_host *mmc;
+ struct atmel_mci_slot *slot;
++ int ret;
+
+ mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
+ if (!mmc)
+@@ -2305,11 +2306,13 @@ static int atmci_init_slot(struct atmel_mci *host,
+
+ host->slot[id] = slot;
+ mmc_regulator_get_supply(mmc);
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret) {
++ mmc_free_host(mmc);
++ return ret;
++ }
+
+ if (gpio_is_valid(slot->detect_pin)) {
+- int ret;
+-
+ timer_setup(&slot->detect_timer, atmci_detect_change, 0);
+
+ ret = request_irq(gpio_to_irq(slot->detect_pin),
+diff --git a/drivers/mmc/host/litex_mmc.c b/drivers/mmc/host/litex_mmc.c
+index 6ba0d63b8c078..39c6707fdfdbc 100644
+--- a/drivers/mmc/host/litex_mmc.c
++++ b/drivers/mmc/host/litex_mmc.c
+@@ -502,6 +502,7 @@ static int litex_mmc_irq_init(struct platform_device *pdev,
+
+ use_polling:
+ host->mmc->caps |= MMC_CAP_NEEDS_POLL;
++ host->irq = 0;
+ return 0;
+ }
+
+diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
+index df05e60bed9a2..6e5ea0213b477 100644
+--- a/drivers/mmc/host/meson-gx-mmc.c
++++ b/drivers/mmc/host/meson-gx-mmc.c
+@@ -1335,7 +1335,9 @@ static int meson_mmc_probe(struct platform_device *pdev)
+ }
+
+ mmc->ops = &meson_mmc_ops;
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto err_free_irq;
+
+ return 0;
+
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index 012aa85489d86..b9e5dfe74e5c7 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -2256,7 +2256,9 @@ static int mmci_probe(struct amba_device *dev,
+ pm_runtime_set_autosuspend_delay(&dev->dev, 50);
+ pm_runtime_use_autosuspend(&dev->dev);
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto clk_disable;
+
+ pm_runtime_put(&dev->dev);
+ return 0;
+diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
+index dfc3ffd5b1f8c..52ed30f2d9f4f 100644
+--- a/drivers/mmc/host/moxart-mmc.c
++++ b/drivers/mmc/host/moxart-mmc.c
+@@ -665,7 +665,9 @@ static int moxart_probe(struct platform_device *pdev)
+ goto out;
+
+ dev_set_drvdata(dev, mmc);
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto out;
+
+ dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);
+
+diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
+index 2cf0413407ea2..668f865f3efb0 100644
+--- a/drivers/mmc/host/mxcmmc.c
++++ b/drivers/mmc/host/mxcmmc.c
+@@ -1143,7 +1143,9 @@ static int mxcmci_probe(struct platform_device *pdev)
+
+ timer_setup(&host->watchdog, mxcmci_watchdog, 0);
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto out_free_dma;
+
+ return 0;
+
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index fca30add563e9..4bd7447552055 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -1946,7 +1946,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
+ if (!ret)
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto err_irq;
+
+ if (mmc_pdata(host)->name != NULL) {
+ ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
+diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
+index e4003f6058eb5..2a988f942b6ca 100644
+--- a/drivers/mmc/host/pxamci.c
++++ b/drivers/mmc/host/pxamci.c
+@@ -763,7 +763,12 @@ static int pxamci_probe(struct platform_device *pdev)
+ dev_warn(dev, "gpio_ro and get_ro() both defined\n");
+ }
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret) {
++ if (host->pdata && host->pdata->exit)
++ host->pdata->exit(dev, mmc);
++ goto out;
++ }
+
+ return 0;
+
+diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
+index c4abfee1ebae1..e4c490729c98e 100644
+--- a/drivers/mmc/host/renesas_sdhi.h
++++ b/drivers/mmc/host/renesas_sdhi.h
+@@ -44,6 +44,7 @@ struct renesas_sdhi_quirks {
+ bool fixed_addr_mode;
+ bool dma_one_rx_only;
+ bool manual_tap_correction;
++ bool old_info1_layout;
+ u32 hs400_bad_taps;
+ const u8 (*hs400_calib_table)[SDHI_CALIB_TABLE_MAX];
+ };
+diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
+index b970699743e0a..e38d0e8b8e0ed 100644
+--- a/drivers/mmc/host/renesas_sdhi_core.c
++++ b/drivers/mmc/host/renesas_sdhi_core.c
+@@ -546,7 +546,7 @@ static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host,
+ SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) &
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
+
+- if (priv->adjust_hs400_calib_table)
++ if (priv->quirks && (priv->quirks->hs400_calib_table || priv->quirks->hs400_bad_taps))
+ renesas_sdhi_adjust_hs400_mode_disable(host);
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+@@ -1068,11 +1068,14 @@ int renesas_sdhi_probe(struct platform_device *pdev,
+ if (ver >= SDHI_VER_GEN3_SD)
+ host->get_timeout_cycles = renesas_sdhi_gen3_get_cycles;
+
++ /* Check for SCC so we can reset it if needed */
++ if (of_data && of_data->scc_offset && ver >= SDHI_VER_GEN2_SDR104)
++ priv->scc_ctl = host->ctl + of_data->scc_offset;
++
+ /* Enable tuning iff we have an SCC and a supported mode */
+- if (of_data && of_data->scc_offset &&
+- (host->mmc->caps & MMC_CAP_UHS_SDR104 ||
+- host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR |
+- MMC_CAP2_HS400_1_8V))) {
++ if (priv->scc_ctl && (host->mmc->caps & MMC_CAP_UHS_SDR104 ||
++ host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR |
++ MMC_CAP2_HS400_1_8V))) {
+ const struct renesas_sdhi_scc *taps = of_data->taps;
+ bool use_4tap = quirks && quirks->hs400_4taps;
+ bool hit = false;
+@@ -1092,7 +1095,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
+ if (!hit)
+ dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n");
+
+- priv->scc_ctl = host->ctl + of_data->scc_offset;
+ host->check_retune = renesas_sdhi_check_scc_error;
+ host->ops.execute_tuning = renesas_sdhi_execute_tuning;
+ host->ops.prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning;
+diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+index 42937596c4c41..7c81c2680701f 100644
+--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
++++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+@@ -49,7 +49,8 @@
+ /* DM_CM_INFO1 and DM_CM_INFO1_MASK */
+ #define INFO1_CLEAR 0
+ #define INFO1_MASK_CLEAR GENMASK_ULL(31, 0)
+-#define INFO1_DTRANEND1 BIT(17)
++#define INFO1_DTRANEND1 BIT(20)
++#define INFO1_DTRANEND1_OLD BIT(17)
+ #define INFO1_DTRANEND0 BIT(16)
+
+ /* DM_CM_INFO2 and DM_CM_INFO2_MASK */
+@@ -165,6 +166,7 @@ static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400_one_rx = {
+ .hs400_disabled = true,
+ .hs400_4taps = true,
+ .dma_one_rx_only = true,
++ .old_info1_layout = true,
+ };
+
+ static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
+diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
+index e1580f78c6b2d..8098726dcc0bf 100644
+--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
+@@ -1474,6 +1474,7 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
+ struct realtek_pci_sdmmc *host;
+ struct rtsx_pcr *pcr;
+ struct pcr_handle *handle = pdev->dev.platform_data;
++ int ret;
+
+ if (!handle)
+ return -ENXIO;
+@@ -1511,7 +1512,13 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret) {
++ pm_runtime_dont_use_autosuspend(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
++ mmc_free_host(mmc);
++ return ret;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
+index 5798aee066531..2c650cd58693e 100644
+--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
++++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
+@@ -1329,6 +1329,7 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
+ #ifdef RTSX_USB_USE_LEDS_CLASS
+ int err;
+ #endif
++ int ret;
+
+ ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent));
+ if (!ucr)
+@@ -1365,7 +1366,15 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
+ INIT_WORK(&host->led_work, rtsx_usb_update_led);
+
+ #endif
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret) {
++#ifdef RTSX_USB_USE_LEDS_CLASS
++ led_classdev_unregister(&host->led);
++#endif
++ mmc_free_host(mmc);
++ pm_runtime_disable(&pdev->dev);
++ return ret;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
+index c71000a07656e..1adaa94c31aca 100644
+--- a/drivers/mmc/host/sdhci-tegra.c
++++ b/drivers/mmc/host/sdhci-tegra.c
+@@ -1526,7 +1526,8 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
++ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
++ SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER,
+ .ops = &tegra186_sdhci_ops,
+ };
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index c7ad32a75b570..632341911b6e7 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -270,6 +270,11 @@ enum sdhci_reset_reason {
+
+ static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason)
+ {
++ if (host->quirks2 & SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER) {
++ sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
++ return;
++ }
++
+ switch (reason) {
+ case SDHCI_RESET_FOR_INIT:
+ sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 87a3aaa074387..5ce7cdcc192fd 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -478,6 +478,8 @@ struct sdhci_host {
+ * block count.
+ */
+ #define SDHCI_QUIRK2_USE_32BIT_BLK_CNT (1<<18)
++/* Issue CMD and DATA reset together */
++#define SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER (1<<19)
+
+ int irq; /* Device IRQ */
+ void __iomem *ioaddr; /* Mapped address */
+diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
+index 3f5977979cf25..6c4f43e112826 100644
+--- a/drivers/mmc/host/sdhci_f_sdh30.c
++++ b/drivers/mmc/host/sdhci_f_sdh30.c
+@@ -168,6 +168,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
+ if (reg & SDHCI_CAN_DO_8BIT)
+ priv->vendor_hs200 = F_SDH30_EMMC_HS200;
+
++ if (!(reg & SDHCI_TIMEOUT_CLK_MASK))
++ host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
++
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_add_host;
+diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c
+index 8d037c2071abc..497791ffada6d 100644
+--- a/drivers/mmc/host/toshsd.c
++++ b/drivers/mmc/host/toshsd.c
+@@ -651,7 +651,9 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ if (ret)
+ goto unmap;
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto free_irq;
+
+ base = pci_resource_start(pdev, 0);
+ dev_dbg(&pdev->dev, "MMIO %pa, IRQ %d\n", &base, pdev->irq);
+@@ -660,6 +662,8 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ return 0;
+
++free_irq:
++ free_irq(pdev->irq, host);
+ unmap:
+ pci_iounmap(pdev, host->ioaddr);
+ release:
+diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
+index 88662a90ed960..a2b0d9461665b 100644
+--- a/drivers/mmc/host/via-sdmmc.c
++++ b/drivers/mmc/host/via-sdmmc.c
+@@ -1151,7 +1151,9 @@ static int via_sd_probe(struct pci_dev *pcidev,
+ pcidev->subsystem_device == 0x3891)
+ sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY;
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto unmap;
+
+ return 0;
+
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
+index 97beece62fec4..72f65f32abbc7 100644
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -2049,6 +2049,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
+ return;
+ kref_get(&vub300->kref);
+ if (enable) {
++ set_current_state(TASK_RUNNING);
+ mutex_lock(&vub300->irq_mutex);
+ if (vub300->irqs_queued) {
+ vub300->irqs_queued -= 1;
+@@ -2064,6 +2065,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
+ vub300_queue_poll_work(vub300, 0);
+ }
+ mutex_unlock(&vub300->irq_mutex);
++ set_current_state(TASK_INTERRUPTIBLE);
+ } else {
+ vub300->irq_enabled = 0;
+ }
+@@ -2299,14 +2301,14 @@ static int vub300_probe(struct usb_interface *interface,
+ 0x0000, 0x0000, &vub300->system_port_status,
+ sizeof(vub300->system_port_status), 1000);
+ if (retval < 0) {
+- goto error4;
++ goto error5;
+ } else if (sizeof(vub300->system_port_status) == retval) {
+ vub300->card_present =
+ (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
+ vub300->read_only =
+ (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
+ } else {
+- goto error4;
++ goto error5;
+ }
+ usb_set_intfdata(interface, vub300);
+ INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread);
+@@ -2329,8 +2331,13 @@ static int vub300_probe(struct usb_interface *interface,
+ "USB vub300 remote SDIO host controller[%d]"
+ "connected with no SD/SDIO card inserted\n",
+ interface_to_InterfaceNumber(interface));
+- mmc_add_host(mmc);
++ retval = mmc_add_host(mmc);
++ if (retval)
++ goto error6;
++
+ return 0;
++error6:
++ del_timer_sync(&vub300->inactivity_timer);
+ error5:
+ mmc_free_host(mmc);
+ /*
+diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
+index 67ecd342fe5f1..7c7ec8d10232b 100644
+--- a/drivers/mmc/host/wbsd.c
++++ b/drivers/mmc/host/wbsd.c
+@@ -1698,7 +1698,17 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma,
+ */
+ wbsd_init_device(host);
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret) {
++ if (!pnp)
++ wbsd_chip_poweroff(host);
++
++ wbsd_release_resources(host);
++ wbsd_free_mmc(dev);
++
++ mmc_free_host(mmc);
++ return ret;
++ }
+
+ pr_info("%s: W83L51xD", mmc_hostname(mmc));
+ if (host->chip_id != 0)
+diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
+index 9b5c503e3a3fc..9aa3027ca25e4 100644
+--- a/drivers/mmc/host/wmt-sdmmc.c
++++ b/drivers/mmc/host/wmt-sdmmc.c
+@@ -856,11 +856,15 @@ static int wmt_mci_probe(struct platform_device *pdev)
+ /* configure the controller to a known 'ready' state */
+ wmt_reset_hardware(mmc);
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto fail7;
+
+ dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");
+
+ return 0;
++fail7:
++ clk_disable_unprepare(priv->clk_sdmmc);
+ fail6:
+ clk_put(priv->clk_sdmmc);
+ fail5_and_a_half:
+diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
+index 367e2d906de02..e71af4c490969 100644
+--- a/drivers/mtd/lpddr/lpddr2_nvm.c
++++ b/drivers/mtd/lpddr/lpddr2_nvm.c
+@@ -433,6 +433,8 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
+
+ /* lpddr2_nvm address range */
+ add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!add_range)
++ return -ENODEV;
+
+ /* Populate map_info data structure */
+ *map = (struct map_info) {
+diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
+index 1749dbbacc135..62a5bf41a6d72 100644
+--- a/drivers/mtd/maps/pxa2xx-flash.c
++++ b/drivers/mtd/maps/pxa2xx-flash.c
+@@ -64,6 +64,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
+ if (!info->map.virt) {
+ printk(KERN_WARNING "Failed to ioremap %s\n",
+ info->map.name);
++ kfree(info);
+ return -ENOMEM;
+ }
+ info->map.cached = ioremap_cache(info->map.phys, info->map.size);
+@@ -85,6 +86,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
+ iounmap((void *)info->map.virt);
+ if (info->map.cached)
+ iounmap(info->map.cached);
++ kfree(info);
+ return -EIO;
+ }
+ info->mtd->dev.parent = &pdev->dev;
+diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
+index 0b4ca0aa41321..686ada1a63e9a 100644
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -723,8 +723,10 @@ int add_mtd_device(struct mtd_info *mtd)
+ mtd_check_of_node(mtd);
+ of_node_get(mtd_get_of_node(mtd));
+ error = device_register(&mtd->dev);
+- if (error)
++ if (error) {
++ put_device(&mtd->dev);
+ goto fail_added;
++ }
+
+ /* Add the nvmem provider */
+ error = mtd_nvmem_add(mtd);
+@@ -774,6 +776,7 @@ int del_mtd_device(struct mtd_info *mtd)
+ {
+ int ret;
+ struct mtd_notifier *not;
++ struct device_node *mtd_of_node;
+
+ mutex_lock(&mtd_table_mutex);
+
+@@ -792,6 +795,7 @@ int del_mtd_device(struct mtd_info *mtd)
+ mtd->index, mtd->name, mtd->usecount);
+ ret = -EBUSY;
+ } else {
++ mtd_of_node = mtd_get_of_node(mtd);
+ debugfs_remove_recursive(mtd->dbg.dfs_dir);
+
+ /* Try to remove the NVMEM provider */
+@@ -803,7 +807,7 @@ int del_mtd_device(struct mtd_info *mtd)
+ memset(&mtd->dev, 0, sizeof(mtd->dev));
+
+ idr_remove(&mtd_idr, mtd->index);
+- of_node_put(mtd_get_of_node(mtd));
++ of_node_put(mtd_of_node);
+
+ module_put(THIS_MODULE);
+ ret = 0;
+@@ -2483,6 +2487,7 @@ static int __init init_mtd(void)
+ out_procfs:
+ if (proc_mtd)
+ remove_proc_entry("mtd", NULL);
++ bdi_unregister(mtd_bdi);
+ bdi_put(mtd_bdi);
+ err_bdi:
+ class_unregister(&mtd_class);
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index bee8fc4c9f078..0cf1a1797ea32 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -1914,7 +1914,8 @@ static int spi_nor_spimem_check_readop(struct spi_nor *nor,
+ spi_nor_spimem_setup_op(nor, &op, read->proto);
+
+ /* convert the dummy cycles to the number of bytes */
+- op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
++ op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
++ op.dummy.buswidth / 8;
+ if (spi_nor_protocol_is_dtr(nor->read_proto))
+ op.dummy.nbytes *= 2;
+
+diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c
+index 9aec9d8a98ada..4c3b351aef245 100644
+--- a/drivers/mtd/spi-nor/sysfs.c
++++ b/drivers/mtd/spi-nor/sysfs.c
+@@ -67,6 +67,19 @@ static struct bin_attribute *spi_nor_sysfs_bin_entries[] = {
+ NULL
+ };
+
++static umode_t spi_nor_sysfs_is_visible(struct kobject *kobj,
++ struct attribute *attr, int n)
++{
++ struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
++ struct spi_mem *spimem = spi_get_drvdata(spi);
++ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
++
++ if (attr == &dev_attr_jedec_id.attr && !nor->info->id_len)
++ return 0;
++
++ return 0444;
++}
++
+ static umode_t spi_nor_sysfs_is_bin_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int n)
+ {
+@@ -82,6 +95,7 @@ static umode_t spi_nor_sysfs_is_bin_visible(struct kobject *kobj,
+
+ static const struct attribute_group spi_nor_sysfs_group = {
+ .name = "spi-nor",
++ .is_visible = spi_nor_sysfs_is_visible,
+ .is_bin_visible = spi_nor_sysfs_is_bin_visible,
+ .attrs = spi_nor_sysfs_entries,
+ .bin_attrs = spi_nor_sysfs_bin_entries,
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index b9a882f182d29..b108f2f4adc20 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2531,12 +2531,21 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
+ /* called with rcu_read_lock() */
+ static int bond_miimon_inspect(struct bonding *bond)
+ {
++ bool ignore_updelay = false;
+ int link_state, commit = 0;
+ struct list_head *iter;
+ struct slave *slave;
+- bool ignore_updelay;
+
+- ignore_updelay = !rcu_dereference(bond->curr_active_slave);
++ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
++ ignore_updelay = !rcu_dereference(bond->curr_active_slave);
++ } else {
++ struct bond_up_slave *usable_slaves;
++
++ usable_slaves = rcu_dereference(bond->usable_slaves);
++
++ if (usable_slaves && usable_slaves->count == 0)
++ ignore_updelay = true;
++ }
+
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
+@@ -2644,8 +2653,9 @@ static void bond_miimon_link_change(struct bonding *bond,
+
+ static void bond_miimon_commit(struct bonding *bond)
+ {
+- struct list_head *iter;
+ struct slave *slave, *primary;
++ bool do_failover = false;
++ struct list_head *iter;
+
+ bond_for_each_slave(bond, slave, iter) {
+ switch (slave->link_new_state) {
+@@ -2689,8 +2699,9 @@ static void bond_miimon_commit(struct bonding *bond)
+
+ bond_miimon_link_change(bond, slave, BOND_LINK_UP);
+
+- if (!bond->curr_active_slave || slave == primary)
+- goto do_failover;
++ if (!rcu_access_pointer(bond->curr_active_slave) || slave == primary ||
++ slave->prio > rcu_dereference(bond->curr_active_slave)->prio)
++ do_failover = true;
+
+ continue;
+
+@@ -2711,7 +2722,7 @@ static void bond_miimon_commit(struct bonding *bond)
+ bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
+
+ if (slave == rcu_access_pointer(bond->curr_active_slave))
+- goto do_failover;
++ do_failover = true;
+
+ continue;
+
+@@ -2722,8 +2733,9 @@ static void bond_miimon_commit(struct bonding *bond)
+
+ continue;
+ }
++ }
+
+-do_failover:
++ if (do_failover) {
+ block_netpoll_tx();
+ bond_select_active_slave(bond);
+ unblock_netpoll_tx();
+@@ -3521,6 +3533,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
+ */
+ static void bond_ab_arp_commit(struct bonding *bond)
+ {
++ bool do_failover = false;
+ struct list_head *iter;
+ unsigned long last_tx;
+ struct slave *slave;
+@@ -3550,8 +3563,9 @@ static void bond_ab_arp_commit(struct bonding *bond)
+ slave_info(bond->dev, slave->dev, "link status definitely up\n");
+
+ if (!rtnl_dereference(bond->curr_active_slave) ||
+- slave == rtnl_dereference(bond->primary_slave))
+- goto do_failover;
++ slave == rtnl_dereference(bond->primary_slave) ||
++ slave->prio > rtnl_dereference(bond->curr_active_slave)->prio)
++ do_failover = true;
+
+ }
+
+@@ -3570,7 +3584,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
+
+ if (slave == rtnl_dereference(bond->curr_active_slave)) {
+ RCU_INIT_POINTER(bond->current_arp_slave, NULL);
+- goto do_failover;
++ do_failover = true;
+ }
+
+ continue;
+@@ -3594,8 +3608,9 @@ static void bond_ab_arp_commit(struct bonding *bond)
+ slave->link_new_state);
+ continue;
+ }
++ }
+
+-do_failover:
++ if (do_failover) {
+ block_netpoll_tx();
+ bond_select_active_slave(bond);
+ unblock_netpoll_tx();
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index e5575d2755e4b..2de998b98cb5e 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1233,10 +1233,17 @@ static int m_can_set_bittiming(struct net_device *dev)
+ * - setup bittiming
+ * - configure timestamp generation
+ */
+-static void m_can_chip_config(struct net_device *dev)
++static int m_can_chip_config(struct net_device *dev)
+ {
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ u32 cccr, test;
++ int err;
++
++ err = m_can_init_ram(cdev);
++ if (err) {
++ dev_err(cdev->dev, "Message RAM configuration failed\n");
++ return err;
++ }
+
+ m_can_config_endisable(cdev, true);
+
+@@ -1360,18 +1367,25 @@ static void m_can_chip_config(struct net_device *dev)
+
+ if (cdev->ops->init)
+ cdev->ops->init(cdev);
++
++ return 0;
+ }
+
+-static void m_can_start(struct net_device *dev)
++static int m_can_start(struct net_device *dev)
+ {
+ struct m_can_classdev *cdev = netdev_priv(dev);
++ int ret;
+
+ /* basic m_can configuration */
+- m_can_chip_config(dev);
++ ret = m_can_chip_config(dev);
++ if (ret)
++ return ret;
+
+ cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ m_can_enable_all_interrupts(cdev);
++
++ return 0;
+ }
+
+ static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
+@@ -1799,7 +1813,9 @@ static int m_can_open(struct net_device *dev)
+ }
+
+ /* start the m_can controller */
+- m_can_start(dev);
++ err = m_can_start(dev);
++ if (err)
++ goto exit_irq_fail;
+
+ if (!cdev->is_peripheral)
+ napi_enable(&cdev->napi);
+@@ -2058,9 +2074,13 @@ int m_can_class_resume(struct device *dev)
+ ret = m_can_clk_start(cdev);
+ if (ret)
+ return ret;
++ ret = m_can_start(ndev);
++ if (ret) {
++ m_can_clk_stop(cdev);
++
++ return ret;
++ }
+
+- m_can_init_ram(cdev);
+- m_can_start(ndev);
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
+index eee47bad05920..de6d8e01bf2e8 100644
+--- a/drivers/net/can/m_can/m_can_platform.c
++++ b/drivers/net/can/m_can/m_can_platform.c
+@@ -140,10 +140,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, mcan_class);
+
+- ret = m_can_init_ram(mcan_class);
+- if (ret)
+- goto probe_fail;
+-
+ pm_runtime_enable(mcan_class->dev);
+ ret = m_can_class_register(mcan_class);
+ if (ret)
+diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
+index 41645a24384ce..2342aa011647c 100644
+--- a/drivers/net/can/m_can/tcan4x5x-core.c
++++ b/drivers/net/can/m_can/tcan4x5x-core.c
+@@ -10,7 +10,7 @@
+ #define TCAN4X5X_DEV_ID1 0x04
+ #define TCAN4X5X_REV 0x08
+ #define TCAN4X5X_STATUS 0x0C
+-#define TCAN4X5X_ERROR_STATUS 0x10
++#define TCAN4X5X_ERROR_STATUS_MASK 0x10
+ #define TCAN4X5X_CONTROL 0x14
+
+ #define TCAN4X5X_CONFIG 0x800
+@@ -204,17 +204,7 @@ static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev)
+ if (ret)
+ return ret;
+
+- ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_MCAN_INT_REG,
+- TCAN4X5X_ENABLE_MCAN_INT);
+- if (ret)
+- return ret;
+-
+- ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
+- TCAN4X5X_CLEAR_ALL_INT);
+- if (ret)
+- return ret;
+-
+- return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS,
++ return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
+ TCAN4X5X_CLEAR_ALL_INT);
+ }
+
+@@ -234,8 +224,8 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
+ if (ret)
+ return ret;
+
+- /* Zero out the MCAN buffers */
+- ret = m_can_init_ram(cdev);
++ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS_MASK,
++ TCAN4X5X_CLEAR_ALL_INT);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+index f6c0938027ece..ff10b3790d844 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+@@ -76,6 +76,14 @@ struct kvaser_usb_tx_urb_context {
+ u32 echo_index;
+ };
+
++struct kvaser_usb_busparams {
++ __le32 bitrate;
++ u8 tseg1;
++ u8 tseg2;
++ u8 sjw;
++ u8 nsamples;
++} __packed;
++
+ struct kvaser_usb {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+@@ -104,13 +112,19 @@ struct kvaser_usb_net_priv {
+ struct can_priv can;
+ struct can_berr_counter bec;
+
++ /* subdriver-specific data */
++ void *sub_priv;
++
+ struct kvaser_usb *dev;
+ struct net_device *netdev;
+ int channel;
+
+- struct completion start_comp, stop_comp, flush_comp;
++ struct completion start_comp, stop_comp, flush_comp,
++ get_busparams_comp;
+ struct usb_anchor tx_submitted;
+
++ struct kvaser_usb_busparams busparams_nominal, busparams_data;
++
+ spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */
+ int active_tx_contexts;
+ struct kvaser_usb_tx_urb_context tx_contexts[];
+@@ -120,11 +134,15 @@ struct kvaser_usb_net_priv {
+ * struct kvaser_usb_dev_ops - Device specific functions
+ * @dev_set_mode: used for can.do_set_mode
+ * @dev_set_bittiming: used for can.do_set_bittiming
++ * @dev_get_busparams: readback arbitration busparams
+ * @dev_set_data_bittiming: used for can.do_set_data_bittiming
++ * @dev_get_data_busparams: readback data busparams
+ * @dev_get_berr_counter: used for can.do_get_berr_counter
+ *
+ * @dev_setup_endpoints: setup USB in and out endpoints
+ * @dev_init_card: initialize card
++ * @dev_init_channel: initialize channel
++ * @dev_remove_channel: uninitialize channel
+ * @dev_get_software_info: get software info
+ * @dev_get_software_details: get software details
+ * @dev_get_card_info: get card info
+@@ -140,12 +158,18 @@ struct kvaser_usb_net_priv {
+ */
+ struct kvaser_usb_dev_ops {
+ int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode);
+- int (*dev_set_bittiming)(struct net_device *netdev);
+- int (*dev_set_data_bittiming)(struct net_device *netdev);
++ int (*dev_set_bittiming)(const struct net_device *netdev,
++ const struct kvaser_usb_busparams *busparams);
++ int (*dev_get_busparams)(struct kvaser_usb_net_priv *priv);
++ int (*dev_set_data_bittiming)(const struct net_device *netdev,
++ const struct kvaser_usb_busparams *busparams);
++ int (*dev_get_data_busparams)(struct kvaser_usb_net_priv *priv);
+ int (*dev_get_berr_counter)(const struct net_device *netdev,
+ struct can_berr_counter *bec);
+ int (*dev_setup_endpoints)(struct kvaser_usb *dev);
+ int (*dev_init_card)(struct kvaser_usb *dev);
++ int (*dev_init_channel)(struct kvaser_usb_net_priv *priv);
++ void (*dev_remove_channel)(struct kvaser_usb_net_priv *priv);
+ int (*dev_get_software_info)(struct kvaser_usb *dev);
+ int (*dev_get_software_details)(struct kvaser_usb *dev);
+ int (*dev_get_card_info)(struct kvaser_usb *dev);
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+index 802e27c0ecedb..3a2bfaad14065 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+@@ -440,10 +440,6 @@ static int kvaser_usb_open(struct net_device *netdev)
+ if (err)
+ return err;
+
+- err = kvaser_usb_setup_rx_urbs(dev);
+- if (err)
+- goto error;
+-
+ err = ops->dev_set_opt_mode(priv);
+ if (err)
+ goto error;
+@@ -534,6 +530,93 @@ static int kvaser_usb_close(struct net_device *netdev)
+ return 0;
+ }
+
++static int kvaser_usb_set_bittiming(struct net_device *netdev)
++{
++ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
++ struct kvaser_usb *dev = priv->dev;
++ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
++ struct can_bittiming *bt = &priv->can.bittiming;
++
++ struct kvaser_usb_busparams busparams;
++ int tseg1 = bt->prop_seg + bt->phase_seg1;
++ int tseg2 = bt->phase_seg2;
++ int sjw = bt->sjw;
++ int err = -EOPNOTSUPP;
++
++ busparams.bitrate = cpu_to_le32(bt->bitrate);
++ busparams.sjw = (u8)sjw;
++ busparams.tseg1 = (u8)tseg1;
++ busparams.tseg2 = (u8)tseg2;
++ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
++ busparams.nsamples = 3;
++ else
++ busparams.nsamples = 1;
++
++ err = ops->dev_set_bittiming(netdev, &busparams);
++ if (err)
++ return err;
++
++ err = kvaser_usb_setup_rx_urbs(priv->dev);
++ if (err)
++ return err;
++
++ err = ops->dev_get_busparams(priv);
++ if (err) {
++ /* Treat EOPNOTSUPP as success */
++ if (err == -EOPNOTSUPP)
++ err = 0;
++ return err;
++ }
++
++ if (memcmp(&busparams, &priv->busparams_nominal,
++ sizeof(priv->busparams_nominal)) != 0)
++ err = -EINVAL;
++
++ return err;
++}
++
++static int kvaser_usb_set_data_bittiming(struct net_device *netdev)
++{
++ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
++ struct kvaser_usb *dev = priv->dev;
++ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
++ struct can_bittiming *dbt = &priv->can.data_bittiming;
++
++ struct kvaser_usb_busparams busparams;
++ int tseg1 = dbt->prop_seg + dbt->phase_seg1;
++ int tseg2 = dbt->phase_seg2;
++ int sjw = dbt->sjw;
++ int err;
++
++ if (!ops->dev_set_data_bittiming ||
++ !ops->dev_get_data_busparams)
++ return -EOPNOTSUPP;
++
++ busparams.bitrate = cpu_to_le32(dbt->bitrate);
++ busparams.sjw = (u8)sjw;
++ busparams.tseg1 = (u8)tseg1;
++ busparams.tseg2 = (u8)tseg2;
++ busparams.nsamples = 1;
++
++ err = ops->dev_set_data_bittiming(netdev, &busparams);
++ if (err)
++ return err;
++
++ err = kvaser_usb_setup_rx_urbs(priv->dev);
++ if (err)
++ return err;
++
++ err = ops->dev_get_data_busparams(priv);
++ if (err)
++ return err;
++
++ if (memcmp(&busparams, &priv->busparams_data,
++ sizeof(priv->busparams_data)) != 0)
++ err = -EINVAL;
++
++ return err;
++}
++
+ static void kvaser_usb_write_bulk_callback(struct urb *urb)
+ {
+ struct kvaser_usb_tx_urb_context *context = urb->context;
+@@ -684,6 +767,7 @@ static const struct ethtool_ops kvaser_usb_ethtool_ops_hwts = {
+
+ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
+ {
++ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
+ int i;
+
+ for (i = 0; i < dev->nchannels; i++) {
+@@ -699,6 +783,9 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
+ if (!dev->nets[i])
+ continue;
+
++ if (ops->dev_remove_channel)
++ ops->dev_remove_channel(dev->nets[i]);
++
+ free_candev(dev->nets[i]->netdev);
+ }
+ }
+@@ -730,6 +817,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
+ init_completion(&priv->start_comp);
+ init_completion(&priv->stop_comp);
+ init_completion(&priv->flush_comp);
++ init_completion(&priv->get_busparams_comp);
+ priv->can.ctrlmode_supported = 0;
+
+ priv->dev = dev;
+@@ -742,7 +830,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
+ priv->can.state = CAN_STATE_STOPPED;
+ priv->can.clock.freq = dev->cfg->clock.freq;
+ priv->can.bittiming_const = dev->cfg->bittiming_const;
+- priv->can.do_set_bittiming = ops->dev_set_bittiming;
++ priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
+ priv->can.do_set_mode = ops->dev_set_mode;
+ if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) ||
+ (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
+@@ -754,7 +842,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
+
+ if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
+- priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming;
++ priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
+ }
+
+ netdev->flags |= IFF_ECHO;
+@@ -772,17 +860,26 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
+
+ dev->nets[channel] = priv;
+
++ if (ops->dev_init_channel) {
++ err = ops->dev_init_channel(priv);
++ if (err)
++ goto err;
++ }
++
+ err = register_candev(netdev);
+ if (err) {
+ dev_err(&dev->intf->dev, "Failed to register CAN device\n");
+- free_candev(netdev);
+- dev->nets[channel] = NULL;
+- return err;
++ goto err;
+ }
+
+ netdev_dbg(netdev, "device registered\n");
+
+ return 0;
++
++err:
++ free_candev(netdev);
++ dev->nets[channel] = NULL;
++ return err;
+ }
+
+ static int kvaser_usb_probe(struct usb_interface *intf,
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+index 66f672ea631b8..f688124d6d669 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+@@ -45,6 +45,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt;
+
+ /* Minihydra command IDs */
+ #define CMD_SET_BUSPARAMS_REQ 16
++#define CMD_GET_BUSPARAMS_REQ 17
++#define CMD_GET_BUSPARAMS_RESP 18
+ #define CMD_GET_CHIP_STATE_REQ 19
+ #define CMD_CHIP_STATE_EVENT 20
+ #define CMD_SET_DRIVERMODE_REQ 21
+@@ -196,21 +198,26 @@ struct kvaser_cmd_chip_state_event {
+ #define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01
+ #define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02
+ struct kvaser_cmd_set_busparams {
+- __le32 bitrate;
+- u8 tseg1;
+- u8 tseg2;
+- u8 sjw;
+- u8 nsamples;
++ struct kvaser_usb_busparams busparams_nominal;
+ u8 reserved0[4];
+- __le32 bitrate_d;
+- u8 tseg1_d;
+- u8 tseg2_d;
+- u8 sjw_d;
+- u8 nsamples_d;
++ struct kvaser_usb_busparams busparams_data;
+ u8 canfd_mode;
+ u8 reserved1[7];
+ } __packed;
+
++/* Busparam type */
++#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN 0x00
++#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD 0x01
++struct kvaser_cmd_get_busparams_req {
++ u8 type;
++ u8 reserved[27];
++} __packed;
++
++struct kvaser_cmd_get_busparams_res {
++ struct kvaser_usb_busparams busparams;
++ u8 reserved[20];
++} __packed;
++
+ /* Ctrl modes */
+ #define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01
+ #define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02
+@@ -281,6 +288,8 @@ struct kvaser_cmd {
+ struct kvaser_cmd_error_event error_event;
+
+ struct kvaser_cmd_set_busparams set_busparams_req;
++ struct kvaser_cmd_get_busparams_req get_busparams_req;
++ struct kvaser_cmd_get_busparams_res get_busparams_res;
+
+ struct kvaser_cmd_chip_state_event chip_state_event;
+
+@@ -363,6 +372,10 @@ struct kvaser_cmd_ext {
+ } __packed;
+ } __packed;
+
++struct kvaser_usb_net_hydra_priv {
++ int pending_get_busparams_type;
++};
++
+ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
+ .name = "kvaser_usb_kcan",
+ .tseg1_min = 1,
+@@ -840,6 +853,39 @@ static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev,
+ complete(&priv->flush_comp);
+ }
+
++static void kvaser_usb_hydra_get_busparams_reply(const struct kvaser_usb *dev,
++ const struct kvaser_cmd *cmd)
++{
++ struct kvaser_usb_net_priv *priv;
++ struct kvaser_usb_net_hydra_priv *hydra;
++
++ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
++ if (!priv)
++ return;
++
++ hydra = priv->sub_priv;
++ if (!hydra)
++ return;
++
++ switch (hydra->pending_get_busparams_type) {
++ case KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN:
++ memcpy(&priv->busparams_nominal, &cmd->get_busparams_res.busparams,
++ sizeof(priv->busparams_nominal));
++ break;
++ case KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD:
++ memcpy(&priv->busparams_data, &cmd->get_busparams_res.busparams,
++ sizeof(priv->busparams_nominal));
++ break;
++ default:
++ dev_warn(&dev->intf->dev, "Unknown get_busparams_type %d\n",
++ hydra->pending_get_busparams_type);
++ break;
++ }
++ hydra->pending_get_busparams_type = -1;
++
++ complete(&priv->get_busparams_comp);
++}
++
+ static void
+ kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
+ u8 bus_status,
+@@ -1326,6 +1372,10 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
+ kvaser_usb_hydra_state_event(dev, cmd);
+ break;
+
++ case CMD_GET_BUSPARAMS_RESP:
++ kvaser_usb_hydra_get_busparams_reply(dev, cmd);
++ break;
++
+ case CMD_ERROR_EVENT:
+ kvaser_usb_hydra_error_event(dev, cmd);
+ break;
+@@ -1522,15 +1572,58 @@ static int kvaser_usb_hydra_set_mode(struct net_device *netdev,
+ return err;
+ }
+
+-static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
++static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
++ int busparams_type)
++{
++ struct kvaser_usb *dev = priv->dev;
++ struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv;
++ struct kvaser_cmd *cmd;
++ int err;
++
++ if (!hydra)
++ return -EINVAL;
++
++ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
++ if (!cmd)
++ return -ENOMEM;
++
++ cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ;
++ kvaser_usb_hydra_set_cmd_dest_he
++ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
++ kvaser_usb_hydra_set_cmd_transid
++ (cmd, kvaser_usb_hydra_get_next_transid(dev));
++ cmd->get_busparams_req.type = busparams_type;
++ hydra->pending_get_busparams_type = busparams_type;
++
++ reinit_completion(&priv->get_busparams_comp);
++
++ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++ if (err)
++ return err;
++
++ if (!wait_for_completion_timeout(&priv->get_busparams_comp,
++ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
++ return -ETIMEDOUT;
++
++ return err;
++}
++
++static int kvaser_usb_hydra_get_nominal_busparams(struct kvaser_usb_net_priv *priv)
++{
++ return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN);
++}
++
++static int kvaser_usb_hydra_get_data_busparams(struct kvaser_usb_net_priv *priv)
++{
++ return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD);
++}
++
++static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
++ const struct kvaser_usb_busparams *busparams)
+ {
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+- struct can_bittiming *bt = &priv->can.bittiming;
+ struct kvaser_usb *dev = priv->dev;
+- int tseg1 = bt->prop_seg + bt->phase_seg1;
+- int tseg2 = bt->phase_seg2;
+- int sjw = bt->sjw;
+ int err;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+@@ -1538,11 +1631,8 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
+- cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate);
+- cmd->set_busparams_req.sjw = (u8)sjw;
+- cmd->set_busparams_req.tseg1 = (u8)tseg1;
+- cmd->set_busparams_req.tseg2 = (u8)tseg2;
+- cmd->set_busparams_req.nsamples = 1;
++ memcpy(&cmd->set_busparams_req.busparams_nominal, busparams,
++ sizeof(cmd->set_busparams_req.busparams_nominal));
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+@@ -1556,15 +1646,12 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
+ return err;
+ }
+
+-static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
++static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
++ const struct kvaser_usb_busparams *busparams)
+ {
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct kvaser_usb *dev = priv->dev;
+- int tseg1 = dbt->prop_seg + dbt->phase_seg1;
+- int tseg2 = dbt->phase_seg2;
+- int sjw = dbt->sjw;
+ int err;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+@@ -1572,11 +1659,8 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
+- cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate);
+- cmd->set_busparams_req.sjw_d = (u8)sjw;
+- cmd->set_busparams_req.tseg1_d = (u8)tseg1;
+- cmd->set_busparams_req.tseg2_d = (u8)tseg2;
+- cmd->set_busparams_req.nsamples_d = 1;
++ memcpy(&cmd->set_busparams_req.busparams_data, busparams,
++ sizeof(cmd->set_busparams_req.busparams_data));
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+@@ -1683,6 +1767,19 @@ static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev)
+ return 0;
+ }
+
++static int kvaser_usb_hydra_init_channel(struct kvaser_usb_net_priv *priv)
++{
++ struct kvaser_usb_net_hydra_priv *hydra;
++
++ hydra = devm_kzalloc(&priv->dev->intf->dev, sizeof(*hydra), GFP_KERNEL);
++ if (!hydra)
++ return -ENOMEM;
++
++ priv->sub_priv = hydra;
++
++ return 0;
++}
++
+ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
+ {
+ struct kvaser_cmd cmd;
+@@ -2027,10 +2124,13 @@ kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
+ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
+ .dev_set_mode = kvaser_usb_hydra_set_mode,
+ .dev_set_bittiming = kvaser_usb_hydra_set_bittiming,
++ .dev_get_busparams = kvaser_usb_hydra_get_nominal_busparams,
+ .dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming,
++ .dev_get_data_busparams = kvaser_usb_hydra_get_data_busparams,
+ .dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter,
+ .dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints,
+ .dev_init_card = kvaser_usb_hydra_init_card,
++ .dev_init_channel = kvaser_usb_hydra_init_channel,
+ .dev_get_software_info = kvaser_usb_hydra_get_software_info,
+ .dev_get_software_details = kvaser_usb_hydra_get_software_details,
+ .dev_get_card_info = kvaser_usb_hydra_get_card_info,
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+index 19958037720f4..b423fd4c79890 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+@@ -21,6 +21,7 @@
+ #include <linux/types.h>
+ #include <linux/units.h>
+ #include <linux/usb.h>
++#include <linux/workqueue.h>
+
+ #include <linux/can.h>
+ #include <linux/can/dev.h>
+@@ -56,6 +57,9 @@
+ #define CMD_RX_EXT_MESSAGE 14
+ #define CMD_TX_EXT_MESSAGE 15
+ #define CMD_SET_BUS_PARAMS 16
++#define CMD_GET_BUS_PARAMS 17
++#define CMD_GET_BUS_PARAMS_REPLY 18
++#define CMD_GET_CHIP_STATE 19
+ #define CMD_CHIP_STATE_EVENT 20
+ #define CMD_SET_CTRL_MODE 21
+ #define CMD_RESET_CHIP 24
+@@ -70,10 +74,13 @@
+ #define CMD_GET_CARD_INFO_REPLY 35
+ #define CMD_GET_SOFTWARE_INFO 38
+ #define CMD_GET_SOFTWARE_INFO_REPLY 39
++#define CMD_ERROR_EVENT 45
+ #define CMD_FLUSH_QUEUE 48
+ #define CMD_TX_ACKNOWLEDGE 50
+ #define CMD_CAN_ERROR_EVENT 51
+ #define CMD_FLUSH_QUEUE_REPLY 68
++#define CMD_GET_CAPABILITIES_REQ 95
++#define CMD_GET_CAPABILITIES_RESP 96
+
+ #define CMD_LEAF_LOG_MESSAGE 106
+
+@@ -83,6 +90,8 @@
+ #define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
+ #define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
+
++#define KVASER_USB_LEAF_SWOPTION_EXT_CAP BIT(12)
++
+ /* error factors */
+ #define M16C_EF_ACKE BIT(0)
+ #define M16C_EF_CRCE BIT(1)
+@@ -157,11 +166,7 @@ struct usbcan_cmd_softinfo {
+ struct kvaser_cmd_busparams {
+ u8 tid;
+ u8 channel;
+- __le32 bitrate;
+- u8 tseg1;
+- u8 tseg2;
+- u8 sjw;
+- u8 no_samp;
++ struct kvaser_usb_busparams busparams;
+ } __packed;
+
+ struct kvaser_cmd_tx_can {
+@@ -230,7 +235,7 @@ struct kvaser_cmd_tx_acknowledge_header {
+ u8 tid;
+ } __packed;
+
+-struct leaf_cmd_error_event {
++struct leaf_cmd_can_error_event {
+ u8 tid;
+ u8 flags;
+ __le16 time[3];
+@@ -242,7 +247,7 @@ struct leaf_cmd_error_event {
+ u8 error_factor;
+ } __packed;
+
+-struct usbcan_cmd_error_event {
++struct usbcan_cmd_can_error_event {
+ u8 tid;
+ u8 padding;
+ u8 tx_errors_count_ch0;
+@@ -254,6 +259,28 @@ struct usbcan_cmd_error_event {
+ __le16 time;
+ } __packed;
+
++/* CMD_ERROR_EVENT error codes */
++#define KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL 0x8
++#define KVASER_USB_LEAF_ERROR_EVENT_PARAM 0x9
++
++struct leaf_cmd_error_event {
++ u8 tid;
++ u8 error_code;
++ __le16 timestamp[3];
++ __le16 padding;
++ __le16 info1;
++ __le16 info2;
++} __packed;
++
++struct usbcan_cmd_error_event {
++ u8 tid;
++ u8 error_code;
++ __le16 info1;
++ __le16 info2;
++ __le16 timestamp;
++ __le16 padding;
++} __packed;
++
+ struct kvaser_cmd_ctrl_mode {
+ u8 tid;
+ u8 channel;
+@@ -278,6 +305,28 @@ struct leaf_cmd_log_message {
+ u8 data[8];
+ } __packed;
+
++/* Sub commands for cap_req and cap_res */
++#define KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE 0x02
++#define KVASER_USB_LEAF_CAP_CMD_ERR_REPORT 0x05
++struct kvaser_cmd_cap_req {
++ __le16 padding0;
++ __le16 cap_cmd;
++ __le16 padding1;
++ __le16 channel;
++} __packed;
++
++/* Status codes for cap_res */
++#define KVASER_USB_LEAF_CAP_STAT_OK 0x00
++#define KVASER_USB_LEAF_CAP_STAT_NOT_IMPL 0x01
++#define KVASER_USB_LEAF_CAP_STAT_UNAVAIL 0x02
++struct kvaser_cmd_cap_res {
++ __le16 padding;
++ __le16 cap_cmd;
++ __le16 status;
++ __le32 mask;
++ __le32 value;
++} __packed;
++
+ struct kvaser_cmd {
+ u8 len;
+ u8 id;
+@@ -293,14 +342,18 @@ struct kvaser_cmd {
+ struct leaf_cmd_softinfo softinfo;
+ struct leaf_cmd_rx_can rx_can;
+ struct leaf_cmd_chip_state_event chip_state_event;
+- struct leaf_cmd_error_event error_event;
++ struct leaf_cmd_can_error_event can_error_event;
+ struct leaf_cmd_log_message log_message;
++ struct leaf_cmd_error_event error_event;
++ struct kvaser_cmd_cap_req cap_req;
++ struct kvaser_cmd_cap_res cap_res;
+ } __packed leaf;
+
+ union {
+ struct usbcan_cmd_softinfo softinfo;
+ struct usbcan_cmd_rx_can rx_can;
+ struct usbcan_cmd_chip_state_event chip_state_event;
++ struct usbcan_cmd_can_error_event can_error_event;
+ struct usbcan_cmd_error_event error_event;
+ } __packed usbcan;
+
+@@ -323,7 +376,10 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event),
+- [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
++ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.can_error_event),
++ [CMD_GET_CAPABILITIES_RESP] = kvaser_fsize(u.leaf.cap_res),
++ [CMD_GET_BUS_PARAMS_REPLY] = kvaser_fsize(u.busparams),
++ [CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
+ /* ignored events: */
+ [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
+ };
+@@ -337,7 +393,8 @@ static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
+- [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
++ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event),
++ [CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
+ /* ignored events: */
+ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
+ };
+@@ -365,6 +422,12 @@ struct kvaser_usb_err_summary {
+ };
+ };
+
++struct kvaser_usb_net_leaf_priv {
++ struct kvaser_usb_net_priv *net;
++
++ struct delayed_work chip_state_req_work;
++};
++
+ static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = {
+ .name = "kvaser_usb_ucii",
+ .tseg1_min = 4,
+@@ -606,6 +669,9 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
+ dev->fw_version = le32_to_cpu(softinfo->fw_version);
+ dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
+
++ if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP)
++ dev->card_data.capabilities |= KVASER_USB_CAP_EXT_CAP;
++
+ if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
+ /* Firmware expects bittiming parameters calculated for 16MHz
+ * clock, regardless of the actual clock
+@@ -693,6 +759,116 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
+ return 0;
+ }
+
++static int kvaser_usb_leaf_get_single_capability(struct kvaser_usb *dev,
++ u16 cap_cmd_req, u16 *status)
++{
++ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
++ struct kvaser_cmd *cmd;
++ u32 value = 0;
++ u32 mask = 0;
++ u16 cap_cmd_res;
++ int err;
++ int i;
++
++ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
++ if (!cmd)
++ return -ENOMEM;
++
++ cmd->id = CMD_GET_CAPABILITIES_REQ;
++ cmd->u.leaf.cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
++ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_cap_req);
++
++ err = kvaser_usb_send_cmd(dev, cmd, cmd->len);
++ if (err)
++ goto end;
++
++ err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd);
++ if (err)
++ goto end;
++
++ *status = le16_to_cpu(cmd->u.leaf.cap_res.status);
++
++ if (*status != KVASER_USB_LEAF_CAP_STAT_OK)
++ goto end;
++
++ cap_cmd_res = le16_to_cpu(cmd->u.leaf.cap_res.cap_cmd);
++ switch (cap_cmd_res) {
++ case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
++ case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
++ value = le32_to_cpu(cmd->u.leaf.cap_res.value);
++ mask = le32_to_cpu(cmd->u.leaf.cap_res.mask);
++ break;
++ default:
++ dev_warn(&dev->intf->dev, "Unknown capability command %u\n",
++ cap_cmd_res);
++ break;
++ }
++
++ for (i = 0; i < dev->nchannels; i++) {
++ if (BIT(i) & (value & mask)) {
++ switch (cap_cmd_res) {
++ case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
++ card_data->ctrlmode_supported |=
++ CAN_CTRLMODE_LISTENONLY;
++ break;
++ case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
++ card_data->capabilities |=
++ KVASER_USB_CAP_BERR_CAP;
++ break;
++ }
++ }
++ }
++
++end:
++ kfree(cmd);
++
++ return err;
++}
++
++static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev)
++{
++ int err;
++ u16 status;
++
++ if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) {
++ dev_info(&dev->intf->dev,
++ "No extended capability support. Upgrade device firmware.\n");
++ return 0;
++ }
++
++ err = kvaser_usb_leaf_get_single_capability(dev,
++ KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE,
++ &status);
++ if (err)
++ return err;
++ if (status)
++ dev_info(&dev->intf->dev,
++ "KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE failed %u\n",
++ status);
++
++ err = kvaser_usb_leaf_get_single_capability(dev,
++ KVASER_USB_LEAF_CAP_CMD_ERR_REPORT,
++ &status);
++ if (err)
++ return err;
++ if (status)
++ dev_info(&dev->intf->dev,
++ "KVASER_USB_LEAF_CAP_CMD_ERR_REPORT failed %u\n",
++ status);
++
++ return 0;
++}
++
++static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev)
++{
++ int err = 0;
++
++ if (dev->driver_info->family == KVASER_LEAF)
++ err = kvaser_usb_leaf_get_capabilities_leaf(dev);
++
++ return err;
++}
++
+ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+ {
+@@ -721,7 +897,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
+ context = &priv->tx_contexts[tid % dev->max_tx_urbs];
+
+ /* Sometimes the state change doesn't come after a bus-off event */
+- if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) {
++ if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) {
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+@@ -774,6 +950,16 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+ return err;
+ }
+
++static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work)
++{
++ struct kvaser_usb_net_leaf_priv *leaf =
++ container_of(work, struct kvaser_usb_net_leaf_priv,
++ chip_state_req_work.work);
++ struct kvaser_usb_net_priv *priv = leaf->net;
++
++ kvaser_usb_leaf_simple_cmd_async(priv, CMD_GET_CHIP_STATE);
++}
++
+ static void
+ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
+ const struct kvaser_usb_err_summary *es,
+@@ -792,20 +978,16 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (es->status & M16C_STATE_BUS_PASSIVE) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+- } else if (es->status & M16C_STATE_BUS_ERROR) {
++ } else if ((es->status & M16C_STATE_BUS_ERROR) &&
++ cur_state >= CAN_STATE_BUS_OFF) {
+ /* Guard against spurious error events after a busoff */
+- if (cur_state < CAN_STATE_BUS_OFF) {
+- if (es->txerr >= 128 || es->rxerr >= 128)
+- new_state = CAN_STATE_ERROR_PASSIVE;
+- else if (es->txerr >= 96 || es->rxerr >= 96)
+- new_state = CAN_STATE_ERROR_WARNING;
+- else if (cur_state > CAN_STATE_ERROR_ACTIVE)
+- new_state = CAN_STATE_ERROR_ACTIVE;
+- }
+- }
+-
+- if (!es->status)
++ } else if (es->txerr >= 128 || es->rxerr >= 128) {
++ new_state = CAN_STATE_ERROR_PASSIVE;
++ } else if (es->txerr >= 96 || es->rxerr >= 96) {
++ new_state = CAN_STATE_ERROR_WARNING;
++ } else {
+ new_state = CAN_STATE_ERROR_ACTIVE;
++ }
+
+ if (new_state != cur_state) {
+ tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
+@@ -815,7 +997,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
+ }
+
+ if (priv->can.restart_ms &&
+- cur_state >= CAN_STATE_BUS_OFF &&
++ cur_state == CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF)
+ priv->can.can_stats.restarts++;
+
+@@ -849,6 +1031,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ struct sk_buff *skb;
+ struct net_device_stats *stats;
+ struct kvaser_usb_net_priv *priv;
++ struct kvaser_usb_net_leaf_priv *leaf;
+ enum can_state old_state, new_state;
+
+ if (es->channel >= dev->nchannels) {
+@@ -858,8 +1041,13 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ }
+
+ priv = dev->nets[es->channel];
++ leaf = priv->sub_priv;
+ stats = &priv->netdev->stats;
+
++ /* Ignore e.g. state change to bus-off reported just after stopping */
++ if (!netif_running(priv->netdev))
++ return;
++
+ /* Update all of the CAN interface's state and error counters before
+ * trying any memory allocation that can actually fail with -ENOMEM.
+ *
+@@ -874,6 +1062,14 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
+ new_state = priv->can.state;
+
++ /* If there are errors, request status updates periodically as we do
++ * not get automatic notifications of improved state.
++ */
++ if (new_state < CAN_STATE_BUS_OFF &&
++ (es->rxerr || es->txerr || new_state == CAN_STATE_ERROR_PASSIVE))
++ schedule_delayed_work(&leaf->chip_state_req_work,
++ msecs_to_jiffies(500));
++
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+@@ -891,7 +1087,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ }
+
+ if (priv->can.restart_ms &&
+- old_state >= CAN_STATE_BUS_OFF &&
++ old_state == CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+ netif_carrier_on(priv->netdev);
+@@ -990,11 +1186,11 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
+
+ case CMD_CAN_ERROR_EVENT:
+ es.channel = 0;
+- es.status = cmd->u.usbcan.error_event.status_ch0;
+- es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0;
+- es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0;
++ es.status = cmd->u.usbcan.can_error_event.status_ch0;
++ es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch0;
++ es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch0;
+ es.usbcan.other_ch_status =
+- cmd->u.usbcan.error_event.status_ch1;
++ cmd->u.usbcan.can_error_event.status_ch1;
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+
+ /* The USBCAN firmware supports up to 2 channels.
+@@ -1002,13 +1198,13 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
+ */
+ if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
+ es.channel = 1;
+- es.status = cmd->u.usbcan.error_event.status_ch1;
++ es.status = cmd->u.usbcan.can_error_event.status_ch1;
+ es.txerr =
+- cmd->u.usbcan.error_event.tx_errors_count_ch1;
++ cmd->u.usbcan.can_error_event.tx_errors_count_ch1;
+ es.rxerr =
+- cmd->u.usbcan.error_event.rx_errors_count_ch1;
++ cmd->u.usbcan.can_error_event.rx_errors_count_ch1;
+ es.usbcan.other_ch_status =
+- cmd->u.usbcan.error_event.status_ch0;
++ cmd->u.usbcan.can_error_event.status_ch0;
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+ }
+ break;
+@@ -1025,11 +1221,11 @@ static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev,
+
+ switch (cmd->id) {
+ case CMD_CAN_ERROR_EVENT:
+- es.channel = cmd->u.leaf.error_event.channel;
+- es.status = cmd->u.leaf.error_event.status;
+- es.txerr = cmd->u.leaf.error_event.tx_errors_count;
+- es.rxerr = cmd->u.leaf.error_event.rx_errors_count;
+- es.leaf.error_factor = cmd->u.leaf.error_event.error_factor;
++ es.channel = cmd->u.leaf.can_error_event.channel;
++ es.status = cmd->u.leaf.can_error_event.status;
++ es.txerr = cmd->u.leaf.can_error_event.tx_errors_count;
++ es.rxerr = cmd->u.leaf.can_error_event.rx_errors_count;
++ es.leaf.error_factor = cmd->u.leaf.can_error_event.error_factor;
+ break;
+ case CMD_LEAF_LOG_MESSAGE:
+ es.channel = cmd->u.leaf.log_message.channel;
+@@ -1162,6 +1358,74 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
+ netif_rx(skb);
+ }
+
++static void kvaser_usb_leaf_error_event_parameter(const struct kvaser_usb *dev,
++ const struct kvaser_cmd *cmd)
++{
++ u16 info1 = 0;
++
++ switch (dev->driver_info->family) {
++ case KVASER_LEAF:
++ info1 = le16_to_cpu(cmd->u.leaf.error_event.info1);
++ break;
++ case KVASER_USBCAN:
++ info1 = le16_to_cpu(cmd->u.usbcan.error_event.info1);
++ break;
++ }
++
++ /* info1 will contain the offending cmd_no */
++ switch (info1) {
++ case CMD_SET_CTRL_MODE:
++ dev_warn(&dev->intf->dev,
++ "CMD_SET_CTRL_MODE error in parameter\n");
++ break;
++
++ case CMD_SET_BUS_PARAMS:
++ dev_warn(&dev->intf->dev,
++ "CMD_SET_BUS_PARAMS error in parameter\n");
++ break;
++
++ default:
++ dev_warn(&dev->intf->dev,
++ "Unhandled parameter error event cmd_no (%u)\n",
++ info1);
++ break;
++ }
++}
++
++static void kvaser_usb_leaf_error_event(const struct kvaser_usb *dev,
++ const struct kvaser_cmd *cmd)
++{
++ u8 error_code = 0;
++
++ switch (dev->driver_info->family) {
++ case KVASER_LEAF:
++ error_code = cmd->u.leaf.error_event.error_code;
++ break;
++ case KVASER_USBCAN:
++ error_code = cmd->u.usbcan.error_event.error_code;
++ break;
++ }
++
++ switch (error_code) {
++ case KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL:
++ /* Received additional CAN message, when firmware TX queue is
++ * already full. Something is wrong with the driver.
++ * This should never happen!
++ */
++ dev_err(&dev->intf->dev,
++ "Received error event TX_QUEUE_FULL\n");
++ break;
++ case KVASER_USB_LEAF_ERROR_EVENT_PARAM:
++ kvaser_usb_leaf_error_event_parameter(dev, cmd);
++ break;
++
++ default:
++ dev_warn(&dev->intf->dev,
++ "Unhandled error event (%d)\n", error_code);
++ break;
++ }
++}
++
+ static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+ {
+@@ -1202,6 +1466,25 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
+ complete(&priv->stop_comp);
+ }
+
++static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev,
++ const struct kvaser_cmd *cmd)
++{
++ struct kvaser_usb_net_priv *priv;
++ u8 channel = cmd->u.busparams.channel;
++
++ if (channel >= dev->nchannels) {
++ dev_err(&dev->intf->dev,
++ "Invalid channel number (%d)\n", channel);
++ return;
++ }
++
++ priv = dev->nets[channel];
++ memcpy(&priv->busparams_nominal, &cmd->u.busparams.busparams,
++ sizeof(priv->busparams_nominal));
++
++ complete(&priv->get_busparams_comp);
++}
++
+ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+ {
+@@ -1240,6 +1523,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
+ kvaser_usb_leaf_tx_acknowledge(dev, cmd);
+ break;
+
++ case CMD_ERROR_EVENT:
++ kvaser_usb_leaf_error_event(dev, cmd);
++ break;
++
++ case CMD_GET_BUS_PARAMS_REPLY:
++ kvaser_usb_leaf_get_busparams_reply(dev, cmd);
++ break;
++
+ /* Ignored commands */
+ case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
+ if (dev->driver_info->family != KVASER_USBCAN)
+@@ -1336,10 +1627,13 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
+
+ static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
+ {
++ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
+ int err;
+
+ reinit_completion(&priv->stop_comp);
+
++ cancel_delayed_work(&leaf->chip_state_req_work);
++
+ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
+ priv->channel);
+ if (err)
+@@ -1386,10 +1680,35 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
+ return 0;
+ }
+
+-static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
++static int kvaser_usb_leaf_init_channel(struct kvaser_usb_net_priv *priv)
++{
++ struct kvaser_usb_net_leaf_priv *leaf;
++
++ leaf = devm_kzalloc(&priv->dev->intf->dev, sizeof(*leaf), GFP_KERNEL);
++ if (!leaf)
++ return -ENOMEM;
++
++ leaf->net = priv;
++ INIT_DELAYED_WORK(&leaf->chip_state_req_work,
++ kvaser_usb_leaf_chip_state_req_work);
++
++ priv->sub_priv = leaf;
++
++ return 0;
++}
++
++static void kvaser_usb_leaf_remove_channel(struct kvaser_usb_net_priv *priv)
++{
++ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
++
++ if (leaf)
++ cancel_delayed_work_sync(&leaf->chip_state_req_work);
++}
++
++static int kvaser_usb_leaf_set_bittiming(const struct net_device *netdev,
++ const struct kvaser_usb_busparams *busparams)
+ {
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+- struct can_bittiming *bt = &priv->can.bittiming;
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ int rc;
+@@ -1402,15 +1721,8 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams);
+ cmd->u.busparams.channel = priv->channel;
+ cmd->u.busparams.tid = 0xff;
+- cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
+- cmd->u.busparams.sjw = bt->sjw;
+- cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
+- cmd->u.busparams.tseg2 = bt->phase_seg2;
+-
+- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+- cmd->u.busparams.no_samp = 3;
+- else
+- cmd->u.busparams.no_samp = 1;
++ memcpy(&cmd->u.busparams.busparams, busparams,
++ sizeof(cmd->u.busparams.busparams));
+
+ rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+
+@@ -1418,6 +1730,27 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+ return rc;
+ }
+
++static int kvaser_usb_leaf_get_busparams(struct kvaser_usb_net_priv *priv)
++{
++ int err;
++
++ if (priv->dev->driver_info->family == KVASER_USBCAN)
++ return -EOPNOTSUPP;
++
++ reinit_completion(&priv->get_busparams_comp);
++
++ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_GET_BUS_PARAMS,
++ priv->channel);
++ if (err)
++ return err;
++
++ if (!wait_for_completion_timeout(&priv->get_busparams_comp,
++ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
++ return -ETIMEDOUT;
++
++ return 0;
++}
++
+ static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
+ enum can_mode mode)
+ {
+@@ -1479,14 +1812,18 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
+ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
+ .dev_set_mode = kvaser_usb_leaf_set_mode,
+ .dev_set_bittiming = kvaser_usb_leaf_set_bittiming,
++ .dev_get_busparams = kvaser_usb_leaf_get_busparams,
+ .dev_set_data_bittiming = NULL,
++ .dev_get_data_busparams = NULL,
+ .dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter,
+ .dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints,
+ .dev_init_card = kvaser_usb_leaf_init_card,
++ .dev_init_channel = kvaser_usb_leaf_init_channel,
++ .dev_remove_channel = kvaser_usb_leaf_remove_channel,
+ .dev_get_software_info = kvaser_usb_leaf_get_software_info,
+ .dev_get_software_details = NULL,
+ .dev_get_card_info = kvaser_usb_leaf_get_card_info,
+- .dev_get_capabilities = NULL,
++ .dev_get_capabilities = kvaser_usb_leaf_get_capabilities,
+ .dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
+ .dev_start_chip = kvaser_usb_leaf_start_chip,
+ .dev_stop_chip = kvaser_usb_leaf_stop_chip,
+diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
+index 80f07bd205934..2e270b4791432 100644
+--- a/drivers/net/dsa/lan9303-core.c
++++ b/drivers/net/dsa/lan9303-core.c
+@@ -1005,9 +1005,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
+ ret = lan9303_read_switch_port(
+ chip, port, lan9303_mib[u].offset, &reg);
+
+- if (ret)
++ if (ret) {
+ dev_warn(chip->dev, "Reading status port %d reg %u failed\n",
+ port, lan9303_mib[u].offset);
++ reg = 0;
++ }
+ data[u] = reg;
+ }
+ }
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index d612181b3226e..c68f48cd1ec08 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -1883,8 +1883,7 @@ static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
+ irq_create_mapping(kirq->domain, n);
+
+ ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn,
+- IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+- kirq->name, kirq);
++ IRQF_ONESHOT, kirq->name, kirq);
+ if (ret)
+ goto out;
+
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 937cb22cb3d48..3b8b2d0fbafaf 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -689,13 +689,12 @@ static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+
+ /* Port 4 supports automedia if the serdes is associated with it. */
+ if (port == 4) {
+- mv88e6xxx_reg_lock(chip);
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err < 0)
+ dev_err(chip->dev, "p%d: failed to read scratch\n",
+ port);
+ if (err <= 0)
+- goto unlock;
++ return;
+
+ cmode = mv88e6352_get_port4_serdes_cmode(chip);
+ if (cmode < 0)
+@@ -703,8 +702,6 @@ static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ port);
+ else
+ mv88e6xxx_translate_cmode(cmode, supported);
+-unlock:
+- mv88e6xxx_reg_unlock(chip);
+ }
+ }
+
+@@ -831,7 +828,9 @@ static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
+ {
+ struct mv88e6xxx_chip *chip = ds->priv;
+
++ mv88e6xxx_reg_lock(chip);
+ chip->info->ops->phylink_get_caps(chip, port, config);
++ mv88e6xxx_reg_unlock(chip);
+
+ if (mv88e6xxx_phy_is_internal(ds, port)) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+@@ -3307,7 +3306,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
+ struct phylink_config pl_config = {};
+ unsigned long caps;
+
+- mv88e6xxx_get_caps(ds, port, &pl_config);
++ chip->info->ops->phylink_get_caps(chip, port, &pl_config);
+
+ caps = pl_config.mac_capabilities;
+
+diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
+index 606c976108085..9d8dfe1729948 100644
+--- a/drivers/net/ethernet/adi/adin1110.c
++++ b/drivers/net/ethernet/adi/adin1110.c
+@@ -196,7 +196,7 @@ static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val)
+ {
+ u32 header_len = ADIN1110_RD_HEADER_LEN;
+ u32 read_len = ADIN1110_REG_LEN;
+- struct spi_transfer t[2] = {0};
++ struct spi_transfer t = {0};
+ int ret;
+
+ priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+@@ -209,17 +209,15 @@ static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val)
+ header_len++;
+ }
+
+- t[0].tx_buf = &priv->data[0];
+- t[0].len = header_len;
+-
+ if (priv->append_crc)
+ read_len++;
+
+ memset(&priv->data[header_len], 0, read_len);
+- t[1].rx_buf = &priv->data[header_len];
+- t[1].len = read_len;
++ t.tx_buf = &priv->data[0];
++ t.rx_buf = &priv->data[0];
++ t.len = read_len + header_len;
+
+- ret = spi_sync_transfer(priv->spidev, t, 2);
++ ret = spi_sync_transfer(priv->spidev, &t, 1);
+ if (ret)
+ return ret;
+
+@@ -296,7 +294,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+ {
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 header_len = ADIN1110_RD_HEADER_LEN;
+- struct spi_transfer t[2] = {0};
++ struct spi_transfer t;
+ u32 frame_size_no_fcs;
+ struct sk_buff *rxb;
+ u32 frame_size;
+@@ -327,12 +325,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+ return ret;
+
+ frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN;
+-
+- rxb = netdev_alloc_skb(port_priv->netdev, round_len);
+- if (!rxb)
+- return -ENOMEM;
+-
+- memset(priv->data, 0, round_len + ADIN1110_RD_HEADER_LEN);
++ memset(priv->data, 0, ADIN1110_RD_HEADER_LEN);
+
+ priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+@@ -342,21 +335,23 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+ header_len++;
+ }
+
+- skb_put(rxb, frame_size_no_fcs + ADIN1110_FRAME_HEADER_LEN);
++ rxb = netdev_alloc_skb(port_priv->netdev, round_len + header_len);
++ if (!rxb)
++ return -ENOMEM;
+
+- t[0].tx_buf = &priv->data[0];
+- t[0].len = header_len;
++ skb_put(rxb, frame_size_no_fcs + header_len + ADIN1110_FRAME_HEADER_LEN);
+
+- t[1].rx_buf = &rxb->data[0];
+- t[1].len = round_len;
++ t.tx_buf = &priv->data[0];
++ t.rx_buf = &rxb->data[0];
++ t.len = header_len + round_len;
+
+- ret = spi_sync_transfer(priv->spidev, t, 2);
++ ret = spi_sync_transfer(priv->spidev, &t, 1);
+ if (ret) {
+ kfree_skb(rxb);
+ return ret;
+ }
+
+- skb_pull(rxb, ADIN1110_FRAME_HEADER_LEN);
++ skb_pull(rxb, header_len + ADIN1110_FRAME_HEADER_LEN);
+ rxb->protocol = eth_type_trans(rxb, port_priv->netdev);
+
+ if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
+diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
+index 3222c48ce6ae4..ec704222925d8 100644
+--- a/drivers/net/ethernet/amd/atarilance.c
++++ b/drivers/net/ethernet/amd/atarilance.c
+@@ -824,7 +824,7 @@ lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
+ head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
+ dev->stats.tx_bytes += skb->len;
+- dev_kfree_skb( skb );
++ dev_consume_skb_irq(skb);
+ lp->cur_tx++;
+ while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
+ lp->cur_tx -= TX_RING_SIZE;
+diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
+index fb8686214a327..8971665a4b2ac 100644
+--- a/drivers/net/ethernet/amd/lance.c
++++ b/drivers/net/ethernet/amd/lance.c
+@@ -1001,7 +1001,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
+ lp->tx_ring[entry].base =
+ ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
+- dev_kfree_skb(skb);
++ dev_consume_skb_irq(skb);
+ } else {
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 4064c3e3dd492..c731a04731f83 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -189,6 +189,7 @@ enum xgbe_sfp_cable {
+ XGBE_SFP_CABLE_UNKNOWN = 0,
+ XGBE_SFP_CABLE_ACTIVE,
+ XGBE_SFP_CABLE_PASSIVE,
++ XGBE_SFP_CABLE_FIBER,
+ };
+
+ enum xgbe_sfp_base {
+@@ -236,10 +237,7 @@ enum xgbe_sfp_speed {
+
+ #define XGBE_SFP_BASE_BR 12
+ #define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a
+-#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d
+ #define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
+-#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68
+-#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX 0x78
+
+ #define XGBE_SFP_BASE_CU_CABLE_LEN 18
+
+@@ -826,29 +824,22 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
+ static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
+ enum xgbe_sfp_speed sfp_speed)
+ {
+- u8 *sfp_base, min, max;
++ u8 *sfp_base, min;
+
+ sfp_base = sfp_eeprom->base;
+
+ switch (sfp_speed) {
+ case XGBE_SFP_SPEED_1000:
+ min = XGBE_SFP_BASE_BR_1GBE_MIN;
+- max = XGBE_SFP_BASE_BR_1GBE_MAX;
+ break;
+ case XGBE_SFP_SPEED_10000:
+ min = XGBE_SFP_BASE_BR_10GBE_MIN;
+- if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
+- XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0)
+- max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX;
+- else
+- max = XGBE_SFP_BASE_BR_10GBE_MAX;
+ break;
+ default:
+ return false;
+ }
+
+- return ((sfp_base[XGBE_SFP_BASE_BR] >= min) &&
+- (sfp_base[XGBE_SFP_BASE_BR] <= max));
++ return sfp_base[XGBE_SFP_BASE_BR] >= min;
+ }
+
+ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
+@@ -1149,16 +1140,18 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
+ phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
+ phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
+
+- /* Assume ACTIVE cable unless told it is PASSIVE */
++ /* Assume FIBER cable unless told otherwise */
+ if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
+ phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN];
+- } else {
++ } else if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_ACTIVE) {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
++ } else {
++ phy_data->sfp_cable = XGBE_SFP_CABLE_FIBER;
+ }
+
+ /* Determine the type of SFP */
+- if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE &&
++ if (phy_data->sfp_cable != XGBE_SFP_CABLE_FIBER &&
+ xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
+diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
+index 334de0d93c899..9e653e2925f78 100644
+--- a/drivers/net/ethernet/apple/bmac.c
++++ b/drivers/net/ethernet/apple/bmac.c
+@@ -1510,7 +1510,7 @@ static void bmac_tx_timeout(struct timer_list *t)
+ i = bp->tx_empty;
+ ++dev->stats.tx_errors;
+ if (i != bp->tx_fill) {
+- dev_kfree_skb(bp->tx_bufs[i]);
++ dev_kfree_skb_irq(bp->tx_bufs[i]);
+ bp->tx_bufs[i] = NULL;
+ if (++i >= N_TX_RING) i = 0;
+ bp->tx_empty = i;
+diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
+index d0a771b65e888..fd1b008b7208c 100644
+--- a/drivers/net/ethernet/apple/mace.c
++++ b/drivers/net/ethernet/apple/mace.c
+@@ -846,7 +846,7 @@ static void mace_tx_timeout(struct timer_list *t)
+ if (mp->tx_bad_runt) {
+ mp->tx_bad_runt = 0;
+ } else if (i != mp->tx_fill) {
+- dev_kfree_skb(mp->tx_bufs[i]);
++ dev_kfree_skb_irq(mp->tx_bufs[i]);
+ if (++i >= N_TX_RING)
+ i = 0;
+ mp->tx_empty = i;
+diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
+index fec57f1982c86..dbe3101447804 100644
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -5415,8 +5415,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
+
+ bp->rx_buf_use_size = rx_size;
+ /* hw alignment + build_skb() overhead*/
+- bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+- NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++ bp->rx_buf_size = kmalloc_size_roundup(
++ SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
++ NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
+ bp->rx_ring_size = size;
+ bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
+diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
+index 08184f20f5104..151ca9573be97 100644
+--- a/drivers/net/ethernet/dnet.c
++++ b/drivers/net/ethernet/dnet.c
+@@ -550,11 +550,11 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ skb_tx_timestamp(skb);
+
++ spin_unlock_irqrestore(&bp->lock, flags);
++
+ /* free the buffer */
+ dev_kfree_skb(skb);
+
+- spin_unlock_irqrestore(&bp->lock, flags);
+-
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 8671591cb7501..3a79ead5219ae 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -1489,23 +1489,6 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
+ rx_ring->stats.xdp_drops++;
+ }
+
+-static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
+- int rx_ring_last)
+-{
+- while (rx_ring_first != rx_ring_last) {
+- struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
+-
+- if (rx_swbd->page) {
+- dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
+- rx_swbd->dir);
+- __free_page(rx_swbd->page);
+- rx_swbd->page = NULL;
+- }
+- enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+- }
+- rx_ring->stats.xdp_redirect_failures++;
+-}
+-
+ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ struct napi_struct *napi, int work_limit,
+ struct bpf_prog *prog)
+@@ -1527,8 +1510,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ int orig_i, orig_cleaned_cnt;
+ struct xdp_buff xdp_buff;
+ struct sk_buff *skb;
+- int tmp_orig_i, err;
+ u32 bd_status;
++ int err;
+
+ rxbd = enetc_rxbd(rx_ring, i);
+ bd_status = le32_to_cpu(rxbd->r.lstatus);
+@@ -1615,18 +1598,16 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ break;
+ }
+
+- tmp_orig_i = orig_i;
+-
+- while (orig_i != i) {
+- enetc_flip_rx_buff(rx_ring,
+- &rx_ring->rx_swbd[orig_i]);
+- enetc_bdr_idx_inc(rx_ring, &orig_i);
+- }
+-
+ err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+ if (unlikely(err)) {
+- enetc_xdp_free(rx_ring, tmp_orig_i, i);
++ enetc_xdp_drop(rx_ring, orig_i, i);
++ rx_ring->stats.xdp_redirect_failures++;
+ } else {
++ while (orig_i != i) {
++ enetc_flip_rx_buff(rx_ring,
++ &rx_ring->rx_swbd[orig_i]);
++ enetc_bdr_idx_inc(rx_ring, &orig_i);
++ }
+ xdp_redirect_frm_cnt++;
+ rx_ring->stats.xdp_redirect++;
+ }
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 23e1a94b9ce45..f250b0df27fbb 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1642,6 +1642,14 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+ * bridging applications.
+ */
+ skb = build_skb(page_address(page), PAGE_SIZE);
++ if (unlikely(!skb)) {
++ page_pool_recycle_direct(rxq->page_pool, page);
++ ndev->stats.rx_dropped++;
++
++ netdev_err_once(ndev, "build_skb failed!\n");
++ goto rx_processing_done;
++ }
++
+ skb_reserve(skb, FEC_ENET_XDP_HEADROOM);
+ skb_put(skb, pkt_len - 4);
+ skb_mark_for_recycle(skb);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 6416322d7c18b..e6e349f0c9457 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3693,6 +3693,24 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
+ return err;
+ }
+
++/**
++ * i40e_calculate_vsi_rx_buf_len - Calculates buffer length
++ *
++ * @vsi: VSI to calculate rx_buf_len from
++ */
++static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
++{
++ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
++ return I40E_RXBUFFER_2048;
++
++#if (PAGE_SIZE < 8192)
++ if (!I40E_2K_TOO_SMALL_WITH_PADDING && vsi->netdev->mtu <= ETH_DATA_LEN)
++ return I40E_RXBUFFER_1536 - NET_IP_ALIGN;
++#endif
++
++ return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
++}
++
+ /**
+ * i40e_vsi_configure_rx - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+@@ -3704,20 +3722,14 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
+ int err = 0;
+ u16 i;
+
+- if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
+- vsi->max_frame = I40E_MAX_RXBUFFER;
+- vsi->rx_buf_len = I40E_RXBUFFER_2048;
++ vsi->max_frame = I40E_MAX_RXBUFFER;
++ vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
++
+ #if (PAGE_SIZE < 8192)
+- } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
++ if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING &&
++ vsi->netdev->mtu <= ETH_DATA_LEN)
+ vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+- vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+ #endif
+- } else {
+- vsi->max_frame = I40E_MAX_RXBUFFER;
+- vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
+- I40E_RXBUFFER_2048;
+- }
+
+ /* set up individual rings */
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+@@ -13282,7 +13294,7 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
+ int i;
+
+ /* Don't allow frames that span over multiple buffers */
+- if (frame_size > vsi->rx_buf_len) {
++ if (frame_size > i40e_calculate_vsi_rx_buf_len(vsi)) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
+ return -EINVAL;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index 0f668468d1414..53fec5bbe6e00 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -639,7 +639,7 @@ static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
+ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
+ {
+ struct ice_ptp_port *ptp_port;
+- bool ts_handled = true;
++ bool more_timestamps;
+ struct ice_pf *pf;
+ u8 idx;
+
+@@ -701,11 +701,10 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
+ * poll for remaining timestamps.
+ */
+ spin_lock(&tx->lock);
+- if (!bitmap_empty(tx->in_use, tx->len))
+- ts_handled = false;
++ more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
+ spin_unlock(&tx->lock);
+
+- return ts_handled;
++ return !more_timestamps;
+ }
+
+ /**
+@@ -776,6 +775,9 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+ {
+ tx->init = 0;
+
++ /* wait for potentially outstanding interrupt to complete */
++ synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
++
+ ice_ptp_flush_tx_tracker(pf, tx);
+
+ kfree(tx->tstamps);
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index f8e32833226c1..24a6ae19ad8ed 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -1202,8 +1202,12 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
+ if (!q_vector) {
+ q_vector = kzalloc(size, GFP_KERNEL);
+ } else if (size > ksize(q_vector)) {
+- kfree_rcu(q_vector, rcu);
+- q_vector = kzalloc(size, GFP_KERNEL);
++ struct igb_q_vector *new_q_vector;
++
++ new_q_vector = kzalloc(size, GFP_KERNEL);
++ if (new_q_vector)
++ kfree_rcu(q_vector, rcu);
++ q_vector = new_q_vector;
+ } else {
+ memset(q_vector, 0, size);
+ }
+@@ -7521,7 +7525,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+- u32 reg, msgbuf[3];
++ u32 reg, msgbuf[3] = {};
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ /* process all the same items cleared in a function level reset */
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
+index 1e7e7071f64d2..df3e26c0cf01a 100644
+--- a/drivers/net/ethernet/intel/igc/igc.h
++++ b/drivers/net/ethernet/intel/igc/igc.h
+@@ -94,6 +94,8 @@ struct igc_ring {
+ u8 queue_index; /* logical index of the ring*/
+ u8 reg_idx; /* physical index of the ring */
+ bool launchtime_enable; /* true if LaunchTime is enabled */
++ ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */
++ ktime_t last_ff_cycle; /* Last cycle with an active first flag */
+
+ u32 start_time;
+ u32 end_time;
+@@ -182,6 +184,7 @@ struct igc_adapter {
+
+ ktime_t base_time;
+ ktime_t cycle_time;
++ bool qbv_enable;
+
+ /* OS defined structs */
+ struct pci_dev *pdev;
+diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
+index 4f9d7f013a958..4ad35fbdc02e8 100644
+--- a/drivers/net/ethernet/intel/igc/igc_defines.h
++++ b/drivers/net/ethernet/intel/igc/igc_defines.h
+@@ -321,6 +321,8 @@
+ #define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+ #define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
++#define IGC_ADVTXD_TSN_CNTX_FIRST 0x00000080
++
+ /* Transmit Control */
+ #define IGC_TCTL_EN 0x00000002 /* enable Tx */
+ #define IGC_TCTL_PSP 0x00000008 /* pad short packets */
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 34889be63e788..34db1c006b20a 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -1000,25 +1000,118 @@ static int igc_write_mc_addr_list(struct net_device *netdev)
+ return netdev_mc_count(netdev);
+ }
+
+-static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
++static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
++ bool *first_flag, bool *insert_empty)
+ {
++ struct igc_adapter *adapter = netdev_priv(ring->netdev);
+ ktime_t cycle_time = adapter->cycle_time;
+ ktime_t base_time = adapter->base_time;
++ ktime_t now = ktime_get_clocktai();
++ ktime_t baset_est, end_of_cycle;
+ u32 launchtime;
++ s64 n;
+
+- /* FIXME: when using ETF together with taprio, we may have a
+- * case where 'delta' is larger than the cycle_time, this may
+- * cause problems if we don't read the current value of
+- * IGC_BASET, as the value writen into the launchtime
+- * descriptor field may be misinterpreted.
++ n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
++
++ baset_est = ktime_add_ns(base_time, cycle_time * (n));
++ end_of_cycle = ktime_add_ns(baset_est, cycle_time);
++
++ if (ktime_compare(txtime, end_of_cycle) >= 0) {
++ if (baset_est != ring->last_ff_cycle) {
++ *first_flag = true;
++ ring->last_ff_cycle = baset_est;
++
++ if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
++ *insert_empty = true;
++ }
++ }
++
++ /* Introduce a window at the end of the cycle during which packets
++ * may not honor launchtime. A 5 us window is chosen to account for
++ * the software updating the tail pointer and the packets being
++ * DMA'ed to the packet buffer.
+ */
+- div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
++ if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
++ netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
++ txtime);
++
++ ring->last_tx_cycle = end_of_cycle;
++
++ launchtime = ktime_sub_ns(txtime, baset_est);
++ if (launchtime > 0)
++ div_s64_rem(launchtime, cycle_time, &launchtime);
++ else
++ launchtime = 0;
+
+ return cpu_to_le32(launchtime);
+ }
+
++static int igc_init_empty_frame(struct igc_ring *ring,
++ struct igc_tx_buffer *buffer,
++ struct sk_buff *skb)
++{
++ unsigned int size;
++ dma_addr_t dma;
++
++ size = skb_headlen(skb);
++
++ dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
++ if (dma_mapping_error(ring->dev, dma)) {
++ netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
++ return -ENOMEM;
++ }
++
++ buffer->skb = skb;
++ buffer->protocol = 0;
++ buffer->bytecount = skb->len;
++ buffer->gso_segs = 1;
++ buffer->time_stamp = jiffies;
++ dma_unmap_len_set(buffer, len, skb->len);
++ dma_unmap_addr_set(buffer, dma, dma);
++
++ return 0;
++}
++
++static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
++ struct sk_buff *skb,
++ struct igc_tx_buffer *first)
++{
++ union igc_adv_tx_desc *desc;
++ u32 cmd_type, olinfo_status;
++ int err;
++
++ if (!igc_desc_unused(ring))
++ return -EBUSY;
++
++ err = igc_init_empty_frame(ring, first, skb);
++ if (err)
++ return err;
++
++ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
++ IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
++ first->bytecount;
++ olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
++
++ desc = IGC_TX_DESC(ring, ring->next_to_use);
++ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
++ desc->read.olinfo_status = cpu_to_le32(olinfo_status);
++ desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));
++
++ netdev_tx_sent_queue(txring_txq(ring), skb->len);
++
++ first->next_to_watch = desc;
++
++ ring->next_to_use++;
++ if (ring->next_to_use == ring->count)
++ ring->next_to_use = 0;
++
++ return 0;
++}
++
++#define IGC_EMPTY_FRAME_SIZE 60
++
+ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
+- struct igc_tx_buffer *first,
++ __le32 launch_time, bool first_flag,
+ u32 vlan_macip_lens, u32 type_tucmd,
+ u32 mss_l4len_idx)
+ {
+@@ -1037,26 +1130,17 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
+ if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ mss_l4len_idx |= tx_ring->reg_idx << 4;
+
++ if (first_flag)
++ mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;
++
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+-
+- /* We assume there is always a valid Tx time available. Invalid times
+- * should have been handled by the upper layers.
+- */
+- if (tx_ring->launchtime_enable) {
+- struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+- ktime_t txtime = first->skb->tstamp;
+-
+- skb_txtime_consumed(first->skb);
+- context_desc->launch_time = igc_tx_launchtime(adapter,
+- txtime);
+- } else {
+- context_desc->launch_time = 0;
+- }
++ context_desc->launch_time = launch_time;
+ }
+
+-static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
++static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
++ __le32 launch_time, bool first_flag)
+ {
+ struct sk_buff *skb = first->skb;
+ u32 vlan_macip_lens = 0;
+@@ -1096,7 +1180,8 @@ no_csum:
+ vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
+
+- igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
++ igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
++ vlan_macip_lens, type_tucmd, 0);
+ }
+
+ static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
+@@ -1320,6 +1405,7 @@ dma_error:
+
+ static int igc_tso(struct igc_ring *tx_ring,
+ struct igc_tx_buffer *first,
++ __le32 launch_time, bool first_flag,
+ u8 *hdr_len)
+ {
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
+@@ -1406,8 +1492,8 @@ static int igc_tso(struct igc_ring *tx_ring,
+ vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
+
+- igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
+- type_tucmd, mss_l4len_idx);
++ igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
++ vlan_macip_lens, type_tucmd, mss_l4len_idx);
+
+ return 1;
+ }
+@@ -1415,11 +1501,14 @@ static int igc_tso(struct igc_ring *tx_ring,
+ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
+ struct igc_ring *tx_ring)
+ {
++ bool first_flag = false, insert_empty = false;
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ __be16 protocol = vlan_get_protocol(skb);
+ struct igc_tx_buffer *first;
++ __le32 launch_time = 0;
+ u32 tx_flags = 0;
+ unsigned short f;
++ ktime_t txtime;
+ u8 hdr_len = 0;
+ int tso = 0;
+
+@@ -1433,11 +1522,40 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
+
+- if (igc_maybe_stop_tx(tx_ring, count + 3)) {
++ if (igc_maybe_stop_tx(tx_ring, count + 5)) {
+ /* this is a hard error */
+ return NETDEV_TX_BUSY;
+ }
+
++ if (!tx_ring->launchtime_enable)
++ goto done;
++
++ txtime = skb->tstamp;
++ skb->tstamp = ktime_set(0, 0);
++ launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
++
++ if (insert_empty) {
++ struct igc_tx_buffer *empty_info;
++ struct sk_buff *empty;
++ void *data;
++
++ empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
++ empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
++ if (!empty)
++ goto done;
++
++ data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
++ memset(data, 0, IGC_EMPTY_FRAME_SIZE);
++
++ igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
++
++ if (igc_init_tx_empty_descriptor(tx_ring,
++ empty,
++ empty_info) < 0)
++ dev_kfree_skb_any(empty);
++ }
++
++done:
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->type = IGC_TX_BUFFER_TYPE_SKB;
+@@ -1474,11 +1592,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
+ first->tx_flags = tx_flags;
+ first->protocol = protocol;
+
+- tso = igc_tso(tx_ring, first, &hdr_len);
++ tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (!tso)
+- igc_tx_csum(tx_ring, first);
++ igc_tx_csum(tx_ring, first, launch_time, first_flag);
+
+ igc_tx_map(tx_ring, first, hdr_len);
+
+@@ -5918,10 +6036,16 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+ bool queue_configured[IGC_MAX_TX_QUEUES] = { };
+ u32 start_time = 0, end_time = 0;
+ size_t n;
++ int i;
++
++ adapter->qbv_enable = qopt->enable;
+
+ if (!qopt->enable)
+ return igc_tsn_clear_schedule(adapter);
+
++ if (qopt->base_time < 0)
++ return -ERANGE;
++
+ if (adapter->base_time)
+ return -EALREADY;
+
+@@ -5933,10 +6057,24 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+
+ for (n = 0; n < qopt->num_entries; n++) {
+ struct tc_taprio_sched_entry *e = &qopt->entries[n];
+- int i;
+
+ end_time += e->interval;
+
++ /* If any of the conditions below are true, we need to manually
++ * control the end time of the cycle.
++ * 1. Qbv users can specify a cycle time that is not equal
++ * to the total GCL intervals. Hence, recalculation is
++ * necessary here to exclude the time interval that
++ * exceeds the cycle time.
++ * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
++ * once the end of the list is reached, it will switch
++ * to the END_OF_CYCLE state and leave the gates in the
++ * same state until the next cycle is started.
++ */
++ if (end_time > adapter->cycle_time ||
++ n + 1 == qopt->num_entries)
++ end_time = adapter->cycle_time;
++
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+@@ -5957,6 +6095,18 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+ start_time += e->interval;
+ }
+
++ /* Check whether a queue gets configured.
++ * If not, set the start and end time to be end time.
++ */
++ for (i = 0; i < adapter->num_tx_queues; i++) {
++ if (!queue_configured[i]) {
++ struct igc_ring *ring = adapter->tx_ring[i];
++
++ ring->start_time = end_time;
++ ring->end_time = end_time;
++ }
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
+index 0fce22de2ab85..356c7455c5cee 100644
+--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
++++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
+@@ -36,7 +36,7 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
+ {
+ unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED;
+
+- if (adapter->base_time)
++ if (adapter->qbv_enable)
+ new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
+
+ if (is_any_launchtime(adapter))
+@@ -110,15 +110,8 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
+ wr32(IGC_STQT(i), ring->start_time);
+ wr32(IGC_ENDQT(i), ring->end_time);
+
+- if (adapter->base_time) {
+- /* If we have a base_time we are in "taprio"
+- * mode and we need to be strict about the
+- * cycles: only transmit a packet if it can be
+- * completed during that cycle.
+- */
+- txqctl |= IGC_TXQCTL_STRICT_CYCLE |
+- IGC_TXQCTL_STRICT_END;
+- }
++ txqctl |= IGC_TXQCTL_STRICT_CYCLE |
++ IGC_TXQCTL_STRICT_END;
+
+ if (ring->launchtime_enable)
+ txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+index c0bedf402da93..f68a6a0e3aa41 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+@@ -1184,10 +1184,13 @@ static int mcs_register_interrupts(struct mcs *mcs)
+ mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
+ if (!mcs->tx_sa_active) {
+ ret = -ENOMEM;
+- goto exit;
++ goto free_irq;
+ }
+
+ return ret;
++
++free_irq:
++ free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
+ exit:
+ pci_free_irq_vectors(mcs->pdev);
+ mcs->num_vec = 0;
+@@ -1589,6 +1592,7 @@ static void mcs_remove(struct pci_dev *pdev)
+
+ /* Set MCS to external bypass */
+ mcs_set_external_bypass(mcs, true);
++ free_irq(pci_irq_vector(pdev, MCS_INT_VEC_IP), mcs);
+ pci_free_irq_vectors(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 1d36619c5ec91..9aa1892a609c7 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3229,6 +3229,30 @@ static void mtk_dim_tx(struct work_struct *work)
+ dim->state = DIM_START_MEASURE;
+ }
+
++static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
++{
++ struct mtk_eth *eth = mac->hw;
++ u32 mcr_cur, mcr_new;
++
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
++ return;
++
++ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
++ mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
++
++ if (val <= 1518)
++ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
++ else if (val <= 1536)
++ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
++ else if (val <= 1552)
++ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
++ else
++ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
++
++ if (mcr_new != mcr_cur)
++ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
++}
++
+ static int mtk_hw_init(struct mtk_eth *eth)
+ {
+ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+@@ -3268,16 +3292,17 @@ static int mtk_hw_init(struct mtk_eth *eth)
+ return 0;
+ }
+
+- val = RSTCTRL_FE | RSTCTRL_PPE;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+-
+- val |= RSTCTRL_ETH;
+- if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+- val |= RSTCTRL_PPE1;
++ val = RSTCTRL_PPE0_V2;
++ } else {
++ val = RSTCTRL_PPE0;
+ }
+
+- ethsys_reset(eth, val);
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
++ val |= RSTCTRL_PPE1;
++
++ ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
+@@ -3303,8 +3328,16 @@ static int mtk_hw_init(struct mtk_eth *eth)
+ * up with the more appropriate value when mtk_mac_config call is being
+ * invoked.
+ */
+- for (i = 0; i < MTK_MAC_COUNT; i++)
++ for (i = 0; i < MTK_MAC_COUNT; i++) {
++ struct net_device *dev = eth->netdev[i];
++
+ mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
++ if (dev) {
++ struct mtk_mac *mac = netdev_priv(dev);
++
++ mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN);
++ }
++ }
+
+ /* Indicates CDM to parse the MTK special tag from CPU
+ * which also is working out for untag packets.
+@@ -3331,9 +3364,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
+ mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+- /* PSE should not drop port8 and port9 packets */
++ /* PSE should not drop port8 and port9 packets from WDMA Tx */
+ mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
+
++ /* PSE should drop packets to port 8/9 on WDMA Rx ring full */
++ mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
++
+ /* PSE Free Queue Flow Control */
+ mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
+
+@@ -3420,7 +3456,6 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
+ int length = new_mtu + MTK_RX_ETH_HLEN;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+- u32 mcr_cur, mcr_new;
+
+ if (rcu_access_pointer(eth->prog) &&
+ length > MTK_PP_MAX_BUF_SIZE) {
+@@ -3428,23 +3463,7 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
+ return -EINVAL;
+ }
+
+- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+- mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+- mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
+-
+- if (length <= 1518)
+- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
+- else if (length <= 1536)
+- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
+- else if (length <= 1552)
+- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
+- else
+- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
+-
+- if (mcr_new != mcr_cur)
+- mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+- }
+-
++ mtk_set_mcr_max_rx(mac, length);
+ dev->mtu = new_mtu;
+
+ return 0;
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index b52f3b0177efb..306fdc2c608a4 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -120,6 +120,7 @@
+ #define PSE_FQFC_CFG1 0x100
+ #define PSE_FQFC_CFG2 0x104
+ #define PSE_DROP_CFG 0x108
++#define PSE_PPE0_DROP 0x110
+
+ /* PSE Input Queue Reservation Register*/
+ #define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
+@@ -447,18 +448,14 @@
+ /* ethernet reset control register */
+ #define ETHSYS_RSTCTRL 0x34
+ #define RSTCTRL_FE BIT(6)
+-#define RSTCTRL_PPE BIT(31)
+-#define RSTCTRL_PPE1 BIT(30)
++#define RSTCTRL_PPE0 BIT(31)
++#define RSTCTRL_PPE0_V2 BIT(30)
++#define RSTCTRL_PPE1 BIT(31)
+ #define RSTCTRL_ETH BIT(23)
+
+ /* ethernet reset check idle register */
+ #define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
+
+-/* ethernet reset control register */
+-#define ETHSYS_RSTCTRL 0x34
+-#define RSTCTRL_FE BIT(6)
+-#define RSTCTRL_PPE BIT(31)
+-
+ /* ethernet dma channel agent map */
+ #define ETHSYS_DMA_AG_MAP 0x408
+ #define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
+diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+index 9063e2e22cd5c..9a9341a348c00 100644
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+@@ -3913,6 +3913,7 @@ abort_with_slices:
+ myri10ge_free_slices(mgp);
+
+ abort_with_firmware:
++ kfree(mgp->msix_vectors);
+ myri10ge_dummy_rdma(mgp, 0);
+
+ abort_with_ioremap:
+diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
+index 1d3c4474b7cb4..700c05fb05b97 100644
+--- a/drivers/net/ethernet/neterion/s2io.c
++++ b/drivers/net/ethernet/neterion/s2io.c
+@@ -2386,7 +2386,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
+ skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
+ if (skb) {
+ swstats->mem_freed += skb->truesize;
+- dev_kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ cnt++;
+ }
+ }
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+index 5250d1d1e49ca..86ecb080b1536 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+@@ -1972,9 +1972,10 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
+ u8 split_id)
+ {
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+- u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
++ u8 port_id = 0, pf_id = 0, vf_id = 0;
+ bool read_using_dmae = false;
+ u32 thresh;
++ u16 fid;
+
+ if (!dump)
+ return len;
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+index 9282321c2e7fb..f9dd50152b1e3 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+@@ -221,6 +221,8 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
+ return 0;
+
+ qlcnic_destroy_async_wq:
++ while (i--)
++ kfree(sriov->vf_info[i].vp);
+ destroy_workqueue(bc->bc_async_wq);
+
+ qlcnic_destroy_trans_wq:
+diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
+index eecd52ed1ed21..f4d434c379e7c 100644
+--- a/drivers/net/ethernet/rdc/r6040.c
++++ b/drivers/net/ethernet/rdc/r6040.c
+@@ -1159,10 +1159,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register net device\n");
+- goto err_out_mdio_unregister;
++ goto err_out_phy_disconnect;
+ }
+ return 0;
+
++err_out_phy_disconnect:
++ phy_disconnect(dev->phydev);
+ err_out_mdio_unregister:
+ mdiobus_unregister(lp->mii_bus);
+ err_out_mdio:
+@@ -1186,6 +1188,7 @@ static void r6040_remove_one(struct pci_dev *pdev)
+ struct r6040_private *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
++ phy_disconnect(dev->phydev);
+ mdiobus_unregister(lp->mii_bus);
+ mdiobus_free(lp->mii_bus);
+ netif_napi_del(&lp->napi);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+index 764832f4dae1a..8b50f03056b7b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+@@ -47,7 +47,8 @@ static void config_sub_second_increment(void __iomem *ioaddr,
+ if (!(value & PTP_TCR_TSCTRLSSR))
+ data = (data * 1000) / 465;
+
+- data &= PTP_SSIR_SSINC_MASK;
++ if (data > PTP_SSIR_SSINC_MAX)
++ data = PTP_SSIR_SSINC_MAX;
+
+ reg_value = data;
+ if (gmac4)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 23ec0a9e396c6..feb209d4b991e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -7097,7 +7097,8 @@ int stmmac_dvr_probe(struct device *device,
+ priv->wq = create_singlethread_workqueue("stmmac_wq");
+ if (!priv->wq) {
+ dev_err(priv->device, "failed to create workqueue\n");
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto error_wq_init;
+ }
+
+ INIT_WORK(&priv->service_task, stmmac_service_task);
+@@ -7325,6 +7326,7 @@ error_mdio_register:
+ stmmac_napi_del(ndev);
+ error_hw_init:
+ destroy_workqueue(priv->wq);
++error_wq_init:
+ bitmap_free(priv->af_xdp_zc_qps);
+
+ return ret;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+index 53172a4398101..bf619295d079f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+@@ -64,7 +64,7 @@
+ #define PTP_TCR_TSENMACADDR BIT(18)
+
+ /* SSIR defines */
+-#define PTP_SSIR_SSINC_MASK 0xff
++#define PTP_SSIR_SSINC_MAX 0xff
+ #define GMAC4_PTP_SSIR_SSINC_SHIFT 16
+
+ /* Auxiliary Control defines */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+index 49af7e78b7f59..687f43cd466c6 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+@@ -1654,12 +1654,16 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv)
+ }
+
+ ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
+- if (ret)
++ if (ret) {
++ kfree_skb(skb);
+ goto cleanup;
++ }
+
+ ret = dev_set_promiscuity(priv->dev, 1);
+- if (ret)
++ if (ret) {
++ kfree_skb(skb);
+ goto cleanup;
++ }
+
+ ret = dev_direct_xmit(skb, 0);
+ if (ret)
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index b3b0ba842541d..4ff1cfdb9730c 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -564,13 +564,13 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
+ ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
+ if (ret) {
+ dev_err(common->dev, "cannot set real number of tx queues\n");
+- return ret;
++ goto runtime_put;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
+ if (ret) {
+ dev_err(common->dev, "cannot set real number of rx queues\n");
+- return ret;
++ goto runtime_put;
+ }
+
+ for (i = 0; i < common->tx_ch_num; i++)
+@@ -578,7 +578,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
+
+ ret = am65_cpsw_nuss_common_open(common);
+ if (ret)
+- return ret;
++ goto runtime_put;
+
+ common->usage_count++;
+
+@@ -606,6 +606,10 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
+ error_cleanup:
+ am65_cpsw_nuss_ndo_slave_stop(ndev);
+ return ret;
++
++runtime_put:
++ pm_runtime_put(common->dev);
++ return ret;
+ }
+
+ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
+diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
+index aba70bef48945..9eb9eaff4dc90 100644
+--- a/drivers/net/ethernet/ti/netcp_core.c
++++ b/drivers/net/ethernet/ti/netcp_core.c
+@@ -1261,7 +1261,7 @@ out:
+ }
+
+ /* Submit the packet */
+-static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_stats *tx_stats = &netcp->stats;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+index a3967f8de417d..ad2c30d9a4824 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+@@ -536,7 +536,7 @@ static void xemaclite_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ xemaclite_enable_interrupts(lp);
+
+ if (lp->deferred_skb) {
+- dev_kfree_skb(lp->deferred_skb);
++ dev_kfree_skb_irq(lp->deferred_skb);
+ lp->deferred_skb = NULL;
+ dev->stats.tx_errors++;
+ }
+diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
+index b584ffe38ad68..1fef8a9b1a0fd 100644
+--- a/drivers/net/fddi/defxx.c
++++ b/drivers/net/fddi/defxx.c
+@@ -3831,10 +3831,24 @@ static int dfx_init(void)
+ int status;
+
+ status = pci_register_driver(&dfx_pci_driver);
+- if (!status)
+- status = eisa_driver_register(&dfx_eisa_driver);
+- if (!status)
+- status = tc_register_driver(&dfx_tc_driver);
++ if (status)
++ goto err_pci_register;
++
++ status = eisa_driver_register(&dfx_eisa_driver);
++ if (status)
++ goto err_eisa_register;
++
++ status = tc_register_driver(&dfx_tc_driver);
++ if (status)
++ goto err_tc_register;
++
++ return 0;
++
++err_tc_register:
++ eisa_driver_unregister(&dfx_eisa_driver);
++err_eisa_register:
++ pci_unregister_driver(&dfx_pci_driver);
++err_pci_register:
+ return status;
+ }
+
+diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
+index 791b4a53d69fd..bd3b0c2655a28 100644
+--- a/drivers/net/hamradio/baycom_epp.c
++++ b/drivers/net/hamradio/baycom_epp.c
+@@ -758,7 +758,7 @@ static void epp_bh(struct work_struct *work)
+ * ===================== network driver interface =========================
+ */
+
+-static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct baycom_state *bc = netdev_priv(dev);
+
+diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
+index f90830d3dfa69..a9184a78650b0 100644
+--- a/drivers/net/hamradio/scc.c
++++ b/drivers/net/hamradio/scc.c
+@@ -302,12 +302,12 @@ static inline void scc_discard_buffers(struct scc_channel *scc)
+ spin_lock_irqsave(&scc->lock, flags);
+ if (scc->tx_buff != NULL)
+ {
+- dev_kfree_skb(scc->tx_buff);
++ dev_kfree_skb_irq(scc->tx_buff);
+ scc->tx_buff = NULL;
+ }
+
+ while (!skb_queue_empty(&scc->tx_queue))
+- dev_kfree_skb(skb_dequeue(&scc->tx_queue));
++ dev_kfree_skb_irq(skb_dequeue(&scc->tx_queue));
+
+ spin_unlock_irqrestore(&scc->lock, flags);
+ }
+@@ -1668,7 +1668,7 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
+ if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) {
+ struct sk_buff *skb_del;
+ skb_del = skb_dequeue(&scc->tx_queue);
+- dev_kfree_skb(skb_del);
++ dev_kfree_skb_irq(skb_del);
+ }
+ skb_queue_tail(&scc->tx_queue, skb);
+ netif_trans_update(dev);
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 2fbac51b9b19e..038a787943927 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -2593,7 +2593,7 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+ struct macsec_dev *macsec;
+- int ret;
++ int ret = 0;
+
+ if (!attrs[MACSEC_ATTR_IFINDEX])
+ return -EINVAL;
+@@ -2606,28 +2606,36 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+ macsec_genl_offload_policy, NULL))
+ return -EINVAL;
+
++ rtnl_lock();
++
+ dev = get_dev_from_nl(genl_info_net(info), attrs);
+- if (IS_ERR(dev))
+- return PTR_ERR(dev);
++ if (IS_ERR(dev)) {
++ ret = PTR_ERR(dev);
++ goto out;
++ }
+ macsec = macsec_priv(dev);
+
+- if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
+- return -EINVAL;
++ if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+ offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
+ if (macsec->offload == offload)
+- return 0;
++ goto out;
+
+ /* Check if the offloading mode is supported by the underlying layers */
+ if (offload != MACSEC_OFFLOAD_OFF &&
+- !macsec_check_offload(offload, macsec))
+- return -EOPNOTSUPP;
++ !macsec_check_offload(offload, macsec)) {
++ ret = -EOPNOTSUPP;
++ goto out;
++ }
+
+ /* Check if the net device is busy. */
+- if (netif_running(dev))
+- return -EBUSY;
+-
+- rtnl_lock();
++ if (netif_running(dev)) {
++ ret = -EBUSY;
++ goto out;
++ }
+
+ prev_offload = macsec->offload;
+ macsec->offload = offload;
+@@ -2662,7 +2670,7 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+
+ rollback:
+ macsec->offload = prev_offload;
+-
++out:
+ rtnl_unlock();
+ return ret;
+ }
+diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
+index 7cd103fd34ef7..9f9eaf896047c 100644
+--- a/drivers/net/mctp/mctp-serial.c
++++ b/drivers/net/mctp/mctp-serial.c
+@@ -35,6 +35,8 @@
+ #define BYTE_FRAME 0x7e
+ #define BYTE_ESC 0x7d
+
++#define FCS_INIT 0xffff
++
+ static DEFINE_IDA(mctp_serial_ida);
+
+ enum mctp_serial_state {
+@@ -123,7 +125,7 @@ static void mctp_serial_tx_work(struct work_struct *work)
+ buf[2] = dev->txlen;
+
+ if (!dev->txpos)
+- dev->txfcs = crc_ccitt(0, buf + 1, 2);
++ dev->txfcs = crc_ccitt(FCS_INIT, buf + 1, 2);
+
+ txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos);
+ if (txlen <= 0) {
+@@ -303,7 +305,7 @@ static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c)
+ case 1:
+ if (c == MCTP_SERIAL_VERSION) {
+ dev->rxpos++;
+- dev->rxfcs = crc_ccitt_byte(0, c);
++ dev->rxfcs = crc_ccitt_byte(FCS_INIT, c);
+ } else {
+ dev->rxstate = STATE_ERR;
+ }
+diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
+index a4abea921046b..85dbe7f73e319 100644
+--- a/drivers/net/ntb_netdev.c
++++ b/drivers/net/ntb_netdev.c
+@@ -137,7 +137,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ enqueue_again:
+ rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
+ if (rc) {
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_fifo_errors++;
+ }
+@@ -192,7 +192,7 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ ndev->stats.tx_aborted_errors++;
+ }
+
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+
+ if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
+ /* Make sure anybody stopping the queue after this sees the new
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 9206c660a72ed..d4c821c8cf57c 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1743,6 +1743,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
+ int len;
+ unsigned char *cp;
+
++ skb->dev = ppp->dev;
++
+ if (proto < 0x8000) {
+ #ifdef CONFIG_PPP_FILTER
+ /* check if we should pass this packet */
+diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
+index 6a212c085435b..5b01642ca44e0 100644
+--- a/drivers/net/wan/farsync.c
++++ b/drivers/net/wan/farsync.c
+@@ -2545,6 +2545,7 @@ fst_remove_one(struct pci_dev *pdev)
+ struct net_device *dev = port_to_dev(&card->ports[i]);
+
+ unregister_hdlc_device(dev);
++ free_netdev(dev);
+ }
+
+ fst_disable_intr(card);
+@@ -2564,6 +2565,7 @@ fst_remove_one(struct pci_dev *pdev)
+ card->tx_dma_handle_card);
+ }
+ fst_card_array[card->card_no] = NULL;
++ kfree(card);
+ }
+
+ static struct pci_driver fst_driver = {
+diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
+index 6f937d2cc1263..ce3d613fa36c4 100644
+--- a/drivers/net/wireless/ath/ar5523/ar5523.c
++++ b/drivers/net/wireless/ath/ar5523/ar5523.c
+@@ -241,6 +241,11 @@ static void ar5523_cmd_tx_cb(struct urb *urb)
+ }
+ }
+
++static void ar5523_cancel_tx_cmd(struct ar5523 *ar)
++{
++ usb_kill_urb(ar->tx_cmd.urb_tx);
++}
++
+ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
+ int ilen, void *odata, int olen, int flags)
+ {
+@@ -280,6 +285,7 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
+ }
+
+ if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) {
++ ar5523_cancel_tx_cmd(ar);
+ cmd->odata = NULL;
+ ar5523_err(ar, "timeout waiting for command %02x reply\n",
+ code);
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index 400f332a7ff01..5eb131ab916fd 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -99,6 +99,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = QCA988X_HW_2_0_VERSION,
+@@ -138,6 +139,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = QCA9887_HW_1_0_VERSION,
+@@ -178,6 +180,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = QCA6174_HW_3_2_VERSION,
+@@ -213,6 +216,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = true,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = QCA6174_HW_2_1_VERSION,
+@@ -252,6 +256,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = QCA6174_HW_2_1_VERSION,
+@@ -291,6 +296,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = QCA6174_HW_3_0_VERSION,
+@@ -330,6 +336,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = QCA6174_HW_3_2_VERSION,
+@@ -373,6 +380,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = true,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = QCA99X0_HW_2_0_DEV_VERSION,
+@@ -418,6 +426,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = QCA9984_HW_1_0_DEV_VERSION,
+@@ -470,6 +479,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = QCA9888_HW_2_0_DEV_VERSION,
+@@ -519,6 +529,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = QCA9377_HW_1_0_DEV_VERSION,
+@@ -558,6 +569,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = QCA9377_HW_1_1_DEV_VERSION,
+@@ -599,6 +611,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = QCA9377_HW_1_1_DEV_VERSION,
+@@ -631,6 +644,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = QCA4019_HW_1_0_DEV_VERSION,
+@@ -677,6 +691,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
++ .delay_unmap_buffer = false,
+ },
+ {
+ .id = WCN3990_HW_1_0_DEV_VERSION,
+@@ -709,6 +724,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .dynamic_sar_support = true,
+ .hw_restart_disconnect = true,
+ .use_fw_tx_credits = false,
++ .delay_unmap_buffer = true,
+ },
+ };
+
+diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
+index 6d1784f74bea4..5bfeecb95fca2 100644
+--- a/drivers/net/wireless/ath/ath10k/htc.c
++++ b/drivers/net/wireless/ath/ath10k/htc.c
+@@ -56,6 +56,15 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
+ ep->eid, skb);
+
++ /* Corner case: the copy completion may reach the host while the
++ * copy engine is still processing the buffer, so the host unmaps the
++ * memory too early and causes an SMMU fault. As a workaround, delay
++ * unmapping the memory to avoid the SMMU fault.
++ */
++ if (ar->hw_params.delay_unmap_buffer &&
++ ep->ul_pipe_id == 3)
++ mdelay(2);
++
+ hdr = (struct ath10k_htc_hdr *)skb->data;
+ ath10k_htc_restore_tx_skb(ep->htc, skb);
+
+diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
+index 1b99f3a39a113..9643031a4427a 100644
+--- a/drivers/net/wireless/ath/ath10k/hw.h
++++ b/drivers/net/wireless/ath/ath10k/hw.h
+@@ -637,6 +637,8 @@ struct ath10k_hw_params {
+ bool hw_restart_disconnect;
+
+ bool use_fw_tx_credits;
++
++ bool delay_unmap_buffer;
+ };
+
+ struct htt_resp;
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
+index e56c6a6b13791..728d607289c36 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -3792,18 +3792,22 @@ static struct pci_driver ath10k_pci_driver = {
+
+ static int __init ath10k_pci_init(void)
+ {
+- int ret;
++ int ret1, ret2;
+
+- ret = pci_register_driver(&ath10k_pci_driver);
+- if (ret)
++ ret1 = pci_register_driver(&ath10k_pci_driver);
++ if (ret1)
+ printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
+- ret);
++ ret1);
+
+- ret = ath10k_ahb_init();
+- if (ret)
+- printk(KERN_ERR "ahb init failed: %d\n", ret);
++ ret2 = ath10k_ahb_init();
++ if (ret2)
++ printk(KERN_ERR "ahb init failed: %d\n", ret2);
+
+- return ret;
++ if (ret1 && ret2)
++ return ret1;
++
++ /* registered to at least one bus */
++ return 0;
+ }
+ module_init(ath10k_pci_init);
+
+diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
+index cf2f52cc4e30d..c20e84e031fad 100644
+--- a/drivers/net/wireless/ath/ath11k/core.h
++++ b/drivers/net/wireless/ath/ath11k/core.h
+@@ -505,6 +505,8 @@ struct ath11k_sta {
+ u64 ps_start_jiffies;
+ u64 ps_total_duration;
+ bool peer_current_ps_valid;
++
++ u32 bw_prev;
+ };
+
+ #define ATH11K_MIN_5G_FREQ 4150
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 2d1e3fd9b526c..ef7617802491e 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -4215,10 +4215,11 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
+- u32 changed, bw, nss, smps;
++ u32 changed, bw, nss, smps, bw_prev;
+ int err, num_vht_rates, num_he_rates;
+ const struct cfg80211_bitrate_mask *mask;
+ struct peer_assoc_params peer_arg;
++ enum wmi_phy_mode peer_phymode;
+
+ arsta = container_of(wk, struct ath11k_sta, update_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+@@ -4239,6 +4240,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
+ arsta->changed = 0;
+
+ bw = arsta->bw;
++ bw_prev = arsta->bw_prev;
+ nss = arsta->nss;
+ smps = arsta->smps;
+
+@@ -4252,26 +4254,57 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
+ ath11k_mac_max_he_nss(he_mcs_mask)));
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+- /* Send peer assoc command before set peer bandwidth param to
+- * avoid the mismatch between the peer phymode and the peer
+- * bandwidth.
+- */
+- ath11k_peer_assoc_prepare(ar, arvif->vif, sta, &peer_arg, true);
+-
+- peer_arg.is_assoc = false;
+- err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+- if (err) {
+- ath11k_warn(ar->ab, "failed to send peer assoc for STA %pM vdev %i: %d\n",
+- sta->addr, arvif->vdev_id, err);
+- } else if (wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
++ /* Get the peer phymode */
++ ath11k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg);
++ peer_phymode = peer_arg.peer_phymode;
++
++ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
++ sta->addr, bw, peer_phymode);
++
++ if (bw > bw_prev) {
++ /* BW is upgraded. In this case we send WMI_PEER_PHYMODE
++ * followed by WMI_PEER_CHWIDTH
++ */
++ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac BW upgrade for sta %pM new BW %d, old BW %d\n",
++ sta->addr, bw, bw_prev);
++
++ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
++ WMI_PEER_PHYMODE, peer_phymode);
++
++ if (err) {
++ ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
++ sta->addr, peer_phymode, err);
++ goto err_rc_bw_changed;
++ }
++
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_CHWIDTH, bw);
++
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ } else {
+- ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+- sta->addr, arvif->vdev_id);
++ /* BW is downgraded. In this case we send WMI_PEER_CHWIDTH
++ * followed by WMI_PEER_PHYMODE
++ */
++ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac BW downgrade for sta %pM new BW %d, old BW %d\n",
++ sta->addr, bw, bw_prev);
++
++ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
++ WMI_PEER_CHWIDTH, bw);
++
++ if (err) {
++ ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
++ sta->addr, bw, err);
++ goto err_rc_bw_changed;
++ }
++
++ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
++ WMI_PEER_PHYMODE, peer_phymode);
++
++ if (err)
++ ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
++ sta->addr, peer_phymode, err);
+ }
+ }
+
+@@ -4352,6 +4385,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
+ }
+ }
+
++err_rc_bw_changed:
+ mutex_unlock(&ar->conf_mutex);
+ }
+
+@@ -4505,6 +4539,34 @@ exit:
+ return ret;
+ }
+
++static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
++ struct ieee80211_sta *sta)
++{
++ u32 bw = WMI_PEER_CHWIDTH_20MHZ;
++
++ switch (sta->deflink.bandwidth) {
++ case IEEE80211_STA_RX_BW_20:
++ bw = WMI_PEER_CHWIDTH_20MHZ;
++ break;
++ case IEEE80211_STA_RX_BW_40:
++ bw = WMI_PEER_CHWIDTH_40MHZ;
++ break;
++ case IEEE80211_STA_RX_BW_80:
++ bw = WMI_PEER_CHWIDTH_80MHZ;
++ break;
++ case IEEE80211_STA_RX_BW_160:
++ bw = WMI_PEER_CHWIDTH_160MHZ;
++ break;
++ default:
++ ath11k_warn(ar->ab, "Invalid bandwidth %d for %pM\n",
++ sta->deflink.bandwidth, sta->addr);
++ bw = WMI_PEER_CHWIDTH_20MHZ;
++ break;
++ }
++
++ return bw;
++}
++
+ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+@@ -4590,6 +4652,12 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
+ sta->addr);
++
++ spin_lock_bh(&ar->data_lock);
++ /* Set arsta bw and prev bw */
++ arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
++ arsta->bw_prev = arsta->bw;
++ spin_unlock_bh(&ar->data_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ spin_lock_bh(&ar->ab->base_lock);
+@@ -4713,28 +4781,8 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
+ spin_lock_bh(&ar->data_lock);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+- bw = WMI_PEER_CHWIDTH_20MHZ;
+-
+- switch (sta->deflink.bandwidth) {
+- case IEEE80211_STA_RX_BW_20:
+- bw = WMI_PEER_CHWIDTH_20MHZ;
+- break;
+- case IEEE80211_STA_RX_BW_40:
+- bw = WMI_PEER_CHWIDTH_40MHZ;
+- break;
+- case IEEE80211_STA_RX_BW_80:
+- bw = WMI_PEER_CHWIDTH_80MHZ;
+- break;
+- case IEEE80211_STA_RX_BW_160:
+- bw = WMI_PEER_CHWIDTH_160MHZ;
+- break;
+- default:
+- ath11k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
+- sta->deflink.bandwidth, sta->addr);
+- bw = WMI_PEER_CHWIDTH_20MHZ;
+- break;
+- }
+-
++ bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
++ arsta->bw_prev = arsta->bw;
+ arsta->bw = bw;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index 51de2208b7899..8358fe08c2344 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -3087,6 +3087,9 @@ static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
+ sizeof(struct qmi_wlfw_fw_init_done_ind_msg_v01),
+ .fn = ath11k_qmi_msg_fw_init_done_cb,
+ },
++
++ /* end of list */
++ {},
+ };
+
+ static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
+diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
+index 4d9002a9d082c..1a2e0c7eeb023 100644
+--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
+@@ -708,14 +708,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+ struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
+ struct hif_device_usb *hif_dev = rx_buf->hif_dev;
+ struct sk_buff *skb = rx_buf->skb;
+- struct sk_buff *nskb;
+ int ret;
+
+ if (!skb)
+ return;
+
+ if (!hif_dev)
+- goto free;
++ goto free_skb;
+
+ switch (urb->status) {
+ case 0:
+@@ -724,7 +723,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+- goto free;
++ goto free_skb;
+ default:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+@@ -735,25 +734,27 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+ if (likely(urb->actual_length != 0)) {
+ skb_put(skb, urb->actual_length);
+
+- /* Process the command first */
++ /*
++ * Process the command first.
++ * skb is either freed here or passed to be
++ * managed to another callback function.
++ */
+ ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
+ skb->len, USB_REG_IN_PIPE);
+
+-
+- nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
+- if (!nskb) {
++ skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
++ if (!skb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: REG_IN memory allocation failure\n");
+- urb->context = NULL;
+- return;
++ goto free_rx_buf;
+ }
+
+- rx_buf->skb = nskb;
++ rx_buf->skb = skb;
+
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev,
+ USB_REG_IN_PIPE),
+- nskb->data, MAX_REG_IN_BUF_SIZE,
++ skb->data, MAX_REG_IN_BUF_SIZE,
+ ath9k_hif_usb_reg_in_cb, rx_buf, 1);
+ }
+
+@@ -762,12 +763,13 @@ resubmit:
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ usb_unanchor_urb(urb);
+- goto free;
++ goto free_skb;
+ }
+
+ return;
+-free:
++free_skb:
+ kfree_skb(skb);
++free_rx_buf:
+ kfree(rx_buf);
+ urb->context = NULL;
+ }
+@@ -780,14 +782,10 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_buf, list) {
+- usb_get_urb(tx_buf->urb);
+- spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+- usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+- spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ }
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+@@ -1329,10 +1327,24 @@ static int send_eject_command(struct usb_interface *interface)
+ static int ath9k_hif_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+ {
++ struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in, *int_out;
+ struct usb_device *udev = interface_to_usbdev(interface);
++ struct usb_host_interface *alt;
+ struct hif_device_usb *hif_dev;
+ int ret = 0;
+
++ /* Verify the expected endpoints are present */
++ alt = interface->cur_altsetting;
++ if (usb_find_common_endpoints(alt, &bulk_in, &bulk_out, &int_in, &int_out) < 0 ||
++ usb_endpoint_num(bulk_in) != USB_WLAN_RX_PIPE ||
++ usb_endpoint_num(bulk_out) != USB_WLAN_TX_PIPE ||
++ usb_endpoint_num(int_in) != USB_REG_IN_PIPE ||
++ usb_endpoint_num(int_out) != USB_REG_OUT_PIPE) {
++ dev_err(&udev->dev,
++ "ath9k_htc: Device endpoint numbers are not the expected ones\n");
++ return -ENODEV;
++ }
++
+ if (id->driver_info == STORAGE_DEVICE)
+ return send_eject_command(interface);
+
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+index 74020fa100659..22344e68fd597 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+@@ -305,8 +305,12 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
+ brcmf_info("Firmware: %s %s\n", ri->chipname, buf);
+
+ /* locate firmware version number for ethtool */
+- ptr = strrchr(buf, ' ') + 1;
+- strscpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
++ ptr = strrchr(buf, ' ');
++ if (!ptr) {
++ bphy_err(drvr, "Retrieving version number failed");
++ goto done;
++ }
++ strscpy(ifp->drvr->fwver, ptr + 1, sizeof(ifp->drvr->fwver));
+
+ /* Query for 'clmver' to get CLM version info from firmware */
+ memset(buf, 0, sizeof(buf));
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+index f2207793f6e27..09d2f2dc2b46f 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+@@ -803,6 +803,11 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
+ u32 i, j;
+ char end = '\0';
+
++ if (chiprev >= BITS_PER_TYPE(u32)) {
++ brcmf_err("Invalid chip revision %u\n", chiprev);
++ return NULL;
++ }
++
+ for (i = 0; i < table_size; i++) {
+ if (mapping_table[i].chipid == chip &&
+ mapping_table[i].revmask & BIT(chiprev))
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+index 80083f9ea3116..5630f6e718e12 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+@@ -726,7 +726,7 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
+ }
+
+ if (!brcmf_chip_set_active(devinfo->ci, resetintr))
+- return -EINVAL;
++ return -EIO;
+ return 0;
+ }
+
+@@ -1218,6 +1218,10 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
+ BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
+ }
++ if (max_flowrings > 256) {
++ brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings);
++ return -EIO;
++ }
+
+ if (devinfo->dma_idx_sz != 0) {
+ bufsz = (max_submissionrings + max_completionrings) *
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index 465d95d837592..e265a2e411a09 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -3414,6 +3414,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
+ /* Take arm out of reset */
+ if (!brcmf_chip_set_active(bus->ci, rstvec)) {
+ brcmf_err("error getting out of ARM core reset\n");
++ bcmerror = -EIO;
+ goto err;
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
+index 67122cfa22920..5409699c9a1fd 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
++++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
+@@ -446,9 +446,10 @@ void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
+ void iwl_mei_host_disassociated(void);
+
+ /**
+- * iwl_mei_device_down() - must be called when the device is down
++ * iwl_mei_device_state() - must be called when the device changes up/down state
++ * @up: true if the device is up, false otherwise.
+ */
+-void iwl_mei_device_down(void);
++void iwl_mei_device_state(bool up);
+
+ #else
+
+@@ -497,7 +498,7 @@ static inline void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_
+ static inline void iwl_mei_host_disassociated(void)
+ {}
+
+-static inline void iwl_mei_device_down(void)
++static inline void iwl_mei_device_state(bool up)
+ {}
+
+ #endif /* CONFIG_IWLMEI */
+diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
+index 357f14626cf43..c0142093c7682 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
++++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
+@@ -147,9 +147,13 @@ struct iwl_mei_filters {
+ * to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
+ * flow.
+ * @link_prot_state: true when we are in link protection PASSIVE
++ * @device_down: true if the device is down. Used to remember to send
++ * CSME_OWNERSHIP_CONFIRMED when the driver is already down.
+ * @csa_throttle_end_wk: used when &csa_throttled is true
+ * @data_q_lock: protects the access to the data queues which are
+ * accessed without the mutex.
++ * @netdev_work: used to defer registering and unregistering of the netdev to
++ * avoid taking the rtnl lock in the SAP messages handlers.
+ * @sap_seq_no: the sequence number for the SAP messages
+ * @seq_no: the sequence number for the SAP messages
+ * @dbgfs_dir: the debugfs dir entry
+@@ -167,8 +171,10 @@ struct iwl_mei {
+ bool csa_throttled;
+ bool csme_taking_ownership;
+ bool link_prot_state;
++ bool device_down;
+ struct delayed_work csa_throttle_end_wk;
+ spinlock_t data_q_lock;
++ struct work_struct netdev_work;
+
+ atomic_t sap_seq_no;
+ atomic_t seq_no;
+@@ -588,13 +594,38 @@ static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb)
+ return res;
+ }
+
++static void iwl_mei_netdev_work(struct work_struct *wk)
++{
++ struct iwl_mei *mei =
++ container_of(wk, struct iwl_mei, netdev_work);
++ struct net_device *netdev;
++
++ /*
++ * First take rtnl and only then the mutex to avoid an ABBA
++ * with iwl_mei_set_netdev()
++ */
++ rtnl_lock();
++ mutex_lock(&iwl_mei_mutex);
++
++ netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
++ lockdep_is_held(&iwl_mei_mutex));
++ if (netdev) {
++ if (mei->amt_enabled)
++ netdev_rx_handler_register(netdev, iwl_mei_rx_handler,
++ mei);
++ else
++ netdev_rx_handler_unregister(netdev);
++ }
++
++ mutex_unlock(&iwl_mei_mutex);
++ rtnl_unlock();
++}
++
+ static void
+ iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
+ const struct iwl_sap_me_msg_start_ok *rsp,
+ ssize_t len)
+ {
+- struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+-
+ if (len != sizeof(*rsp)) {
+ dev_err(&cldev->dev,
+ "got invalid SAP_ME_MSG_START_OK from CSME firmware\n");
+@@ -613,13 +644,10 @@ iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
+
+ mutex_lock(&iwl_mei_mutex);
+ set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
+- /* wifi driver has registered already */
+- if (iwl_mei_cache.ops) {
+- iwl_mei_send_sap_msg(mei->cldev,
+- SAP_MSG_NOTIF_WIFIDR_UP);
+- iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
+- }
+-
++ /*
++ * We'll receive AMT_STATE SAP message in a bit and
++ * that will continue the flow
++ */
+ mutex_unlock(&iwl_mei_mutex);
+ }
+
+@@ -712,6 +740,13 @@ static void iwl_mei_set_init_conf(struct iwl_mei *mei)
+ .val = cpu_to_le32(iwl_mei_cache.rf_kill),
+ };
+
++ /* wifi driver has registered already */
++ if (iwl_mei_cache.ops) {
++ iwl_mei_send_sap_msg(mei->cldev,
++ SAP_MSG_NOTIF_WIFIDR_UP);
++ iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
++ }
++
+ iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC);
+
+ if (iwl_mei_cache.conn_info) {
+@@ -738,38 +773,23 @@ static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
+ const struct iwl_sap_msg_dw *dw)
+ {
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+- struct net_device *netdev;
+
+- /*
+- * First take rtnl and only then the mutex to avoid an ABBA
+- * with iwl_mei_set_netdev()
+- */
+- rtnl_lock();
+ mutex_lock(&iwl_mei_mutex);
+
+- netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
+- lockdep_is_held(&iwl_mei_mutex));
+-
+ if (mei->amt_enabled == !!le32_to_cpu(dw->val))
+ goto out;
+
+ mei->amt_enabled = dw->val;
+
+- if (mei->amt_enabled) {
+- if (netdev)
+- netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
+-
++ if (mei->amt_enabled)
+ iwl_mei_set_init_conf(mei);
+- } else {
+- if (iwl_mei_cache.ops)
+- iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
+- if (netdev)
+- netdev_rx_handler_unregister(netdev);
+- }
++ else if (iwl_mei_cache.ops)
++ iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
++
++ schedule_work(&mei->netdev_work);
+
+ out:
+ mutex_unlock(&iwl_mei_mutex);
+- rtnl_unlock();
+ }
+
+ static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev,
+@@ -798,14 +818,18 @@ static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
+
+ mei->got_ownership = false;
+
+- /*
+- * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi driver
+- * is finished taking the device down.
+- */
+- mei->csme_taking_ownership = true;
++ if (iwl_mei_cache.ops && !mei->device_down) {
++ /*
++ * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi
++ * driver is finished taking the device down.
++ */
++ mei->csme_taking_ownership = true;
+
+- if (iwl_mei_cache.ops)
+- iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true);
++ iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true, true);
++ } else {
++ iwl_mei_send_sap_msg(cldev,
++ SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
++ }
+ }
+
+ static void iwl_mei_handle_nvm(struct mei_cl_device *cldev,
+@@ -1413,10 +1437,7 @@ void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+- if (!mei)
+- goto out;
+-
+- if (!mei->amt_enabled)
++ if (!mei && !mei->amt_enabled)
+ goto out;
+
+ iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+@@ -1445,7 +1466,7 @@ void iwl_mei_host_disassociated(void)
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+- if (!mei)
++ if (!mei && !mei->amt_enabled)
+ goto out;
+
+ iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+@@ -1481,7 +1502,7 @@ void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+- if (!mei)
++ if (!mei && !mei->amt_enabled)
+ goto out;
+
+ iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+@@ -1510,7 +1531,7 @@ void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+- if (!mei)
++ if (!mei && !mei->amt_enabled)
+ goto out;
+
+ iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+@@ -1538,7 +1559,7 @@ void iwl_mei_set_country_code(u16 mcc)
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+- if (!mei)
++ if (!mei && !mei->amt_enabled)
+ goto out;
+
+ iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+@@ -1564,7 +1585,7 @@ void iwl_mei_set_power_limit(const __le16 *power_limit)
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+- if (!mei)
++ if (!mei && !mei->amt_enabled)
+ goto out;
+
+ memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table));
+@@ -1616,7 +1637,7 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(iwl_mei_set_netdev);
+
+-void iwl_mei_device_down(void)
++void iwl_mei_device_state(bool up)
+ {
+ struct iwl_mei *mei;
+
+@@ -1630,7 +1651,9 @@ void iwl_mei_device_down(void)
+ if (!mei)
+ goto out;
+
+- if (!mei->csme_taking_ownership)
++ mei->device_down = !up;
++
++ if (up || !mei->csme_taking_ownership)
+ goto out;
+
+ iwl_mei_send_sap_msg(mei->cldev,
+@@ -1639,7 +1662,7 @@ void iwl_mei_device_down(void)
+ out:
+ mutex_unlock(&iwl_mei_mutex);
+ }
+-EXPORT_SYMBOL_GPL(iwl_mei_device_down);
++EXPORT_SYMBOL_GPL(iwl_mei_device_state);
+
+ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
+ {
+@@ -1669,9 +1692,10 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
+
+ /* we have already a SAP connection */
+ if (iwl_mei_is_connected()) {
+- iwl_mei_send_sap_msg(mei->cldev,
+- SAP_MSG_NOTIF_WIFIDR_UP);
+- ops->rfkill(priv, mei->link_prot_state);
++ if (mei->amt_enabled)
++ iwl_mei_send_sap_msg(mei->cldev,
++ SAP_MSG_NOTIF_WIFIDR_UP);
++ ops->rfkill(priv, mei->link_prot_state, false);
+ }
+ }
+ ret = 0;
+@@ -1818,9 +1842,11 @@ static int iwl_mei_probe(struct mei_cl_device *cldev,
+ iwl_mei_csa_throttle_end_wk);
+ init_waitqueue_head(&mei->get_ownership_wq);
+ spin_lock_init(&mei->data_q_lock);
++ INIT_WORK(&mei->netdev_work, iwl_mei_netdev_work);
+
+ mei_cldev_set_drvdata(cldev, mei);
+ mei->cldev = cldev;
++ mei->device_down = true;
+
+ do {
+ ret = iwl_mei_alloc_shared_mem(cldev);
+@@ -1921,29 +1947,32 @@ static void iwl_mei_remove(struct mei_cl_device *cldev)
+
+ mutex_lock(&iwl_mei_mutex);
+
+- /*
+- * Tell CSME that we are going down so that it won't access the
+- * memory anymore, make sure this message goes through immediately.
+- */
+- mei->csa_throttled = false;
+- iwl_mei_send_sap_msg(mei->cldev,
+- SAP_MSG_NOTIF_HOST_GOES_DOWN);
++ if (mei->amt_enabled) {
++ /*
++ * Tell CSME that we are going down so that it won't access the
++ * memory anymore, make sure this message goes through immediately.
++ */
++ mei->csa_throttled = false;
++ iwl_mei_send_sap_msg(mei->cldev,
++ SAP_MSG_NOTIF_HOST_GOES_DOWN);
+
+- for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
+- if (!iwl_mei_host_to_me_data_pending(mei))
+- break;
++ for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
++ if (!iwl_mei_host_to_me_data_pending(mei))
++ break;
+
+- msleep(5);
+- }
++ msleep(20);
++ }
+
+- /*
+- * If we couldn't make sure that CSME saw the HOST_GOES_DOWN message,
+- * it means that it will probably keep reading memory that we are going
+- * to unmap and free, expect IOMMU error messages.
+- */
+- if (i == SEND_SAP_MAX_WAIT_ITERATION)
+- dev_err(&mei->cldev->dev,
+- "Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
++ /*
++ * If we couldn't make sure that CSME saw the HOST_GOES_DOWN
++ * message, it means that it will probably keep reading memory
++ * that we are going to unmap and free, expect IOMMU error
++ * messages.
++ */
++ if (i == SEND_SAP_MAX_WAIT_ITERATION)
++ dev_err(&mei->cldev->dev,
++ "Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
++ }
+
+ mutex_unlock(&iwl_mei_mutex);
+
+@@ -1976,6 +2005,7 @@ static void iwl_mei_remove(struct mei_cl_device *cldev)
+ */
+ cancel_work_sync(&mei->send_csa_msg_wk);
+ cancel_delayed_work_sync(&mei->csa_throttle_end_wk);
++ cancel_work_sync(&mei->netdev_work);
+
+ /*
+ * If someone waits for the ownership, let him know that we are going
+diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c
+index 3472167c83707..eac46d1a397a8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mei/net.c
++++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (C) 2021 Intel Corporation
++ * Copyright (C) 2021-2022 Intel Corporation
+ */
+
+ #include <uapi/linux/if_ether.h>
+@@ -337,10 +337,14 @@ rx_handler_result_t iwl_mei_rx_filter(struct sk_buff *orig_skb,
+ if (!*pass_to_csme)
+ return RX_HANDLER_PASS;
+
+- if (ret == RX_HANDLER_PASS)
++ if (ret == RX_HANDLER_PASS) {
+ skb = skb_copy(orig_skb, GFP_ATOMIC);
+- else
++
++ if (!skb)
++ return RX_HANDLER_PASS;
++ } else {
+ skb = orig_skb;
++ }
+
+ /* CSME wants the MAC header as well, push it back */
+ skb_push(skb, skb->data - skb_mac_header(skb));
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index f041e77af059e..5de34edc51fe9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -1665,6 +1665,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
+ iwl_rfi_send_config_cmd(mvm, NULL);
+ }
+
++ iwl_mvm_mei_device_state(mvm, true);
++
+ IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
+ return 0;
+ error:
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index 97cba526e4651..1ccb3cad7cdc1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -2201,10 +2201,10 @@ static inline void iwl_mvm_mei_host_disassociated(struct iwl_mvm *mvm)
+ iwl_mei_host_disassociated();
+ }
+
+-static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm)
++static inline void iwl_mvm_mei_device_state(struct iwl_mvm *mvm, bool up)
+ {
+ if (mvm->mei_registered)
+- iwl_mei_device_down();
++ iwl_mei_device_state(up);
+ }
+
+ static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index d2d42cd48af22..5b8e9a06f6d4a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -1375,7 +1375,7 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm)
+ iwl_trans_stop_device(mvm->trans);
+ iwl_free_fw_paging(&mvm->fwrt);
+ iwl_fw_dump_conf_clear(&mvm->fwrt);
+- iwl_mvm_mei_device_down(mvm);
++ iwl_mvm_mei_device_state(mvm, false);
+ }
+
+ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 86d20e13bf47a..ba944175546d4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -1171,9 +1171,15 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
+ /* From now on, we cannot access info->control */
+ iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
++ /*
++ * The IV is introduced by the HW for new tx api, and it is not present
++ * in the skb, hence, don't tell iwl_mvm_mei_tx_copy_to_csme about the
++ * IV for those devices.
++ */
+ if (ieee80211_is_data(fc))
+ iwl_mvm_mei_tx_copy_to_csme(mvm, skb,
+- info->control.hw_key ?
++ info->control.hw_key &&
++ !iwl_mvm_has_new_tx_api(mvm) ?
+ info->control.hw_key->iv_len : 0);
+
+ if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
+@@ -1206,6 +1212,7 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct sk_buff_head mpdus_skbs;
+ unsigned int payload_len;
+ int ret;
++ struct sk_buff *orig_skb = skb;
+
+ if (WARN_ON_ONCE(!mvmsta))
+ return -1;
+@@ -1238,8 +1245,17 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+
+ ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ if (ret) {
++ /* Free skbs created as part of TSO logic that have not yet been dequeued */
+ __skb_queue_purge(&mpdus_skbs);
+- return ret;
++ /* skb here is not necessarily same as skb that entered this method,
++ * so free it explicitly.
++ */
++ if (skb == orig_skb)
++ ieee80211_free_txskb(mvm->hw, skb);
++ else
++ kfree_skb(skb);
++ /* there was error, but we consumed skb one way or another, so return 0 */
++ return 0;
+ }
+ }
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index 87db9498dea44..7bcf7a6b67df3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -1107,8 +1107,9 @@ static inline bool mt76_is_skb_pktid(u8 pktid)
+ static inline u8 mt76_tx_power_nss_delta(u8 nss)
+ {
+ static const u8 nss_delta[4] = { 0, 6, 9, 12 };
++ u8 idx = nss - 1;
+
+- return nss_delta[nss - 1];
++ return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0;
+ }
+
+ static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index 011fc9729b38c..025a237c1cce8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -2834,6 +2834,9 @@ mt76_connac_mcu_send_ram_firmware(struct mt76_dev *dev,
+ len = le32_to_cpu(region->len);
+ addr = le32_to_cpu(region->addr);
+
++ if (region->feature_set & FW_FEATURE_NON_DL)
++ goto next;
++
+ if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
+ override = addr;
+
+@@ -2850,6 +2853,7 @@ mt76_connac_mcu_send_ram_firmware(struct mt76_dev *dev,
+ return err;
+ }
+
++next:
+ offset += len;
+ }
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+index 4b1a9811646fd..0bce0ce51be00 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+@@ -173,60 +173,50 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
+ void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
+ struct mt7915_phy *phy)
+ {
+- u8 nss, nss_band, nss_band_max, *eeprom = dev->mt76.eeprom.data;
++ u8 path, nss, nss_max = 4, *eeprom = dev->mt76.eeprom.data;
+ struct mt76_phy *mphy = phy->mt76;
+- bool ext_phy = phy != &dev->phy;
+
+ mt7915_eeprom_parse_band_config(phy);
+
+- /* read tx/rx mask from eeprom */
++ /* read tx/rx path from eeprom */
+ if (is_mt7915(&dev->mt76)) {
+- nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+- eeprom[MT_EE_WIFI_CONF]);
++ path = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
++ eeprom[MT_EE_WIFI_CONF]);
+ } else {
+- nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+- eeprom[MT_EE_WIFI_CONF + phy->band_idx]);
++ path = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
++ eeprom[MT_EE_WIFI_CONF + phy->band_idx]);
+ }
+
+- if (!nss || nss > 4)
+- nss = 4;
++ if (!path || path > 4)
++ path = 4;
+
+ /* read tx/rx stream */
+- nss_band = nss;
+-
++ nss = path;
+ if (dev->dbdc_support) {
+ if (is_mt7915(&dev->mt76)) {
+- nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
+- eeprom[MT_EE_WIFI_CONF + 3]);
++ path = min_t(u8, path, 2);
++ nss = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
++ eeprom[MT_EE_WIFI_CONF + 3]);
+ if (phy->band_idx)
+- nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
+- eeprom[MT_EE_WIFI_CONF + 3]);
++ nss = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
++ eeprom[MT_EE_WIFI_CONF + 3]);
+ } else {
+- nss_band = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
+- eeprom[MT_EE_WIFI_CONF + 2 + phy->band_idx]);
++ nss = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
++ eeprom[MT_EE_WIFI_CONF + 2 + phy->band_idx]);
+ }
+
+- nss_band_max = is_mt7986(&dev->mt76) ?
+- MT_EE_NSS_MAX_DBDC_MA7986 : MT_EE_NSS_MAX_DBDC_MA7915;
+- } else {
+- nss_band_max = is_mt7986(&dev->mt76) ?
+- MT_EE_NSS_MAX_MA7986 : MT_EE_NSS_MAX_MA7915;
++ if (!is_mt7986(&dev->mt76))
++ nss_max = 2;
+ }
+
+- if (!nss_band || nss_band > nss_band_max)
+- nss_band = nss_band_max;
+-
+- if (nss_band > nss) {
+- dev_warn(dev->mt76.dev,
+- "nss mismatch, nss(%d) nss_band(%d) band(%d) ext_phy(%d)\n",
+- nss, nss_band, phy->band_idx, ext_phy);
+- nss = nss_band;
+- }
++ if (!nss)
++ nss = nss_max;
++ nss = min_t(u8, min_t(u8, nss_max, nss), path);
+
+- mphy->chainmask = BIT(nss) - 1;
+- if (ext_phy)
++ mphy->chainmask = BIT(path) - 1;
++ if (phy->band_idx)
+ mphy->chainmask <<= dev->chainshift;
+- mphy->antenna_mask = BIT(nss_band) - 1;
++ mphy->antenna_mask = BIT(nss) - 1;
+ dev->chainmask |= mphy->chainmask;
+ dev->chainshift = hweight8(dev->mphy.chainmask);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+index 7578ac6d0be62..f3e56817d36e9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+@@ -58,11 +58,6 @@ enum mt7915_eeprom_field {
+ #define MT_EE_RATE_DELTA_SIGN BIT(6)
+ #define MT_EE_RATE_DELTA_EN BIT(7)
+
+-#define MT_EE_NSS_MAX_MA7915 4
+-#define MT_EE_NSS_MAX_DBDC_MA7915 2
+-#define MT_EE_NSS_MAX_MA7986 4
+-#define MT_EE_NSS_MAX_DBDC_MA7986 4
+-
+ enum mt7915_adie_sku {
+ MT7976_ONE_ADIE_DBDC = 0x7,
+ MT7975_ONE_ADIE = 0x8,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index a4bcc617c1a34..e6bf6e04d4b9c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -1151,7 +1151,7 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
+ u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
+- int offset;
++ int eifs_ofdm = 360, sifs = 10, offset;
+ bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+@@ -1169,17 +1169,26 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
+ reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+
++ if (!is_mt7915(&dev->mt76)) {
++ if (!a_band) {
++ mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
++ FIELD_PREP(MT_IFS_EIFS_CCK, 314));
++ eifs_ofdm = 78;
++ } else {
++ eifs_ofdm = 84;
++ }
++ } else if (a_band) {
++ sifs = 16;
++ }
++
+ mt76_wr(dev, MT_TMAC_CDTR(phy->band_idx), cck + reg_offset);
+ mt76_wr(dev, MT_TMAC_ODTR(phy->band_idx), ofdm + reg_offset);
+ mt76_wr(dev, MT_TMAC_ICR0(phy->band_idx),
+- FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
++ FIELD_PREP(MT_IFS_EIFS_OFDM, eifs_ofdm) |
+ FIELD_PREP(MT_IFS_RIFS, 2) |
+- FIELD_PREP(MT_IFS_SIFS, 10) |
++ FIELD_PREP(MT_IFS_SIFS, sifs) |
+ FIELD_PREP(MT_IFS_SLOT, phy->slottime));
+
+- mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
+- FIELD_PREP(MT_IFS_EIFS_CCK, 314));
+-
+ if (phy->slottime < 20 || a_band)
+ val = MT7915_CFEND_RATE_DEFAULT;
+ else
+@@ -1600,7 +1609,7 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
+
+ aggr0 = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ if (is_mt7915(&dev->mt76)) {
+- for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
++ for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 4)));
+ mib->ba_miss_cnt +=
+ FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+index 728a879c3b008..3808ce1647d9e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+@@ -65,10 +65,17 @@ static void mt7915_put_hif2(struct mt7915_hif *hif)
+
+ static struct mt7915_hif *mt7915_pci_init_hif2(struct pci_dev *pdev)
+ {
++ struct pci_dev *tmp_pdev;
++
+ hif_idx++;
+- if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL) &&
+- !pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x790a, NULL))
+- return NULL;
++
++ tmp_pdev = pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL);
++ if (!tmp_pdev) {
++ tmp_pdev = pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x790a, NULL);
++ if (!tmp_pdev)
++ return NULL;
++ }
++ pci_dev_put(tmp_pdev);
+
+ writel(hif_idx | MT_PCIE_RECOG_ID_SEM,
+ pcim_iomap_table(pdev)[0] + MT_PCIE_RECOG_ID);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+index dcdb3cf04ac1b..4ad66b3443838 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+@@ -37,6 +37,7 @@ mt7921_regd_notifier(struct wiphy *wiphy,
+
+ memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
+ dev->mt76.region = request->dfs_region;
++ dev->country_ie_env = request->country_ie_env;
+
+ mt7921_mutex_acquire(dev);
+ mt7921_mcu_set_clc(dev, request->alpha2, request->country_ie_env);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+index 650ab97ae0524..1c0d8cf19b8eb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+@@ -396,6 +396,27 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
+ if (v0 & MT_PRXV_HT_AD_CODE)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
++ ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, sband,
++ rxv, &mode);
++ if (ret < 0)
++ return ret;
++
++ if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
++ rxd += 6;
++ if ((u8 *)rxd - skb->data >= skb->len)
++ return -EINVAL;
++
++ rxv = rxd;
++ /* Monitor mode would use RCPI described in GROUP 5
++ * instead.
++ */
++ v1 = le32_to_cpu(rxv[0]);
++
++ rxd += 12;
++ if ((u8 *)rxd - skb->data >= skb->len)
++ return -EINVAL;
++ }
++
+ status->chains = mphy->antenna_mask;
+ status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
+ status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
+@@ -410,17 +431,6 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
+ status->signal = max(status->signal,
+ status->chain_signal[i]);
+ }
+-
+- ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, sband,
+- rxv, &mode);
+- if (ret < 0)
+- return ret;
+-
+- if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
+- rxd += 18;
+- if ((u8 *)rxd - skb->data >= skb->len)
+- return -EINVAL;
+- }
+ }
+
+ amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
+@@ -974,7 +984,7 @@ void mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
+ mib->tx_amsdu_cnt += val;
+ }
+
+- for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
++ for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
+ u32 val2;
+
+ val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 7e409ac7d9a82..111d9221b94f5 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -1504,7 +1504,13 @@ static int mt7921_set_sar_specs(struct ieee80211_hw *hw,
+ int err;
+
+ mt7921_mutex_acquire(dev);
++ err = mt7921_mcu_set_clc(dev, dev->mt76.alpha2,
++ dev->country_ie_env);
++ if (err < 0)
++ goto out;
++
+ err = mt7921_set_tx_sar_pwr(hw, sar);
++out:
+ mt7921_mutex_release(dev);
+
+ return err;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+index eaba114a9c7e4..d36b940c0a07a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+@@ -171,7 +171,7 @@ struct mt7921_clc {
+ u8 type;
+ u8 rsv[8];
+ u8 data[];
+-};
++} __packed;
+
+ struct mt7921_phy {
+ struct mt76_phy *mt76;
+@@ -244,6 +244,8 @@ struct mt7921_dev {
+ struct work_struct ipv6_ns_work;
+ /* IPv6 addresses for WoWLAN */
+ struct sk_buff_head ipv6_ns_list;
++
++ enum environment_cap country_ie_env;
+ };
+
+ enum {
+diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
+index 4c4033bb1bb35..0597df2729a62 100644
+--- a/drivers/net/wireless/mediatek/mt76/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/usb.c
+@@ -766,6 +766,9 @@ static void mt76u_status_worker(struct mt76_worker *w)
+ struct mt76_queue *q;
+ int i;
+
++ if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state))
++ return;
++
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = dev->phy.q_tx[i];
+ if (!q)
+@@ -785,11 +788,11 @@ static void mt76u_status_worker(struct mt76_worker *w)
+ wake_up(&dev->tx_wait);
+
+ mt76_worker_schedule(&dev->tx_worker);
+-
+- if (dev->drv->tx_status_data &&
+- !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
+- queue_work(dev->wq, &dev->usb.stat_work);
+ }
++
++ if (dev->drv->tx_status_data &&
++ !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
++ queue_work(dev->wq, &dev->usb.stat_work);
+ }
+
+ static void mt76u_tx_status_data(struct work_struct *work)
+diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
+index 39e54b3787d6a..76d0a778636a4 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
++++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
+@@ -247,6 +247,7 @@ error:
+ for (i = 0; i < RX_URBS_COUNT; i++)
+ free_rx_urb(urbs[i]);
+ }
++ kfree(urbs);
+ return r;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+index 782b089a2e1ba..1ba66b8f70c95 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+@@ -1190,7 +1190,7 @@ struct rtl8723bu_c2h {
+ u8 bw;
+ } __packed ra_report;
+ };
+-};
++} __packed;
+
+ struct rtl8xxxu_fileops;
+
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index ac641a56efb09..e9c1b62c9c3c2 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -1608,18 +1608,18 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+ {
+ struct device *dev = &priv->udev->dev;
+ struct ieee80211_hw *hw = priv->hw;
+- u32 val32, bonding;
++ u32 val32, bonding, sys_cfg;
+ u16 val16;
+
+- val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
+- priv->chip_cut = (val32 & SYS_CFG_CHIP_VERSION_MASK) >>
++ sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG);
++ priv->chip_cut = (sys_cfg & SYS_CFG_CHIP_VERSION_MASK) >>
+ SYS_CFG_CHIP_VERSION_SHIFT;
+- if (val32 & SYS_CFG_TRP_VAUX_EN) {
++ if (sys_cfg & SYS_CFG_TRP_VAUX_EN) {
+ dev_info(dev, "Unsupported test chip\n");
+ return -ENOTSUPP;
+ }
+
+- if (val32 & SYS_CFG_BT_FUNC) {
++ if (sys_cfg & SYS_CFG_BT_FUNC) {
+ if (priv->chip_cut >= 3) {
+ sprintf(priv->chip_name, "8723BU");
+ priv->rtl_chip = RTL8723B;
+@@ -1641,7 +1641,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+ if (val32 & MULTI_GPS_FUNC_EN)
+ priv->has_gps = 1;
+ priv->is_multi_func = 1;
+- } else if (val32 & SYS_CFG_TYPE_ID) {
++ } else if (sys_cfg & SYS_CFG_TYPE_ID) {
+ bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
+ bonding &= HPON_FSM_BONDING_MASK;
+ if (priv->fops->tx_desc_size ==
+@@ -1692,7 +1692,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+ case RTL8188E:
+ case RTL8192E:
+ case RTL8723B:
+- switch (val32 & SYS_CFG_VENDOR_EXT_MASK) {
++ switch (sys_cfg & SYS_CFG_VENDOR_EXT_MASK) {
+ case SYS_CFG_VENDOR_ID_TSMC:
+ sprintf(priv->chip_vendor, "TSMC");
+ break;
+@@ -1709,7 +1709,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+ }
+ break;
+ default:
+- if (val32 & SYS_CFG_VENDOR_ID) {
++ if (sys_cfg & SYS_CFG_VENDOR_ID) {
+ sprintf(priv->chip_vendor, "UMC");
+ priv->vendor_umc = 1;
+ } else {
+@@ -4654,7 +4654,6 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ if (sta->deflink.ht_cap.cap &
+ (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
+ sgi = 1;
+- rcu_read_unlock();
+
+ highest_rate = fls(ramask) - 1;
+ if (highest_rate < DESC_RATE_MCS0) {
+@@ -4679,6 +4678,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ else
+ rarpt->txrate.bw = RATE_INFO_BW_20;
+ }
++ rcu_read_unlock();
+ bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate);
+ rarpt->bit_rate = bit_rate;
+ rarpt->desc_rate = highest_rate;
+@@ -5574,7 +5574,6 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
+ rarpt->txrate.flags = 0;
+ rate = c2h->ra_report.rate;
+ sgi = c2h->ra_report.sgi;
+- bw = c2h->ra_report.bw;
+
+ if (rate < DESC_RATE_MCS0) {
+ rarpt->txrate.legacy =
+@@ -5591,8 +5590,13 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
+ RATE_INFO_FLAGS_SHORT_GI;
+ }
+
+- if (bw == RATE_INFO_BW_20)
+- rarpt->txrate.bw |= RATE_INFO_BW_20;
++ if (skb->len >= offsetofend(typeof(*c2h), ra_report.bw)) {
++ if (c2h->ra_report.bw == RTL8XXXU_CHANNEL_WIDTH_40)
++ bw = RATE_INFO_BW_40;
++ else
++ bw = RATE_INFO_BW_20;
++ rarpt->txrate.bw = bw;
++ }
+ }
+ bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate);
+ rarpt->bit_rate = bit_rate;
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index bc2994865372b..ad420d7ec8af9 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -2527,7 +2527,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
+ }
+
+ /* update cam aid mac_id net_type */
+- rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
++ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c cam\n");
+ return ret;
+diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
+index 0508dfca8edf7..077fddc5fa1ea 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac.c
++++ b/drivers/net/wireless/realtek/rtw89/mac.c
+@@ -1429,10 +1429,8 @@ static int dle_mix_cfg(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg
+ #define INVALID_QT_WCPU U16_MAX
+ #define SET_QUOTA_VAL(_min_x, _max_x, _module, _idx) \
+ do { \
+- val = ((_min_x) & \
+- B_AX_ ## _module ## _MIN_SIZE_MASK) | \
+- (((_max_x) << 16) & \
+- B_AX_ ## _module ## _MAX_SIZE_MASK); \
++ val = u32_encode_bits(_min_x, B_AX_ ## _module ## _MIN_SIZE_MASK) | \
++ u32_encode_bits(_max_x, B_AX_ ## _module ## _MAX_SIZE_MASK); \
+ rtw89_write32(rtwdev, \
+ R_AX_ ## _module ## _QTA ## _idx ## _CFG, \
+ val); \
+diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
+index 6a6bdc652e09e..c894a2b614eb1 100644
+--- a/drivers/net/wireless/realtek/rtw89/phy.c
++++ b/drivers/net/wireless/realtek/rtw89/phy.c
+@@ -3139,7 +3139,7 @@ void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
+
+ static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page)
+ {
+- if (*ie_page > RTW89_PHYSTS_BITMAP_NUM ||
++ if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
+ *ie_page == RTW89_RSVD_9)
+ return false;
+ else if (*ie_page > RTW89_RSVD_9)
+diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
+index 0f3a80f66b61c..ead4d4e043280 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_core.c
++++ b/drivers/net/wireless/rsi/rsi_91x_core.c
+@@ -466,7 +466,9 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
+ tid, 0);
+ }
+ }
+- if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
++
++ if (IEEE80211_SKB_CB(skb)->control.flags &
++ IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
+ q_num = MGMT_SOFT_Q;
+ skb->priority = q_num;
+ }
+diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
+index c61f83a7333b6..c7460fbba0142 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
++++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
+@@ -162,12 +162,16 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
+ u8 header_size;
+ u8 vap_id = 0;
+ u8 dword_align_bytes;
++ bool tx_eapol;
+ u16 seq_num;
+
+ info = IEEE80211_SKB_CB(skb);
+ vif = info->control.vif;
+ tx_params = (struct skb_info *)info->driver_data;
+
++ tx_eapol = IEEE80211_SKB_CB(skb)->control.flags &
++ IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
++
+ header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc);
+ if (header_size > skb_headroom(skb)) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
+@@ -231,7 +235,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
+ }
+ }
+
+- if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
++ if (tx_eapol) {
+ rsi_dbg(INFO_ZONE, "*** Tx EAPOL ***\n");
+
+ data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE);
+diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
+index d9f6367b9993d..f0cac19005527 100644
+--- a/drivers/nfc/pn533/pn533.c
++++ b/drivers/nfc/pn533/pn533.c
+@@ -1295,6 +1295,8 @@ static int pn533_poll_dep_complete(struct pn533 *dev, void *arg,
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
++ memset(&nfc_target, 0, sizeof(struct nfc_target));
++
+ rsp = (struct pn533_cmd_jump_dep_response *)resp->data;
+
+ rc = rsp->status & PN533_CMD_RET_MASK;
+@@ -1926,6 +1928,8 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
+
+ dev_dbg(dev->dev, "Creating new target\n");
+
++ memset(&nfc_target, 0, sizeof(struct nfc_target));
++
+ nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ nfc_target.nfcid1_len = 10;
+ memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 7e3893d06babd..108b5022ceadc 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3049,7 +3049,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+- return 0;
++ return -ENOMEM;
+
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.cns = NVME_ID_CNS_CS_CTRL;
+@@ -3745,13 +3745,17 @@ static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_secret)) {
++ struct nvme_dhchap_key *key, *host_key;
+ int ret;
+
+- ret = nvme_auth_generate_key(dhchap_secret, &ctrl->host_key);
++ ret = nvme_auth_generate_key(dhchap_secret, &key);
+ if (ret)
+ return ret;
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = dhchap_secret;
++ host_key = ctrl->host_key;
++ ctrl->host_key = key;
++ nvme_auth_free_key(host_key);
+ /* Key has changed; re-authentication with new key */
+ nvme_auth_reset(ctrl);
+ }
+@@ -3795,13 +3799,17 @@ static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
++ struct nvme_dhchap_key *key, *ctrl_key;
+ int ret;
+
+- ret = nvme_auth_generate_key(dhchap_secret, &ctrl->ctrl_key);
++ ret = nvme_auth_generate_key(dhchap_secret, &key);
+ if (ret)
+ return ret;
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = dhchap_secret;
++ ctrl_key = ctrl->ctrl_key;
++ ctrl->ctrl_key = key;
++ nvme_auth_free_key(ctrl_key);
+ /* Key has changed; re-authentication with new key */
+ nvme_auth_reset(ctrl);
+ }
+@@ -4867,7 +4875,7 @@ EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
+
+ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int flags,
+- unsigned int cmd_size)
++ unsigned int nr_maps, unsigned int cmd_size)
+ {
+ int ret;
+
+@@ -4881,8 +4889,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ set->driver_data = ctrl;
+ set->nr_hw_queues = ctrl->queue_count - 1;
+ set->timeout = NVME_IO_TIMEOUT;
+- if (ops->map_queues)
+- set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
++ set->nr_maps = nr_maps;
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret)
+ return ret;
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 5d57a042dbcad..20b0c29a9a341 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -2903,7 +2903,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
+ nvme_fc_init_io_queues(ctrl);
+
+ ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+- &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE,
++ &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
+ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ ctrl->lport->ops->fcprqst_priv_sz));
+ if (ret)
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index a29877217ee65..8a0db9e06dc65 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -743,7 +743,7 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
+ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int flags,
+- unsigned int cmd_size);
++ unsigned int nr_maps, unsigned int cmd_size);
+ void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
+
+ void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 488ad7dabeb8e..115d81def5671 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -35,7 +35,7 @@
+ #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
+ #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
+
+-#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
++#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
+
+ /*
+ * These can be higher, but we need to ensure that any command doesn't
+@@ -144,9 +144,9 @@ struct nvme_dev {
+ mempool_t *iod_mempool;
+
+ /* shadow doorbell buffer support: */
+- u32 *dbbuf_dbs;
++ __le32 *dbbuf_dbs;
+ dma_addr_t dbbuf_dbs_dma_addr;
+- u32 *dbbuf_eis;
++ __le32 *dbbuf_eis;
+ dma_addr_t dbbuf_eis_dma_addr;
+
+ /* host memory buffer support: */
+@@ -210,10 +210,10 @@ struct nvme_queue {
+ #define NVMEQ_SQ_CMB 1
+ #define NVMEQ_DELETE_ERROR 2
+ #define NVMEQ_POLLED 3
+- u32 *dbbuf_sq_db;
+- u32 *dbbuf_cq_db;
+- u32 *dbbuf_sq_ei;
+- u32 *dbbuf_cq_ei;
++ __le32 *dbbuf_sq_db;
++ __le32 *dbbuf_cq_db;
++ __le32 *dbbuf_sq_ei;
++ __le32 *dbbuf_cq_ei;
+ struct completion delete_done;
+ };
+
+@@ -340,11 +340,11 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
+ }
+
+ /* Update dbbuf and return true if an MMIO is required */
+-static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
+- volatile u32 *dbbuf_ei)
++static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
++ volatile __le32 *dbbuf_ei)
+ {
+ if (dbbuf_db) {
+- u16 old_value;
++ u16 old_value, event_idx;
+
+ /*
+ * Ensure that the queue is written before updating
+@@ -352,8 +352,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
+ */
+ wmb();
+
+- old_value = *dbbuf_db;
+- *dbbuf_db = value;
++ old_value = le32_to_cpu(*dbbuf_db);
++ *dbbuf_db = cpu_to_le32(value);
+
+ /*
+ * Ensure that the doorbell is updated before reading the event
+@@ -363,7 +363,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
+ */
+ mb();
+
+- if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
++ event_idx = le32_to_cpu(*dbbuf_ei);
++ if (!nvme_dbbuf_need_event(event_idx, value, old_value))
+ return false;
+ }
+
+@@ -377,9 +378,9 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
+ */
+ static int nvme_pci_npages_prp(void)
+ {
+- unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
+- NVME_CTRL_PAGE_SIZE);
+- return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
++ unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
++ unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
++ return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
+ }
+
+ /*
+@@ -389,7 +390,7 @@ static int nvme_pci_npages_prp(void)
+ static int nvme_pci_npages_sgl(void)
+ {
+ return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
+- PAGE_SIZE);
++ NVME_CTRL_PAGE_SIZE);
+ }
+
+ static size_t nvme_pci_iod_alloc_size(void)
+@@ -713,7 +714,7 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
+ sge->length = cpu_to_le32(entries * sizeof(*sge));
+ sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
+ } else {
+- sge->length = cpu_to_le32(PAGE_SIZE);
++ sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
+ sge->type = NVME_SGL_FMT_SEG_DESC << 4;
+ }
+ }
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 6e079abb22ee9..a55d3e8b607d5 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -798,7 +798,9 @@ static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
+ NVME_RDMA_METADATA_SGL_SIZE;
+
+ return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
+- &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE, cmd_size);
++ &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE,
++ ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
++ cmd_size);
+ }
+
+ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 9b47dcb2a7d97..83735c52d34a0 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1868,6 +1868,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
+ ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
+ &nvme_tcp_mq_ops,
+ BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
++ ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
+ sizeof(struct nvme_tcp_request));
+ if (ret)
+ goto out_free_io_queues;
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index aecb5853f8da4..683b75a992b3d 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -15,6 +15,7 @@
+
+ #include "nvmet.h"
+
++struct kmem_cache *nvmet_bvec_cache;
+ struct workqueue_struct *buffered_io_wq;
+ struct workqueue_struct *zbd_wq;
+ static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
+@@ -1631,26 +1632,28 @@ void nvmet_subsys_put(struct nvmet_subsys *subsys)
+
+ static int __init nvmet_init(void)
+ {
+- int error;
++ int error = -ENOMEM;
+
+ nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
+
++ nvmet_bvec_cache = kmem_cache_create("nvmet-bvec",
++ NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0,
++ SLAB_HWCACHE_ALIGN, NULL);
++ if (!nvmet_bvec_cache)
++ return -ENOMEM;
++
+ zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
+ if (!zbd_wq)
+- return -ENOMEM;
++ goto out_destroy_bvec_cache;
+
+ buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
+ WQ_MEM_RECLAIM, 0);
+- if (!buffered_io_wq) {
+- error = -ENOMEM;
++ if (!buffered_io_wq)
+ goto out_free_zbd_work_queue;
+- }
+
+ nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+- if (!nvmet_wq) {
+- error = -ENOMEM;
++ if (!nvmet_wq)
+ goto out_free_buffered_work_queue;
+- }
+
+ error = nvmet_init_discovery();
+ if (error)
+@@ -1669,6 +1672,8 @@ out_free_buffered_work_queue:
+ destroy_workqueue(buffered_io_wq);
+ out_free_zbd_work_queue:
+ destroy_workqueue(zbd_wq);
++out_destroy_bvec_cache:
++ kmem_cache_destroy(nvmet_bvec_cache);
+ return error;
+ }
+
+@@ -1680,6 +1685,7 @@ static void __exit nvmet_exit(void)
+ destroy_workqueue(nvmet_wq);
+ destroy_workqueue(buffered_io_wq);
+ destroy_workqueue(zbd_wq);
++ kmem_cache_destroy(nvmet_bvec_cache);
+
+ BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
+ BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
+diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
+index 64b47e2a46330..e55ec6fefd7f4 100644
+--- a/drivers/nvme/target/io-cmd-file.c
++++ b/drivers/nvme/target/io-cmd-file.c
+@@ -11,7 +11,6 @@
+ #include <linux/fs.h>
+ #include "nvmet.h"
+
+-#define NVMET_MAX_MPOOL_BVEC 16
+ #define NVMET_MIN_MPOOL_OBJ 16
+
+ void nvmet_file_ns_revalidate(struct nvmet_ns *ns)
+@@ -26,8 +25,6 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
+ flush_workqueue(buffered_io_wq);
+ mempool_destroy(ns->bvec_pool);
+ ns->bvec_pool = NULL;
+- kmem_cache_destroy(ns->bvec_cache);
+- ns->bvec_cache = NULL;
+ fput(ns->file);
+ ns->file = NULL;
+ }
+@@ -59,16 +56,8 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
+ ns->blksize_shift = min_t(u8,
+ file_inode(ns->file)->i_blkbits, 12);
+
+- ns->bvec_cache = kmem_cache_create("nvmet-bvec",
+- NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
+- 0, SLAB_HWCACHE_ALIGN, NULL);
+- if (!ns->bvec_cache) {
+- ret = -ENOMEM;
+- goto err;
+- }
+-
+ ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
+- mempool_free_slab, ns->bvec_cache);
++ mempool_free_slab, nvmet_bvec_cache);
+
+ if (!ns->bvec_pool) {
+ ret = -ENOMEM;
+@@ -77,9 +66,10 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
+
+ return ret;
+ err:
++ fput(ns->file);
++ ns->file = NULL;
+ ns->size = 0;
+ ns->blksize_shift = 0;
+- nvmet_file_ns_disable(ns);
+ return ret;
+ }
+
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index b45fe3adf015f..08c583258e90f 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -494,7 +494,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
+ return ret;
+
+ ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+- &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE,
++ &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
+ if (ret)
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index dfe3894205aa7..bda1c1f71f394 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -77,7 +77,6 @@ struct nvmet_ns {
+
+ struct completion disable_done;
+ mempool_t *bvec_pool;
+- struct kmem_cache *bvec_cache;
+
+ int use_p2pmem;
+ struct pci_dev *p2p_dev;
+@@ -393,6 +392,8 @@ struct nvmet_req {
+ u64 error_slba;
+ };
+
++#define NVMET_MAX_MPOOL_BVEC 16
++extern struct kmem_cache *nvmet_bvec_cache;
+ extern struct workqueue_struct *buffered_io_wq;
+ extern struct workqueue_struct *zbd_wq;
+ extern struct workqueue_struct *nvmet_wq;
+diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
+index 79af5140af8bf..adc0958755d66 100644
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -334,14 +334,13 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
+ }
+
+ /*
+- * If there are effects for the command we are about to execute, or
+- * an end_req function we need to use nvme_execute_passthru_rq()
+- * synchronously in a work item seeing the end_req function and
+- * nvme_passthru_end() can't be called in the request done callback
+- * which is typically in interrupt context.
++ * If a command needs post-execution fixups, or there are any
++ * non-trivial effects, make sure to execute the command synchronously
++ * in a workqueue so that nvme_passthru_end gets called.
+ */
+ effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
+- if (req->p.use_workqueue || effects) {
++ if (req->p.use_workqueue ||
++ (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) {
+ INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
+ req->p.rq = rq;
+ queue_work(nvmet_wq, &req->p.work);
+diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
+index bd8ff4df723da..ed4e6c144a681 100644
+--- a/drivers/of/overlay.c
++++ b/drivers/of/overlay.c
+@@ -545,7 +545,7 @@ static int find_dup_cset_node_entry(struct overlay_changeset *ovcs,
+
+ fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np);
+ fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np);
+- node_path_match = !strcmp(fn_1, fn_2);
++ node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2);
+ kfree(fn_1);
+ kfree(fn_2);
+ if (node_path_match) {
+@@ -580,7 +580,7 @@ static int find_dup_cset_prop(struct overlay_changeset *ovcs,
+
+ fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np);
+ fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np);
+- node_path_match = !strcmp(fn_1, fn_2);
++ node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2);
+ kfree(fn_1);
+ kfree(fn_2);
+ if (node_path_match &&
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index 2616585ca5f8a..1dde5c579edc8 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -952,12 +952,6 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
+ }
+ }
+
+- ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+- if (ret < 0) {
+- dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
+- goto err_phy_off;
+- }
+-
+ if (imx6_pcie->phy) {
+ ret = phy_power_on(imx6_pcie->phy);
+ if (ret) {
+@@ -965,6 +959,13 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
+ goto err_phy_off;
+ }
+ }
++
++ ret = imx6_pcie_deassert_core_reset(imx6_pcie);
++ if (ret < 0) {
++ dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
++ goto err_phy_off;
++ }
++
+ imx6_setup_phy_mpll(imx6_pcie);
+
+ return 0;
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index c6725c519a479..9e4d96e5a3f5a 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -641,7 +641,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
+ if (pci->n_fts[1]) {
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val &= ~PORT_LOGIC_N_FTS_MASK;
+- val |= pci->n_fts[pci->link_gen - 1];
++ val |= pci->n_fts[1];
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+ }
+
+diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
+index 4bd1abf26008f..ee7aad09d6277 100644
+--- a/drivers/pci/controller/pcie-mt7621.c
++++ b/drivers/pci/controller/pcie-mt7621.c
+@@ -466,7 +466,8 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host)
+ }
+
+ static const struct soc_device_attribute mt7621_pcie_quirks_match[] = {
+- { .soc_id = "mt7621", .revision = "E2" }
++ { .soc_id = "mt7621", .revision = "E2" },
++ { /* sentinel */ }
+ };
+
+ static int mt7621_pcie_probe(struct platform_device *pdev)
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index e06e9f4fc50f7..769eedeb8802a 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -719,6 +719,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
+ resource_size_t offset[2] = {0};
+ resource_size_t membar2_offset = 0x2000;
+ struct pci_bus *child;
++ struct pci_dev *dev;
+ int ret;
+
+ /*
+@@ -859,8 +860,25 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
+
+ pci_scan_child_bus(vmd->bus);
+ vmd_domain_reset(vmd);
+- list_for_each_entry(child, &vmd->bus->children, node)
+- pci_reset_bus(child->self);
++
++ /* When Intel VMD is enabled, the OS does not discover the Root Ports
++ * owned by Intel VMD within the MMCFG space. pci_reset_bus() applies
++ * a reset to the parent of the PCI device supplied as argument. This
++ * is why we pass a child device, so the reset can be triggered at
++ * the Intel bridge level and propagated to all the children in the
++ * hierarchy.
++ */
++ list_for_each_entry(child, &vmd->bus->children, node) {
++ if (!list_empty(&child->devices)) {
++ dev = list_first_entry(&child->devices,
++ struct pci_dev, bus_list);
++ if (pci_reset_bus(dev))
++ pci_warn(dev, "can't reset device: %d\n", ret);
++
++ break;
++ }
++ }
++
+ pci_assign_unassigned_bus_resources(vmd->bus);
+
+ /*
+@@ -980,6 +998,11 @@ static int vmd_resume(struct device *dev)
+ struct vmd_dev *vmd = pci_get_drvdata(pdev);
+ int err, i;
+
++ if (vmd->irq_domain)
++ vmd_set_msi_remapping(vmd, true);
++ else
++ vmd_set_msi_remapping(vmd, false);
++
+ for (i = 0; i < vmd->msix_count; i++) {
+ err = devm_request_irq(dev, vmd->irqs[i].virq,
+ vmd_irq, IRQF_NO_THREAD,
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index 36b1801a061b7..55283d2379a6a 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -979,7 +979,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
+ if (ret)
+ epf_test->dma_supported = false;
+
+- if (linkup_notifier) {
++ if (linkup_notifier || core_init_notifier) {
+ epf->nb.notifier_call = pci_epf_test_notifier;
+ pci_epc_register_notifier(epc, &epf->nb);
+ } else {
+diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+index 0ea85e1d292ec..fba0179939b8f 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
++++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+@@ -557,7 +557,7 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
+ return ret;
+
+ err_alloc_peer_mem:
+- pci_epc_mem_free_addr(ntb->epf->epc, epf_bar->phys_addr, mw_addr, epf_bar->size);
++ pci_epf_free_space(ntb->epf, mw_addr, barno, 0);
+ return -1;
+ }
+
+diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
+index 12ecd0aaa28d6..0050e8f6814ed 100644
+--- a/drivers/pci/irq.c
++++ b/drivers/pci/irq.c
+@@ -44,6 +44,8 @@ int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler,
+ va_start(ap, fmt);
+ devname = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
++ if (!devname)
++ return -ENOMEM;
+
+ ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn,
+ irqflags, devname, dev_id);
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index b66fa42c4b1fa..1d6f7b502020d 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1891,9 +1891,6 @@ int pci_setup_device(struct pci_dev *dev)
+
+ dev->broken_intx_masking = pci_intx_mask_broken(dev);
+
+- /* Clear errors left from system firmware */
+- pci_write_config_word(dev, PCI_STATUS, 0xffff);
+-
+ switch (dev->hdr_type) { /* header type */
+ case PCI_HEADER_TYPE_NORMAL: /* standard header */
+ if (class == PCI_CLASS_BRIDGE_PCI)
+diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
+index 280a6ae3e27cf..54aa4658fb36e 100644
+--- a/drivers/perf/arm_dmc620_pmu.c
++++ b/drivers/perf/arm_dmc620_pmu.c
+@@ -725,6 +725,8 @@ static struct platform_driver dmc620_pmu_driver = {
+
+ static int __init dmc620_pmu_init(void)
+ {
++ int ret;
++
+ cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ DMC620_DRVNAME,
+ NULL,
+@@ -732,7 +734,11 @@ static int __init dmc620_pmu_init(void)
+ if (cpuhp_state_num < 0)
+ return cpuhp_state_num;
+
+- return platform_driver_register(&dmc620_pmu_driver);
++ ret = platform_driver_register(&dmc620_pmu_driver);
++ if (ret)
++ cpuhp_remove_multi_state(cpuhp_state_num);
++
++ return ret;
+ }
+
+ static void __exit dmc620_pmu_exit(void)
+diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
+index 4a15c86f45efb..fe2abb412c004 100644
+--- a/drivers/perf/arm_dsu_pmu.c
++++ b/drivers/perf/arm_dsu_pmu.c
+@@ -858,7 +858,11 @@ static int __init dsu_pmu_init(void)
+ if (ret < 0)
+ return ret;
+ dsu_pmu_cpuhp_state = ret;
+- return platform_driver_register(&dsu_pmu_driver);
++ ret = platform_driver_register(&dsu_pmu_driver);
++ if (ret)
++ cpuhp_remove_multi_state(dsu_pmu_cpuhp_state);
++
++ return ret;
+ }
+
+ static void __exit dsu_pmu_exit(void)
+diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
+index 00d4c45a8017d..25a269d431e45 100644
+--- a/drivers/perf/arm_smmuv3_pmu.c
++++ b/drivers/perf/arm_smmuv3_pmu.c
+@@ -959,6 +959,8 @@ static struct platform_driver smmu_pmu_driver = {
+
+ static int __init arm_smmu_pmu_init(void)
+ {
++ int ret;
++
+ cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/arm/pmcg:online",
+ NULL,
+@@ -966,7 +968,11 @@ static int __init arm_smmu_pmu_init(void)
+ if (cpuhp_state_num < 0)
+ return cpuhp_state_num;
+
+- return platform_driver_register(&smmu_pmu_driver);
++ ret = platform_driver_register(&smmu_pmu_driver);
++ if (ret)
++ cpuhp_remove_multi_state(cpuhp_state_num);
++
++ return ret;
+ }
+ module_init(arm_smmu_pmu_init);
+
+diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+index 21771708597db..071e63d9a9ac6 100644
+--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
++++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+@@ -693,10 +693,10 @@ static struct attribute *hisi_pcie_pmu_events_attr[] = {
+ HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_cnt, 0x10210),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_latency, 0x0011),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_cnt, 0x10011),
+- HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x1005),
+- HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x11005),
+- HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x2004),
+- HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x12004),
++ HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x0804),
++ HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x10804),
++ HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x0405),
++ HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x10405),
+ NULL
+ };
+
+diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
+index 69c3050a4348b..a1166afb37024 100644
+--- a/drivers/perf/marvell_cn10k_tad_pmu.c
++++ b/drivers/perf/marvell_cn10k_tad_pmu.c
+@@ -408,7 +408,11 @@ static int __init tad_pmu_init(void)
+ if (ret < 0)
+ return ret;
+ tad_pmu_cpuhp_state = ret;
+- return platform_driver_register(&tad_pmu_driver);
++ ret = platform_driver_register(&tad_pmu_driver);
++ if (ret)
++ cpuhp_remove_multi_state(tad_pmu_cpuhp_state);
++
++ return ret;
+ }
+
+ static void __exit tad_pmu_exit(void)
+diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
+index 3a3831f6059a3..5472db9e87ef8 100644
+--- a/drivers/phy/allwinner/phy-sun4i-usb.c
++++ b/drivers/phy/allwinner/phy-sun4i-usb.c
+@@ -120,6 +120,7 @@ struct sun4i_usb_phy_cfg {
+ u8 phyctl_offset;
+ bool dedicated_clocks;
+ bool phy0_dual_route;
++ bool needs_phy2_siddq;
+ int missing_phys;
+ };
+
+@@ -289,6 +290,50 @@ static int sun4i_usb_phy_init(struct phy *_phy)
+ return ret;
+ }
+
++ /* Some PHYs on some SoCs need the help of PHY2 to work. */
++ if (data->cfg->needs_phy2_siddq && phy->index != 2) {
++ struct sun4i_usb_phy *phy2 = &data->phys[2];
++
++ ret = clk_prepare_enable(phy2->clk);
++ if (ret) {
++ reset_control_assert(phy->reset);
++ clk_disable_unprepare(phy->clk2);
++ clk_disable_unprepare(phy->clk);
++ return ret;
++ }
++
++ ret = reset_control_deassert(phy2->reset);
++ if (ret) {
++ clk_disable_unprepare(phy2->clk);
++ reset_control_assert(phy->reset);
++ clk_disable_unprepare(phy->clk2);
++ clk_disable_unprepare(phy->clk);
++ return ret;
++ }
++
++ /*
++ * This extra clock is just needed to access the
++ * REG_HCI_PHY_CTL PMU register for PHY2.
++ */
++ ret = clk_prepare_enable(phy2->clk2);
++ if (ret) {
++ reset_control_assert(phy2->reset);
++ clk_disable_unprepare(phy2->clk);
++ reset_control_assert(phy->reset);
++ clk_disable_unprepare(phy->clk2);
++ clk_disable_unprepare(phy->clk);
++ return ret;
++ }
++
++ if (phy2->pmu && data->cfg->hci_phy_ctl_clear) {
++ val = readl(phy2->pmu + REG_HCI_PHY_CTL);
++ val &= ~data->cfg->hci_phy_ctl_clear;
++ writel(val, phy2->pmu + REG_HCI_PHY_CTL);
++ }
++
++ clk_disable_unprepare(phy->clk2);
++ }
++
+ if (phy->pmu && data->cfg->hci_phy_ctl_clear) {
+ val = readl(phy->pmu + REG_HCI_PHY_CTL);
+ val &= ~data->cfg->hci_phy_ctl_clear;
+@@ -354,6 +399,13 @@ static int sun4i_usb_phy_exit(struct phy *_phy)
+ data->phy0_init = false;
+ }
+
++ if (data->cfg->needs_phy2_siddq && phy->index != 2) {
++ struct sun4i_usb_phy *phy2 = &data->phys[2];
++
++ clk_disable_unprepare(phy2->clk);
++ reset_control_assert(phy2->reset);
++ }
++
+ sun4i_usb_phy_passby(phy, 0);
+ reset_control_assert(phy->reset);
+ clk_disable_unprepare(phy->clk2);
+@@ -785,6 +837,13 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
+ dev_err(dev, "failed to get clock %s\n", name);
+ return PTR_ERR(phy->clk2);
+ }
++ } else {
++ snprintf(name, sizeof(name), "pmu%d_clk", i);
++ phy->clk2 = devm_clk_get_optional(dev, name);
++ if (IS_ERR(phy->clk2)) {
++ dev_err(dev, "failed to get clock %s\n", name);
++ return PTR_ERR(phy->clk2);
++ }
+ }
+
+ snprintf(name, sizeof(name), "usb%d_reset", i);
+@@ -973,6 +1032,17 @@ static const struct sun4i_usb_phy_cfg sun50i_h6_cfg = {
+ .missing_phys = BIT(1) | BIT(2),
+ };
+
++static const struct sun4i_usb_phy_cfg sun50i_h616_cfg = {
++ .num_phys = 4,
++ .type = sun50i_h6_phy,
++ .disc_thresh = 3,
++ .phyctl_offset = REG_PHYCTL_A33,
++ .dedicated_clocks = true,
++ .phy0_dual_route = true,
++ .hci_phy_ctl_clear = PHY_CTL_SIDDQ,
++ .needs_phy2_siddq = true,
++};
++
+ static const struct of_device_id sun4i_usb_phy_of_match[] = {
+ { .compatible = "allwinner,sun4i-a10-usb-phy", .data = &sun4i_a10_cfg },
+ { .compatible = "allwinner,sun5i-a13-usb-phy", .data = &sun5i_a13_cfg },
+@@ -988,6 +1058,7 @@ static const struct of_device_id sun4i_usb_phy_of_match[] = {
+ { .compatible = "allwinner,sun50i-a64-usb-phy",
+ .data = &sun50i_a64_cfg},
+ { .compatible = "allwinner,sun50i-h6-usb-phy", .data = &sun50i_h6_cfg },
++ { .compatible = "allwinner,sun50i-h616-usb-phy", .data = &sun50i_h616_cfg },
+ { },
+ };
+ MODULE_DEVICE_TABLE(of, sun4i_usb_phy_of_match);
+diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
+index d2524b70ea161..3b374b37b965b 100644
+--- a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
++++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
+@@ -331,13 +331,12 @@ static void usb_uninit_common_7216(struct brcm_usb_init_params *params)
+
+ pr_debug("%s\n", __func__);
+
+- if (!params->wake_enabled) {
+- USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN);
+-
++ if (params->wake_enabled) {
+ /* Switch to using slower clock during suspend to save power */
+ USB_CTRL_SET(ctrl, USB_PM, XHC_S2_CLK_SWITCH_EN);
+- } else {
+ usb_wake_enable_7216(params, true);
++ } else {
++ USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN);
+ }
+ }
+
+@@ -425,7 +424,6 @@ void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params)
+
+ params->family_name = "7216";
+ params->ops = &bcm7216_ops;
+- params->suspend_with_clocks = true;
+ }
+
+ void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params)
+@@ -435,5 +433,4 @@ void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params)
+
+ params->family_name = "7211";
+ params->ops = &bcm7211b0_ops;
+- params->suspend_with_clocks = true;
+ }
+diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
+index 1ccb5ddab865c..3236e94988428 100644
+--- a/drivers/phy/broadcom/phy-brcm-usb-init.h
++++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
+@@ -61,7 +61,6 @@ struct brcm_usb_init_params {
+ const struct brcm_usb_init_ops *ops;
+ struct regmap *syscon_piarbctl;
+ bool wake_enabled;
+- bool suspend_with_clocks;
+ };
+
+ void brcm_usb_dvr_init_4908(struct brcm_usb_init_params *params);
+diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
+index 2cb3779fcdf82..2bfd78e2d8fd6 100644
+--- a/drivers/phy/broadcom/phy-brcm-usb.c
++++ b/drivers/phy/broadcom/phy-brcm-usb.c
+@@ -102,9 +102,9 @@ static int brcm_pm_notifier(struct notifier_block *notifier,
+
+ static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
+ {
+- struct phy *gphy = dev_id;
++ struct device *dev = dev_id;
+
+- pm_wakeup_event(&gphy->dev, 0);
++ pm_wakeup_event(dev, 0);
+
+ return IRQ_HANDLED;
+ }
+@@ -451,7 +451,7 @@ static int brcm_usb_phy_dvr_init(struct platform_device *pdev,
+ if (priv->wake_irq >= 0) {
+ err = devm_request_irq(dev, priv->wake_irq,
+ brcm_usb_phy_wake_isr, 0,
+- dev_name(dev), gphy);
++ dev_name(dev), dev);
+ if (err < 0)
+ return err;
+ device_set_wakeup_capable(dev, 1);
+@@ -598,7 +598,7 @@ static int brcm_usb_phy_suspend(struct device *dev)
+ * and newer XHCI->2.0-clks/3.0-clks.
+ */
+
+- if (!priv->ini.suspend_with_clocks) {
++ if (!priv->ini.wake_enabled) {
+ if (priv->phys[BRCM_USB_PHY_3_0].inited)
+ clk_disable_unprepare(priv->usb_30_clk);
+ if (priv->phys[BRCM_USB_PHY_2_0].inited ||
+@@ -615,8 +615,10 @@ static int brcm_usb_phy_resume(struct device *dev)
+ {
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
+
+- clk_prepare_enable(priv->usb_20_clk);
+- clk_prepare_enable(priv->usb_30_clk);
++ if (!priv->ini.wake_enabled) {
++ clk_prepare_enable(priv->usb_20_clk);
++ clk_prepare_enable(priv->usb_30_clk);
++ }
+ brcm_usb_init_ipp(&priv->ini);
+
+ /*
+diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+index 67712c77d806f..d641b345afa35 100644
+--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
++++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+@@ -826,6 +826,9 @@ mvebu_a3700_comphy_usb3_power_on(struct mvebu_a3700_comphy_lane *lane)
+ if (ret)
+ return ret;
+
++ /* COMPHY register reset (cleared automatically) */
++ comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST);
++
+ /*
+ * 0. Set PHY OTG Control(0x5d034), bit 4, Power up OTG module The
+ * register belong to UTMI module, so it is set in UTMI phy driver.
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+index 5be5348fbb26b..bb40172e23d49 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+@@ -14,6 +14,7 @@
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/of_address.h>
++#include <linux/phy/pcie.h>
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
+ #include <linux/regulator/consumer.h>
+@@ -505,6 +506,13 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_CNTRL1, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x1),
++ QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10),
++ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01),
++ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
++ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
++};
++
++static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+@@ -517,11 +525,7 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x50),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x1a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x6),
+- QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01),
+- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+ };
+
+ static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = {
+@@ -1184,15 +1188,29 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_misc_tbl[] = {
+ };
+
+ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = {
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f),
++};
++
++static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rc_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c),
+- QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+@@ -1200,8 +1218,6 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46),
+- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+@@ -1214,17 +1230,8 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05),
+- QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+- QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12),
+- QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
+- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
+- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x20),
+- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06),
+- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
+- QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f),
+ };
+
+ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_tx_tbl[] = {
+@@ -1285,46 +1292,80 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rx_tbl[] = {
+ };
+
+ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_tbl[] = {
+- QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG2, 0x16),
+- QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG3, 0x22),
+- QMP_PHY_INIT_CFG(QPHY_V5_PCS_G3S2_PRE_GAIN, 0x2e),
+- QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x99),
++ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG4, 0x16),
++ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG5, 0x22),
++ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_G3S2_PRE_GAIN, 0x2e),
++ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_RX_SIGDET_LVL, 0x99),
+ };
+
+ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
+- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_EQ_CONFIG1, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3, 0x28),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
+ };
+
++static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rc_pcs_misc_tbl[] = {
++ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
++ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
++ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_PRESET_P10_POST, 0x00),
++};
++
++static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_ep_serdes_tbl[] = {
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BG_TIMER, 0x02),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYS_CLK_CTRL, 0x07),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x27),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x0a),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x17),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x19),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x00),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x03),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x00),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0xff),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x04),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0xff),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x09),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x19),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x28),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE0, 0xfb),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN1_MODE0, 0x01),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE1, 0xfb),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN1_MODE1, 0x01),
++ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x60),
++};
++
++static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl[] = {
++ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x08),
++};
++
++struct qmp_phy_cfg_tables {
++ const struct qmp_phy_init_tbl *serdes;
++ int serdes_num;
++ const struct qmp_phy_init_tbl *tx;
++ int tx_num;
++ const struct qmp_phy_init_tbl *rx;
++ int rx_num;
++ const struct qmp_phy_init_tbl *pcs;
++ int pcs_num;
++ const struct qmp_phy_init_tbl *pcs_misc;
++ int pcs_misc_num;
++};
++
+ /* struct qmp_phy_cfg - per-PHY initialization config */
+ struct qmp_phy_cfg {
+ int lanes;
+
+- /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+- const struct qmp_phy_init_tbl *serdes_tbl;
+- int serdes_tbl_num;
+- const struct qmp_phy_init_tbl *serdes_tbl_sec;
+- int serdes_tbl_num_sec;
+- const struct qmp_phy_init_tbl *tx_tbl;
+- int tx_tbl_num;
+- const struct qmp_phy_init_tbl *tx_tbl_sec;
+- int tx_tbl_num_sec;
+- const struct qmp_phy_init_tbl *rx_tbl;
+- int rx_tbl_num;
+- const struct qmp_phy_init_tbl *rx_tbl_sec;
+- int rx_tbl_num_sec;
+- const struct qmp_phy_init_tbl *pcs_tbl;
+- int pcs_tbl_num;
+- const struct qmp_phy_init_tbl *pcs_tbl_sec;
+- int pcs_tbl_num_sec;
+- const struct qmp_phy_init_tbl *pcs_misc_tbl;
+- int pcs_misc_tbl_num;
+- const struct qmp_phy_init_tbl *pcs_misc_tbl_sec;
+- int pcs_misc_tbl_num_sec;
++ /* Main init sequence for PHY blocks - serdes, tx, rx, pcs */
++ const struct qmp_phy_cfg_tables tables;
++ /*
++ * Additional init sequences for PHY blocks, providing additional
++ * register programming. They are used for providing separate sequences
++ * for the Root Complex and End Point use cases.
++ *
++ * If EP mode is not supported, both tables can be left unset.
++ */
++ const struct qmp_phy_cfg_tables *tables_rc;
++ const struct qmp_phy_cfg_tables *tables_ep;
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+@@ -1344,11 +1385,7 @@ struct qmp_phy_cfg {
+ /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
+ unsigned int phy_status;
+
+- /* true, if PHY needs delay after POWER_DOWN */
+- bool has_pwrdn_delay;
+- /* power_down delay in usec */
+- int pwrdn_delay_min;
+- int pwrdn_delay_max;
++ bool skip_start_delay;
+
+ /* QMP PHY pipe clock interface rate */
+ unsigned long pipe_clock_rate;
+@@ -1368,6 +1405,7 @@ struct qmp_phy_cfg {
+ * @pcs_misc: iomapped memory space for lane's pcs_misc
+ * @pipe_clk: pipe clock
+ * @qmp: QMP phy to which this lane belongs
++ * @mode: currently selected PHY mode
+ */
+ struct qmp_phy {
+ struct phy *phy;
+@@ -1381,6 +1419,7 @@ struct qmp_phy {
+ void __iomem *pcs_misc;
+ struct clk *pipe_clk;
+ struct qcom_qmp *qmp;
++ int mode;
+ };
+
+ /**
+@@ -1459,14 +1498,16 @@ static const char * const sdm845_pciephy_reset_l[] = {
+ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
+ .lanes = 1,
+
+- .serdes_tbl = ipq8074_pcie_serdes_tbl,
+- .serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
+- .tx_tbl = ipq8074_pcie_tx_tbl,
+- .tx_tbl_num = ARRAY_SIZE(ipq8074_pcie_tx_tbl),
+- .rx_tbl = ipq8074_pcie_rx_tbl,
+- .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
+- .pcs_tbl = ipq8074_pcie_pcs_tbl,
+- .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
++ .tables = {
++ .serdes = ipq8074_pcie_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
++ .tx = ipq8074_pcie_tx_tbl,
++ .tx_num = ARRAY_SIZE(ipq8074_pcie_tx_tbl),
++ .rx = ipq8074_pcie_rx_tbl,
++ .rx_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
++ .pcs = ipq8074_pcie_pcs_tbl,
++ .pcs_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
++ },
+ .clk_list = ipq8074_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
+ .reset_list = ipq8074_pciephy_reset_l,
+@@ -1478,23 +1519,23 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+-
+- .has_pwrdn_delay = true,
+- .pwrdn_delay_min = 995, /* us */
+- .pwrdn_delay_max = 1005, /* us */
+ };
+
+ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
+ .lanes = 1,
+
+- .serdes_tbl = ipq8074_pcie_gen3_serdes_tbl,
+- .serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_serdes_tbl),
+- .tx_tbl = ipq8074_pcie_gen3_tx_tbl,
+- .tx_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl),
+- .rx_tbl = ipq8074_pcie_gen3_rx_tbl,
+- .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_rx_tbl),
+- .pcs_tbl = ipq8074_pcie_gen3_pcs_tbl,
+- .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_pcs_tbl),
++ .tables = {
++ .serdes = ipq8074_pcie_gen3_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(ipq8074_pcie_gen3_serdes_tbl),
++ .tx = ipq8074_pcie_gen3_tx_tbl,
++ .tx_num = ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl),
++ .rx = ipq8074_pcie_gen3_rx_tbl,
++ .rx_num = ARRAY_SIZE(ipq8074_pcie_gen3_rx_tbl),
++ .pcs = ipq8074_pcie_gen3_pcs_tbl,
++ .pcs_num = ARRAY_SIZE(ipq8074_pcie_gen3_pcs_tbl),
++ .pcs_misc = ipq8074_pcie_gen3_pcs_misc_tbl,
++ .pcs_misc_num = ARRAY_SIZE(ipq8074_pcie_gen3_pcs_misc_tbl),
++ },
+ .clk_list = ipq8074_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
+ .reset_list = ipq8074_pciephy_reset_l,
+@@ -1505,10 +1546,7 @@ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+-
+- .has_pwrdn_delay = true,
+- .pwrdn_delay_min = 995, /* us */
+- .pwrdn_delay_max = 1005, /* us */
++ .phy_status = PHYSTATUS,
+
+ .pipe_clock_rate = 250000000,
+ };
+@@ -1516,16 +1554,18 @@ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
+ static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
+ .lanes = 1,
+
+- .serdes_tbl = ipq6018_pcie_serdes_tbl,
+- .serdes_tbl_num = ARRAY_SIZE(ipq6018_pcie_serdes_tbl),
+- .tx_tbl = ipq6018_pcie_tx_tbl,
+- .tx_tbl_num = ARRAY_SIZE(ipq6018_pcie_tx_tbl),
+- .rx_tbl = ipq6018_pcie_rx_tbl,
+- .rx_tbl_num = ARRAY_SIZE(ipq6018_pcie_rx_tbl),
+- .pcs_tbl = ipq6018_pcie_pcs_tbl,
+- .pcs_tbl_num = ARRAY_SIZE(ipq6018_pcie_pcs_tbl),
+- .pcs_misc_tbl = ipq6018_pcie_pcs_misc_tbl,
+- .pcs_misc_tbl_num = ARRAY_SIZE(ipq6018_pcie_pcs_misc_tbl),
++ .tables = {
++ .serdes = ipq6018_pcie_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(ipq6018_pcie_serdes_tbl),
++ .tx = ipq6018_pcie_tx_tbl,
++ .tx_num = ARRAY_SIZE(ipq6018_pcie_tx_tbl),
++ .rx = ipq6018_pcie_rx_tbl,
++ .rx_num = ARRAY_SIZE(ipq6018_pcie_rx_tbl),
++ .pcs = ipq6018_pcie_pcs_tbl,
++ .pcs_num = ARRAY_SIZE(ipq6018_pcie_pcs_tbl),
++ .pcs_misc = ipq6018_pcie_pcs_misc_tbl,
++ .pcs_misc_num = ARRAY_SIZE(ipq6018_pcie_pcs_misc_tbl),
++ },
+ .clk_list = ipq8074_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
+ .reset_list = ipq8074_pciephy_reset_l,
+@@ -1536,25 +1576,24 @@ static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+-
+- .has_pwrdn_delay = true,
+- .pwrdn_delay_min = 995, /* us */
+- .pwrdn_delay_max = 1005, /* us */
++ .phy_status = PHYSTATUS,
+ };
+
+ static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
+ .lanes = 1,
+
+- .serdes_tbl = sdm845_qmp_pcie_serdes_tbl,
+- .serdes_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl),
+- .tx_tbl = sdm845_qmp_pcie_tx_tbl,
+- .tx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_tx_tbl),
+- .rx_tbl = sdm845_qmp_pcie_rx_tbl,
+- .rx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_rx_tbl),
+- .pcs_tbl = sdm845_qmp_pcie_pcs_tbl,
+- .pcs_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_tbl),
+- .pcs_misc_tbl = sdm845_qmp_pcie_pcs_misc_tbl,
+- .pcs_misc_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_misc_tbl),
++ .tables = {
++ .serdes = sdm845_qmp_pcie_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl),
++ .tx = sdm845_qmp_pcie_tx_tbl,
++ .tx_num = ARRAY_SIZE(sdm845_qmp_pcie_tx_tbl),
++ .rx = sdm845_qmp_pcie_rx_tbl,
++ .rx_num = ARRAY_SIZE(sdm845_qmp_pcie_rx_tbl),
++ .pcs = sdm845_qmp_pcie_pcs_tbl,
++ .pcs_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_tbl),
++ .pcs_misc = sdm845_qmp_pcie_pcs_misc_tbl,
++ .pcs_misc_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_misc_tbl),
++ },
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+@@ -1566,23 +1605,21 @@ static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+-
+- .has_pwrdn_delay = true,
+- .pwrdn_delay_min = 995, /* us */
+- .pwrdn_delay_max = 1005, /* us */
+ };
+
+ static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
+ .lanes = 1,
+
+- .serdes_tbl = sdm845_qhp_pcie_serdes_tbl,
+- .serdes_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl),
+- .tx_tbl = sdm845_qhp_pcie_tx_tbl,
+- .tx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_tx_tbl),
+- .rx_tbl = sdm845_qhp_pcie_rx_tbl,
+- .rx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_rx_tbl),
+- .pcs_tbl = sdm845_qhp_pcie_pcs_tbl,
+- .pcs_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_pcs_tbl),
++ .tables = {
++ .serdes = sdm845_qhp_pcie_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl),
++ .tx = sdm845_qhp_pcie_tx_tbl,
++ .tx_num = ARRAY_SIZE(sdm845_qhp_pcie_tx_tbl),
++ .rx = sdm845_qhp_pcie_rx_tbl,
++ .rx_num = ARRAY_SIZE(sdm845_qhp_pcie_rx_tbl),
++ .pcs = sdm845_qhp_pcie_pcs_tbl,
++ .pcs_num = ARRAY_SIZE(sdm845_qhp_pcie_pcs_tbl),
++ },
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+@@ -1594,33 +1631,33 @@ static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+-
+- .has_pwrdn_delay = true,
+- .pwrdn_delay_min = 995, /* us */
+- .pwrdn_delay_max = 1005, /* us */
+ };
+
+ static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
+ .lanes = 1,
+
+- .serdes_tbl = sm8250_qmp_pcie_serdes_tbl,
+- .serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
+- .serdes_tbl_sec = sm8250_qmp_gen3x1_pcie_serdes_tbl,
+- .serdes_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_serdes_tbl),
+- .tx_tbl = sm8250_qmp_pcie_tx_tbl,
+- .tx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
+- .rx_tbl = sm8250_qmp_pcie_rx_tbl,
+- .rx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
+- .rx_tbl_sec = sm8250_qmp_gen3x1_pcie_rx_tbl,
+- .rx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_rx_tbl),
+- .pcs_tbl = sm8250_qmp_pcie_pcs_tbl,
+- .pcs_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
+- .pcs_tbl_sec = sm8250_qmp_gen3x1_pcie_pcs_tbl,
+- .pcs_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_tbl),
+- .pcs_misc_tbl = sm8250_qmp_pcie_pcs_misc_tbl,
+- .pcs_misc_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
+- .pcs_misc_tbl_sec = sm8250_qmp_gen3x1_pcie_pcs_misc_tbl,
+- .pcs_misc_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_misc_tbl),
++ .tables = {
++ .serdes = sm8250_qmp_pcie_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
++ .tx = sm8250_qmp_pcie_tx_tbl,
++ .tx_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
++ .rx = sm8250_qmp_pcie_rx_tbl,
++ .rx_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
++ .pcs = sm8250_qmp_pcie_pcs_tbl,
++ .pcs_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
++ .pcs_misc = sm8250_qmp_pcie_pcs_misc_tbl,
++ .pcs_misc_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
++ },
++ .tables_rc = &(const struct qmp_phy_cfg_tables) {
++ .serdes = sm8250_qmp_gen3x1_pcie_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_serdes_tbl),
++ .rx = sm8250_qmp_gen3x1_pcie_rx_tbl,
++ .rx_num = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_rx_tbl),
++ .pcs = sm8250_qmp_gen3x1_pcie_pcs_tbl,
++ .pcs_num = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_tbl),
++ .pcs_misc = sm8250_qmp_gen3x1_pcie_pcs_misc_tbl,
++ .pcs_misc_num = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_misc_tbl),
++ },
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+@@ -1632,33 +1669,33 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+-
+- .has_pwrdn_delay = true,
+- .pwrdn_delay_min = 995, /* us */
+- .pwrdn_delay_max = 1005, /* us */
+ };
+
+ static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
+ .lanes = 2,
+
+- .serdes_tbl = sm8250_qmp_pcie_serdes_tbl,
+- .serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
+- .tx_tbl = sm8250_qmp_pcie_tx_tbl,
+- .tx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
+- .tx_tbl_sec = sm8250_qmp_gen3x2_pcie_tx_tbl,
+- .tx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_tx_tbl),
+- .rx_tbl = sm8250_qmp_pcie_rx_tbl,
+- .rx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
+- .rx_tbl_sec = sm8250_qmp_gen3x2_pcie_rx_tbl,
+- .rx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_rx_tbl),
+- .pcs_tbl = sm8250_qmp_pcie_pcs_tbl,
+- .pcs_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
+- .pcs_tbl_sec = sm8250_qmp_gen3x2_pcie_pcs_tbl,
+- .pcs_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_tbl),
+- .pcs_misc_tbl = sm8250_qmp_pcie_pcs_misc_tbl,
+- .pcs_misc_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
+- .pcs_misc_tbl_sec = sm8250_qmp_gen3x2_pcie_pcs_misc_tbl,
+- .pcs_misc_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_misc_tbl),
++ .tables = {
++ .serdes = sm8250_qmp_pcie_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
++ .tx = sm8250_qmp_pcie_tx_tbl,
++ .tx_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
++ .rx = sm8250_qmp_pcie_rx_tbl,
++ .rx_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
++ .pcs = sm8250_qmp_pcie_pcs_tbl,
++ .pcs_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
++ .pcs_misc = sm8250_qmp_pcie_pcs_misc_tbl,
++ .pcs_misc_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
++ },
++ .tables_rc = &(const struct qmp_phy_cfg_tables) {
++ .tx = sm8250_qmp_gen3x2_pcie_tx_tbl,
++ .tx_num = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_tx_tbl),
++ .rx = sm8250_qmp_gen3x2_pcie_rx_tbl,
++ .rx_num = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_rx_tbl),
++ .pcs = sm8250_qmp_gen3x2_pcie_pcs_tbl,
++ .pcs_num = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_tbl),
++ .pcs_misc = sm8250_qmp_gen3x2_pcie_pcs_misc_tbl,
++ .pcs_misc_num = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_misc_tbl),
++ },
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+@@ -1670,23 +1707,21 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+-
+- .has_pwrdn_delay = true,
+- .pwrdn_delay_min = 995, /* us */
+- .pwrdn_delay_max = 1005, /* us */
+ };
+
+ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
+ .lanes = 1,
+
+- .serdes_tbl = msm8998_pcie_serdes_tbl,
+- .serdes_tbl_num = ARRAY_SIZE(msm8998_pcie_serdes_tbl),
+- .tx_tbl = msm8998_pcie_tx_tbl,
+- .tx_tbl_num = ARRAY_SIZE(msm8998_pcie_tx_tbl),
+- .rx_tbl = msm8998_pcie_rx_tbl,
+- .rx_tbl_num = ARRAY_SIZE(msm8998_pcie_rx_tbl),
+- .pcs_tbl = msm8998_pcie_pcs_tbl,
+- .pcs_tbl_num = ARRAY_SIZE(msm8998_pcie_pcs_tbl),
++ .tables = {
++ .serdes = msm8998_pcie_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(msm8998_pcie_serdes_tbl),
++ .tx = msm8998_pcie_tx_tbl,
++ .tx_num = ARRAY_SIZE(msm8998_pcie_tx_tbl),
++ .rx = msm8998_pcie_rx_tbl,
++ .rx_num = ARRAY_SIZE(msm8998_pcie_rx_tbl),
++ .pcs = msm8998_pcie_pcs_tbl,
++ .pcs_num = ARRAY_SIZE(msm8998_pcie_pcs_tbl),
++ },
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = ipq8074_pciephy_reset_l,
+@@ -1698,21 +1733,25 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
++
++ .skip_start_delay = true,
+ };
+
+ static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
+ .lanes = 1,
+
+- .serdes_tbl = sc8180x_qmp_pcie_serdes_tbl,
+- .serdes_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_serdes_tbl),
+- .tx_tbl = sc8180x_qmp_pcie_tx_tbl,
+- .tx_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_tx_tbl),
+- .rx_tbl = sc8180x_qmp_pcie_rx_tbl,
+- .rx_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_rx_tbl),
+- .pcs_tbl = sc8180x_qmp_pcie_pcs_tbl,
+- .pcs_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_tbl),
+- .pcs_misc_tbl = sc8180x_qmp_pcie_pcs_misc_tbl,
+- .pcs_misc_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_misc_tbl),
++ .tables = {
++ .serdes = sc8180x_qmp_pcie_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(sc8180x_qmp_pcie_serdes_tbl),
++ .tx = sc8180x_qmp_pcie_tx_tbl,
++ .tx_num = ARRAY_SIZE(sc8180x_qmp_pcie_tx_tbl),
++ .rx = sc8180x_qmp_pcie_rx_tbl,
++ .rx_num = ARRAY_SIZE(sc8180x_qmp_pcie_rx_tbl),
++ .pcs = sc8180x_qmp_pcie_pcs_tbl,
++ .pcs_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_tbl),
++ .pcs_misc = sc8180x_qmp_pcie_pcs_misc_tbl,
++ .pcs_misc_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_misc_tbl),
++ },
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+@@ -1723,25 +1762,24 @@ static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
+
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+-
+- .has_pwrdn_delay = true,
+- .pwrdn_delay_min = 995, /* us */
+- .pwrdn_delay_max = 1005, /* us */
++ .phy_status = PHYSTATUS,
+ };
+
+ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
+ .lanes = 2,
+
+- .serdes_tbl = sdx55_qmp_pcie_serdes_tbl,
+- .serdes_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl),
+- .tx_tbl = sdx55_qmp_pcie_tx_tbl,
+- .tx_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_tx_tbl),
+- .rx_tbl = sdx55_qmp_pcie_rx_tbl,
+- .rx_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_rx_tbl),
+- .pcs_tbl = sdx55_qmp_pcie_pcs_tbl,
+- .pcs_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_tbl),
+- .pcs_misc_tbl = sdx55_qmp_pcie_pcs_misc_tbl,
+- .pcs_misc_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_misc_tbl),
++ .tables = {
++ .serdes = sdx55_qmp_pcie_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl),
++ .tx = sdx55_qmp_pcie_tx_tbl,
++ .tx_num = ARRAY_SIZE(sdx55_qmp_pcie_tx_tbl),
++ .rx = sdx55_qmp_pcie_rx_tbl,
++ .rx_num = ARRAY_SIZE(sdx55_qmp_pcie_rx_tbl),
++ .pcs = sdx55_qmp_pcie_pcs_tbl,
++ .pcs_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_tbl),
++ .pcs_misc = sdx55_qmp_pcie_pcs_misc_tbl,
++ .pcs_misc_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_misc_tbl),
++ },
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+@@ -1753,25 +1791,23 @@ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS_4_20,
+-
+- .has_pwrdn_delay = true,
+- .pwrdn_delay_min = 995, /* us */
+- .pwrdn_delay_max = 1005, /* us */
+ };
+
+ static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
+ .lanes = 1,
+
+- .serdes_tbl = sm8450_qmp_gen3x1_pcie_serdes_tbl,
+- .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl),
+- .tx_tbl = sm8450_qmp_gen3x1_pcie_tx_tbl,
+- .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_tx_tbl),
+- .rx_tbl = sm8450_qmp_gen3x1_pcie_rx_tbl,
+- .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rx_tbl),
+- .pcs_tbl = sm8450_qmp_gen3x1_pcie_pcs_tbl,
+- .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_tbl),
+- .pcs_misc_tbl = sm8450_qmp_gen3x1_pcie_pcs_misc_tbl,
+- .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl),
++ .tables = {
++ .serdes = sm8450_qmp_gen3x1_pcie_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl),
++ .tx = sm8450_qmp_gen3x1_pcie_tx_tbl,
++ .tx_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_tx_tbl),
++ .rx = sm8450_qmp_gen3x1_pcie_rx_tbl,
++ .rx_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rx_tbl),
++ .pcs = sm8450_qmp_gen3x1_pcie_pcs_tbl,
++ .pcs_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_tbl),
++ .pcs_misc = sm8450_qmp_gen3x1_pcie_pcs_misc_tbl,
++ .pcs_misc_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl),
++ },
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+@@ -1783,25 +1819,38 @@ static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+-
+- .has_pwrdn_delay = true,
+- .pwrdn_delay_min = 995, /* us */
+- .pwrdn_delay_max = 1005, /* us */
+ };
+
+ static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
+ .lanes = 2,
+
+- .serdes_tbl = sm8450_qmp_gen4x2_pcie_serdes_tbl,
+- .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl),
+- .tx_tbl = sm8450_qmp_gen4x2_pcie_tx_tbl,
+- .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_tx_tbl),
+- .rx_tbl = sm8450_qmp_gen4x2_pcie_rx_tbl,
+- .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rx_tbl),
+- .pcs_tbl = sm8450_qmp_gen4x2_pcie_pcs_tbl,
+- .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_tbl),
+- .pcs_misc_tbl = sm8450_qmp_gen4x2_pcie_pcs_misc_tbl,
+- .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_misc_tbl),
++ .tables = {
++ .serdes = sm8450_qmp_gen4x2_pcie_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl),
++ .tx = sm8450_qmp_gen4x2_pcie_tx_tbl,
++ .tx_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_tx_tbl),
++ .rx = sm8450_qmp_gen4x2_pcie_rx_tbl,
++ .rx_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rx_tbl),
++ .pcs = sm8450_qmp_gen4x2_pcie_pcs_tbl,
++ .pcs_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_tbl),
++ .pcs_misc = sm8450_qmp_gen4x2_pcie_pcs_misc_tbl,
++ .pcs_misc_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_misc_tbl),
++ },
++
++ .tables_rc = &(const struct qmp_phy_cfg_tables) {
++ .serdes = sm8450_qmp_gen4x2_pcie_rc_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rc_serdes_tbl),
++ .pcs_misc = sm8450_qmp_gen4x2_pcie_rc_pcs_misc_tbl,
++ .pcs_misc_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rc_pcs_misc_tbl),
++ },
++
++ .tables_ep = &(const struct qmp_phy_cfg_tables) {
++ .serdes = sm8450_qmp_gen4x2_pcie_ep_serdes_tbl,
++ .serdes_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_ep_serdes_tbl),
++ .pcs_misc = sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl,
++ .pcs_misc_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl),
++ },
++
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+@@ -1813,10 +1862,6 @@ static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+-
+- .has_pwrdn_delay = true,
+- .pwrdn_delay_min = 995, /* us */
+- .pwrdn_delay_max = 1005, /* us */
+ };
+
+ static void qmp_pcie_configure_lane(void __iomem *base,
+@@ -1850,17 +1895,49 @@ static void qmp_pcie_configure(void __iomem *base,
+ qmp_pcie_configure_lane(base, regs, tbl, num, 0xff);
+ }
+
+-static int qmp_pcie_serdes_init(struct qmp_phy *qphy)
++static void qmp_pcie_serdes_init(struct qmp_phy *qphy, const struct qmp_phy_cfg_tables *tables)
+ {
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *serdes = qphy->serdes;
+- const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
+- int serdes_tbl_num = cfg->serdes_tbl_num;
+
+- qmp_pcie_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+- qmp_pcie_configure(serdes, cfg->regs, cfg->serdes_tbl_sec, cfg->serdes_tbl_num_sec);
++ if (!tables)
++ return;
+
+- return 0;
++ qmp_pcie_configure(serdes, cfg->regs, tables->serdes, tables->serdes_num);
++}
++
++static void qmp_pcie_lanes_init(struct qmp_phy *qphy, const struct qmp_phy_cfg_tables *tables)
++{
++ const struct qmp_phy_cfg *cfg = qphy->cfg;
++ void __iomem *tx = qphy->tx;
++ void __iomem *rx = qphy->rx;
++
++ if (!tables)
++ return;
++
++ qmp_pcie_configure_lane(tx, cfg->regs, tables->tx, tables->tx_num, 1);
++
++ if (cfg->lanes >= 2)
++ qmp_pcie_configure_lane(qphy->tx2, cfg->regs, tables->tx, tables->tx_num, 2);
++
++ qmp_pcie_configure_lane(rx, cfg->regs, tables->rx, tables->rx_num, 1);
++ if (cfg->lanes >= 2)
++ qmp_pcie_configure_lane(qphy->rx2, cfg->regs, tables->rx, tables->rx_num, 2);
++}
++
++static void qmp_pcie_pcs_init(struct qmp_phy *qphy, const struct qmp_phy_cfg_tables *tables)
++{
++ const struct qmp_phy_cfg *cfg = qphy->cfg;
++ void __iomem *pcs = qphy->pcs;
++ void __iomem *pcs_misc = qphy->pcs_misc;
++
++ if (!tables)
++ return;
++
++ qmp_pcie_configure(pcs, cfg->regs,
++ tables->pcs, tables->pcs_num);
++ qmp_pcie_configure(pcs_misc, cfg->regs,
++ tables->pcs_misc, tables->pcs_misc_num);
+ }
+
+ static int qmp_pcie_init(struct phy *phy)
+@@ -1932,15 +2009,19 @@ static int qmp_pcie_power_on(struct phy *phy)
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+- void __iomem *tx = qphy->tx;
+- void __iomem *rx = qphy->rx;
++ const struct qmp_phy_cfg_tables *mode_tables;
+ void __iomem *pcs = qphy->pcs;
+- void __iomem *pcs_misc = qphy->pcs_misc;
+ void __iomem *status;
+ unsigned int mask, val, ready;
+ int ret;
+
+- qmp_pcie_serdes_init(qphy);
++ if (qphy->mode == PHY_MODE_PCIE_RC)
++ mode_tables = cfg->tables_rc;
++ else
++ mode_tables = cfg->tables_ep;
++
++ qmp_pcie_serdes_init(qphy, &cfg->tables);
++ qmp_pcie_serdes_init(qphy, mode_tables);
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+@@ -1949,40 +2030,11 @@ static int qmp_pcie_power_on(struct phy *phy)
+ }
+
+ /* Tx, Rx, and PCS configurations */
+- qmp_pcie_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+- qmp_pcie_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec, cfg->tx_tbl_num_sec, 1);
+-
+- if (cfg->lanes >= 2) {
+- qmp_pcie_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl,
+- cfg->tx_tbl_num, 2);
+- qmp_pcie_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl_sec,
+- cfg->tx_tbl_num_sec, 2);
+- }
+-
+- qmp_pcie_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+- qmp_pcie_configure_lane(rx, cfg->regs, cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1);
+-
+- if (cfg->lanes >= 2) {
+- qmp_pcie_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl,
+- cfg->rx_tbl_num, 2);
+- qmp_pcie_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl_sec,
+- cfg->rx_tbl_num_sec, 2);
+- }
++ qmp_pcie_lanes_init(qphy, &cfg->tables);
++ qmp_pcie_lanes_init(qphy, mode_tables);
+
+- qmp_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+- qmp_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl_sec, cfg->pcs_tbl_num_sec);
+-
+- qmp_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl, cfg->pcs_misc_tbl_num);
+- qmp_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec, cfg->pcs_misc_tbl_num_sec);
+-
+- /*
+- * Pull out PHY from POWER DOWN state.
+- * This is active low enable signal to power-down PHY.
+- */
+- qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+-
+- if (cfg->has_pwrdn_delay)
+- usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
++ qmp_pcie_pcs_init(qphy, &cfg->tables);
++ qmp_pcie_pcs_init(qphy, mode_tables);
+
+ /* Pull PHY out of reset state */
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+@@ -1990,6 +2042,9 @@ static int qmp_pcie_power_on(struct phy *phy)
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
++ if (!cfg->skip_start_delay)
++ usleep_range(1000, 1200);
++
+ status = pcs + cfg->regs[QPHY_PCS_STATUS];
+ mask = cfg->phy_status;
+ ready = 0;
+@@ -2060,6 +2115,23 @@ static int qmp_pcie_disable(struct phy *phy)
+ return qmp_pcie_exit(phy);
+ }
+
++static int qmp_pcie_set_mode(struct phy *phy, enum phy_mode mode, int submode)
++{
++ struct qmp_phy *qphy = phy_get_drvdata(phy);
++
++ switch (submode) {
++ case PHY_MODE_PCIE_RC:
++ case PHY_MODE_PCIE_EP:
++ qphy->mode = submode;
++ break;
++ default:
++ dev_err(&phy->dev, "Unsupported submode %d\n", submode);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ static int qmp_pcie_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+ {
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+@@ -2183,6 +2255,7 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
+ static const struct phy_ops qmp_pcie_ops = {
+ .power_on = qmp_pcie_enable,
+ .power_off = qmp_pcie_disable,
++ .set_mode = qmp_pcie_set_mode,
+ .owner = THIS_MODULE,
+ };
+
+@@ -2198,6 +2271,8 @@ static int qmp_pcie_create(struct device *dev, struct device_node *np, int id,
+ if (!qphy)
+ return -ENOMEM;
+
++ qphy->mode = PHY_MODE_PCIE_RC;
++
+ qphy->cfg = cfg;
+ qphy->serdes = serdes;
+ /*
+@@ -2240,7 +2315,9 @@ static int qmp_pcie_create(struct device *dev, struct device_node *np, int id,
+ qphy->pcs_misc = qphy->pcs + 0x400;
+
+ if (IS_ERR(qphy->pcs_misc)) {
+- if (cfg->pcs_misc_tbl || cfg->pcs_misc_tbl_sec)
++ if (cfg->tables.pcs_misc ||
++ (cfg->tables_rc && cfg->tables_rc->pcs_misc) ||
++ (cfg->tables_ep && cfg->tables_ep->pcs_misc))
+ return PTR_ERR(qphy->pcs_misc);
+ }
+
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
+index 1eedf50cf9cbc..3d9713d348fe6 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
+@@ -8,8 +8,10 @@
+
+ /* Only for QMP V5_20 PHY - PCIe PCS registers */
+ #define QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x01c
++#define QPHY_V5_20_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5 0x084
+ #define QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS 0x090
+ #define QPHY_V5_20_PCS_PCIE_EQ_CONFIG1 0x0a0
++#define QPHY_V5_20_PCS_PCIE_PRESET_P10_POST 0x0e0
+ #define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
+ #define QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN 0x15c
+ #define QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3 0x184
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h
+new file mode 100644
+index 0000000000000..9a5a20daf62cd
+--- /dev/null
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h
+@@ -0,0 +1,14 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (c) 2022, Linaro Ltd.
++ */
++
++#ifndef QCOM_PHY_QMP_PCS_V5_20_H_
++#define QCOM_PHY_QMP_PCS_V5_20_H_
++
++#define QPHY_V5_20_PCS_G3S2_PRE_GAIN 0x170
++#define QPHY_V5_20_PCS_RX_SIGDET_LVL 0x188
++#define QPHY_V5_20_PCS_EQ_CONFIG4 0x1e0
++#define QPHY_V5_20_PCS_EQ_CONFIG5 0x1e4
++
++#endif
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+index b84c0d4b57541..f0ba35bb73c1b 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+@@ -63,8 +63,6 @@
+ #define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+
+ #define PHY_INIT_COMPLETE_TIMEOUT 10000
+-#define POWER_DOWN_DELAY_US_MIN 10
+-#define POWER_DOWN_DELAY_US_MAX 11
+
+ struct qmp_phy_init_tbl {
+ unsigned int offset;
+@@ -126,6 +124,7 @@ static const unsigned int usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d4,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
++ [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04,
+ };
+
+ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+@@ -135,6 +134,7 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
++ [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04,
+ };
+
+ static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+@@ -1456,16 +1456,8 @@ struct qmp_phy_cfg {
+ /* array of registers with different offsets */
+ const unsigned int *regs;
+
+- unsigned int start_ctrl;
+- unsigned int pwrdn_ctrl;
+- /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
+- unsigned int phy_status;
+-
+ /* true, if PHY needs delay after POWER_DOWN */
+ bool has_pwrdn_delay;
+- /* power_down delay in usec */
+- int pwrdn_delay_min;
+- int pwrdn_delay_max;
+
+ /* true, if PHY has a separate DP_COM control block */
+ bool has_phy_dp_com_ctrl;
+@@ -1616,11 +1608,7 @@ static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+- .regs = usb3phy_regs_layout,
+-
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
++ .regs = qmp_v3_usb3phy_regs_layout,
+ };
+
+ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
+@@ -1641,10 +1629,6 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = usb3phy_regs_layout,
+-
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+ };
+
+ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
+@@ -1666,14 +1650,7 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+-
+ .has_pwrdn_delay = true,
+- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+-
+ .has_phy_dp_com_ctrl = true,
+ };
+
+@@ -1696,14 +1673,7 @@ static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+-
+ .has_pwrdn_delay = true,
+- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+-
+ .has_phy_dp_com_ctrl = true,
+ };
+
+@@ -1725,14 +1695,7 @@ static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+-
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+-
+- .has_pwrdn_delay = true,
+- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
++ .pcs_usb_offset = 0x1000,
+ };
+
+ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
+@@ -1754,13 +1717,7 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+-
+ .has_pwrdn_delay = true,
+- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+ };
+
+ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
+@@ -1781,10 +1738,6 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+-
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+ };
+
+ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
+@@ -1809,15 +1762,7 @@ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+-
+-
+ .has_pwrdn_delay = true,
+- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+-
+ .has_phy_dp_com_ctrl = true,
+ };
+
+@@ -1843,13 +1788,7 @@ static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x600,
+
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+-
+ .has_pwrdn_delay = true,
+- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+ };
+
+ static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
+@@ -1874,14 +1813,7 @@ static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+-
+ .has_pwrdn_delay = true,
+- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+-
+ .has_phy_dp_com_ctrl = true,
+ };
+
+@@ -1907,13 +1839,7 @@ static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x600,
+
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+-
+ .has_pwrdn_delay = true,
+- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+ };
+
+ static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
+@@ -1938,13 +1864,7 @@ static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x600,
+
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+-
+ .has_pwrdn_delay = true,
+- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+ };
+
+ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
+@@ -1969,13 +1889,7 @@ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x1000,
+
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+-
+ .has_pwrdn_delay = true,
+- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+ };
+
+ static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
+@@ -2000,14 +1914,7 @@ static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+-
+ .has_pwrdn_delay = true,
+- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+-
+ .has_phy_dp_com_ctrl = true,
+ };
+
+@@ -2033,13 +1940,7 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x1000,
+
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+-
+ .has_pwrdn_delay = true,
+- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+ };
+
+ static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
+@@ -2060,10 +1961,6 @@ static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qcm2290_usb3phy_regs_layout,
+-
+- .start_ctrl = SERDES_START | PCS_START,
+- .pwrdn_ctrl = SW_PWRDN,
+- .phy_status = PHYSTATUS,
+ };
+
+ static void qmp_usb_configure_lane(void __iomem *base,
+@@ -2164,13 +2061,7 @@ static int qmp_usb_init(struct phy *phy)
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
+ }
+
+- if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
+- qphy_setbits(pcs,
+- cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+- cfg->pwrdn_ctrl);
+- else
+- qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+- cfg->pwrdn_ctrl);
++ qphy_setbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], SW_PWRDN);
+
+ return 0;
+
+@@ -2206,7 +2097,7 @@ static int qmp_usb_power_on(struct phy *phy)
+ void __iomem *rx = qphy->rx;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *status;
+- unsigned int mask, val, ready;
++ unsigned int val;
+ int ret;
+
+ qmp_usb_serdes_init(qphy);
+@@ -2236,19 +2127,16 @@ static int qmp_usb_power_on(struct phy *phy)
+ qmp_usb_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+
+ if (cfg->has_pwrdn_delay)
+- usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
++ usleep_range(10, 20);
+
+ /* Pull PHY out of reset state */
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* start SerDes and Phy-Coding-Sublayer */
+- qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
++ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], SERDES_START | PCS_START);
+
+ status = pcs + cfg->regs[QPHY_PCS_STATUS];
+- mask = cfg->phy_status;
+- ready = 0;
+-
+- ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
++ ret = readl_poll_timeout(status, val, !(val & PHYSTATUS), 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+@@ -2274,16 +2162,12 @@ static int qmp_usb_power_off(struct phy *phy)
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* stop SerDes and Phy-Coding-Sublayer */
+- qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
++ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL],
++ SERDES_START | PCS_START);
+
+ /* Put PHY into POWER DOWN state: active low */
+- if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) {
+- qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+- cfg->pwrdn_ctrl);
+- } else {
+- qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+- cfg->pwrdn_ctrl);
+- }
++ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
++ SW_PWRDN);
+
+ return 0;
+ }
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
+index 26274e3c0cf95..29a48f0436d2a 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
++++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
+@@ -38,6 +38,7 @@
+ #include "phy-qcom-qmp-pcs-pcie-v4_20.h"
+
+ #include "phy-qcom-qmp-pcs-v5.h"
++#include "phy-qcom-qmp-pcs-v5_20.h"
+ #include "phy-qcom-qmp-pcs-pcie-v5.h"
+ #include "phy-qcom-qmp-pcs-usb-v5.h"
+ #include "phy-qcom-qmp-pcs-ufs-v5.h"
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7986.c b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
+index 50cb736f9f116..b587299697481 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mt7986.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
+@@ -316,10 +316,10 @@ static const struct mtk_pin_field_calc mt7986_pin_pupd_range[] = {
+ PIN_FIELD_BASE(38, 38, IOCFG_LT_BASE, 0x30, 0x10, 9, 1),
+ PIN_FIELD_BASE(39, 40, IOCFG_RB_BASE, 0x60, 0x10, 18, 1),
+ PIN_FIELD_BASE(41, 41, IOCFG_RB_BASE, 0x60, 0x10, 12, 1),
+- PIN_FIELD_BASE(42, 43, IOCFG_RB_BASE, 0x60, 0x10, 22, 1),
+- PIN_FIELD_BASE(44, 45, IOCFG_RB_BASE, 0x60, 0x10, 20, 1),
+- PIN_FIELD_BASE(46, 47, IOCFG_RB_BASE, 0x60, 0x10, 26, 1),
+- PIN_FIELD_BASE(48, 49, IOCFG_RB_BASE, 0x60, 0x10, 24, 1),
++ PIN_FIELD_BASE(42, 43, IOCFG_RB_BASE, 0x60, 0x10, 23, 1),
++ PIN_FIELD_BASE(44, 45, IOCFG_RB_BASE, 0x60, 0x10, 21, 1),
++ PIN_FIELD_BASE(46, 47, IOCFG_RB_BASE, 0x60, 0x10, 27, 1),
++ PIN_FIELD_BASE(48, 49, IOCFG_RB_BASE, 0x60, 0x10, 25, 1),
+ PIN_FIELD_BASE(50, 57, IOCFG_RT_BASE, 0x40, 0x10, 2, 1),
+ PIN_FIELD_BASE(58, 58, IOCFG_RT_BASE, 0x40, 0x10, 1, 1),
+ PIN_FIELD_BASE(59, 59, IOCFG_RT_BASE, 0x40, 0x10, 0, 1),
+@@ -354,10 +354,10 @@ static const struct mtk_pin_field_calc mt7986_pin_r0_range[] = {
+ PIN_FIELD_BASE(38, 38, IOCFG_LT_BASE, 0x40, 0x10, 9, 1),
+ PIN_FIELD_BASE(39, 40, IOCFG_RB_BASE, 0x70, 0x10, 18, 1),
+ PIN_FIELD_BASE(41, 41, IOCFG_RB_BASE, 0x70, 0x10, 12, 1),
+- PIN_FIELD_BASE(42, 43, IOCFG_RB_BASE, 0x70, 0x10, 22, 1),
+- PIN_FIELD_BASE(44, 45, IOCFG_RB_BASE, 0x70, 0x10, 20, 1),
+- PIN_FIELD_BASE(46, 47, IOCFG_RB_BASE, 0x70, 0x10, 26, 1),
+- PIN_FIELD_BASE(48, 49, IOCFG_RB_BASE, 0x70, 0x10, 24, 1),
++ PIN_FIELD_BASE(42, 43, IOCFG_RB_BASE, 0x70, 0x10, 23, 1),
++ PIN_FIELD_BASE(44, 45, IOCFG_RB_BASE, 0x70, 0x10, 21, 1),
++ PIN_FIELD_BASE(46, 47, IOCFG_RB_BASE, 0x70, 0x10, 27, 1),
++ PIN_FIELD_BASE(48, 49, IOCFG_RB_BASE, 0x70, 0x10, 25, 1),
+ PIN_FIELD_BASE(50, 57, IOCFG_RT_BASE, 0x50, 0x10, 2, 1),
+ PIN_FIELD_BASE(58, 58, IOCFG_RT_BASE, 0x50, 0x10, 1, 1),
+ PIN_FIELD_BASE(59, 59, IOCFG_RT_BASE, 0x50, 0x10, 0, 1),
+@@ -392,10 +392,10 @@ static const struct mtk_pin_field_calc mt7986_pin_r1_range[] = {
+ PIN_FIELD_BASE(38, 38, IOCFG_LT_BASE, 0x50, 0x10, 9, 1),
+ PIN_FIELD_BASE(39, 40, IOCFG_RB_BASE, 0x80, 0x10, 18, 1),
+ PIN_FIELD_BASE(41, 41, IOCFG_RB_BASE, 0x80, 0x10, 12, 1),
+- PIN_FIELD_BASE(42, 43, IOCFG_RB_BASE, 0x80, 0x10, 22, 1),
+- PIN_FIELD_BASE(44, 45, IOCFG_RB_BASE, 0x80, 0x10, 20, 1),
+- PIN_FIELD_BASE(46, 47, IOCFG_RB_BASE, 0x80, 0x10, 26, 1),
+- PIN_FIELD_BASE(48, 49, IOCFG_RB_BASE, 0x80, 0x10, 24, 1),
++ PIN_FIELD_BASE(42, 43, IOCFG_RB_BASE, 0x80, 0x10, 23, 1),
++ PIN_FIELD_BASE(44, 45, IOCFG_RB_BASE, 0x80, 0x10, 21, 1),
++ PIN_FIELD_BASE(46, 47, IOCFG_RB_BASE, 0x80, 0x10, 27, 1),
++ PIN_FIELD_BASE(48, 49, IOCFG_RB_BASE, 0x80, 0x10, 25, 1),
+ PIN_FIELD_BASE(50, 57, IOCFG_RT_BASE, 0x60, 0x10, 2, 1),
+ PIN_FIELD_BASE(58, 58, IOCFG_RT_BASE, 0x60, 0x10, 1, 1),
+ PIN_FIELD_BASE(59, 59, IOCFG_RT_BASE, 0x60, 0x10, 0, 1),
+diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
+index 415d1df8f46a5..365c4b0ca4654 100644
+--- a/drivers/pinctrl/pinconf-generic.c
++++ b/drivers/pinctrl/pinconf-generic.c
+@@ -395,8 +395,10 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
+ for_each_available_child_of_node(np_config, np) {
+ ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
+ &reserved_maps, num_maps, type);
+- if (ret < 0)
++ if (ret < 0) {
++ of_node_put(np);
+ goto exit;
++ }
+ }
+ return 0;
+
+diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
+index ecab6bf63dc6d..ad4db99094a79 100644
+--- a/drivers/pinctrl/pinctrl-k210.c
++++ b/drivers/pinctrl/pinctrl-k210.c
+@@ -862,8 +862,10 @@ static int k210_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ for_each_available_child_of_node(np_config, np) {
+ ret = k210_pinctrl_dt_subnode_to_map(pctldev, np, map,
+ &reserved_maps, num_maps);
+- if (ret < 0)
++ if (ret < 0) {
++ of_node_put(np);
+ goto err;
++ }
+ }
+ return 0;
+
+diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
+index 687aaa6015555..3d5995cbcb782 100644
+--- a/drivers/pinctrl/pinctrl-ocelot.c
++++ b/drivers/pinctrl/pinctrl-ocelot.c
+@@ -2047,6 +2047,11 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev,
+ return devm_regmap_init_mmio(&pdev->dev, base, &regmap_config);
+ }
+
++static void ocelot_destroy_workqueue(void *data)
++{
++ destroy_workqueue(data);
++}
++
+ static int ocelot_pinctrl_probe(struct platform_device *pdev)
+ {
+ const struct ocelot_match_data *data;
+@@ -2078,6 +2083,11 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
+ if (!info->wq)
+ return -ENOMEM;
+
++ ret = devm_add_action_or_reset(dev, ocelot_destroy_workqueue,
++ info->wq);
++ if (ret)
++ return ret;
++
+ info->pincfg_data = &data->pincfg_data;
+
+ reset = devm_reset_control_get_optional_shared(dev, "switch");
+@@ -2119,15 +2129,6 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static int ocelot_pinctrl_remove(struct platform_device *pdev)
+-{
+- struct ocelot_pinctrl *info = platform_get_drvdata(pdev);
+-
+- destroy_workqueue(info->wq);
+-
+- return 0;
+-}
+-
+ static struct platform_driver ocelot_pinctrl_driver = {
+ .driver = {
+ .name = "pinctrl-ocelot",
+@@ -2135,7 +2136,6 @@ static struct platform_driver ocelot_pinctrl_driver = {
+ .suppress_bind_attrs = true,
+ },
+ .probe = ocelot_pinctrl_probe,
+- .remove = ocelot_pinctrl_remove,
+ };
+ module_platform_driver(ocelot_pinctrl_driver);
+
+diff --git a/drivers/pinctrl/pinctrl-thunderbay.c b/drivers/pinctrl/pinctrl-thunderbay.c
+index 9328b17485cf0..590bbbf619afc 100644
+--- a/drivers/pinctrl/pinctrl-thunderbay.c
++++ b/drivers/pinctrl/pinctrl-thunderbay.c
+@@ -808,7 +808,7 @@ static int thunderbay_add_functions(struct thunderbay_pinctrl *tpc, struct funct
+ funcs[i].num_group_names,
+ funcs[i].data);
+ }
+- kfree(funcs);
++
+ return 0;
+ }
+
+@@ -817,6 +817,7 @@ static int thunderbay_build_functions(struct thunderbay_pinctrl *tpc)
+ struct function_desc *thunderbay_funcs;
+ void *ptr;
+ int pin;
++ int ret;
+
+ /*
+ * Allocate maximum possible number of functions. Assume every pin
+@@ -860,7 +861,10 @@ static int thunderbay_build_functions(struct thunderbay_pinctrl *tpc)
+ return -ENOMEM;
+
+ thunderbay_funcs = ptr;
+- return thunderbay_add_functions(tpc, thunderbay_funcs);
++ ret = thunderbay_add_functions(tpc, thunderbay_funcs);
++
++ kfree(thunderbay_funcs);
++ return ret;
+ }
+
+ static int thunderbay_pinconf_set_tristate(struct thunderbay_pinctrl *tpc,
+diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
+index 2a7ff14dc37e9..59de4ce01faba 100644
+--- a/drivers/platform/chrome/cros_ec_typec.c
++++ b/drivers/platform/chrome/cros_ec_typec.c
+@@ -173,10 +173,13 @@ static int cros_typec_get_switch_handles(struct cros_typec_port *port,
+
+ role_sw_err:
+ typec_switch_put(port->ori_sw);
++ port->ori_sw = NULL;
+ ori_sw_err:
+ typec_retimer_put(port->retimer);
++ port->retimer = NULL;
+ retimer_sw_err:
+ typec_mux_put(port->mux);
++ port->mux = NULL;
+ mux_err:
+ return -ENODEV;
+ }
+diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c
+index 4b5a81c9dc6da..10670b6588e3e 100644
+--- a/drivers/platform/chrome/cros_usbpd_notify.c
++++ b/drivers/platform/chrome/cros_usbpd_notify.c
+@@ -239,7 +239,11 @@ static int __init cros_usbpd_notify_init(void)
+ return ret;
+
+ #ifdef CONFIG_ACPI
+- platform_driver_register(&cros_usbpd_notify_acpi_driver);
++ ret = platform_driver_register(&cros_usbpd_notify_acpi_driver);
++ if (ret) {
++ platform_driver_unregister(&cros_usbpd_notify_plat_driver);
++ return ret;
++ }
+ #endif
+ return 0;
+ }
+diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
+index 65b4a819f1bdf..c2c9b0d3244cb 100644
+--- a/drivers/platform/mellanox/mlxbf-pmc.c
++++ b/drivers/platform/mellanox/mlxbf-pmc.c
+@@ -358,7 +358,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
+ { 0x32, "DDN_DIAG_W_INGRESS" },
+ { 0x33, "DDN_DIAG_C_INGRESS" },
+ { 0x34, "DDN_DIAG_CORE_SENT" },
+- { 0x35, "NDN_DIAG_S_OUT_OF_CRED" },
++ { 0x35, "NDN_DIAG_N_OUT_OF_CRED" },
+ { 0x36, "NDN_DIAG_S_OUT_OF_CRED" },
+ { 0x37, "NDN_DIAG_E_OUT_OF_CRED" },
+ { 0x38, "NDN_DIAG_W_OUT_OF_CRED" },
+diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
+index 5873c2663a65b..b85050e4a0d65 100644
+--- a/drivers/platform/x86/huawei-wmi.c
++++ b/drivers/platform/x86/huawei-wmi.c
+@@ -760,6 +760,9 @@ static int huawei_wmi_input_setup(struct device *dev,
+ const char *guid,
+ struct input_dev **idev)
+ {
++ acpi_status status;
++ int err;
++
+ *idev = devm_input_allocate_device(dev);
+ if (!*idev)
+ return -ENOMEM;
+@@ -769,10 +772,19 @@ static int huawei_wmi_input_setup(struct device *dev,
+ (*idev)->id.bustype = BUS_HOST;
+ (*idev)->dev.parent = dev;
+
+- return sparse_keymap_setup(*idev, huawei_wmi_keymap, NULL) ||
+- input_register_device(*idev) ||
+- wmi_install_notify_handler(guid, huawei_wmi_input_notify,
+- *idev);
++ err = sparse_keymap_setup(*idev, huawei_wmi_keymap, NULL);
++ if (err)
++ return err;
++
++ err = input_register_device(*idev);
++ if (err)
++ return err;
++
++ status = wmi_install_notify_handler(guid, huawei_wmi_input_notify, *idev);
++ if (ACPI_FAILURE(status))
++ return -EIO;
++
++ return 0;
+ }
+
+ static void huawei_wmi_input_exit(struct device *dev, const char *guid)
+diff --git a/drivers/platform/x86/intel/int3472/clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
+index 1cf958983e868..b2342b3d78c72 100644
+--- a/drivers/platform/x86/intel/int3472/clk_and_regulator.c
++++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
+@@ -185,7 +185,8 @@ int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
+ cfg.init_data = &init_data;
+ cfg.ena_gpiod = int3472->regulator.gpio;
+
+- int3472->regulator.rdev = regulator_register(&int3472->regulator.rdesc,
++ int3472->regulator.rdev = regulator_register(int3472->dev,
++ &int3472->regulator.rdesc,
+ &cfg);
+ if (IS_ERR(int3472->regulator.rdev)) {
+ ret = PTR_ERR(int3472->regulator.rdev);
+diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
+index 7cc9089d1e14f..e7a3e34028178 100644
+--- a/drivers/platform/x86/intel_scu_ipc.c
++++ b/drivers/platform/x86/intel_scu_ipc.c
+@@ -583,7 +583,6 @@ __intel_scu_ipc_register(struct device *parent,
+ scu->dev.parent = parent;
+ scu->dev.class = &intel_scu_ipc_class;
+ scu->dev.release = intel_scu_ipc_release;
+- dev_set_name(&scu->dev, "intel_scu_ipc");
+
+ if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem),
+ "intel_scu_ipc")) {
+@@ -612,6 +611,7 @@ __intel_scu_ipc_register(struct device *parent,
+ * After this point intel_scu_ipc_release() takes care of
+ * releasing the SCU IPC resources once refcount drops to zero.
+ */
++ dev_set_name(&scu->dev, "intel_scu_ipc");
+ err = device_register(&scu->dev);
+ if (err) {
+ put_device(&scu->dev);
+diff --git a/drivers/platform/x86/mxm-wmi.c b/drivers/platform/x86/mxm-wmi.c
+index 9a19fbd2f7341..9a457956025a5 100644
+--- a/drivers/platform/x86/mxm-wmi.c
++++ b/drivers/platform/x86/mxm-wmi.c
+@@ -35,13 +35,11 @@ int mxm_wmi_call_mxds(int adapter)
+ .xarg = 1,
+ };
+ struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
+- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+
+ printk("calling mux switch %d\n", adapter);
+
+- status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input,
+- &output);
++ status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL);
+
+ if (ACPI_FAILURE(status))
+ return status;
+@@ -60,13 +58,11 @@ int mxm_wmi_call_mxmx(int adapter)
+ .xarg = 1,
+ };
+ struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
+- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+
+ printk("calling mux switch %d\n", adapter);
+
+- status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input,
+- &output);
++ status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL);
+
+ if (ACPI_FAILURE(status))
+ return status;
+diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
+index 4df5aa6a309c3..6a60c5d83383b 100644
+--- a/drivers/pnp/core.c
++++ b/drivers/pnp/core.c
+@@ -148,14 +148,14 @@ struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id,
+ dev->dev.coherent_dma_mask = dev->dma_mask;
+ dev->dev.release = &pnp_release_device;
+
+- dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number);
+-
+ dev_id = pnp_add_id(dev, pnpid);
+ if (!dev_id) {
+ kfree(dev);
+ return NULL;
+ }
+
++ dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number);
++
+ return dev;
+ }
+
+diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
+index c19c50442761d..58757a5799f8b 100644
+--- a/drivers/power/supply/ab8500_charger.c
++++ b/drivers/power/supply/ab8500_charger.c
+@@ -3719,7 +3719,14 @@ static int __init ab8500_charger_init(void)
+ if (ret)
+ return ret;
+
+- return platform_driver_register(&ab8500_charger_driver);
++ ret = platform_driver_register(&ab8500_charger_driver);
++ if (ret) {
++ platform_unregister_drivers(ab8500_charger_component_drivers,
++ ARRAY_SIZE(ab8500_charger_component_drivers));
++ return ret;
++ }
++
++ return 0;
+ }
+
+ static void __exit ab8500_charger_exit(void)
+diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
+index 6020b58c641d2..0e15302b8df22 100644
+--- a/drivers/power/supply/bq25890_charger.c
++++ b/drivers/power/supply/bq25890_charger.c
+@@ -1049,6 +1049,36 @@ static const struct regulator_desc bq25890_vbus_desc = {
+ .fixed_uV = 5000000,
+ .n_voltages = 1,
+ };
++
++static int bq25890_register_regulator(struct bq25890_device *bq)
++{
++ struct bq25890_platform_data *pdata = dev_get_platdata(bq->dev);
++ struct regulator_config cfg = {
++ .dev = bq->dev,
++ .driver_data = bq,
++ };
++ struct regulator_dev *reg;
++
++ if (!IS_ERR_OR_NULL(bq->usb_phy))
++ return 0;
++
++ if (pdata)
++ cfg.init_data = pdata->regulator_init_data;
++
++ reg = devm_regulator_register(bq->dev, &bq25890_vbus_desc, &cfg);
++ if (IS_ERR(reg)) {
++ return dev_err_probe(bq->dev, PTR_ERR(reg),
++ "registering vbus regulator");
++ }
++
++ return 0;
++}
++#else
++static inline int
++bq25890_register_regulator(struct bq25890_device *bq)
++{
++ return 0;
++}
+ #endif
+
+ static int bq25890_get_chip_version(struct bq25890_device *bq)
+@@ -1189,8 +1219,14 @@ static int bq25890_fw_probe(struct bq25890_device *bq)
+ return 0;
+ }
+
+-static int bq25890_probe(struct i2c_client *client,
+- const struct i2c_device_id *id)
++static void bq25890_non_devm_cleanup(void *data)
++{
++ struct bq25890_device *bq = data;
++
++ cancel_delayed_work_sync(&bq->pump_express_work);
++}
++
++static int bq25890_probe(struct i2c_client *client)
+ {
+ struct device *dev = &client->dev;
+ struct bq25890_device *bq;
+@@ -1244,27 +1280,24 @@ static int bq25890_probe(struct i2c_client *client,
+
+ /* OTG reporting */
+ bq->usb_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
++
++ /*
++ * This must be before bq25890_power_supply_init(), so that it runs
++ * after devm unregisters the power_supply.
++ */
++ ret = devm_add_action_or_reset(dev, bq25890_non_devm_cleanup, bq);
++ if (ret)
++ return ret;
++
++ ret = bq25890_register_regulator(bq);
++ if (ret)
++ return ret;
++
+ if (!IS_ERR_OR_NULL(bq->usb_phy)) {
+ INIT_WORK(&bq->usb_work, bq25890_usb_work);
+ bq->usb_nb.notifier_call = bq25890_usb_notifier;
+ usb_register_notifier(bq->usb_phy, &bq->usb_nb);
+ }
+-#ifdef CONFIG_REGULATOR
+- else {
+- struct bq25890_platform_data *pdata = dev_get_platdata(dev);
+- struct regulator_config cfg = { };
+- struct regulator_dev *reg;
+-
+- cfg.dev = dev;
+- cfg.driver_data = bq;
+- if (pdata)
+- cfg.init_data = pdata->regulator_init_data;
+-
+- reg = devm_regulator_register(dev, &bq25890_vbus_desc, &cfg);
+- if (IS_ERR(reg))
+- return dev_err_probe(dev, PTR_ERR(reg), "registering regulator");
+- }
+-#endif
+
+ ret = bq25890_power_supply_init(bq);
+ if (ret < 0) {
+@@ -1400,7 +1433,7 @@ static struct i2c_driver bq25890_driver = {
+ .acpi_match_table = ACPI_PTR(bq25890_acpi_match),
+ .pm = &bq25890_pm,
+ },
+- .probe = bq25890_probe,
++ .probe_new = bq25890_probe,
+ .remove = bq25890_remove,
+ .shutdown = bq25890_shutdown,
+ .id_table = bq25890_i2c_ids,
+diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
+index 6d52641151d9a..473522b4326ad 100644
+--- a/drivers/power/supply/cw2015_battery.c
++++ b/drivers/power/supply/cw2015_battery.c
+@@ -699,6 +699,9 @@ static int cw_bat_probe(struct i2c_client *client)
+ }
+
+ cw_bat->battery_workqueue = create_singlethread_workqueue("rk_battery");
++ if (!cw_bat->battery_workqueue)
++ return -ENOMEM;
++
+ devm_delayed_work_autocancel(&client->dev,
+ &cw_bat->battery_delay_work, cw_bat_work);
+ queue_delayed_work(cw_bat->battery_workqueue,
+diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
+index 4b5fb172fa994..01d1ac79d982e 100644
+--- a/drivers/power/supply/power_supply_core.c
++++ b/drivers/power/supply/power_supply_core.c
+@@ -750,6 +750,11 @@ int power_supply_get_battery_info(struct power_supply *psy,
+ int i, tab_len, size;
+
+ propname = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d", index);
++ if (!propname) {
++ power_supply_put_battery_info(psy, info);
++ err = -ENOMEM;
++ goto out_put_node;
++ }
+ list = of_get_property(battery_np, propname, &size);
+ if (!list || !size) {
+ dev_err(&psy->dev, "failed to get %s\n", propname);
+@@ -1387,8 +1392,8 @@ create_triggers_failed:
+ register_cooler_failed:
+ psy_unregister_thermal(psy);
+ register_thermal_failed:
+- device_del(dev);
+ wakeup_init_failed:
++ device_del(dev);
+ device_add_failed:
+ check_supplies_failed:
+ dev_set_name_failed:
+diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
+index f20a6ac584ccd..4f9c1c4179165 100644
+--- a/drivers/power/supply/rk817_charger.c
++++ b/drivers/power/supply/rk817_charger.c
+@@ -1060,8 +1060,10 @@ static int rk817_charger_probe(struct platform_device *pdev)
+ return -ENODEV;
+
+ charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
+- if (!charger)
++ if (!charger) {
++ of_node_put(node);
+ return -ENOMEM;
++ }
+
+ charger->rk808 = rk808;
+
+diff --git a/drivers/power/supply/z2_battery.c b/drivers/power/supply/z2_battery.c
+index 1897c29848600..d033c1d3ee42a 100644
+--- a/drivers/power/supply/z2_battery.c
++++ b/drivers/power/supply/z2_battery.c
+@@ -206,10 +206,12 @@ static int z2_batt_probe(struct i2c_client *client,
+
+ charger->charge_gpiod = devm_gpiod_get_optional(&client->dev,
+ NULL, GPIOD_IN);
+- if (IS_ERR(charger->charge_gpiod))
+- return dev_err_probe(&client->dev,
++ if (IS_ERR(charger->charge_gpiod)) {
++ ret = dev_err_probe(&client->dev,
+ PTR_ERR(charger->charge_gpiod),
+ "failed to get charge GPIO\n");
++ goto err;
++ }
+
+ if (charger->charge_gpiod) {
+ gpiod_set_consumer_name(charger->charge_gpiod, "BATT CHRG");
+diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
+index 6901a44dc428d..a337b47dc2f7d 100644
+--- a/drivers/pwm/pwm-mediatek.c
++++ b/drivers/pwm/pwm-mediatek.c
+@@ -296,7 +296,7 @@ static const struct pwm_mediatek_of_data mt6795_pwm_data = {
+ static const struct pwm_mediatek_of_data mt7622_pwm_data = {
+ .num_pwms = 6,
+ .pwm45_fixup = false,
+- .has_ck_26m_sel = false,
++ .has_ck_26m_sel = true,
+ };
+
+ static const struct pwm_mediatek_of_data mt7623_pwm_data = {
+diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
+index c605013e4114c..3fbb4bae93a4e 100644
+--- a/drivers/pwm/pwm-mtk-disp.c
++++ b/drivers/pwm/pwm-mtk-disp.c
+@@ -178,7 +178,7 @@ static void mtk_disp_pwm_get_state(struct pwm_chip *chip,
+ {
+ struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
+ u64 rate, period, high_width;
+- u32 clk_div, con0, con1;
++ u32 clk_div, pwm_en, con0, con1;
+ int err;
+
+ err = clk_prepare_enable(mdp->clk_main);
+@@ -197,7 +197,8 @@ static void mtk_disp_pwm_get_state(struct pwm_chip *chip,
+ rate = clk_get_rate(mdp->clk_main);
+ con0 = readl(mdp->base + mdp->data->con0);
+ con1 = readl(mdp->base + mdp->data->con1);
+- state->enabled = !!(con0 & BIT(0));
++ pwm_en = readl(mdp->base + DISP_PWM_EN);
++ state->enabled = !!(pwm_en & mdp->data->enable_mask);
+ clk_div = FIELD_GET(PWM_CLKDIV_MASK, con0);
+ period = FIELD_GET(PWM_PERIOD_MASK, con1);
+ /*
+diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
+index 2d4fa5e5fdd46..bb72393134016 100644
+--- a/drivers/pwm/pwm-sifive.c
++++ b/drivers/pwm/pwm-sifive.c
+@@ -204,8 +204,11 @@ static int pwm_sifive_clock_notifier(struct notifier_block *nb,
+ struct pwm_sifive_ddata *ddata =
+ container_of(nb, struct pwm_sifive_ddata, notifier);
+
+- if (event == POST_RATE_CHANGE)
++ if (event == POST_RATE_CHANGE) {
++ mutex_lock(&ddata->lock);
+ pwm_sifive_update_clock(ddata, ndata->new_rate);
++ mutex_unlock(&ddata->lock);
++ }
+
+ return NOTIFY_OK;
+ }
+diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
+index dad9978c91861..249dc01932979 100644
+--- a/drivers/pwm/pwm-tegra.c
++++ b/drivers/pwm/pwm-tegra.c
+@@ -145,8 +145,19 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ * source clock rate as required_clk_rate, PWM controller will
+ * be able to configure the requested period.
+ */
+- required_clk_rate =
+- (NSEC_PER_SEC / period_ns) << PWM_DUTY_WIDTH;
++ required_clk_rate = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC << PWM_DUTY_WIDTH,
++ period_ns);
++
++ if (required_clk_rate > clk_round_rate(pc->clk, required_clk_rate))
++ /*
++ * required_clk_rate is a lower bound for the input
++ * rate; for lower rates there is no value for PWM_SCALE
++ * that yields a period less than or equal to the
++ * requested period. Hence, for lower rates, double the
++ * required_clk_rate to get a clock rate that can meet
++ * the requested period.
++ */
++ required_clk_rate *= 2;
+
+ err = dev_pm_opp_set_rate(pc->dev, required_clk_rate);
+ if (err < 0)
+diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
+index 2cdc054e53a53..43db495f19861 100644
+--- a/drivers/rapidio/devices/rio_mport_cdev.c
++++ b/drivers/rapidio/devices/rio_mport_cdev.c
+@@ -1804,8 +1804,11 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
+ rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
+ 0, 0xffff);
+ err = rio_add_device(rdev);
+- if (err)
+- goto cleanup;
++ if (err) {
++ put_device(&rdev->dev);
++ return err;
++ }
++
+ rio_dev_get(rdev);
+
+ return 0;
+@@ -1901,10 +1904,6 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
+
+ priv->md = chdev;
+
+- mutex_lock(&chdev->file_mutex);
+- list_add_tail(&priv->list, &chdev->file_list);
+- mutex_unlock(&chdev->file_mutex);
+-
+ INIT_LIST_HEAD(&priv->db_filters);
+ INIT_LIST_HEAD(&priv->pw_filters);
+ spin_lock_init(&priv->fifo_lock);
+@@ -1913,6 +1912,7 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
+ sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
+ GFP_KERNEL);
+ if (ret < 0) {
++ put_device(&chdev->dev);
+ dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
+ ret = -ENOMEM;
+ goto err_fifo;
+@@ -1923,6 +1923,9 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
+ spin_lock_init(&priv->req_lock);
+ mutex_init(&priv->dma_lock);
+ #endif
++ mutex_lock(&chdev->file_mutex);
++ list_add_tail(&priv->list, &chdev->file_list);
++ mutex_unlock(&chdev->file_mutex);
+
+ filp->private_data = priv;
+ goto out;
+diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
+index 19b0c33f4a62a..fdcf742b2adbc 100644
+--- a/drivers/rapidio/rio-scan.c
++++ b/drivers/rapidio/rio-scan.c
+@@ -454,8 +454,12 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
+ 0, 0xffff);
+
+ ret = rio_add_device(rdev);
+- if (ret)
+- goto cleanup;
++ if (ret) {
++ if (rswitch)
++ kfree(rswitch->route_table);
++ put_device(&rdev->dev);
++ return NULL;
++ }
+
+ rio_dev_get(rdev);
+
+diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
+index e74cf09eeff07..9544b8ee0c963 100644
+--- a/drivers/rapidio/rio.c
++++ b/drivers/rapidio/rio.c
+@@ -2186,11 +2186,16 @@ int rio_register_mport(struct rio_mport *port)
+ atomic_set(&port->state, RIO_DEVICE_RUNNING);
+
+ res = device_register(&port->dev);
+- if (res)
++ if (res) {
+ dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n",
+ port->id, res);
+- else
++ mutex_lock(&rio_mport_list_lock);
++ list_del(&port->node);
++ mutex_unlock(&rio_mport_list_lock);
++ put_device(&port->dev);
++ } else {
+ dev_dbg(&port->dev, "RIO: registered mport%d\n", port->id);
++ }
+
+ return res;
+ }
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index e8c00a884f1f1..3716ba060368c 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1002,7 +1002,7 @@ static int drms_uA_update(struct regulator_dev *rdev)
+ /* get input voltage */
+ input_uV = 0;
+ if (rdev->supply)
+- input_uV = regulator_get_voltage(rdev->supply);
++ input_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
+ if (input_uV <= 0)
+ input_uV = rdev->constraints->input_uV;
+
+@@ -1596,7 +1596,13 @@ static int set_machine_constraints(struct regulator_dev *rdev)
+ if (rdev->supply_name && !rdev->supply)
+ return -EPROBE_DEFER;
+
+- if (rdev->supply) {
++ /* If supplying regulator has already been enabled,
++ * it's not intended to have use_count increment
++ * when rdev is only boot-on.
++ */
++ if (rdev->supply &&
++ (rdev->constraints->always_on ||
++ !regulator_is_enabled(rdev->supply))) {
+ ret = regulator_enable(rdev->supply);
+ if (ret < 0) {
+ _regulator_put(rdev->supply);
+@@ -1640,6 +1646,7 @@ static int set_supply(struct regulator_dev *rdev,
+
+ rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
+ if (rdev->supply == NULL) {
++ module_put(supply_rdev->owner);
+ err = -ENOMEM;
+ return err;
+ }
+@@ -1813,7 +1820,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+
+ regulator = kzalloc(sizeof(*regulator), GFP_KERNEL);
+ if (regulator == NULL) {
+- kfree(supply_name);
++ kfree_const(supply_name);
+ return NULL;
+ }
+
+@@ -1943,6 +1950,7 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
+ node = of_get_regulator(dev, supply);
+ if (node) {
+ r = of_find_regulator_by_node(node);
++ of_node_put(node);
+ if (r)
+ return r;
+
+@@ -5396,6 +5404,7 @@ static struct regulator_coupler generic_regulator_coupler = {
+
+ /**
+ * regulator_register - register regulator
++ * @dev: the device that drive the regulator
+ * @regulator_desc: regulator to register
+ * @cfg: runtime configuration for regulator
+ *
+@@ -5404,7 +5413,8 @@ static struct regulator_coupler generic_regulator_coupler = {
+ * or an ERR_PTR() on error.
+ */
+ struct regulator_dev *
+-regulator_register(const struct regulator_desc *regulator_desc,
++regulator_register(struct device *dev,
++ const struct regulator_desc *regulator_desc,
+ const struct regulator_config *cfg)
+ {
+ const struct regulator_init_data *init_data;
+@@ -5413,7 +5423,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ struct regulator_dev *rdev;
+ bool dangling_cfg_gpiod = false;
+ bool dangling_of_gpiod = false;
+- struct device *dev;
+ int ret, i;
+ bool resolved_early = false;
+
+@@ -5426,8 +5435,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ goto rinse;
+ }
+
+- dev = cfg->dev;
+- WARN_ON(!dev);
++ WARN_ON(!dev || !cfg->dev);
+
+ if (regulator_desc->name == NULL || regulator_desc->ops == NULL) {
+ ret = -EINVAL;
+@@ -5526,7 +5534,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
+
+ /* register with sysfs */
+ rdev->dev.class = &regulator_class;
+- rdev->dev.parent = dev;
++ rdev->dev.parent = config->dev;
+ dev_set_name(&rdev->dev, "regulator.%lu",
+ (unsigned long) atomic_inc_return(&regulator_no));
+ dev_set_drvdata(&rdev->dev, rdev);
+@@ -5641,6 +5649,7 @@ unset_supplies:
+ regulator_remove_coupling(rdev);
+ mutex_unlock(&regulator_list_mutex);
+ wash:
++ regulator_put(rdev->supply);
+ kfree(rdev->coupling_desc.coupled_rdevs);
+ mutex_lock(&regulator_list_mutex);
+ regulator_ena_gpio_free(rdev);
+diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
+index 3265e75e97ab4..5c7ff9b3e8a79 100644
+--- a/drivers/regulator/devres.c
++++ b/drivers/regulator/devres.c
+@@ -385,7 +385,7 @@ struct regulator_dev *devm_regulator_register(struct device *dev,
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+- rdev = regulator_register(regulator_desc, config);
++ rdev = regulator_register(dev, regulator_desc, config);
+ if (!IS_ERR(rdev)) {
+ *ptr = rdev;
+ devres_add(dev, ptr);
+diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
+index 0aff1c2886b5d..cd726d4e8fbfb 100644
+--- a/drivers/regulator/of_regulator.c
++++ b/drivers/regulator/of_regulator.c
+@@ -505,7 +505,7 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
+ struct device_node *child;
+ struct regulator_init_data *init_data = NULL;
+
+- child = regulator_of_get_init_node(dev, desc);
++ child = regulator_of_get_init_node(config->dev, desc);
+ if (!child)
+ return NULL;
+
+diff --git a/drivers/regulator/qcom-labibb-regulator.c b/drivers/regulator/qcom-labibb-regulator.c
+index 639b71eb41ffe..bcf7140f3bc98 100644
+--- a/drivers/regulator/qcom-labibb-regulator.c
++++ b/drivers/regulator/qcom-labibb-regulator.c
+@@ -822,6 +822,7 @@ static int qcom_labibb_regulator_probe(struct platform_device *pdev)
+ if (irq == 0)
+ irq = -EINVAL;
+
++ of_node_put(reg_node);
+ return dev_err_probe(vreg->dev, irq,
+ "Short-circuit irq not found.\n");
+ }
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index 4158ff126a67a..f90bcdeecea58 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -1187,7 +1187,7 @@ static const struct rpmh_vreg_init_data pm7325_vreg_data[] = {
+ static const struct rpmh_vreg_init_data pmr735a_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps520, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps520, "vdd-s2"),
+- RPMH_VREG("smps3", "smp%s3", &pmic5_hfsmps510, "vdd-s3"),
++ RPMH_VREG("smps3", "smp%s3", &pmic5_hfsmps515, "vdd-s3"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1-l2"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l1-l2"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"),
+diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
+index 30ea3bc8ca192..7a454b7b6eab9 100644
+--- a/drivers/regulator/stm32-vrefbuf.c
++++ b/drivers/regulator/stm32-vrefbuf.c
+@@ -210,7 +210,7 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev)
+ pdev->dev.of_node,
+ &stm32_vrefbuf_regu);
+
+- rdev = regulator_register(&stm32_vrefbuf_regu, &config);
++ rdev = regulator_register(&pdev->dev, &stm32_vrefbuf_regu, &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&pdev->dev, "register failed with error %d\n", ret);
+diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
+index 6afd0941e5524..dc6f07ca83410 100644
+--- a/drivers/remoteproc/qcom_q6v5_pas.c
++++ b/drivers/remoteproc/qcom_q6v5_pas.c
+@@ -449,6 +449,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
+ }
+
+ ret = of_address_to_resource(node, 0, &r);
++ of_node_put(node);
+ if (ret)
+ return ret;
+
+@@ -556,6 +557,7 @@ static int adsp_probe(struct platform_device *pdev)
+ detach_proxy_pds:
+ adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ free_rproc:
++ device_init_wakeup(adsp->dev, false);
+ rproc_free(rproc);
+
+ return ret;
+@@ -572,6 +574,8 @@ static int adsp_remove(struct platform_device *pdev)
+ qcom_remove_sysmon_subdev(adsp->sysmon);
+ qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev);
+ qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
++ adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
++ device_init_wakeup(adsp->dev, false);
+ rproc_free(adsp->rproc);
+
+ return 0;
+diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
+index bb0947f7770ea..ba24d745b2d65 100644
+--- a/drivers/remoteproc/qcom_q6v5_wcss.c
++++ b/drivers/remoteproc/qcom_q6v5_wcss.c
+@@ -351,7 +351,7 @@ static int q6v5_wcss_qcs404_power_on(struct q6v5_wcss *wcss)
+ if (ret) {
+ dev_err(wcss->dev,
+ "xo cbcr enabling timed out (rc:%d)\n", ret);
+- return ret;
++ goto disable_xo_cbcr_clk;
+ }
+
+ writel(0, wcss->reg_base + Q6SS_CGC_OVERRIDE);
+@@ -417,6 +417,7 @@ disable_sleep_cbcr_clk:
+ val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val &= ~Q6SS_CLK_ENABLE;
+ writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
++disable_xo_cbcr_clk:
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val &= ~Q6SS_CLK_ENABLE;
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+@@ -827,6 +828,9 @@ static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss,
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
++ if (!res)
++ return -EINVAL;
++
+ wcss->reg_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!wcss->reg_base)
+diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
+index 57dde2a69b9dd..15af52f8499eb 100644
+--- a/drivers/remoteproc/qcom_sysmon.c
++++ b/drivers/remoteproc/qcom_sysmon.c
+@@ -652,7 +652,9 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
+ if (sysmon->shutdown_irq != -ENODATA) {
+ dev_err(sysmon->dev,
+ "failed to retrieve shutdown-ack IRQ\n");
+- return ERR_PTR(sysmon->shutdown_irq);
++ ret = sysmon->shutdown_irq;
++ kfree(sysmon);
++ return ERR_PTR(ret);
+ }
+ } else {
+ ret = devm_request_threaded_irq(sysmon->dev,
+@@ -663,6 +665,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
+ if (ret) {
+ dev_err(sysmon->dev,
+ "failed to acquire shutdown-ack IRQ\n");
++ kfree(sysmon);
+ return ERR_PTR(ret);
+ }
+ }
+diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
+index 8768cb64f560c..cb1d414a23896 100644
+--- a/drivers/remoteproc/remoteproc_core.c
++++ b/drivers/remoteproc/remoteproc_core.c
+@@ -509,7 +509,13 @@ static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
+ rvdev_data.rsc_offset = offset;
+ rvdev_data.rsc = rsc;
+
+- pdev = platform_device_register_data(dev, "rproc-virtio", rvdev_data.index, &rvdev_data,
++ /*
++ * When there is more than one remote processor, rproc->nb_vdev number is
++ * same for each separate instances of "rproc". If rvdev_data.index is used
++ * as device id, then we get duplication in sysfs, so need to use
++ * PLATFORM_DEVID_AUTO to auto select device id.
++ */
++ pdev = platform_device_register_data(dev, "rproc-virtio", PLATFORM_DEVID_AUTO, &rvdev_data,
+ sizeof(rvdev_data));
+ if (IS_ERR(pdev)) {
+ dev_err(dev, "failed to create rproc-virtio device\n");
+diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
+index e48223c00c672..e5b7b48cffac0 100644
+--- a/drivers/rtc/class.c
++++ b/drivers/rtc/class.c
+@@ -374,11 +374,11 @@ struct rtc_device *devm_rtc_allocate_device(struct device *dev)
+
+ rtc->id = id;
+ rtc->dev.parent = dev;
+- err = dev_set_name(&rtc->dev, "rtc%d", id);
++ err = devm_add_action_or_reset(dev, devm_rtc_release_device, rtc);
+ if (err)
+ return ERR_PTR(err);
+
+- err = devm_add_action_or_reset(dev, devm_rtc_release_device, rtc);
++ err = dev_set_name(&rtc->dev, "rtc%d", id);
+ if (err)
+ return ERR_PTR(err);
+
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 58cc2bae2f8a0..00e2ca7374ecf 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -744,6 +744,168 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
+ return IRQ_NONE;
+ }
+
++#ifdef CONFIG_ACPI
++
++#include <linux/acpi.h>
++
++static u32 rtc_handler(void *context)
++{
++ struct device *dev = context;
++ struct cmos_rtc *cmos = dev_get_drvdata(dev);
++ unsigned char rtc_control = 0;
++ unsigned char rtc_intr;
++ unsigned long flags;
++
++
++ /*
++ * Always update rtc irq when ACPI is used as RTC Alarm.
++ * Or else, ACPI SCI is enabled during suspend/resume only,
++ * update rtc irq in that case.
++ */
++ if (cmos_use_acpi_alarm())
++ cmos_interrupt(0, (void *)cmos->rtc);
++ else {
++ /* Fix me: can we use cmos_interrupt() here as well? */
++ spin_lock_irqsave(&rtc_lock, flags);
++ if (cmos_rtc.suspend_ctrl)
++ rtc_control = CMOS_READ(RTC_CONTROL);
++ if (rtc_control & RTC_AIE) {
++ cmos_rtc.suspend_ctrl &= ~RTC_AIE;
++ CMOS_WRITE(rtc_control, RTC_CONTROL);
++ rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
++ rtc_update_irq(cmos->rtc, 1, rtc_intr);
++ }
++ spin_unlock_irqrestore(&rtc_lock, flags);
++ }
++
++ pm_wakeup_hard_event(dev);
++ acpi_clear_event(ACPI_EVENT_RTC);
++ acpi_disable_event(ACPI_EVENT_RTC, 0);
++ return ACPI_INTERRUPT_HANDLED;
++}
++
++static void acpi_rtc_event_setup(struct device *dev)
++{
++ if (acpi_disabled)
++ return;
++
++ acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
++ /*
++ * After the RTC handler is installed, the Fixed_RTC event should
++ * be disabled. Only when the RTC alarm is set will it be enabled.
++ */
++ acpi_clear_event(ACPI_EVENT_RTC);
++ acpi_disable_event(ACPI_EVENT_RTC, 0);
++}
++
++static void acpi_rtc_event_cleanup(void)
++{
++ if (acpi_disabled)
++ return;
++
++ acpi_remove_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler);
++}
++
++static void rtc_wake_on(struct device *dev)
++{
++ acpi_clear_event(ACPI_EVENT_RTC);
++ acpi_enable_event(ACPI_EVENT_RTC, 0);
++}
++
++static void rtc_wake_off(struct device *dev)
++{
++ acpi_disable_event(ACPI_EVENT_RTC, 0);
++}
++
++#ifdef CONFIG_X86
++/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */
++static void use_acpi_alarm_quirks(void)
++{
++ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
++ return;
++
++ if (!is_hpet_enabled())
++ return;
++
++ if (dmi_get_bios_year() < 2015)
++ return;
++
++ use_acpi_alarm = true;
++}
++#else
++static inline void use_acpi_alarm_quirks(void) { }
++#endif
++
++static void acpi_cmos_wake_setup(struct device *dev)
++{
++ if (acpi_disabled)
++ return;
++
++ use_acpi_alarm_quirks();
++
++ cmos_rtc.wake_on = rtc_wake_on;
++ cmos_rtc.wake_off = rtc_wake_off;
++
++ /* ACPI tables bug workaround. */
++ if (acpi_gbl_FADT.month_alarm && !acpi_gbl_FADT.day_alarm) {
++ dev_dbg(dev, "bogus FADT month_alarm (%d)\n",
++ acpi_gbl_FADT.month_alarm);
++ acpi_gbl_FADT.month_alarm = 0;
++ }
++
++ cmos_rtc.day_alrm = acpi_gbl_FADT.day_alarm;
++ cmos_rtc.mon_alrm = acpi_gbl_FADT.month_alarm;
++ cmos_rtc.century = acpi_gbl_FADT.century;
++
++ if (acpi_gbl_FADT.flags & ACPI_FADT_S4_RTC_WAKE)
++ dev_info(dev, "RTC can wake from S4\n");
++
++ /* RTC always wakes from S1/S2/S3, and often S4/STD */
++ device_init_wakeup(dev, 1);
++}
++
++static void cmos_check_acpi_rtc_status(struct device *dev,
++ unsigned char *rtc_control)
++{
++ struct cmos_rtc *cmos = dev_get_drvdata(dev);
++ acpi_event_status rtc_status;
++ acpi_status status;
++
++ if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC)
++ return;
++
++ status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status);
++ if (ACPI_FAILURE(status)) {
++ dev_err(dev, "Could not get RTC status\n");
++ } else if (rtc_status & ACPI_EVENT_FLAG_SET) {
++ unsigned char mask;
++ *rtc_control &= ~RTC_AIE;
++ CMOS_WRITE(*rtc_control, RTC_CONTROL);
++ mask = CMOS_READ(RTC_INTR_FLAGS);
++ rtc_update_irq(cmos->rtc, 1, mask);
++ }
++}
++
++#else /* !CONFIG_ACPI */
++
++static inline void acpi_rtc_event_setup(struct device *dev)
++{
++}
++
++static inline void acpi_rtc_event_cleanup(void)
++{
++}
++
++static inline void acpi_cmos_wake_setup(struct device *dev)
++{
++}
++
++static inline void cmos_check_acpi_rtc_status(struct device *dev,
++ unsigned char *rtc_control)
++{
++}
++#endif /* CONFIG_ACPI */
++
+ #ifdef CONFIG_PNP
+ #define INITSECTION
+
+@@ -827,19 +989,27 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
+ if (info->address_space)
+ address_space = info->address_space;
+
+- if (info->rtc_day_alarm && info->rtc_day_alarm < 128)
+- cmos_rtc.day_alrm = info->rtc_day_alarm;
+- if (info->rtc_mon_alarm && info->rtc_mon_alarm < 128)
+- cmos_rtc.mon_alrm = info->rtc_mon_alarm;
+- if (info->rtc_century && info->rtc_century < 128)
+- cmos_rtc.century = info->rtc_century;
++ cmos_rtc.day_alrm = info->rtc_day_alarm;
++ cmos_rtc.mon_alrm = info->rtc_mon_alarm;
++ cmos_rtc.century = info->rtc_century;
+
+ if (info->wake_on && info->wake_off) {
+ cmos_rtc.wake_on = info->wake_on;
+ cmos_rtc.wake_off = info->wake_off;
+ }
++ } else {
++ acpi_cmos_wake_setup(dev);
+ }
+
++ if (cmos_rtc.day_alrm >= 128)
++ cmos_rtc.day_alrm = 0;
++
++ if (cmos_rtc.mon_alrm >= 128)
++ cmos_rtc.mon_alrm = 0;
++
++ if (cmos_rtc.century >= 128)
++ cmos_rtc.century = 0;
++
+ cmos_rtc.dev = dev;
+ dev_set_drvdata(dev, &cmos_rtc);
+
+@@ -928,6 +1098,13 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
+ nvmem_cfg.size = address_space - NVRAM_OFFSET;
+ devm_rtc_nvmem_register(cmos_rtc.rtc, &nvmem_cfg);
+
++ /*
++ * Everything has gone well so far, so by default register a handler for
++ * the ACPI RTC fixed event.
++ */
++ if (!info)
++ acpi_rtc_event_setup(dev);
++
+ dev_info(dev, "%s%s, %d bytes nvram%s\n",
+ !is_valid_irq(rtc_irq) ? "no alarms" :
+ cmos_rtc.mon_alrm ? "alarms up to one year" :
+@@ -973,6 +1150,9 @@ static void cmos_do_remove(struct device *dev)
+ hpet_unregister_irq_handler(cmos_interrupt);
+ }
+
++ if (!dev_get_platdata(dev))
++ acpi_rtc_event_cleanup();
++
+ cmos->rtc = NULL;
+
+ ports = cmos->iomem;
+@@ -1122,9 +1302,6 @@ static void cmos_check_wkalrm(struct device *dev)
+ }
+ }
+
+-static void cmos_check_acpi_rtc_status(struct device *dev,
+- unsigned char *rtc_control);
+-
+ static int __maybe_unused cmos_resume(struct device *dev)
+ {
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+@@ -1191,175 +1368,13 @@ static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
+ * predate even PNPBIOS should set up platform_bus devices.
+ */
+
+-#ifdef CONFIG_ACPI
+-
+-#include <linux/acpi.h>
+-
+-static u32 rtc_handler(void *context)
+-{
+- struct device *dev = context;
+- struct cmos_rtc *cmos = dev_get_drvdata(dev);
+- unsigned char rtc_control = 0;
+- unsigned char rtc_intr;
+- unsigned long flags;
+-
+-
+- /*
+- * Always update rtc irq when ACPI is used as RTC Alarm.
+- * Or else, ACPI SCI is enabled during suspend/resume only,
+- * update rtc irq in that case.
+- */
+- if (cmos_use_acpi_alarm())
+- cmos_interrupt(0, (void *)cmos->rtc);
+- else {
+- /* Fix me: can we use cmos_interrupt() here as well? */
+- spin_lock_irqsave(&rtc_lock, flags);
+- if (cmos_rtc.suspend_ctrl)
+- rtc_control = CMOS_READ(RTC_CONTROL);
+- if (rtc_control & RTC_AIE) {
+- cmos_rtc.suspend_ctrl &= ~RTC_AIE;
+- CMOS_WRITE(rtc_control, RTC_CONTROL);
+- rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
+- rtc_update_irq(cmos->rtc, 1, rtc_intr);
+- }
+- spin_unlock_irqrestore(&rtc_lock, flags);
+- }
+-
+- pm_wakeup_hard_event(dev);
+- acpi_clear_event(ACPI_EVENT_RTC);
+- acpi_disable_event(ACPI_EVENT_RTC, 0);
+- return ACPI_INTERRUPT_HANDLED;
+-}
+-
+-static inline void rtc_wake_setup(struct device *dev)
+-{
+- if (acpi_disabled)
+- return;
+-
+- acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
+- /*
+- * After the RTC handler is installed, the Fixed_RTC event should
+- * be disabled. Only when the RTC alarm is set will it be enabled.
+- */
+- acpi_clear_event(ACPI_EVENT_RTC);
+- acpi_disable_event(ACPI_EVENT_RTC, 0);
+-}
+-
+-static void rtc_wake_on(struct device *dev)
+-{
+- acpi_clear_event(ACPI_EVENT_RTC);
+- acpi_enable_event(ACPI_EVENT_RTC, 0);
+-}
+-
+-static void rtc_wake_off(struct device *dev)
+-{
+- acpi_disable_event(ACPI_EVENT_RTC, 0);
+-}
+-
+-#ifdef CONFIG_X86
+-/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */
+-static void use_acpi_alarm_quirks(void)
+-{
+- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+- return;
+-
+- if (!is_hpet_enabled())
+- return;
+-
+- if (dmi_get_bios_year() < 2015)
+- return;
+-
+- use_acpi_alarm = true;
+-}
+-#else
+-static inline void use_acpi_alarm_quirks(void) { }
+-#endif
+-
+-/* Every ACPI platform has a mc146818 compatible "cmos rtc". Here we find
+- * its device node and pass extra config data. This helps its driver use
+- * capabilities that the now-obsolete mc146818 didn't have, and informs it
+- * that this board's RTC is wakeup-capable (per ACPI spec).
+- */
+-static struct cmos_rtc_board_info acpi_rtc_info;
+-
+-static void cmos_wake_setup(struct device *dev)
+-{
+- if (acpi_disabled)
+- return;
+-
+- use_acpi_alarm_quirks();
+-
+- acpi_rtc_info.wake_on = rtc_wake_on;
+- acpi_rtc_info.wake_off = rtc_wake_off;
+-
+- /* workaround bug in some ACPI tables */
+- if (acpi_gbl_FADT.month_alarm && !acpi_gbl_FADT.day_alarm) {
+- dev_dbg(dev, "bogus FADT month_alarm (%d)\n",
+- acpi_gbl_FADT.month_alarm);
+- acpi_gbl_FADT.month_alarm = 0;
+- }
+-
+- acpi_rtc_info.rtc_day_alarm = acpi_gbl_FADT.day_alarm;
+- acpi_rtc_info.rtc_mon_alarm = acpi_gbl_FADT.month_alarm;
+- acpi_rtc_info.rtc_century = acpi_gbl_FADT.century;
+-
+- /* NOTE: S4_RTC_WAKE is NOT currently useful to Linux */
+- if (acpi_gbl_FADT.flags & ACPI_FADT_S4_RTC_WAKE)
+- dev_info(dev, "RTC can wake from S4\n");
+-
+- dev->platform_data = &acpi_rtc_info;
+-
+- /* RTC always wakes from S1/S2/S3, and often S4/STD */
+- device_init_wakeup(dev, 1);
+-}
+-
+-static void cmos_check_acpi_rtc_status(struct device *dev,
+- unsigned char *rtc_control)
+-{
+- struct cmos_rtc *cmos = dev_get_drvdata(dev);
+- acpi_event_status rtc_status;
+- acpi_status status;
+-
+- if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC)
+- return;
+-
+- status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status);
+- if (ACPI_FAILURE(status)) {
+- dev_err(dev, "Could not get RTC status\n");
+- } else if (rtc_status & ACPI_EVENT_FLAG_SET) {
+- unsigned char mask;
+- *rtc_control &= ~RTC_AIE;
+- CMOS_WRITE(*rtc_control, RTC_CONTROL);
+- mask = CMOS_READ(RTC_INTR_FLAGS);
+- rtc_update_irq(cmos->rtc, 1, mask);
+- }
+-}
+-
+-#else
+-
+-static void cmos_wake_setup(struct device *dev)
+-{
+-}
+-
+-static void cmos_check_acpi_rtc_status(struct device *dev,
+- unsigned char *rtc_control)
+-{
+-}
+-
+-static void rtc_wake_setup(struct device *dev)
+-{
+-}
+-#endif
+-
+ #ifdef CONFIG_PNP
+
+ #include <linux/pnp.h>
+
+ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
+ {
+- int irq, ret;
+-
+- cmos_wake_setup(&pnp->dev);
++ int irq;
+
+ if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) {
+ irq = 0;
+@@ -1375,13 +1390,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
+ irq = pnp_irq(pnp, 0);
+ }
+
+- ret = cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
+- if (ret)
+- return ret;
+-
+- rtc_wake_setup(&pnp->dev);
+-
+- return 0;
++ return cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
+ }
+
+ static void cmos_pnp_remove(struct pnp_dev *pnp)
+@@ -1465,10 +1474,9 @@ static inline void cmos_of_init(struct platform_device *pdev) {}
+ static int __init cmos_platform_probe(struct platform_device *pdev)
+ {
+ struct resource *resource;
+- int irq, ret;
++ int irq;
+
+ cmos_of_init(pdev);
+- cmos_wake_setup(&pdev->dev);
+
+ if (RTC_IOMAPPED)
+ resource = platform_get_resource(pdev, IORESOURCE_IO, 0);
+@@ -1478,13 +1486,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev)
+ if (irq < 0)
+ irq = -1;
+
+- ret = cmos_do_probe(&pdev->dev, resource, irq);
+- if (ret)
+- return ret;
+-
+- rtc_wake_setup(&pdev->dev);
+-
+- return 0;
++ return cmos_do_probe(&pdev->dev, resource, irq);
+ }
+
+ static int cmos_platform_remove(struct platform_device *pdev)
+diff --git a/drivers/rtc/rtc-msc313.c b/drivers/rtc/rtc-msc313.c
+index f3fde013c4b8b..8d7737e0e2e02 100644
+--- a/drivers/rtc/rtc-msc313.c
++++ b/drivers/rtc/rtc-msc313.c
+@@ -212,22 +212,12 @@ static int msc313_rtc_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- clk = devm_clk_get(dev, NULL);
++ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "No input reference clock\n");
+ return PTR_ERR(clk);
+ }
+
+- ret = clk_prepare_enable(clk);
+- if (ret) {
+- dev_err(dev, "Failed to enable the reference clock, %d\n", ret);
+- return ret;
+- }
+-
+- ret = devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare, clk);
+- if (ret)
+- return ret;
+-
+ rate = clk_get_rate(clk);
+ writew(rate & 0xFFFF, priv->rtc_base + REG_RTC_FREQ_CW_L);
+ writew((rate >> 16) & 0xFFFF, priv->rtc_base + REG_RTC_FREQ_CW_H);
+diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c
+index 5e03834016294..f6d2ad91ff7a9 100644
+--- a/drivers/rtc/rtc-mxc_v2.c
++++ b/drivers/rtc/rtc-mxc_v2.c
+@@ -336,8 +336,10 @@ static int mxc_rtc_probe(struct platform_device *pdev)
+ }
+
+ pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
+- if (IS_ERR(pdata->rtc))
++ if (IS_ERR(pdata->rtc)) {
++ clk_disable_unprepare(pdata->clk);
+ return PTR_ERR(pdata->rtc);
++ }
+
+ pdata->rtc->ops = &mxc_rtc_ops;
+ pdata->rtc->range_max = U32_MAX;
+diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
+index 63b275b014bd6..87f4fc9df68b4 100644
+--- a/drivers/rtc/rtc-pcf2127.c
++++ b/drivers/rtc/rtc-pcf2127.c
+@@ -885,9 +885,17 @@ static const struct regmap_bus pcf2127_i2c_regmap = {
+
+ static struct i2c_driver pcf2127_i2c_driver;
+
+-static int pcf2127_i2c_probe(struct i2c_client *client,
+- const struct i2c_device_id *id)
++static const struct i2c_device_id pcf2127_i2c_id[] = {
++ { "pcf2127", 1 },
++ { "pcf2129", 0 },
++ { "pca2129", 0 },
++ { }
++};
++MODULE_DEVICE_TABLE(i2c, pcf2127_i2c_id);
++
++static int pcf2127_i2c_probe(struct i2c_client *client)
+ {
++ const struct i2c_device_id *id = i2c_match_id(pcf2127_i2c_id, client);
+ struct regmap *regmap;
+ static const struct regmap_config config = {
+ .reg_bits = 8,
+@@ -910,20 +918,12 @@ static int pcf2127_i2c_probe(struct i2c_client *client,
+ pcf2127_i2c_driver.driver.name, id->driver_data);
+ }
+
+-static const struct i2c_device_id pcf2127_i2c_id[] = {
+- { "pcf2127", 1 },
+- { "pcf2129", 0 },
+- { "pca2129", 0 },
+- { }
+-};
+-MODULE_DEVICE_TABLE(i2c, pcf2127_i2c_id);
+-
+ static struct i2c_driver pcf2127_i2c_driver = {
+ .driver = {
+ .name = "rtc-pcf2127-i2c",
+ .of_match_table = of_match_ptr(pcf2127_of_match),
+ },
+- .probe = pcf2127_i2c_probe,
++ .probe_new = pcf2127_i2c_probe,
+ .id_table = pcf2127_i2c_id,
+ };
+
+diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
+index 095891999da11..754e03984f986 100644
+--- a/drivers/rtc/rtc-pcf85063.c
++++ b/drivers/rtc/rtc-pcf85063.c
+@@ -169,10 +169,10 @@ static int pcf85063_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+ if (ret)
+ return ret;
+
+- alrm->time.tm_sec = bcd2bin(buf[0]);
+- alrm->time.tm_min = bcd2bin(buf[1]);
+- alrm->time.tm_hour = bcd2bin(buf[2]);
+- alrm->time.tm_mday = bcd2bin(buf[3]);
++ alrm->time.tm_sec = bcd2bin(buf[0] & 0x7f);
++ alrm->time.tm_min = bcd2bin(buf[1] & 0x7f);
++ alrm->time.tm_hour = bcd2bin(buf[2] & 0x3f);
++ alrm->time.tm_mday = bcd2bin(buf[3] & 0x3f);
+
+ ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &val);
+ if (ret)
+@@ -424,7 +424,7 @@ static int pcf85063_clkout_control(struct clk_hw *hw, bool enable)
+ unsigned int buf;
+ int ret;
+
+- ret = regmap_read(pcf85063->regmap, PCF85063_REG_OFFSET, &buf);
++ ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &buf);
+ if (ret < 0)
+ return ret;
+ buf &= PCF85063_REG_CLKO_F_MASK;
+diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
+index 7fb9145c43bd5..fa351ac201587 100644
+--- a/drivers/rtc/rtc-pic32.c
++++ b/drivers/rtc/rtc-pic32.c
+@@ -324,16 +324,16 @@ static int pic32_rtc_probe(struct platform_device *pdev)
+
+ spin_lock_init(&pdata->alarm_lock);
+
++ pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
++ if (IS_ERR(pdata->rtc))
++ return PTR_ERR(pdata->rtc);
++
+ clk_prepare_enable(pdata->clk);
+
+ pic32_rtc_enable(pdata, 1);
+
+ device_init_wakeup(&pdev->dev, 1);
+
+- pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
+- if (IS_ERR(pdata->rtc))
+- return PTR_ERR(pdata->rtc);
+-
+ pdata->rtc->ops = &pic32_rtcops;
+ pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ pdata->rtc->range_max = RTC_TIMESTAMP_END_2099;
+diff --git a/drivers/rtc/rtc-rzn1.c b/drivers/rtc/rtc-rzn1.c
+index ac788799c8e3e..0d36bc50197c1 100644
+--- a/drivers/rtc/rtc-rzn1.c
++++ b/drivers/rtc/rtc-rzn1.c
+@@ -355,7 +355,9 @@ static int rzn1_rtc_probe(struct platform_device *pdev)
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtcdev->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtcdev->features);
+
+- devm_pm_runtime_enable(&pdev->dev);
++ ret = devm_pm_runtime_enable(&pdev->dev);
++ if (ret < 0)
++ return ret;
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;
+diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
+index bd929b0e7d7de..d82acf1af1fae 100644
+--- a/drivers/rtc/rtc-snvs.c
++++ b/drivers/rtc/rtc-snvs.c
+@@ -32,6 +32,14 @@
+ #define SNVS_LPPGDR_INIT 0x41736166
+ #define CNTR_TO_SECS_SH 15
+
++/* The maximum RTC clock cycles that are allowed to pass between two
++ * consecutive clock counter register reads. If the values are corrupted a
++ * bigger difference is expected. The RTC frequency is 32kHz. With 320 cycles
++ * we end up at 10 ms, which should be enough for most cases. If it ever
++ * takes longer than expected, we do a retry.
++ */
++#define MAX_RTC_READ_DIFF_CYCLES 320
++
+ struct snvs_rtc_data {
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+@@ -56,6 +64,7 @@ static u64 rtc_read_lpsrt(struct snvs_rtc_data *data)
+ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
+ {
+ u64 read1, read2;
++ s64 diff;
+ unsigned int timeout = 100;
+
+ /* As expected, the registers might update between the read of the LSB
+@@ -66,7 +75,8 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
+ do {
+ read2 = read1;
+ read1 = rtc_read_lpsrt(data);
+- } while (read1 != read2 && --timeout);
++ diff = read1 - read2;
++ } while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout);
+ if (!timeout)
+ dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
+
+@@ -78,13 +88,15 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
+ static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb)
+ {
+ u32 count1, count2;
++ s32 diff;
+ unsigned int timeout = 100;
+
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+ do {
+ count2 = count1;
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+- } while (count1 != count2 && --timeout);
++ diff = count1 - count2;
++ } while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout);
+ if (!timeout) {
+ dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
+ return -ETIMEDOUT;
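/*
 * Editor's note — illustrative sketch, not part of the patch above. The
 * rtc-snvs hunks replace "two identical back-to-back reads" with "two reads
 * whose difference stays within a small bound" (320 cycles, about 10 ms at
 * 32 kHz). The same loop in standalone C; read_counter() is an invented
 * stand-in for the two-register hardware read.
 */
#include <stdbool.h>
#include <stdint.h>

#define MAX_READ_DIFF_CYCLES 320

extern uint64_t read_counter(void);	/* assumed platform-provided raw read */

static bool read_counter_stable(uint64_t *out)
{
	uint64_t read1, read2;
	int64_t diff;
	unsigned int timeout = 100;

	read1 = read_counter();
	do {
		read2 = read1;
		read1 = read_counter();
		diff = (int64_t)(read1 - read2);
	} while ((diff < 0 || diff > MAX_READ_DIFF_CYCLES) && --timeout);

	if (!timeout)
		return false;	/* never saw two consistent reads */

	*out = read1;
	return true;
}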
+diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
+index bdb20f63254e2..0f8e4231098ef 100644
+--- a/drivers/rtc/rtc-st-lpc.c
++++ b/drivers/rtc/rtc-st-lpc.c
+@@ -238,6 +238,7 @@ static int st_rtc_probe(struct platform_device *pdev)
+
+ rtc->clkrate = clk_get_rate(rtc->clk);
+ if (!rtc->clkrate) {
++ clk_disable_unprepare(rtc->clk);
+ dev_err(&pdev->dev, "Unable to fetch clock rate\n");
+ return -EINVAL;
+ }
+diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
+index 37b551bd43bff..bdfab9ea00464 100644
+--- a/drivers/s390/net/ctcm_main.c
++++ b/drivers/s390/net/ctcm_main.c
+@@ -825,16 +825,9 @@ done:
+ /*
+ * Start transmission of a packet.
+ * Called from generic network device layer.
+- *
+- * skb Pointer to buffer containing the packet.
+- * dev Pointer to interface struct.
+- *
+- * returns 0 if packet consumed, !0 if packet rejected.
+- * Note: If we return !0, then the packet is free'd by
+- * the generic network layer.
+ */
+ /* first merge version - leaving both functions separated */
+-static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t ctcm_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct ctcm_priv *priv = dev->ml_priv;
+
+@@ -877,7 +870,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
+ }
+
+ /* unmerged MPC variant of ctcm_tx */
+-static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+ int len = 0;
+ struct ctcm_priv *priv = dev->ml_priv;
+diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
+index 84c8981317b46..38f312664ce72 100644
+--- a/drivers/s390/net/lcs.c
++++ b/drivers/s390/net/lcs.c
+@@ -1519,9 +1519,8 @@ lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
+ /*
+ * Packet transmit function called by network stack
+ */
+-static int
+-__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
+- struct net_device *dev)
++static netdev_tx_t __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
++ struct net_device *dev)
+ {
+ struct lcs_header *header;
+ int rc = NETDEV_TX_OK;
+@@ -1582,8 +1581,7 @@ out:
+ return rc;
+ }
+
+-static int
+-lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct lcs_card *card;
+ int rc;
+diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
+index 65aa0a96c21de..66076cada8ae4 100644
+--- a/drivers/s390/net/netiucv.c
++++ b/drivers/s390/net/netiucv.c
+@@ -1248,15 +1248,8 @@ static int netiucv_close(struct net_device *dev)
+ /*
+ * Start transmission of a packet.
+ * Called from generic network device layer.
+- *
+- * @param skb Pointer to buffer containing the packet.
+- * @param dev Pointer to interface struct.
+- *
+- * @return 0 if packet consumed, !0 if packet rejected.
+- * Note: If we return !0, then the packet is free'd by
+- * the generic network layer.
+ */
+-static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t netiucv_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct netiucv_priv *privptr = netdev_priv(dev);
+ int rc;
+diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
+index b08fc8839808d..49fd2cfed70c7 100644
+--- a/drivers/scsi/elx/efct/efct_driver.c
++++ b/drivers/scsi/elx/efct/efct_driver.c
+@@ -42,6 +42,7 @@ efct_device_init(void)
+
+ rc = efct_scsi_reg_fc_transport();
+ if (rc) {
++ efct_scsi_tgt_driver_exit();
+ pr_err("failed to register to FC host\n");
+ return rc;
+ }
+diff --git a/drivers/scsi/elx/libefc/efclib.h b/drivers/scsi/elx/libefc/efclib.h
+index dde20891c2dd7..57e3386128127 100644
+--- a/drivers/scsi/elx/libefc/efclib.h
++++ b/drivers/scsi/elx/libefc/efclib.h
+@@ -58,10 +58,12 @@ enum efc_node_send_ls_acc {
+ #define EFC_LINK_STATUS_UP 0
+ #define EFC_LINK_STATUS_DOWN 1
+
++enum efc_sm_event;
++
+ /* State machine context header */
+ struct efc_sm_ctx {
+ void (*current_state)(struct efc_sm_ctx *ctx,
+- u32 evt, void *arg);
++ enum efc_sm_event evt, void *arg);
+
+ const char *description;
+ void *app;
+@@ -365,7 +367,7 @@ struct efc_node {
+ int prev_evt;
+
+ void (*nodedb_state)(struct efc_sm_ctx *ctx,
+- u32 evt, void *arg);
++ enum efc_sm_event evt, void *arg);
+ struct timer_list gidpt_delay_timer;
+ u64 time_last_gidpt_msec;
+
+diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
+index 6ec296321ffc1..38774a272e627 100644
+--- a/drivers/scsi/fcoe/fcoe.c
++++ b/drivers/scsi/fcoe/fcoe.c
+@@ -2491,6 +2491,7 @@ static int __init fcoe_init(void)
+
+ out_free:
+ mutex_unlock(&fcoe_config_mutex);
++ fcoe_transport_detach(&fcoe_sw_transport);
+ out_destroy:
+ destroy_workqueue(fcoe_wq);
+ return rc;
+diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
+index af658aa38fedf..6260aa5ea6af8 100644
+--- a/drivers/scsi/fcoe/fcoe_sysfs.c
++++ b/drivers/scsi/fcoe/fcoe_sysfs.c
+@@ -830,14 +830,15 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+
+ dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
+ error = device_register(&ctlr->dev);
+- if (error)
+- goto out_del_q2;
++ if (error) {
++ destroy_workqueue(ctlr->devloss_work_q);
++ destroy_workqueue(ctlr->work_q);
++ put_device(&ctlr->dev);
++ return NULL;
++ }
+
+ return ctlr;
+
+-out_del_q2:
+- destroy_workqueue(ctlr->devloss_work_q);
+- ctlr->devloss_work_q = NULL;
+ out_del_q:
+ destroy_workqueue(ctlr->work_q);
+ ctlr->work_q = NULL;
+@@ -1036,16 +1037,16 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
+ fcf->selected = new_fcf->selected;
+
+ error = device_register(&fcf->dev);
+- if (error)
+- goto out_del;
++ if (error) {
++ put_device(&fcf->dev);
++ goto out;
++ }
+
+ fcf->state = FCOE_FCF_STATE_CONNECTED;
+ list_add_tail(&fcf->peers, &ctlr->fcfs);
+
+ return fcf;
+
+-out_del:
+- kfree(fcf);
+ out:
+ return NULL;
+ }
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index f8e832b1bc46a..4dbf51e2623ad 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -8925,7 +8925,7 @@ clean1: /* wq/aer/h */
+ destroy_workqueue(h->monitor_ctlr_wq);
+ h->monitor_ctlr_wq = NULL;
+ }
+- kfree(h);
++ hpda_free_ctlr_info(h);
+ return rc;
+ }
+
+@@ -9786,7 +9786,8 @@ static int hpsa_add_sas_host(struct ctlr_info *h)
+ return 0;
+
+ free_sas_phy:
+- hpsa_free_sas_phy(hpsa_sas_phy);
++ sas_phy_free(hpsa_sas_phy->phy);
++ kfree(hpsa_sas_phy);
+ free_sas_port:
+ hpsa_free_sas_port(hpsa_sas_port);
+ free_sas_node:
+@@ -9822,10 +9823,12 @@ static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
+
+ rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
+ if (rc)
+- goto free_sas_port;
++ goto free_sas_rphy;
+
+ return 0;
+
++free_sas_rphy:
++ sas_rphy_free(rphy);
+ free_sas_port:
+ hpsa_free_sas_port(hpsa_sas_port);
+ device->sas_port = NULL;
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index 9d01a3e3c26aa..2022ffb450417 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -10872,11 +10872,19 @@ static struct notifier_block ipr_notifier = {
+ **/
+ static int __init ipr_init(void)
+ {
++ int rc;
++
+ ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
+ IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
+
+ register_reboot_notifier(&ipr_notifier);
+- return pci_register_driver(&ipr_driver);
++ rc = pci_register_driver(&ipr_driver);
++ if (rc) {
++ unregister_reboot_notifier(&ipr_notifier);
++ return rc;
++ }
++
++ return 0;
+ }
+
+ /**
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 99d06dc7ddf6b..21c52154626f1 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -8150,10 +8150,10 @@ u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
+ "IO_cnt", "Info", "BWutil(ms)");
+ }
+
+- /* Needs to be _bh because record is called from timer interrupt
++ /* Needs to be _irq because record is called from timer interrupt
+ * context
+ */
+- spin_lock_bh(ring_lock);
++ spin_lock_irq(ring_lock);
+ while (*head_idx != *tail_idx) {
+ entry = &ring[*head_idx];
+
+@@ -8197,7 +8197,7 @@ u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
+ if (cnt >= max_read_entries)
+ break;
+ }
+- spin_unlock_bh(ring_lock);
++ spin_unlock_irq(ring_lock);
+
+ return cnt;
+ }
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+index 0681daee6c149..e5ecd6ada6cdd 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+@@ -829,6 +829,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ if ((sas_rphy_add(rphy))) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
++ sas_rphy_free(rphy);
++ rphy = NULL;
+ }
+
+ if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 802eec6407d9a..a26a373be9da3 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -5136,17 +5136,17 @@ struct secure_flash_update_block_pk {
+ (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
+ test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
+
+-#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
+- atomic_inc(&__vha->vref_count); \
+- mb(); \
+- if (__vha->flags.delete_progress) { \
+- atomic_dec(&__vha->vref_count); \
+- wake_up(&__vha->vref_waitq); \
+- __bail = 1; \
+- } else { \
+- __bail = 0; \
+- } \
+-} while (0)
++static inline bool qla_vha_mark_busy(scsi_qla_host_t *vha)
++{
++ atomic_inc(&vha->vref_count);
++ mb();
++ if (vha->flags.delete_progress) {
++ atomic_dec(&vha->vref_count);
++ wake_up(&vha->vref_waitq);
++ return true;
++ }
++ return false;
++}
+
+ #define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
+ atomic_dec(&__vha->vref_count); \
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index e12db95de6883..432f47fc5e1f3 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -110,6 +110,7 @@ static void qla24xx_abort_iocb_timeout(void *data)
+ struct qla_qpair *qpair = sp->qpair;
+ u32 handle;
+ unsigned long flags;
++ int sp_found = 0, cmdsp_found = 0;
+
+ if (sp->cmd_sp)
+ ql_dbg(ql_dbg_async, sp->vha, 0x507c,
+@@ -124,18 +125,21 @@ static void qla24xx_abort_iocb_timeout(void *data)
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
+ if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
+- sp->cmd_sp))
++ sp->cmd_sp)) {
+ qpair->req->outstanding_cmds[handle] = NULL;
++ cmdsp_found = 1;
++ }
+
+ /* removing the abort */
+ if (qpair->req->outstanding_cmds[handle] == sp) {
+ qpair->req->outstanding_cmds[handle] = NULL;
++ sp_found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+- if (sp->cmd_sp) {
++ if (cmdsp_found && sp->cmd_sp) {
+ /*
+ * This done function should take care of
+ * original command ref: INIT
+@@ -143,8 +147,10 @@ static void qla24xx_abort_iocb_timeout(void *data)
+ sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
+ }
+
+- abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
+- sp->done(sp, QLA_OS_TIMER_EXPIRED);
++ if (sp_found) {
++ abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
++ sp->done(sp, QLA_OS_TIMER_EXPIRED);
++ }
+ }
+
+ static void qla24xx_abort_sp_done(srb_t *sp, int res)
+@@ -168,7 +174,6 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
+ struct srb_iocb *abt_iocb;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+- uint8_t bail;
+
+ /* ref: INIT for ABTS command */
+ sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
+@@ -176,7 +181,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
+ if (!sp)
+ return QLA_MEMORY_ALLOC_FAILED;
+
+- QLA_VHA_MARK_BUSY(vha, bail);
++ qla_vha_mark_busy(vha);
+ abt_iocb = &sp->u.iocb_cmd;
+ sp->type = SRB_ABT_CMD;
+ sp->name = "abort";
+@@ -2020,14 +2025,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+ struct srb_iocb *tm_iocb;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+- uint8_t bail;
+
+ /* ref: INIT */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+- QLA_VHA_MARK_BUSY(vha, bail);
++ qla_vha_mark_busy(vha);
+ sp->type = SRB_TM_CMD;
+ sp->name = "tmf";
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index db17f7f410cdd..5185dc5daf80d 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -225,11 +225,9 @@ static inline srb_t *
+ qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
+ {
+ srb_t *sp = NULL;
+- uint8_t bail;
+ struct qla_qpair *qpair;
+
+- QLA_VHA_MARK_BUSY(vha, bail);
+- if (unlikely(bail))
++ if (unlikely(qla_vha_mark_busy(vha)))
+ return NULL;
+
+ qpair = vha->hw->base_qpair;
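/*
 * Editor's note — illustrative sketch, not part of the patch above. The
 * qla2xxx hunks replace a statement macro that writes a "bail" flag through
 * an output variable with an inline function returning bool, so call sites
 * become a plain "if (qla_vha_mark_busy(vha)) return NULL;". Standalone
 * version of the same shape; struct host and its fields are invented.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct host {
	atomic_int vref_count;
	bool delete_progress;
};

static inline bool host_mark_busy(struct host *h)
{
	atomic_fetch_add(&h->vref_count, 1);
	if (h->delete_progress) {
		atomic_fetch_sub(&h->vref_count, 1);
		return true;	/* host going away, caller must bail */
	}
	return false;
}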
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 2c85f3cce7264..96ba1398f20c1 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -5069,13 +5069,11 @@ struct qla_work_evt *
+ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
+ {
+ struct qla_work_evt *e;
+- uint8_t bail;
+
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return NULL;
+
+- QLA_VHA_MARK_BUSY(vha, bail);
+- if (bail)
++ if (qla_vha_mark_busy(vha))
+ return NULL;
+
+ e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index bebda917b1383..b77035ddc9440 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -3785,7 +3785,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return illegal_condition_result;
+ }
+- lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
++ lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
+ if (lrdp == NULL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ if (sdebug_verbose)
+@@ -4436,7 +4436,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+ if (ret)
+ return ret;
+
+- arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
++ arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
+ if (!arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+@@ -4504,7 +4504,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
+
+ rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
+
+- arr = kzalloc(alloc_len, GFP_ATOMIC);
++ arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
+ if (!arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+@@ -7340,7 +7340,10 @@ clean:
+ kfree(sdbg_devinfo->zstate);
+ kfree(sdbg_devinfo);
+ }
+- kfree(sdbg_host);
++ if (sdbg_host->dev.release)
++ put_device(&sdbg_host->dev);
++ else
++ kfree(sdbg_host);
+ pr_warn("%s: failed, errno=%d\n", __func__, -error);
+ return error;
+ }
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 6995c89792300..02520f9123066 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -343,19 +343,11 @@ enum blk_eh_timer_return scsi_timeout(struct request *req)
+
+ if (rtn == BLK_EH_DONE) {
+ /*
+- * Set the command to complete first in order to prevent a real
+- * completion from releasing the command while error handling
+- * is using it. If the command was already completed, then the
+- * lower level driver beat the timeout handler, and it is safe
+- * to return without escalating error recovery.
+- *
+- * If timeout handling lost the race to a real completion, the
+- * block layer may ignore that due to a fake timeout injection,
+- * so return RESET_TIMER to allow error handling another shot
+- * at this command.
++ * If scsi_done() has already set SCMD_STATE_COMPLETE, do not
++ * modify *scmd.
+ */
+ if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state))
+- return BLK_EH_RESET_TIMER;
++ return BLK_EH_DONE;
+ if (scsi_abort_command(scmd) != SUCCESS) {
+ set_host_byte(scmd, DID_TIME_OUT);
+ scsi_eh_scmd_add(scmd);
+diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
+index e550b12e525a1..c8235f15728bb 100644
+--- a/drivers/scsi/smartpqi/smartpqi.h
++++ b/drivers/scsi/smartpqi/smartpqi.h
+@@ -1130,7 +1130,7 @@ struct pqi_scsi_dev {
+ u8 phy_id;
+ u8 ncq_prio_enable;
+ u8 ncq_prio_support;
+- u8 multi_lun_device_lun_count;
++ u8 lun_count;
+ bool raid_bypass_configured; /* RAID bypass configured */
+ bool raid_bypass_enabled; /* RAID bypass enabled */
+ u32 next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW];
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index b971fbe3b3a17..9f0f69c1ed665 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -1610,9 +1610,7 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
+ &id_phys->alternate_paths_phys_connector,
+ sizeof(device->phys_connector));
+ device->bay = id_phys->phys_bay_in_box;
+- device->multi_lun_device_lun_count = id_phys->multi_lun_device_lun_count;
+- if (!device->multi_lun_device_lun_count)
+- device->multi_lun_device_lun_count = 1;
++ device->lun_count = id_phys->multi_lun_device_lun_count;
+ if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
+ id_phys->phy_count)
+ device->phy_id =
+@@ -1746,7 +1744,7 @@ out:
+ return offline;
+ }
+
+-static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
++static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device,
+ struct bmic_identify_physical_device *id_phys)
+ {
+@@ -1763,6 +1761,20 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
+ return rc;
+ }
+
++static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
++ struct pqi_scsi_dev *device,
++ struct bmic_identify_physical_device *id_phys)
++{
++ int rc;
++
++ rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);
++
++ if (rc == 0 && device->lun_count == 0)
++ device->lun_count = 1;
++
++ return rc;
++}
++
+ static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+ {
+@@ -1897,7 +1909,7 @@ static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi
+ int rc;
+ int lun;
+
+- for (lun = 0; lun < device->multi_lun_device_lun_count; lun++) {
++ for (lun = 0; lun < device->lun_count; lun++) {
+ rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
+ PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
+ if (rc)
+@@ -2076,6 +2088,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
+ existing_device->sas_address = new_device->sas_address;
+ existing_device->queue_depth = new_device->queue_depth;
+ existing_device->device_offline = false;
++ existing_device->lun_count = new_device->lun_count;
+
+ if (pqi_is_logical_device(existing_device)) {
+ existing_device->is_external_raid_device = new_device->is_external_raid_device;
+@@ -2108,10 +2121,6 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
+ existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
+ memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
+ memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
+-
+- existing_device->multi_lun_device_lun_count = new_device->multi_lun_device_lun_count;
+- if (existing_device->multi_lun_device_lun_count == 0)
+- existing_device->multi_lun_device_lun_count = 1;
+ }
+ }
+
+@@ -6484,6 +6493,12 @@ static void pqi_slave_destroy(struct scsi_device *sdev)
+ return;
+ }
+
++ device->lun_count--;
++ if (device->lun_count > 0) {
++ mutex_unlock(&ctrl_info->scan_mutex);
++ return;
++ }
++
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ list_del(&device->scsi_device_list_entry);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+@@ -9302,6 +9317,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1109)
+ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x193d, 0x110b)
++ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x8460)
+@@ -9402,6 +9421,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0072)
+ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1bd4, 0x0086)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1bd4, 0x0087)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1bd4, 0x0088)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1bd4, 0x0089)
++ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x19e5, 0xd227)
+@@ -9650,6 +9685,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1474)
+ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ PCI_VENDOR_ID_ADAPTEC2, 0x1475)
++ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1480)
+@@ -9706,6 +9745,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
+ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
++ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
+@@ -9942,6 +9989,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0623)
+ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1e93, 0x1000)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1e93, 0x1001)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1e93, 0x1002)
++ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_ANY_ID, PCI_ANY_ID)
+diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
+index 9b2b5f8c23b9a..8fbf3c1b1311d 100644
+--- a/drivers/scsi/snic/snic_disc.c
++++ b/drivers/scsi/snic/snic_disc.c
+@@ -304,6 +304,9 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
+ ret);
+
+ put_device(&snic->shost->shost_gendev);
++ spin_lock_irqsave(snic->shost->host_lock, flags);
++ list_del(&tgt->list);
++ spin_unlock_irqrestore(snic->shost->host_lock, flags);
+ kfree(tgt);
+ tgt = NULL;
+
+diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
+index 031ec4aa06d55..8ec74d7539eb4 100644
+--- a/drivers/soc/apple/rtkit.c
++++ b/drivers/soc/apple/rtkit.c
+@@ -926,8 +926,10 @@ int apple_rtkit_wake(struct apple_rtkit *rtk)
+ }
+ EXPORT_SYMBOL_GPL(apple_rtkit_wake);
+
+-static void apple_rtkit_free(struct apple_rtkit *rtk)
++static void apple_rtkit_free(void *data)
+ {
++ struct apple_rtkit *rtk = data;
++
+ mbox_free_channel(rtk->mbox_chan);
+ destroy_workqueue(rtk->wq);
+
+@@ -950,8 +952,7 @@ struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
+ if (IS_ERR(rtk))
+ return rtk;
+
+- ret = devm_add_action_or_reset(dev, (void (*)(void *))apple_rtkit_free,
+- rtk);
++ ret = devm_add_action_or_reset(dev, apple_rtkit_free, rtk);
+ if (ret)
+ return ERR_PTR(ret);
+
+diff --git a/drivers/soc/apple/sart.c b/drivers/soc/apple/sart.c
+index 83804b16ad03d..afa1117368997 100644
+--- a/drivers/soc/apple/sart.c
++++ b/drivers/soc/apple/sart.c
+@@ -164,6 +164,11 @@ static int apple_sart_probe(struct platform_device *pdev)
+ return 0;
+ }
+
++static void apple_sart_put_device(void *dev)
++{
++ put_device(dev);
++}
++
+ struct apple_sart *devm_apple_sart_get(struct device *dev)
+ {
+ struct device_node *sart_node;
+@@ -187,7 +192,7 @@ struct apple_sart *devm_apple_sart_get(struct device *dev)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+- ret = devm_add_action_or_reset(dev, (void (*)(void *))put_device,
++ ret = devm_add_action_or_reset(dev, apple_sart_put_device,
+ &sart_pdev->dev);
+ if (ret)
+ return ERR_PTR(ret);
+diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
+index 09e3c38b84664..474b272f9b02d 100644
+--- a/drivers/soc/mediatek/mtk-pm-domains.c
++++ b/drivers/soc/mediatek/mtk-pm-domains.c
+@@ -275,9 +275,9 @@ static int scpsys_power_off(struct generic_pm_domain *genpd)
+ clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
+
+ /* subsys power off */
+- regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
++ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
+
+diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
+index b4046f393575e..cd44f17dad3d0 100644
+--- a/drivers/soc/qcom/apr.c
++++ b/drivers/soc/qcom/apr.c
+@@ -454,11 +454,19 @@ static int apr_add_device(struct device *dev, struct device_node *np,
+ adev->dev.driver = NULL;
+
+ spin_lock(&apr->svcs_lock);
+- idr_alloc(&apr->svcs_idr, svc, svc_id, svc_id + 1, GFP_ATOMIC);
++ ret = idr_alloc(&apr->svcs_idr, svc, svc_id, svc_id + 1, GFP_ATOMIC);
+ spin_unlock(&apr->svcs_lock);
++ if (ret < 0) {
++ dev_err(dev, "idr_alloc failed: %d\n", ret);
++ goto out;
++ }
+
+- of_property_read_string_index(np, "qcom,protection-domain",
+- 1, &adev->service_path);
++ ret = of_property_read_string_index(np, "qcom,protection-domain",
++ 1, &adev->service_path);
++ if (ret < 0) {
++ dev_err(dev, "Failed to read second value of qcom,protection-domain\n");
++ goto out;
++ }
+
+ dev_info(dev, "Adding APR/GPR dev: %s\n", dev_name(&adev->dev));
+
+@@ -468,6 +476,7 @@ static int apr_add_device(struct device *dev, struct device_node *np,
+ put_device(&adev->dev);
+ }
+
++out:
+ return ret;
+ }
+
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index 8b7e8118f3cec..82c3cfdcc5601 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -849,7 +849,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ if (ret)
+ goto err;
+
+- drv_data->ecc_irq = platform_get_irq(pdev, 0);
++ drv_data->ecc_irq = platform_get_irq_optional(pdev, 0);
+ if (drv_data->ecc_irq >= 0) {
+ llcc_edac = platform_device_register_data(&pdev->dev,
+ "qcom_llcc_edac", -1, drv_data,
+diff --git a/drivers/soc/sifive/sifive_ccache.c b/drivers/soc/sifive/sifive_ccache.c
+index 1c171150e878d..3684f5b40a80e 100644
+--- a/drivers/soc/sifive/sifive_ccache.c
++++ b/drivers/soc/sifive/sifive_ccache.c
+@@ -215,20 +215,27 @@ static int __init sifive_ccache_init(void)
+ if (!np)
+ return -ENODEV;
+
+- if (of_address_to_resource(np, 0, &res))
+- return -ENODEV;
++ if (of_address_to_resource(np, 0, &res)) {
++ rc = -ENODEV;
++ goto err_node_put;
++ }
+
+ ccache_base = ioremap(res.start, resource_size(&res));
+- if (!ccache_base)
+- return -ENOMEM;
++ if (!ccache_base) {
++ rc = -ENOMEM;
++ goto err_node_put;
++ }
+
+- if (of_property_read_u32(np, "cache-level", &level))
+- return -ENOENT;
++ if (of_property_read_u32(np, "cache-level", &level)) {
++ rc = -ENOENT;
++ goto err_unmap;
++ }
+
+ intr_num = of_property_count_u32_elems(np, "interrupts");
+ if (!intr_num) {
+ pr_err("No interrupts property\n");
+- return -ENODEV;
++ rc = -ENODEV;
++ goto err_unmap;
+ }
+
+ for (i = 0; i < intr_num; i++) {
+@@ -237,9 +244,10 @@ static int __init sifive_ccache_init(void)
+ NULL);
+ if (rc) {
+ pr_err("Could not request IRQ %d\n", g_irq[i]);
+- return rc;
++ goto err_free_irq;
+ }
+ }
++ of_node_put(np);
+
+ ccache_config_read();
+
+@@ -250,6 +258,15 @@ static int __init sifive_ccache_init(void)
+ setup_sifive_debug();
+ #endif
+ return 0;
++
++err_free_irq:
++ while (--i >= 0)
++ free_irq(g_irq[i], NULL);
++err_unmap:
++ iounmap(ccache_base);
++err_node_put:
++ of_node_put(np);
++ return rc;
+ }
+
+ device_initcall(sifive_ccache_init);
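/*
 * Editor's note — illustrative sketch, not part of the patch above. The
 * sifive_ccache hunk converts early returns that leaked the mapped region,
 * node reference and already-requested IRQs into a goto-unwind chain that
 * releases resources in reverse order of acquisition. The same shape in
 * standalone C; acquire_a/acquire_b/release_a are invented placeholders.
 */
extern void *acquire_a(void);
extern void *acquire_b(void);
extern void release_a(void *a);

static int init_example(void)
{
	void *a, *b;
	int rc;

	a = acquire_a();
	if (!a)
		return -1;

	b = acquire_b();
	if (!b) {
		rc = -1;
		goto err_release_a;
	}

	/* on success, a and b stay acquired for the rest of the driver */
	return 0;

err_release_a:
	release_a(a);
	return rc;
}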
+diff --git a/drivers/soc/tegra/cbb/tegra194-cbb.c b/drivers/soc/tegra/cbb/tegra194-cbb.c
+index 1ae0bd9a1ac1b..2e952c6f7c9e3 100644
+--- a/drivers/soc/tegra/cbb/tegra194-cbb.c
++++ b/drivers/soc/tegra/cbb/tegra194-cbb.c
+@@ -102,8 +102,6 @@
+ #define CLUSTER_NOC_VQC GENMASK(17, 16)
+ #define CLUSTER_NOC_MSTR_ID GENMASK(21, 18)
+
+-#define USRBITS_MSTR_ID GENMASK(21, 18)
+-
+ #define CBB_ERR_OPC GENMASK(4, 1)
+ #define CBB_ERR_ERRCODE GENMASK(10, 8)
+ #define CBB_ERR_LEN1 GENMASK(27, 16)
+@@ -2038,15 +2036,17 @@ static irqreturn_t tegra194_cbb_err_isr(int irq, void *data)
+ smp_processor_id(), priv->noc->name, priv->res->start,
+ irq);
+
+- mstr_id = FIELD_GET(USRBITS_MSTR_ID, priv->errlog5) - 1;
+ is_fatal = print_errlog(NULL, priv, status);
+
+ /*
+- * If illegal request is from CCPLEX(0x1)
+- * initiator then call BUG() to crash system.
++ * If illegal request is from CCPLEX(0x1) initiator
++ * and error is fatal then call BUG() to crash system.
+ */
+- if ((mstr_id == 0x1) && priv->noc->erd_mask_inband_err)
+- is_inband_err = 1;
++ if (priv->noc->erd_mask_inband_err) {
++ mstr_id = FIELD_GET(CBB_NOC_MSTR_ID, priv->errlog5);
++ if (mstr_id == 0x1)
++ is_inband_err = 1;
++ }
+ }
+ }
+
+diff --git a/drivers/soc/tegra/cbb/tegra234-cbb.c b/drivers/soc/tegra/cbb/tegra234-cbb.c
+index 3528f9e15d5c0..f33d094e5ea60 100644
+--- a/drivers/soc/tegra/cbb/tegra234-cbb.c
++++ b/drivers/soc/tegra/cbb/tegra234-cbb.c
+@@ -72,6 +72,11 @@
+
+ #define REQ_SOCKET_ID GENMASK(27, 24)
+
++#define CCPLEX_MSTRID 0x1
++#define FIREWALL_APERTURE_SZ 0x10000
++/* Write firewall check enable */
++#define WEN 0x20000
++
+ enum tegra234_cbb_fabric_ids {
+ CBB_FAB_ID,
+ SCE_FAB_ID,
+@@ -92,11 +97,15 @@ struct tegra234_slave_lookup {
+ struct tegra234_cbb_fabric {
+ const char *name;
+ phys_addr_t off_mask_erd;
+- bool erd_mask_inband_err;
++ phys_addr_t firewall_base;
++ unsigned int firewall_ctl;
++ unsigned int firewall_wr_ctl;
+ const char * const *master_id;
+ unsigned int notifier_offset;
+ const struct tegra_cbb_error *errors;
++ const int max_errors;
+ const struct tegra234_slave_lookup *slave_map;
++ const int max_slaves;
+ };
+
+ struct tegra234_cbb {
+@@ -128,6 +137,44 @@ static inline struct tegra234_cbb *to_tegra234_cbb(struct tegra_cbb *cbb)
+ static LIST_HEAD(cbb_list);
+ static DEFINE_SPINLOCK(cbb_lock);
+
++static bool
++tegra234_cbb_write_access_allowed(struct platform_device *pdev, struct tegra234_cbb *cbb)
++{
++ u32 val;
++
++ if (!cbb->fabric->firewall_base ||
++ !cbb->fabric->firewall_ctl ||
++ !cbb->fabric->firewall_wr_ctl) {
++ dev_info(&pdev->dev, "SoC data missing for firewall\n");
++ return false;
++ }
++
++ if ((cbb->fabric->firewall_ctl > FIREWALL_APERTURE_SZ) ||
++ (cbb->fabric->firewall_wr_ctl > FIREWALL_APERTURE_SZ)) {
++ dev_err(&pdev->dev, "wrong firewall offset value\n");
++ return false;
++ }
++
++ val = readl(cbb->regs + cbb->fabric->firewall_base + cbb->fabric->firewall_ctl);
++ /*
++ * If the firewall check feature for allowing or blocking the
++ * write accesses through the firewall of a fabric is disabled
++ * then CCPLEX can write to the registers of that fabric.
++ */
++ if (!(val & WEN))
++ return true;
++
++ /*
++ * If the firewall check is enabled then check whether CCPLEX
++ * has write access to the fabric's error notifier registers
++ */
++ val = readl(cbb->regs + cbb->fabric->firewall_base + cbb->fabric->firewall_wr_ctl);
++ if (val & (BIT(CCPLEX_MSTRID)))
++ return true;
++
++ return false;
++}
++
+ static void tegra234_cbb_fault_enable(struct tegra_cbb *cbb)
+ {
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+@@ -271,6 +318,12 @@ static void tegra234_cbb_print_error(struct seq_file *file, struct tegra234_cbb
+ tegra_cbb_print_err(file, "\t Multiple type of errors reported\n");
+
+ while (status) {
++ if (type >= cbb->fabric->max_errors) {
++ tegra_cbb_print_err(file, "\t Wrong type index:%u, status:%u\n",
++ type, status);
++ return;
++ }
++
+ if (status & 0x1)
+ tegra_cbb_print_err(file, "\t Error Code\t\t: %s\n",
+ cbb->fabric->errors[type].code);
+@@ -282,6 +335,12 @@ static void tegra234_cbb_print_error(struct seq_file *file, struct tegra234_cbb
+ type = 0;
+
+ while (overflow) {
++ if (type >= cbb->fabric->max_errors) {
++ tegra_cbb_print_err(file, "\t Wrong type index:%u, overflow:%u\n",
++ type, overflow);
++ return;
++ }
++
+ if (overflow & 0x1)
+ tegra_cbb_print_err(file, "\t Overflow\t\t: Multiple %s\n",
+ cbb->fabric->errors[type].code);
+@@ -334,8 +393,11 @@ static void print_errlog_err(struct seq_file *file, struct tegra234_cbb *cbb)
+ access_type = FIELD_GET(FAB_EM_EL_ACCESSTYPE, cbb->mn_attr0);
+
+ tegra_cbb_print_err(file, "\n");
+- tegra_cbb_print_err(file, "\t Error Code\t\t: %s\n",
+- cbb->fabric->errors[cbb->type].code);
++ if (cbb->type < cbb->fabric->max_errors)
++ tegra_cbb_print_err(file, "\t Error Code\t\t: %s\n",
++ cbb->fabric->errors[cbb->type].code);
++ else
++ tegra_cbb_print_err(file, "\t Wrong type index:%u\n", cbb->type);
+
+ tegra_cbb_print_err(file, "\t MASTER_ID\t\t: %s\n", cbb->fabric->master_id[mstr_id]);
+ tegra_cbb_print_err(file, "\t Address\t\t: %#llx\n", cbb->access);
+@@ -374,6 +436,11 @@ static void print_errlog_err(struct seq_file *file, struct tegra234_cbb *cbb)
+ if ((fab_id == PSC_FAB_ID) || (fab_id == FSI_FAB_ID))
+ return;
+
++ if (slave_id >= cbb->fabric->max_slaves) {
++ tegra_cbb_print_err(file, "\t Invalid slave_id:%d\n", slave_id);
++ return;
++ }
++
+ if (!strcmp(cbb->fabric->errors[cbb->type].code, "TIMEOUT_ERR")) {
+ tegra234_lookup_slave_timeout(file, cbb, slave_id, fab_id);
+ return;
+@@ -517,7 +584,7 @@ static irqreturn_t tegra234_cbb_isr(int irq, void *data)
+ u32 status = tegra_cbb_get_status(cbb);
+
+ if (status && (irq == priv->sec_irq)) {
+- tegra_cbb_print_err(NULL, "CPU:%d, Error: %s@%llx, irq=%d\n",
++ tegra_cbb_print_err(NULL, "CPU:%d, Error: %s@0x%llx, irq=%d\n",
+ smp_processor_id(), priv->fabric->name,
+ priv->res->start, irq);
+
+@@ -525,14 +592,14 @@ static irqreturn_t tegra234_cbb_isr(int irq, void *data)
+ if (err)
+ goto unlock;
+
+- mstr_id = FIELD_GET(USRBITS_MSTR_ID, priv->mn_user_bits);
+-
+ /*
+- * If illegal request is from CCPLEX(id:0x1) master then call BUG() to
+- * crash system.
++ * If illegal request is from CCPLEX(id:0x1) master then call WARN()
+ */
+- if ((mstr_id == 0x1) && priv->fabric->off_mask_erd)
+- is_inband_err = 1;
++ if (priv->fabric->off_mask_erd) {
++ mstr_id = FIELD_GET(USRBITS_MSTR_ID, priv->mn_user_bits);
++ if (mstr_id == CCPLEX_MSTRID)
++ is_inband_err = 1;
++ }
+ }
+ }
+
+@@ -640,8 +707,13 @@ static const struct tegra234_cbb_fabric tegra234_aon_fabric = {
+ .name = "aon-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_aon_slave_map,
++ .max_slaves = ARRAY_SIZE(tegra234_aon_slave_map),
+ .errors = tegra234_cbb_errors,
++ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .notifier_offset = 0x17000,
++ .firewall_base = 0x30000,
++ .firewall_ctl = 0x8d0,
++ .firewall_wr_ctl = 0x8c8,
+ };
+
+ static const struct tegra234_slave_lookup tegra234_bpmp_slave_map[] = {
+@@ -656,8 +728,13 @@ static const struct tegra234_cbb_fabric tegra234_bpmp_fabric = {
+ .name = "bpmp-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_bpmp_slave_map,
++ .max_slaves = ARRAY_SIZE(tegra234_bpmp_slave_map),
+ .errors = tegra234_cbb_errors,
++ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .notifier_offset = 0x19000,
++ .firewall_base = 0x30000,
++ .firewall_ctl = 0x8f0,
++ .firewall_wr_ctl = 0x8e8,
+ };
+
+ static const struct tegra234_slave_lookup tegra234_cbb_slave_map[] = {
+@@ -728,55 +805,62 @@ static const struct tegra234_cbb_fabric tegra234_cbb_fabric = {
+ .name = "cbb-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_cbb_slave_map,
++ .max_slaves = ARRAY_SIZE(tegra234_cbb_slave_map),
+ .errors = tegra234_cbb_errors,
++ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .notifier_offset = 0x60000,
+- .off_mask_erd = 0x3a004
++ .off_mask_erd = 0x3a004,
++ .firewall_base = 0x10000,
++ .firewall_ctl = 0x23f0,
++ .firewall_wr_ctl = 0x23e8,
+ };
+
+-static const struct tegra234_slave_lookup tegra234_dce_slave_map[] = {
++static const struct tegra234_slave_lookup tegra234_common_slave_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST0", 0x15000 },
+ { "AST1", 0x16000 },
++ { "CBB", 0x17000 },
++ { "RSVD", 0x00000 },
+ { "CPU", 0x18000 },
+ };
+
+ static const struct tegra234_cbb_fabric tegra234_dce_fabric = {
+ .name = "dce-fabric",
+ .master_id = tegra234_master_id,
+- .slave_map = tegra234_dce_slave_map,
++ .slave_map = tegra234_common_slave_map,
++ .max_slaves = ARRAY_SIZE(tegra234_common_slave_map),
+ .errors = tegra234_cbb_errors,
++ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .notifier_offset = 0x19000,
+-};
+-
+-static const struct tegra234_slave_lookup tegra234_rce_slave_map[] = {
+- { "AXI2APB", 0x00000 },
+- { "AST0", 0x15000 },
+- { "AST1", 0x16000 },
+- { "CPU", 0x18000 },
++ .firewall_base = 0x30000,
++ .firewall_ctl = 0x290,
++ .firewall_wr_ctl = 0x288,
+ };
+
+ static const struct tegra234_cbb_fabric tegra234_rce_fabric = {
+ .name = "rce-fabric",
+ .master_id = tegra234_master_id,
+- .slave_map = tegra234_rce_slave_map,
++ .slave_map = tegra234_common_slave_map,
++ .max_slaves = ARRAY_SIZE(tegra234_common_slave_map),
+ .errors = tegra234_cbb_errors,
++ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .notifier_offset = 0x19000,
+-};
+-
+-static const struct tegra234_slave_lookup tegra234_sce_slave_map[] = {
+- { "AXI2APB", 0x00000 },
+- { "AST0", 0x15000 },
+- { "AST1", 0x16000 },
+- { "CBB", 0x17000 },
+- { "CPU", 0x18000 },
++ .firewall_base = 0x30000,
++ .firewall_ctl = 0x290,
++ .firewall_wr_ctl = 0x288,
+ };
+
+ static const struct tegra234_cbb_fabric tegra234_sce_fabric = {
+ .name = "sce-fabric",
+ .master_id = tegra234_master_id,
+- .slave_map = tegra234_sce_slave_map,
++ .slave_map = tegra234_common_slave_map,
++ .max_slaves = ARRAY_SIZE(tegra234_common_slave_map),
+ .errors = tegra234_cbb_errors,
++ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .notifier_offset = 0x19000,
++ .firewall_base = 0x30000,
++ .firewall_ctl = 0x290,
++ .firewall_wr_ctl = 0x288,
+ };
+
+ static const char * const tegra241_master_id[] = {
+@@ -889,7 +973,7 @@ static const struct tegra_cbb_error tegra241_cbb_errors[] = {
+ };
+
+ static const struct tegra234_slave_lookup tegra241_cbb_slave_map[] = {
+- { "CCPLEX", 0x50000 },
++ { "RSVD", 0x00000 },
+ { "PCIE_C8", 0x51000 },
+ { "PCIE_C9", 0x52000 },
+ { "RSVD", 0x00000 },
+@@ -942,20 +1026,30 @@ static const struct tegra234_slave_lookup tegra241_cbb_slave_map[] = {
+ { "PCIE_C3", 0x58000 },
+ { "PCIE_C0", 0x59000 },
+ { "PCIE_C1", 0x5a000 },
++ { "CCPLEX", 0x50000 },
+ { "AXI2APB_29", 0x85000 },
+ { "AXI2APB_30", 0x86000 },
++ { "CBB_CENTRAL", 0x00000 },
++ { "AXI2APB_31", 0x8E000 },
++ { "AXI2APB_32", 0x8F000 },
+ };
+
+ static const struct tegra234_cbb_fabric tegra241_cbb_fabric = {
+ .name = "cbb-fabric",
+ .master_id = tegra241_master_id,
+ .slave_map = tegra241_cbb_slave_map,
++ .max_slaves = ARRAY_SIZE(tegra241_cbb_slave_map),
+ .errors = tegra241_cbb_errors,
++ .max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .notifier_offset = 0x60000,
+ .off_mask_erd = 0x40004,
++ .firewall_base = 0x20000,
++ .firewall_ctl = 0x2370,
++ .firewall_wr_ctl = 0x2368,
+ };
+
+ static const struct tegra234_slave_lookup tegra241_bpmp_slave_map[] = {
++ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "CBB", 0x15000 },
+@@ -969,8 +1063,13 @@ static const struct tegra234_cbb_fabric tegra241_bpmp_fabric = {
+ .name = "bpmp-fabric",
+ .master_id = tegra241_master_id,
+ .slave_map = tegra241_bpmp_slave_map,
++ .max_slaves = ARRAY_SIZE(tegra241_bpmp_slave_map),
+ .errors = tegra241_cbb_errors,
++ .max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .notifier_offset = 0x19000,
++ .firewall_base = 0x30000,
++ .firewall_ctl = 0x8f0,
++ .firewall_wr_ctl = 0x8e8,
+ };
+
+ static const struct of_device_id tegra234_cbb_dt_ids[] = {
+@@ -1055,6 +1154,15 @@ static int tegra234_cbb_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, cbb);
+
++ /*
++	 * Don't enable error reporting for a Fabric if writes to its registers
++	 * are blocked by the CBB firewall.
++ */
++ if (!tegra234_cbb_write_access_allowed(pdev, cbb)) {
++ dev_info(&pdev->dev, "error reporting not enabled due to firewall\n");
++ return 0;
++ }
++
+ spin_lock_irqsave(&cbb_lock, flags);
+ list_add(&cbb->base.node, &cbb_list);
+ spin_unlock_irqrestore(&cbb_lock, flags);
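/*
 * Editor's note — illustrative sketch, not part of the patch above. The
 * tegra234-cbb hunks store each lookup table's size next to the table
 * (ARRAY_SIZE at initialization) and reject hardware-reported indices that
 * fall outside it before indexing. Same pattern standalone; the table
 * contents and print_error() are invented.
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const err_code[] = { "SLAVE_ERR", "DEC_ERR", "TIMEOUT_ERR" };
static const unsigned int max_errors = ARRAY_SIZE(err_code);

static void print_error(unsigned int type)
{
	if (type >= max_errors) {
		printf("wrong type index:%u\n", type);
		return;
	}
	printf("Error Code: %s\n", err_code[type]);
}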
+diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
+index 92af7d1b6f5bd..8fb76908be704 100644
+--- a/drivers/soc/ti/knav_qmss_queue.c
++++ b/drivers/soc/ti/knav_qmss_queue.c
+@@ -67,7 +67,7 @@ static DEFINE_MUTEX(knav_dev_lock);
+ * Newest followed by older ones. Search is done from start of the array
+ * until a firmware file is found.
+ */
+-const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
++static const char * const knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
+
+ static bool device_ready;
+ bool knav_qmss_device_ready(void)
+@@ -1785,6 +1785,7 @@ static int knav_queue_probe(struct platform_device *pdev)
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
++ pm_runtime_disable(&pdev->dev);
+ dev_err(dev, "Failed to enable QMSS\n");
+ return ret;
+ }
+diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c
+index ad2bb72e640c8..6a389a6444f36 100644
+--- a/drivers/soc/ti/smartreflex.c
++++ b/drivers/soc/ti/smartreflex.c
+@@ -932,6 +932,7 @@ static int omap_sr_probe(struct platform_device *pdev)
+ err_debugfs:
+ debugfs_remove_recursive(sr_info->dbg_dir);
+ err_list_del:
++ pm_runtime_disable(&pdev->dev);
+ list_del(&sr_info->node);
+ clk_unprepare(sr_info->fck);
+
+diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
+index f81cdd83ec26e..7969881f126dc 100644
+--- a/drivers/soundwire/dmi-quirks.c
++++ b/drivers/soundwire/dmi-quirks.c
+@@ -90,6 +90,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
+ },
+ .driver_data = (void *)intel_tgl_bios,
+ },
++ {
++ /* quirk used for NUC15 LAPBC710 skew */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_BOARD_NAME, "LAPBC710"),
++ },
++ .driver_data = (void *)intel_tgl_bios,
++ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index 731624f157fc0..93152144fd2ec 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -333,13 +333,26 @@ static int fsl_spi_prepare_message(struct spi_controller *ctlr,
+ {
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(ctlr);
+ struct spi_transfer *t;
++ struct spi_transfer *first;
++
++ first = list_first_entry(&m->transfers, struct spi_transfer,
++ transfer_list);
+
+ /*
+ * In CPU mode, optimize large byte transfers to use larger
+ * bits_per_word values to reduce number of interrupts taken.
++ *
++ * Some glitches can appear on the SPI clock when the mode changes.
++ * Check that there is no speed change during the transfer and set it up
++ * now to change the mode without having a chip-select asserted.
+ */
+- if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
+- list_for_each_entry(t, &m->transfers, transfer_list) {
++ list_for_each_entry(t, &m->transfers, transfer_list) {
++ if (t->speed_hz != first->speed_hz) {
++ dev_err(&m->spi->dev,
++ "speed_hz cannot change during message.\n");
++ return -EINVAL;
++ }
++ if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
+ if (t->len < 256 || t->bits_per_word != 8)
+ continue;
+ if ((t->len & 3) == 0)
+@@ -348,7 +361,7 @@ static int fsl_spi_prepare_message(struct spi_controller *ctlr,
+ t->bits_per_word = 16;
+ }
+ }
+- return 0;
++ return fsl_spi_setup_transfer(m->spi, first);
+ }
+
+ static int fsl_spi_transfer_one(struct spi_controller *controller,
+diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
+index 4b12c4964a664..9c8c7948044ed 100644
+--- a/drivers/spi/spi-gpio.c
++++ b/drivers/spi/spi-gpio.c
+@@ -268,9 +268,19 @@ static int spi_gpio_set_direction(struct spi_device *spi, bool output)
+ if (output)
+ return gpiod_direction_output(spi_gpio->mosi, 1);
+
+- ret = gpiod_direction_input(spi_gpio->mosi);
+- if (ret)
+- return ret;
++ /*
++ * Only change MOSI to an input if using 3WIRE mode.
++ * Otherwise, MOSI could be left floating if there is
++ * no pull resistor connected to the I/O pin, or could
++ * be left logic high if there is a pull-up. Transmitting
++ * logic high when only clocking MISO data in can put some
++	 * SPI devices into a bad state.
++ */
++ if (spi->mode & SPI_3WIRE) {
++ ret = gpiod_direction_input(spi_gpio->mosi);
++ if (ret)
++ return ret;
++ }
+ /*
+ * Send a turnaround high impedance cycle when switching
+ * from output to input. Theoretically there should be
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index b2775d82d2d7b..6313e7d0cdf87 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -377,12 +377,23 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ switch (cmd) {
+ /* read requests */
+ case SPI_IOC_RD_MODE:
+- retval = put_user(spi->mode & SPI_MODE_MASK,
+- (__u8 __user *)arg);
+- break;
+ case SPI_IOC_RD_MODE32:
+- retval = put_user(spi->mode & SPI_MODE_MASK,
+- (__u32 __user *)arg);
++ tmp = spi->mode;
++
++ {
++ struct spi_controller *ctlr = spi->controller;
++
++ if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
++ ctlr->cs_gpiods[spi->chip_select])
++ tmp &= ~SPI_CS_HIGH;
++ }
++
++ if (cmd == SPI_IOC_RD_MODE)
++ retval = put_user(tmp & SPI_MODE_MASK,
++ (__u8 __user *)arg);
++ else
++ retval = put_user(tmp & SPI_MODE_MASK,
++ (__u32 __user *)arg);
+ break;
+ case SPI_IOC_RD_LSB_FIRST:
+ retval = put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
+diff --git a/drivers/staging/media/deprecated/stkwebcam/Kconfig b/drivers/staging/media/deprecated/stkwebcam/Kconfig
+index 4450403dff41f..7234498e634ac 100644
+--- a/drivers/staging/media/deprecated/stkwebcam/Kconfig
++++ b/drivers/staging/media/deprecated/stkwebcam/Kconfig
+@@ -2,7 +2,7 @@
+ config VIDEO_STKWEBCAM
+ tristate "USB Syntek DC1125 Camera support (DEPRECATED)"
+ depends on VIDEO_DEV
+- depends on USB
++ depends on MEDIA_USB_SUPPORT && MEDIA_CAMERA_SUPPORT
+ help
+ Say Y here if you want to use this type of camera.
+ Supported devices are typically found in some Asus laptops,
+diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
+index e5b550ccfa22d..c77401f184d74 100644
+--- a/drivers/staging/media/imx/imx7-media-csi.c
++++ b/drivers/staging/media/imx/imx7-media-csi.c
+@@ -521,9 +521,9 @@ static void imx7_csi_configure(struct imx7_csi *csi)
+ cr18 = imx7_csi_reg_read(csi, CSI_CSICR18);
+
+ cr18 &= ~(BIT_CSI_HW_ENABLE | BIT_MIPI_DATA_FORMAT_MASK |
+- BIT_DATA_FROM_MIPI | BIT_BASEADDR_CHG_ERR_EN |
+- BIT_BASEADDR_SWITCH_EN | BIT_BASEADDR_SWITCH_SEL |
+- BIT_DEINTERLACE_EN);
++ BIT_DATA_FROM_MIPI | BIT_MIPI_DOUBLE_CMPNT |
++ BIT_BASEADDR_CHG_ERR_EN | BIT_BASEADDR_SWITCH_SEL |
++ BIT_BASEADDR_SWITCH_EN | BIT_DEINTERLACE_EN);
+
+ if (out_pix->field == V4L2_FIELD_INTERLACED) {
+ cr18 |= BIT_DEINTERLACE_EN;
+diff --git a/drivers/staging/media/rkvdec/rkvdec-vp9.c b/drivers/staging/media/rkvdec/rkvdec-vp9.c
+index d8c1c0db15c70..cfae99b40ccb4 100644
+--- a/drivers/staging/media/rkvdec/rkvdec-vp9.c
++++ b/drivers/staging/media/rkvdec/rkvdec-vp9.c
+@@ -84,6 +84,8 @@ struct rkvdec_vp9_probs {
+ struct rkvdec_vp9_inter_frame_probs inter;
+ struct rkvdec_vp9_intra_only_frame_probs intra_only;
+ };
++ /* 128 bit alignment */
++ u8 padding1[11];
+ };
+
+ /* Data structure describing auxiliary buffer format. */
+@@ -1006,6 +1008,7 @@ static int rkvdec_vp9_start(struct rkvdec_ctx *ctx)
+
+ ctx->priv = vp9_ctx;
+
++ BUILD_BUG_ON(sizeof(priv_tbl->probs) % 16); /* ensure probs size is 128-bit aligned */
+ priv_tbl = dma_alloc_coherent(rkvdec->dev, sizeof(*priv_tbl),
+ &vp9_ctx->priv_tbl.dma, GFP_KERNEL);
+ if (!priv_tbl) {
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+index 4952fc17f3e6d..625f77a8c5bde 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+@@ -242,6 +242,18 @@ static void cedrus_h265_skip_bits(struct cedrus_dev *dev, int num)
+ }
+ }
+
++static u32 cedrus_h265_show_bits(struct cedrus_dev *dev, int num)
++{
++ cedrus_write(dev, VE_DEC_H265_TRIGGER,
++ VE_DEC_H265_TRIGGER_SHOW_BITS |
++ VE_DEC_H265_TRIGGER_TYPE_N_BITS(num));
++
++ cedrus_wait_for(dev, VE_DEC_H265_STATUS,
++ VE_DEC_H265_STATUS_VLD_BUSY);
++
++ return cedrus_read(dev, VE_DEC_H265_BITS_READ);
++}
++
+ static void cedrus_h265_write_scaling_list(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+ {
+@@ -406,7 +418,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+ u32 num_entry_point_offsets;
+ u32 output_pic_list_index;
+ u32 pic_order_cnt[2];
+- u8 *padding;
++ u8 padding;
+ int count;
+ u32 reg;
+
+@@ -520,21 +532,22 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+ if (slice_params->data_byte_offset == 0)
+ return -EOPNOTSUPP;
+
+- padding = (u8 *)vb2_plane_vaddr(&run->src->vb2_buf, 0) +
+- slice_params->data_byte_offset - 1;
++ cedrus_h265_skip_bits(dev, (slice_params->data_byte_offset - 1) * 8);
++
++ padding = cedrus_h265_show_bits(dev, 8);
+
+ /* at least one bit must be set in that byte */
+- if (*padding == 0)
++ if (padding == 0)
+ return -EINVAL;
+
+ for (count = 0; count < 8; count++)
+- if (*padding & (1 << count))
++ if (padding & (1 << count))
+ break;
+
+ /* Include the one bit. */
+ count++;
+
+- cedrus_h265_skip_bits(dev, slice_params->data_byte_offset * 8 - count);
++ cedrus_h265_skip_bits(dev, 8 - count);
+
+ /* Bitstream parameters. */
+
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+index d81f7513ade0d..655c05b389cf5 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+@@ -505,6 +505,8 @@
+ #define VE_DEC_H265_LOW_ADDR_ENTRY_POINTS_BUF(a) \
+ SHIFT_AND_MASK_BITS(a, 7, 0)
+
++#define VE_DEC_H265_BITS_READ (VE_ENGINE_DEC_H265 + 0xdc)
++
+ #define VE_DEC_H265_SRAM_OFFSET (VE_ENGINE_DEC_H265 + 0xe0)
+
+ #define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L0 0x00
+diff --git a/drivers/staging/r8188eu/core/rtw_led.c b/drivers/staging/r8188eu/core/rtw_led.c
+index 1e316e6358ea2..48c5db69929c3 100644
+--- a/drivers/staging/r8188eu/core/rtw_led.c
++++ b/drivers/staging/r8188eu/core/rtw_led.c
+@@ -32,40 +32,19 @@ static void ResetLedStatus(struct led_priv *pLed)
+
+ static void SwLedOn(struct adapter *padapter, struct led_priv *pLed)
+ {
+- u8 LedCfg;
+- int res;
+-
+ if (padapter->bDriverStopped)
+ return;
+
+- res = rtw_read8(padapter, REG_LEDCFG2, &LedCfg);
+- if (res)
+- return;
+-
+- rtw_write8(padapter, REG_LEDCFG2, (LedCfg & 0xf0) | BIT(5) | BIT(6)); /* SW control led0 on. */
++ rtw_write8(padapter, REG_LEDCFG2, BIT(5)); /* SW control led0 on. */
+ pLed->bLedOn = true;
+ }
+
+ static void SwLedOff(struct adapter *padapter, struct led_priv *pLed)
+ {
+- u8 LedCfg;
+- int res;
+-
+ if (padapter->bDriverStopped)
+ goto exit;
+
+- res = rtw_read8(padapter, REG_LEDCFG2, &LedCfg);/* 0x4E */
+- if (res)
+- goto exit;
+-
+- LedCfg &= 0x90; /* Set to software control. */
+- rtw_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
+- res = rtw_read8(padapter, REG_MAC_PINMUX_CFG, &LedCfg);
+- if (res)
+- goto exit;
+-
+- LedCfg &= 0xFE;
+- rtw_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
++ rtw_write8(padapter, REG_LEDCFG2, BIT(5) | BIT(3));
+ exit:
+ pLed->bLedOn = false;
+ }
+diff --git a/drivers/staging/r8188eu/core/rtw_pwrctrl.c b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
+index 870d81735b8dc..5290ac36f08c1 100644
+--- a/drivers/staging/r8188eu/core/rtw_pwrctrl.c
++++ b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
+@@ -273,7 +273,7 @@ static s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
+ err = -1;
+ break;
+ }
+- msleep(1);
++ mdelay(1);
+ }
+
+ return err;
+diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
+index 46d75e925ee9b..f710eb2a95f3a 100644
+--- a/drivers/staging/rtl8192e/rtllib_rx.c
++++ b/drivers/staging/rtl8192e/rtllib_rx.c
+@@ -1489,9 +1489,9 @@ static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb,
+ hdrlen += 4;
+ }
+
+- rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen);
+ ieee->stats.rx_packets++;
+ ieee->stats.rx_bytes += skb->len;
++ rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen);
+
+ return 1;
+ }
+diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+index b58e75932ecd5..3686b3c599ce7 100644
+--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+@@ -951,9 +951,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+ #endif
+
+ if (ieee->iw_mode == IW_MODE_MONITOR) {
++ unsigned int len = skb->len;
++
+ ieee80211_monitor_rx(ieee, skb, rx_stats);
+ stats->rx_packets++;
+- stats->rx_bytes += skb->len;
++ stats->rx_bytes += len;
+ return 1;
+ }
+
+diff --git a/drivers/staging/vme_user/vme_fake.c b/drivers/staging/vme_user/vme_fake.c
+index dd646b0c531d4..1ee432c223e2b 100644
+--- a/drivers/staging/vme_user/vme_fake.c
++++ b/drivers/staging/vme_user/vme_fake.c
+@@ -1073,6 +1073,8 @@ static int __init fake_init(void)
+
+ /* We need a fake parent device */
+ vme_root = __root_device_register("vme", THIS_MODULE);
++ if (IS_ERR(vme_root))
++ return PTR_ERR(vme_root);
+
+ /* If we want to support more than one bridge at some point, we need to
+ * dynamically allocate this so we get one per device.
+diff --git a/drivers/staging/vme_user/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c
+index 020e0b3bce64b..0171f46d1848f 100644
+--- a/drivers/staging/vme_user/vme_tsi148.c
++++ b/drivers/staging/vme_user/vme_tsi148.c
+@@ -1751,6 +1751,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
+ return 0;
+
+ err_dma:
++ list_del(&entry->list);
+ err_dest:
+ err_source:
+ err_align:
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index f2919319ad383..ff49c8f3fe241 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -1018,6 +1018,13 @@ static int iscsi_target_handle_csg_one(struct iscsit_conn *conn, struct iscsi_lo
+ return 0;
+ }
+
++/*
++ * RETURN VALUE:
++ *
++ * 1 = Login successful
++ * -1 = Login failed
++ * 0 = More PDU exchanges required
++ */
+ static int iscsi_target_do_login(struct iscsit_conn *conn, struct iscsi_login *login)
+ {
+ int pdu_count = 0;
+@@ -1363,12 +1370,13 @@ int iscsi_target_start_negotiation(
+ ret = -1;
+
+ if (ret < 0) {
+- cancel_delayed_work_sync(&conn->login_work);
+ iscsi_target_restore_sock_callbacks(conn);
+ iscsi_remove_failed_auth_entry(conn);
+ }
+- if (ret != 0)
++ if (ret != 0) {
++ cancel_delayed_work_sync(&conn->login_work);
+ iscsi_target_nego_release(conn);
++ }
+
+ return ret;
+ }
+diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
+index e2c2673025a7a..258d988b266d7 100644
+--- a/drivers/thermal/imx8mm_thermal.c
++++ b/drivers/thermal/imx8mm_thermal.c
+@@ -65,8 +65,14 @@ static int imx8mm_tmu_get_temp(void *data, int *temp)
+ u32 val;
+
+ val = readl_relaxed(tmu->base + TRITSR) & TRITSR_TEMP0_VAL_MASK;
++
++ /*
++ * Do not validate against the V bit (bit 31) due to errata
++ * ERR051272: TMU: Bit 31 of registers TMU_TSCR/TMU_TRITSR/TMU_TRATSR invalid
++ */
++
+ *temp = val * 1000;
+- if (*temp < VER1_TEMP_LOW_LIMIT)
++ if (*temp < VER1_TEMP_LOW_LIMIT || *temp > VER2_TEMP_HIGH_LIMIT)
+ return -EAGAIN;
+
+ return 0;
+diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c
+index 16b6bcf1bf4fa..c073b1023bbe7 100644
+--- a/drivers/thermal/k3_j72xx_bandgap.c
++++ b/drivers/thermal/k3_j72xx_bandgap.c
+@@ -439,7 +439,7 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
+ workaround_needed = false;
+
+ dev_dbg(bgp->dev, "Work around %sneeded\n",
+- workaround_needed ? "not " : "");
++ workaround_needed ? "" : "not ");
+
+ if (!workaround_needed)
+ init_table(5, ref_table, golden_factors);
+diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
+index d3d9b9fa49e81..4122a51e98741 100644
+--- a/drivers/thermal/qcom/lmh.c
++++ b/drivers/thermal/qcom/lmh.c
+@@ -45,7 +45,7 @@ static irqreturn_t lmh_handle_irq(int hw_irq, void *data)
+ if (irq)
+ generic_handle_irq(irq);
+
+- return 0;
++ return IRQ_HANDLED;
+ }
+
+ static void lmh_enable_interrupt(struct irq_data *d)
+diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+index be785ab37e53d..ad84978109e6f 100644
+--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
++++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+@@ -252,7 +252,8 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip,
+ disable_s2_shutdown = true;
+ else
+ dev_warn(chip->dev,
+- "No ADC is configured and critical temperature is above the maximum stage 2 threshold of 140 C! Configuring stage 2 shutdown at 140 C.\n");
++ "No ADC is configured and critical temperature %d mC is above the maximum stage 2 threshold of %ld mC! Configuring stage 2 shutdown at %ld mC.\n",
++ temp, stage2_threshold_max, stage2_threshold_max);
+ }
+
+ skip:
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 117eeaf7dd241..615fdda3a5de7 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -883,10 +883,6 @@ __thermal_cooling_device_register(struct device_node *np,
+ cdev->id = ret;
+ id = ret;
+
+- ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
+- if (ret)
+- goto out_ida_remove;
+-
+ cdev->type = kstrdup(type ? type : "", GFP_KERNEL);
+ if (!cdev->type) {
+ ret = -ENOMEM;
+@@ -901,6 +897,11 @@ __thermal_cooling_device_register(struct device_node *np,
+ cdev->device.class = &thermal_class;
+ cdev->devdata = devdata;
+ thermal_cooling_device_setup_sysfs(cdev);
++ ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
++ if (ret) {
++ thermal_cooling_device_destroy_sysfs(cdev);
++ goto out_kfree_type;
++ }
+ ret = device_register(&cdev->device);
+ if (ret)
+ goto out_kfree_type;
+@@ -1234,10 +1235,6 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
+ tz->id = id;
+ strscpy(tz->type, type, sizeof(tz->type));
+
+- result = dev_set_name(&tz->device, "thermal_zone%d", tz->id);
+- if (result)
+- goto remove_id;
+-
+ if (!ops->critical)
+ ops->critical = thermal_zone_device_critical;
+
+@@ -1260,6 +1257,11 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
+ /* A new thermal zone needs to be updated anyway. */
+ atomic_set(&tz->need_update, 1);
+
++ result = dev_set_name(&tz->device, "thermal_zone%d", tz->id);
++ if (result) {
++ thermal_zone_destroy_device_groups(tz);
++ goto remove_id;
++ }
+ result = device_register(&tz->device);
+ if (result)
+ goto release_device;
+diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
+index c65cdce8f856e..fca0b23570f96 100644
+--- a/drivers/thermal/thermal_helpers.c
++++ b/drivers/thermal/thermal_helpers.c
+@@ -115,7 +115,12 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
+ int ret;
+
+ mutex_lock(&tz->lock);
+- ret = __thermal_zone_get_temp(tz, temp);
++
++ if (device_is_registered(&tz->device))
++ ret = __thermal_zone_get_temp(tz, temp);
++ else
++ ret = -ENODEV;
++
+ mutex_unlock(&tz->lock);
+
+ return ret;
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index d4b6335ace15f..aacba30bc10c1 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -604,13 +604,15 @@ struct thermal_zone_device *thermal_of_zone_register(struct device_node *sensor,
+ if (IS_ERR(np)) {
+ if (PTR_ERR(np) != -ENODEV)
+ pr_err("Failed to find thermal zone for %pOFn id=%d\n", sensor, id);
+- return ERR_CAST(np);
++ ret = PTR_ERR(np);
++ goto out_kfree_of_ops;
+ }
+
+ trips = thermal_of_trips_init(np, &ntrips);
+ if (IS_ERR(trips)) {
+ pr_err("Failed to find trip points for %pOFn id=%d\n", sensor, id);
+- return ERR_CAST(trips);
++ ret = PTR_ERR(trips);
++ goto out_kfree_of_ops;
+ }
+
+ ret = thermal_of_monitor_init(np, &delay, &pdelay);
+@@ -659,6 +661,8 @@ out_kfree_tzp:
+ kfree(tzp);
+ out_kfree_trips:
+ kfree(trips);
++out_kfree_of_ops:
++ kfree(of_ops);
+
+ return ERR_PTR(ret);
+ }
+diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
+index fa8ccf204d860..89bfcefbea848 100644
+--- a/drivers/tty/serial/8250/8250_bcm7271.c
++++ b/drivers/tty/serial/8250/8250_bcm7271.c
+@@ -1212,9 +1212,17 @@ static struct platform_driver brcmuart_platform_driver = {
+
+ static int __init brcmuart_init(void)
+ {
++ int ret;
++
+ brcmuart_debugfs_root = debugfs_create_dir(
+ brcmuart_platform_driver.driver.name, NULL);
+- return platform_driver_register(&brcmuart_platform_driver);
++ ret = platform_driver_register(&brcmuart_platform_driver);
++ if (ret) {
++ debugfs_remove_recursive(brcmuart_debugfs_root);
++ return ret;
++ }
++
++ return 0;
+ }
+ module_init(brcmuart_init);
+
+diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
+index 82f2790de28d1..1203d1e08cd6c 100644
+--- a/drivers/tty/serial/altera_uart.c
++++ b/drivers/tty/serial/altera_uart.c
+@@ -278,16 +278,17 @@ static irqreturn_t altera_uart_interrupt(int irq, void *data)
+ {
+ struct uart_port *port = data;
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
++ unsigned long flags;
+ unsigned int isr;
+
+ isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr;
+
+- spin_lock(&port->lock);
++ spin_lock_irqsave(&port->lock, flags);
+ if (isr & ALTERA_UART_STATUS_RRDY_MSK)
+ altera_uart_rx_chars(port);
+ if (isr & ALTERA_UART_STATUS_TRDY_MSK)
+ altera_uart_tx_chars(port);
+- spin_unlock(&port->lock);
++ spin_unlock_irqrestore(&port->lock, flags);
+
+ return IRQ_RETVAL(isr);
+ }
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 5cdced39eafdb..aa0bbb7abeacf 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1045,6 +1045,9 @@ static void pl011_dma_rx_callback(void *data)
+ */
+ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+ {
++ if (!uap->using_rx_dma)
++ return;
++
+ /* FIXME. Just disable the DMA enable */
+ uap->dmacr &= ~UART011_RXDMAE;
+ pl011_write(uap->dmacr, uap, REG_DMACR);
+@@ -1828,8 +1831,17 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap)
+ static void pl011_unthrottle_rx(struct uart_port *port)
+ {
+ struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
++ unsigned long flags;
+
+- pl011_enable_interrupts(uap);
++ spin_lock_irqsave(&uap->port.lock, flags);
++
++ uap->im = UART011_RTIM;
++ if (!pl011_dma_rx_running(uap))
++ uap->im |= UART011_RXIM;
++
++ pl011_write(uap->im, uap, REG_IMSC);
++
++ spin_unlock_irqrestore(&uap->port.lock, flags);
+ }
+
+ static int pl011_startup(struct uart_port *port)
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index c59ce78865799..b17788cf309b1 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -694,6 +694,7 @@ static void pch_request_dma(struct uart_port *port)
+ if (!chan) {
+ dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Tx)\n",
+ __func__);
++ pci_dev_put(dma_dev);
+ return;
+ }
+ priv->chan_tx = chan;
+@@ -710,6 +711,7 @@ static void pch_request_dma(struct uart_port *port)
+ __func__);
+ dma_release_channel(priv->chan_tx);
+ priv->chan_tx = NULL;
++ pci_dev_put(dma_dev);
+ return;
+ }
+
+@@ -717,6 +719,8 @@ static void pch_request_dma(struct uart_port *port)
+ priv->rx_buf_virt = dma_alloc_coherent(port->dev, port->fifosize,
+ &priv->rx_buf_dma, GFP_KERNEL);
+ priv->chan_rx = chan;
++
++ pci_dev_put(dma_dev);
+ }
+
+ static void pch_dma_rx_complete(void *arg)
+diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
+index b7170cb9a544f..cda9cd4fa92c8 100644
+--- a/drivers/tty/serial/serial-tegra.c
++++ b/drivers/tty/serial/serial-tegra.c
+@@ -619,8 +619,9 @@ static void tegra_uart_stop_tx(struct uart_port *u)
+ if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
+ return;
+
+- dmaengine_terminate_all(tup->tx_dma_chan);
++ dmaengine_pause(tup->tx_dma_chan);
+ dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
++ dmaengine_terminate_all(tup->tx_dma_chan);
+ count = tup->tx_bytes_requested - state.residue;
+ async_tx_ack(tup->tx_dma_desc);
+ uart_xmit_advance(&tup->uport, count);
+@@ -763,8 +764,9 @@ static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
+ return;
+ }
+
+- dmaengine_terminate_all(tup->rx_dma_chan);
++ dmaengine_pause(tup->rx_dma_chan);
+ dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
++ dmaengine_terminate_all(tup->rx_dma_chan);
+
+ tegra_uart_rx_buffer_push(tup, state.residue);
+ tup->rx_dma_active = false;
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index dfdbcf092facc..b8aed28b8f17b 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -1681,22 +1681,10 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
+ if (!stm32port->info)
+ return -EINVAL;
+
+- ret = stm32_usart_init_port(stm32port, pdev);
+- if (ret)
+- return ret;
+-
+- if (stm32port->wakeup_src) {
+- device_set_wakeup_capable(&pdev->dev, true);
+- ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
+- if (ret)
+- goto err_deinit_port;
+- }
+-
+ stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
+- if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) {
+- ret = -EPROBE_DEFER;
+- goto err_wakeirq;
+- }
++ if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER)
++ return -EPROBE_DEFER;
++
+ /* Fall back in interrupt mode for any non-deferral error */
+ if (IS_ERR(stm32port->rx_ch))
+ stm32port->rx_ch = NULL;
+@@ -1710,6 +1698,17 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
+ if (IS_ERR(stm32port->tx_ch))
+ stm32port->tx_ch = NULL;
+
++ ret = stm32_usart_init_port(stm32port, pdev);
++ if (ret)
++ goto err_dma_tx;
++
++ if (stm32port->wakeup_src) {
++ device_set_wakeup_capable(&pdev->dev, true);
++ ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
++ if (ret)
++ goto err_deinit_port;
++ }
++
+ if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
+ /* Fall back in interrupt mode */
+ dma_release_channel(stm32port->rx_ch);
+@@ -1746,19 +1745,11 @@ err_port:
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+- if (stm32port->tx_ch) {
++ if (stm32port->tx_ch)
+ stm32_usart_of_dma_tx_remove(stm32port, pdev);
+- dma_release_channel(stm32port->tx_ch);
+- }
+-
+ if (stm32port->rx_ch)
+ stm32_usart_of_dma_rx_remove(stm32port, pdev);
+
+-err_dma_rx:
+- if (stm32port->rx_ch)
+- dma_release_channel(stm32port->rx_ch);
+-
+-err_wakeirq:
+ if (stm32port->wakeup_src)
+ dev_pm_clear_wake_irq(&pdev->dev);
+
+@@ -1768,6 +1759,14 @@ err_deinit_port:
+
+ stm32_usart_deinit_port(stm32port);
+
++err_dma_tx:
++ if (stm32port->tx_ch)
++ dma_release_channel(stm32port->tx_ch);
++
++err_dma_rx:
++ if (stm32port->rx_ch)
++ dma_release_channel(stm32port->rx_ch);
++
+ return ret;
+ }
+
+diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
+index 99608b2a2b74f..7ace3aa498402 100644
+--- a/drivers/tty/serial/sunsab.c
++++ b/drivers/tty/serial/sunsab.c
+@@ -1133,7 +1133,13 @@ static int __init sunsab_init(void)
+ }
+ }
+
+- return platform_driver_register(&sab_driver);
++ err = platform_driver_register(&sab_driver);
++ if (err) {
++ kfree(sunsab_ports);
++ sunsab_ports = NULL;
++ }
++
++ return err;
+ }
+
+ static void __exit sunsab_exit(void)
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index b1f59a5fe6327..d1db6be801560 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -5382,6 +5382,26 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ }
+ }
+
++/* Any value that is not an existing queue number is fine for this constant. */
++enum {
++ UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
++};
++
++static void ufshcd_clear_polled(struct ufs_hba *hba,
++ unsigned long *completed_reqs)
++{
++ int tag;
++
++ for_each_set_bit(tag, completed_reqs, hba->nutrs) {
++ struct scsi_cmnd *cmd = hba->lrb[tag].cmd;
++
++ if (!cmd)
++ continue;
++ if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
++ __clear_bit(tag, completed_reqs);
++ }
++}
++
+ /*
+ * Returns > 0 if one or more commands have been completed or 0 if no
+ * requests have been completed.
+@@ -5398,13 +5418,17 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
+ WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
+ "completed: %#lx; outstanding: %#lx\n", completed_reqs,
+ hba->outstanding_reqs);
++ if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
++ /* Do not complete polled requests from interrupt context. */
++ ufshcd_clear_polled(hba, &completed_reqs);
++ }
+ hba->outstanding_reqs &= ~completed_reqs;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+ if (completed_reqs)
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+
+- return completed_reqs;
++ return completed_reqs != 0;
+ }
+
+ /**
+@@ -5435,7 +5459,7 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
+ * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
+ * do not want polling to trigger spurious interrupt complaints.
+ */
+- ufshcd_poll(hba->host, 0);
++ ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
+
+ return IRQ_HANDLED;
+ }
+@@ -8747,8 +8771,6 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
+ struct scsi_device *sdp;
+ unsigned long flags;
+ int ret, retries;
+- unsigned long deadline;
+- int32_t remaining;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ sdp = hba->ufs_device_wlun;
+@@ -8781,14 +8803,9 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
+ * callbacks hence set the RQF_PM flag so that it doesn't resume the
+ * already suspended childs.
+ */
+- deadline = jiffies + 10 * HZ;
+ for (retries = 3; retries > 0; --retries) {
+- ret = -ETIMEDOUT;
+- remaining = deadline - jiffies;
+- if (remaining <= 0)
+- break;
+ ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+- remaining / HZ, 0, 0, RQF_PM, NULL);
++ HZ, 0, 0, RQF_PM, NULL);
+ if (!scsi_status_is_check_condition(ret) ||
+ !scsi_sense_valid(&sshdr) ||
+ sshdr.sense_key != UNIT_ATTENTION)
+diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
+index 1106f33764047..792c3e9c9ce53 100644
+--- a/drivers/uio/uio_dmem_genirq.c
++++ b/drivers/uio/uio_dmem_genirq.c
+@@ -110,8 +110,10 @@ static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info)
+ * remember the state so we can allow user space to enable it later.
+ */
+
++ spin_lock(&priv->lock);
+ if (!test_and_set_bit(0, &priv->flags))
+ disable_irq_nosync(irq);
++ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+ }
+@@ -125,20 +127,19 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
+ * in the interrupt controller, but keep track of the
+ * state to prevent per-irq depth damage.
+ *
+- * Serialize this operation to support multiple tasks.
++ * Serialize this operation to support multiple tasks and concurrency
++ * with irq handler on SMP systems.
+ */
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (irq_on) {
+ if (test_and_clear_bit(0, &priv->flags))
+ enable_irq(dev_info->irq);
+- spin_unlock_irqrestore(&priv->lock, flags);
+ } else {
+- if (!test_and_set_bit(0, &priv->flags)) {
+- spin_unlock_irqrestore(&priv->lock, flags);
+- disable_irq(dev_info->irq);
+- }
++ if (!test_and_set_bit(0, &priv->flags))
++ disable_irq_nosync(dev_info->irq);
+ }
++ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+ }
+diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
+index 2f29431f612e0..b23e543b3a3d5 100644
+--- a/drivers/usb/cdns3/cdnsp-ring.c
++++ b/drivers/usb/cdns3/cdnsp-ring.c
+@@ -2006,10 +2006,11 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+
+ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+ {
+- u32 field, length_field, remainder;
++ u32 field, length_field, zlp = 0;
+ struct cdnsp_ep *pep = preq->pep;
+ struct cdnsp_ring *ep_ring;
+ int num_trbs;
++ u32 maxp;
+ int ret;
+
+ ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
+@@ -2019,26 +2020,33 @@ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+ /* 1 TRB for data, 1 for status */
+ num_trbs = (pdev->three_stage_setup) ? 2 : 1;
+
++ maxp = usb_endpoint_maxp(pep->endpoint.desc);
++
++ if (preq->request.zero && preq->request.length &&
++ (preq->request.length % maxp == 0)) {
++ num_trbs++;
++ zlp = 1;
++ }
++
+ ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
+ if (ret)
+ return ret;
+
+ /* If there's data, queue data TRBs */
+- if (pdev->ep0_expect_in)
+- field = TRB_TYPE(TRB_DATA) | TRB_IOC;
+- else
+- field = TRB_ISP | TRB_TYPE(TRB_DATA) | TRB_IOC;
+-
+ if (preq->request.length > 0) {
+- remainder = cdnsp_td_remainder(pdev, 0, preq->request.length,
+- preq->request.length, preq, 1, 0);
++ field = TRB_TYPE(TRB_DATA);
+
+- length_field = TRB_LEN(preq->request.length) |
+- TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0);
++ if (zlp)
++ field |= TRB_CHAIN;
++ else
++ field |= TRB_IOC | (pdev->ep0_expect_in ? 0 : TRB_ISP);
+
+ if (pdev->ep0_expect_in)
+ field |= TRB_DIR_IN;
+
++ length_field = TRB_LEN(preq->request.length) |
++ TRB_TD_SIZE(zlp) | TRB_INTR_TARGET(0);
++
+ cdnsp_queue_trb(pdev, ep_ring, true,
+ lower_32_bits(preq->request.dma),
+ upper_32_bits(preq->request.dma), length_field,
+@@ -2046,6 +2054,20 @@ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+ TRB_SETUPID(pdev->setup_id) |
+ pdev->setup_speed);
+
++ if (zlp) {
++ field = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
++
++ if (!pdev->ep0_expect_in)
++ field = TRB_ISP;
++
++ cdnsp_queue_trb(pdev, ep_ring, true,
++ lower_32_bits(preq->request.dma),
++ upper_32_bits(preq->request.dma), 0,
++ field | ep_ring->cycle_state |
++ TRB_SETUPID(pdev->setup_id) |
++ pdev->setup_speed);
++ }
++
+ pdev->ep0_stage = CDNSP_DATA_STAGE;
+ }
+
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index faeaace0d197d..8300baedafd20 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -3133,8 +3133,12 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
+ GFP_KERNEL,
+ DMA_ATTR_WRITE_COMBINE);
+
+- if (IS_ERR(local_mem))
++ if (IS_ERR_OR_NULL(local_mem)) {
++ if (!local_mem)
++ return -ENOMEM;
++
+ return PTR_ERR(local_mem);
++ }
+
+ /*
+ * Here we pass a dma_addr_t but the arg type is a phys_addr_t.
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 1f348bc867c22..476b636185116 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -122,21 +122,25 @@ static void __dwc3_set_mode(struct work_struct *work)
+ unsigned long flags;
+ int ret;
+ u32 reg;
++ u32 desired_dr_role;
+
+ mutex_lock(&dwc->mutex);
++ spin_lock_irqsave(&dwc->lock, flags);
++ desired_dr_role = dwc->desired_dr_role;
++ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_get_sync(dwc->dev);
+
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
+ dwc3_otg_update(dwc, 0);
+
+- if (!dwc->desired_dr_role)
++ if (!desired_dr_role)
+ goto out;
+
+- if (dwc->desired_dr_role == dwc->current_dr_role)
++ if (desired_dr_role == dwc->current_dr_role)
+ goto out;
+
+- if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
++ if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
+ goto out;
+
+ switch (dwc->current_dr_role) {
+@@ -164,7 +168,7 @@ static void __dwc3_set_mode(struct work_struct *work)
+ */
+ if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
+ DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
+- dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
++ desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg |= DWC3_GCTL_CORESOFTRESET;
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+@@ -184,11 +188,11 @@ static void __dwc3_set_mode(struct work_struct *work)
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+- dwc3_set_prtcap(dwc, dwc->desired_dr_role);
++ dwc3_set_prtcap(dwc, desired_dr_role);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+- switch (dwc->desired_dr_role) {
++ switch (desired_dr_role) {
+ case DWC3_GCTL_PRTCAP_HOST:
+ ret = dwc3_host_init(dwc);
+ if (ret) {
+@@ -1096,8 +1100,13 @@ static int dwc3_core_init(struct dwc3 *dwc)
+
+ if (!dwc->ulpi_ready) {
+ ret = dwc3_core_ulpi_init(dwc);
+- if (ret)
++ if (ret) {
++ if (ret == -ETIMEDOUT) {
++ dwc3_core_soft_reset(dwc);
++ ret = -EPROBE_DEFER;
++ }
+ goto err0;
++ }
+ dwc->ulpi_ready = true;
+ }
+
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index fb14511b1e10f..89c9ab2b19f85 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -45,7 +45,7 @@
+ #define PCI_DEVICE_ID_INTEL_ADLN 0x465e
+ #define PCI_DEVICE_ID_INTEL_ADLN_PCH 0x54ee
+ #define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
+-#define PCI_DEVICE_ID_INTEL_RPL 0x460e
++#define PCI_DEVICE_ID_INTEL_RPL 0xa70e
+ #define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
+ #define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
+ #define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index 7c40f3ffc0544..b0a0351d2d8b5 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -261,7 +261,8 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
+ if (IS_ERR(qcom->icc_path_apps)) {
+ dev_err(dev, "failed to get apps-usb path: %ld\n",
+ PTR_ERR(qcom->icc_path_apps));
+- return PTR_ERR(qcom->icc_path_apps);
++ ret = PTR_ERR(qcom->icc_path_apps);
++ goto put_path_ddr;
+ }
+
+ max_speed = usb_get_maximum_speed(&qcom->dwc3->dev);
+@@ -274,16 +275,22 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
+ }
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth for usb-ddr path: %d\n", ret);
+- return ret;
++ goto put_path_apps;
+ }
+
+ ret = icc_set_bw(qcom->icc_path_apps, APPS_USB_AVG_BW, APPS_USB_PEAK_BW);
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth for apps-usb path: %d\n", ret);
+- return ret;
++ goto put_path_apps;
+ }
+
+ return 0;
++
++put_path_apps:
++ icc_put(qcom->icc_path_apps);
++put_path_ddr:
++ icc_put(qcom->icc_path_ddr);
++ return ret;
+ }
+
+ /**
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index ca0a7d9eaa34e..6be6009f911e1 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -71,7 +71,7 @@ struct f_hidg {
+ wait_queue_head_t write_queue;
+ struct usb_request *req;
+
+- int minor;
++ struct device dev;
+ struct cdev cdev;
+ struct usb_function func;
+
+@@ -84,6 +84,14 @@ static inline struct f_hidg *func_to_hidg(struct usb_function *f)
+ return container_of(f, struct f_hidg, func);
+ }
+
++static void hidg_release(struct device *dev)
++{
++ struct f_hidg *hidg = container_of(dev, struct f_hidg, dev);
++
++ kfree(hidg->set_report_buf);
++ kfree(hidg);
++}
++
+ /*-------------------------------------------------------------------------*/
+ /* Static descriptors */
+
+@@ -904,9 +912,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+ struct usb_ep *ep;
+ struct f_hidg *hidg = func_to_hidg(f);
+ struct usb_string *us;
+- struct device *device;
+ int status;
+- dev_t dev;
+
+ /* maybe allocate device-global string IDs, and patch descriptors */
+ us = usb_gstrings_attach(c->cdev, ct_func_strings,
+@@ -999,21 +1005,11 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+
+ /* create char device */
+ cdev_init(&hidg->cdev, &f_hidg_fops);
+- dev = MKDEV(major, hidg->minor);
+- status = cdev_add(&hidg->cdev, dev, 1);
++ status = cdev_device_add(&hidg->cdev, &hidg->dev);
+ if (status)
+ goto fail_free_descs;
+
+- device = device_create(hidg_class, NULL, dev, NULL,
+- "%s%d", "hidg", hidg->minor);
+- if (IS_ERR(device)) {
+- status = PTR_ERR(device);
+- goto del;
+- }
+-
+ return 0;
+-del:
+- cdev_del(&hidg->cdev);
+ fail_free_descs:
+ usb_free_all_descriptors(f);
+ fail:
+@@ -1244,9 +1240,7 @@ static void hidg_free(struct usb_function *f)
+
+ hidg = func_to_hidg(f);
+ opts = container_of(f->fi, struct f_hid_opts, func_inst);
+- kfree(hidg->report_desc);
+- kfree(hidg->set_report_buf);
+- kfree(hidg);
++ put_device(&hidg->dev);
+ mutex_lock(&opts->lock);
+ --opts->refcnt;
+ mutex_unlock(&opts->lock);
+@@ -1256,8 +1250,7 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
+ {
+ struct f_hidg *hidg = func_to_hidg(f);
+
+- device_destroy(hidg_class, MKDEV(major, hidg->minor));
+- cdev_del(&hidg->cdev);
++ cdev_device_del(&hidg->cdev, &hidg->dev);
+
+ usb_free_all_descriptors(f);
+ }
+@@ -1266,6 +1259,7 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
+ {
+ struct f_hidg *hidg;
+ struct f_hid_opts *opts;
++ int ret;
+
+ /* allocate and initialize one new instance */
+ hidg = kzalloc(sizeof(*hidg), GFP_KERNEL);
+@@ -1277,17 +1271,28 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
+ mutex_lock(&opts->lock);
+ ++opts->refcnt;
+
+- hidg->minor = opts->minor;
++ device_initialize(&hidg->dev);
++ hidg->dev.release = hidg_release;
++ hidg->dev.class = hidg_class;
++ hidg->dev.devt = MKDEV(major, opts->minor);
++ ret = dev_set_name(&hidg->dev, "hidg%d", opts->minor);
++ if (ret) {
++ --opts->refcnt;
++ mutex_unlock(&opts->lock);
++ return ERR_PTR(ret);
++ }
++
+ hidg->bInterfaceSubClass = opts->subclass;
+ hidg->bInterfaceProtocol = opts->protocol;
+ hidg->report_length = opts->report_length;
+ hidg->report_desc_length = opts->report_desc_length;
+ if (opts->report_desc) {
+- hidg->report_desc = kmemdup(opts->report_desc,
+- opts->report_desc_length,
+- GFP_KERNEL);
++ hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc,
++ opts->report_desc_length,
++ GFP_KERNEL);
+ if (!hidg->report_desc) {
+- kfree(hidg);
++ put_device(&hidg->dev);
++ --opts->refcnt;
+ mutex_unlock(&opts->lock);
+ return ERR_PTR(-ENOMEM);
+ }
+diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
+index 6e196e06181ec..4419b7972e78f 100644
+--- a/drivers/usb/gadget/function/f_uvc.c
++++ b/drivers/usb/gadget/function/f_uvc.c
+@@ -216,8 +216,9 @@ uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req)
+
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_DATA;
+- uvc_event->data.length = req->actual;
+- memcpy(&uvc_event->data.data, req->buf, req->actual);
++ uvc_event->data.length = min_t(unsigned int, req->actual,
++ sizeof(uvc_event->data.data));
++ memcpy(&uvc_event->data.data, req->buf, uvc_event->data.length);
+ v4l2_event_queue(&uvc->vdev, &v4l2_event);
+ }
+ }
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index c63c0c2cf649d..bf9878e1a72a8 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -734,13 +734,13 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
+ }
+
+ ret = gadget->ops->pullup(gadget, 0);
+- if (!ret) {
++ if (!ret)
+ gadget->connected = 0;
+- mutex_lock(&udc_lock);
+- if (gadget->udc->driver)
+- gadget->udc->driver->disconnect(gadget);
+- mutex_unlock(&udc_lock);
+- }
++
++ mutex_lock(&udc_lock);
++ if (gadget->udc->driver)
++ gadget->udc->driver->disconnect(gadget);
++ mutex_unlock(&udc_lock);
+
+ out:
+ trace_usb_gadget_disconnect(gadget, ret);
+diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
+index fdca28e72a3b4..d0e051beb3af9 100644
+--- a/drivers/usb/gadget/udc/fotg210-udc.c
++++ b/drivers/usb/gadget/udc/fotg210-udc.c
+@@ -629,10 +629,10 @@ static void fotg210_request_error(struct fotg210_udc *fotg210)
+ static void fotg210_set_address(struct fotg210_udc *fotg210,
+ struct usb_ctrlrequest *ctrl)
+ {
+- if (ctrl->wValue >= 0x0100) {
++ if (le16_to_cpu(ctrl->wValue) >= 0x0100) {
+ fotg210_request_error(fotg210);
+ } else {
+- fotg210_set_dev_addr(fotg210, ctrl->wValue);
++ fotg210_set_dev_addr(fotg210, le16_to_cpu(ctrl->wValue));
+ fotg210_set_cxdone(fotg210);
+ }
+ }
+@@ -713,17 +713,17 @@ static void fotg210_get_status(struct fotg210_udc *fotg210,
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+- fotg210->ep0_data = 1 << USB_DEVICE_SELF_POWERED;
++ fotg210->ep0_data = cpu_to_le16(1 << USB_DEVICE_SELF_POWERED);
+ break;
+ case USB_RECIP_INTERFACE:
+- fotg210->ep0_data = 0;
++ fotg210->ep0_data = cpu_to_le16(0);
+ break;
+ case USB_RECIP_ENDPOINT:
+ epnum = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if (epnum)
+ fotg210->ep0_data =
+- fotg210_is_epnstall(fotg210->ep[epnum])
+- << USB_ENDPOINT_HALT;
++ cpu_to_le16(fotg210_is_epnstall(fotg210->ep[epnum])
++ << USB_ENDPOINT_HALT);
+ else
+ fotg210_request_error(fotg210);
+ break;
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
+index 01705e559c422..c61fc19ef1154 100644
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -639,7 +639,6 @@ static int xhci_mtk_probe(struct platform_device *pdev)
+
+ dealloc_usb3_hcd:
+ usb_remove_hcd(xhci->shared_hcd);
+- xhci->shared_hcd = NULL;
+
+ dealloc_usb2_hcd:
+ usb_remove_hcd(hcd);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 7bccbe50bab15..f98cf30a3c1a5 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -59,6 +59,7 @@
+ #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13
+ #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
++#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed
+
+ #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
+@@ -246,7 +247,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_MISSING_CAS;
+
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+- pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI)
++ (pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI))
+ xhci->quirks |= XHCI_RESET_TO_DEFAULT;
+
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index ad81e9a508b14..343709af4c16f 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2458,7 +2458,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+- ep_ring->err_count = 0;
++ ep->err_count = 0;
+ /* handle success with untransferred data as short packet */
+ if (ep_trb != td->last_trb || remaining) {
+ xhci_warn(xhci, "WARN Successful completion on short TX\n");
+@@ -2484,7 +2484,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ break;
+ case COMP_USB_TRANSACTION_ERROR:
+ if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
+- (ep_ring->err_count++ > MAX_SOFT_RETRY) ||
++ (ep->err_count++ > MAX_SOFT_RETRY) ||
+ le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
+ break;
+
+@@ -2565,8 +2565,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ case COMP_USB_TRANSACTION_ERROR:
+ case COMP_INVALID_STREAM_TYPE_ERROR:
+ case COMP_INVALID_STREAM_ID_ERROR:
+- xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
+- EP_SOFT_RESET);
++ xhci_dbg(xhci, "Stream transaction error ep %u no id\n",
++ ep_index);
++ if (ep->err_count++ > MAX_SOFT_RETRY)
++ xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
++ EP_HARD_RESET);
++ else
++ xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
++ EP_SOFT_RESET);
+ goto cleanup;
+ case COMP_RING_UNDERRUN:
+ case COMP_RING_OVERRUN:
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index cc084d9505cdf..c9f06c5e4e9d2 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -933,6 +933,7 @@ struct xhci_virt_ep {
+ * have to restore the device state to the previous state
+ */
+ struct xhci_ring *new_ring;
++ unsigned int err_count;
+ unsigned int ep_state;
+ #define SET_DEQ_PENDING (1 << 0)
+ #define EP_HALTED (1 << 1) /* For stall handling */
+@@ -1627,7 +1628,6 @@ struct xhci_ring {
+ * if we own the TRB (if we are the consumer). See section 4.9.1.
+ */
+ u32 cycle_state;
+- unsigned int err_count;
+ unsigned int stream_id;
+ unsigned int num_segs;
+ unsigned int num_trbs_free;
+diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
+index 6704a62a16659..ba20272d22215 100644
+--- a/drivers/usb/musb/musb_gadget.c
++++ b/drivers/usb/musb/musb_gadget.c
+@@ -1628,8 +1628,6 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+ {
+ struct musb *musb = gadget_to_musb(gadget);
+
+- if (!musb->xceiv->set_power)
+- return -EOPNOTSUPP;
+ return usb_phy_set_power(musb->xceiv, mA);
+ }
+
+diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
+index f571a65ae6ee2..476f55d1fec30 100644
+--- a/drivers/usb/musb/omap2430.c
++++ b/drivers/usb/musb/omap2430.c
+@@ -15,6 +15,7 @@
+ #include <linux/list.h>
+ #include <linux/io.h>
+ #include <linux/of.h>
++#include <linux/of_irq.h>
+ #include <linux/platform_device.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/pm_runtime.h>
+@@ -310,6 +311,7 @@ static int omap2430_probe(struct platform_device *pdev)
+ struct device_node *control_node;
+ struct platform_device *control_pdev;
+ int ret = -ENOMEM, val;
++ bool populate_irqs = false;
+
+ if (!np)
+ return -ENODEV;
+@@ -328,6 +330,18 @@ static int omap2430_probe(struct platform_device *pdev)
+ musb->dev.dma_mask = &omap2430_dmamask;
+ musb->dev.coherent_dma_mask = omap2430_dmamask;
+
++ /*
++ * Legacy SoCs using omap_device get confused if node is moved
++ * because of interconnect properties mixed into the node.
++ */
++ if (of_get_property(np, "ti,hwmods", NULL)) {
++ dev_warn(&pdev->dev, "please update to probe with ti-sysc\n");
++ populate_irqs = true;
++ } else {
++ device_set_of_node_from_dev(&musb->dev, &pdev->dev);
++ }
++ of_node_put(np);
++
+ glue->dev = &pdev->dev;
+ glue->musb = musb;
+ glue->status = MUSB_UNKNOWN;
+@@ -389,6 +403,46 @@ static int omap2430_probe(struct platform_device *pdev)
+ goto err2;
+ }
+
++ if (populate_irqs) {
++ struct resource musb_res[3];
++ struct resource *res;
++ int i = 0;
++
++ memset(musb_res, 0, sizeof(*musb_res) * ARRAY_SIZE(musb_res));
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res)
++ goto err2;
++
++ musb_res[i].start = res->start;
++ musb_res[i].end = res->end;
++ musb_res[i].flags = res->flags;
++ musb_res[i].name = res->name;
++ i++;
++
++ ret = of_irq_get_byname(np, "mc");
++ if (ret > 0) {
++ musb_res[i].start = ret;
++ musb_res[i].flags = IORESOURCE_IRQ;
++ musb_res[i].name = "mc";
++ i++;
++ }
++
++ ret = of_irq_get_byname(np, "dma");
++ if (ret > 0) {
++ musb_res[i].start = ret;
++ musb_res[i].flags = IORESOURCE_IRQ;
++ musb_res[i].name = "dma";
++ i++;
++ }
++
++ ret = platform_device_add_resources(musb, musb_res, i);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to add IRQ resources\n");
++ goto err2;
++ }
++ }
++
+ ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add platform_data\n");
+diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
+index dfaed7eee94fc..32e6d19f7011a 100644
+--- a/drivers/usb/roles/class.c
++++ b/drivers/usb/roles/class.c
+@@ -106,10 +106,13 @@ usb_role_switch_is_parent(struct fwnode_handle *fwnode)
+ struct fwnode_handle *parent = fwnode_get_parent(fwnode);
+ struct device *dev;
+
+- if (!parent || !fwnode_property_present(parent, "usb-role-switch"))
++ if (!fwnode_property_present(parent, "usb-role-switch")) {
++ fwnode_handle_put(parent);
+ return NULL;
++ }
+
+ dev = class_find_device_by_fwnode(role_class, parent);
++ fwnode_handle_put(parent);
+ return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+ }
+
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 3bcec419f4632..f6fb23620e87a 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -195,6 +195,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
+ { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */
+ { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */
++ { USB_DEVICE(0x17A8, 0x0011) }, /* Kamstrup 444 MHz RF sniffer */
++ { USB_DEVICE(0x17A8, 0x0013) }, /* Kamstrup 870 MHz RF sniffer */
+ { USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */
+ { USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
+index 2dd58cd9f0ccb..891fb1fe69df7 100644
+--- a/drivers/usb/serial/f81232.c
++++ b/drivers/usb/serial/f81232.c
+@@ -130,9 +130,6 @@ static u8 const clock_table[] = { F81232_CLK_1_846_MHZ, F81232_CLK_14_77_MHZ,
+
+ static int calc_baud_divisor(speed_t baudrate, speed_t clockrate)
+ {
+- if (!baudrate)
+- return 0;
+-
+ return DIV_ROUND_CLOSEST(clockrate, baudrate);
+ }
+
+@@ -498,9 +495,14 @@ static void f81232_set_baudrate(struct tty_struct *tty,
+ speed_t baud_list[] = { baudrate, old_baudrate, F81232_DEF_BAUDRATE };
+
+ for (i = 0; i < ARRAY_SIZE(baud_list); ++i) {
+- idx = f81232_find_clk(baud_list[i]);
++ baudrate = baud_list[i];
++ if (baudrate == 0) {
++ tty_encode_baud_rate(tty, 0, 0);
++ return;
++ }
++
++ idx = f81232_find_clk(baudrate);
+ if (idx >= 0) {
+- baudrate = baud_list[i];
+ tty_encode_baud_rate(tty, baudrate, baudrate);
+ break;
+ }
+diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
+index ddfcd72eb0ae7..4083ae961be43 100644
+--- a/drivers/usb/serial/f81534.c
++++ b/drivers/usb/serial/f81534.c
+@@ -536,9 +536,6 @@ static int f81534_submit_writer(struct usb_serial_port *port, gfp_t mem_flags)
+
+ static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate)
+ {
+- if (!baudrate)
+- return 0;
+-
+ /* Round to nearest divisor */
+ return DIV_ROUND_CLOSEST(clockrate, baudrate);
+ }
+@@ -568,9 +565,14 @@ static int f81534_set_port_config(struct usb_serial_port *port,
+ u32 baud_list[] = {baudrate, old_baudrate, F81534_DEFAULT_BAUD_RATE};
+
+ for (i = 0; i < ARRAY_SIZE(baud_list); ++i) {
+- idx = f81534_find_clk(baud_list[i]);
++ baudrate = baud_list[i];
++ if (baudrate == 0) {
++ tty_encode_baud_rate(tty, 0, 0);
++ return 0;
++ }
++
++ idx = f81534_find_clk(baudrate);
+ if (idx >= 0) {
+- baudrate = baud_list[i];
+ tty_encode_baud_rate(tty, baudrate, baudrate);
+ break;
+ }
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c3b7f1d98e781..dee79c7d82d5c 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -255,6 +255,7 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EP06 0x0306
+ #define QUECTEL_PRODUCT_EM05G 0x030a
+ #define QUECTEL_PRODUCT_EM060K 0x030b
++#define QUECTEL_PRODUCT_EM05G_SG 0x0311
+ #define QUECTEL_PRODUCT_EM12 0x0512
+ #define QUECTEL_PRODUCT_RM500Q 0x0800
+ #define QUECTEL_PRODUCT_RM520N 0x0801
+@@ -1160,6 +1161,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
+ .driver_info = RSVD(6) | ZLP },
++ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff),
++ .driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
+diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
+index 747be69e5e699..5e912dd29b4c9 100644
+--- a/drivers/usb/storage/alauda.c
++++ b/drivers/usb/storage/alauda.c
+@@ -438,6 +438,8 @@ static int alauda_init_media(struct us_data *us)
+ + MEDIA_INFO(us).blockshift + MEDIA_INFO(us).pageshift);
+ MEDIA_INFO(us).pba_to_lba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO);
+ MEDIA_INFO(us).lba_to_pba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO);
++ if (MEDIA_INFO(us).pba_to_lba == NULL || MEDIA_INFO(us).lba_to_pba == NULL)
++ return USB_STOR_TRANSPORT_ERROR;
+
+ if (alauda_reset_media(us) != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
+index 26ea2fdec17dc..31c2a3130cadb 100644
+--- a/drivers/usb/typec/bus.c
++++ b/drivers/usb/typec/bus.c
+@@ -134,7 +134,7 @@ int typec_altmode_exit(struct typec_altmode *adev)
+ if (!adev || !adev->active)
+ return 0;
+
+- if (!pdev->ops || !pdev->ops->enter)
++ if (!pdev->ops || !pdev->ops->exit)
+ return -EOPNOTSUPP;
+
+ /* Moving to USB Safe State */
+diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
+index b2bfcebe218f0..72f8d1e876004 100644
+--- a/drivers/usb/typec/tcpm/tcpci.c
++++ b/drivers/usb/typec/tcpm/tcpci.c
+@@ -794,8 +794,10 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
+ return ERR_PTR(err);
+
+ tcpci->port = tcpm_register_port(tcpci->dev, &tcpci->tcpc);
+- if (IS_ERR(tcpci->port))
++ if (IS_ERR(tcpci->port)) {
++ fwnode_handle_put(tcpci->tcpc.fwnode);
+ return ERR_CAST(tcpci->port);
++ }
+
+ return tcpci;
+ }
+@@ -804,6 +806,7 @@ EXPORT_SYMBOL_GPL(tcpci_register_port);
+ void tcpci_unregister_port(struct tcpci *tcpci)
+ {
+ tcpm_unregister_port(tcpci->port);
++ fwnode_handle_put(tcpci->tcpc.fwnode);
+ }
+ EXPORT_SYMBOL_GPL(tcpci_unregister_port);
+
+diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
+index 2a77bab948f54..195c9c16f817f 100644
+--- a/drivers/usb/typec/tipd/core.c
++++ b/drivers/usb/typec/tipd/core.c
+@@ -814,20 +814,19 @@ static int tps6598x_probe(struct i2c_client *client)
+
+ ret = devm_tps6598_psy_register(tps);
+ if (ret)
+- return ret;
++ goto err_role_put;
+
+ tps->port = typec_register_port(&client->dev, &typec_cap);
+ if (IS_ERR(tps->port)) {
+ ret = PTR_ERR(tps->port);
+ goto err_role_put;
+ }
+- fwnode_handle_put(fwnode);
+
+ if (status & TPS_STATUS_PLUG_PRESENT) {
+ ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &tps->pwr_status);
+ if (ret < 0) {
+ dev_err(tps->dev, "failed to read power status: %d\n", ret);
+- goto err_role_put;
++ goto err_unregister_port;
+ }
+ ret = tps6598x_connect(tps, status);
+ if (ret)
+@@ -840,14 +839,16 @@ static int tps6598x_probe(struct i2c_client *client)
+ dev_name(&client->dev), tps);
+ if (ret) {
+ tps6598x_disconnect(tps, 0);
+- typec_unregister_port(tps->port);
+- goto err_role_put;
++ goto err_unregister_port;
+ }
+
+ i2c_set_clientdata(client, tps);
++ fwnode_handle_put(fwnode);
+
+ return 0;
+
++err_unregister_port:
++ typec_unregister_port(tps->port);
+ err_role_put:
+ usb_role_switch_put(tps->role_sw);
+ err_fwnode_put:
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index a7987fc764cc6..eabe519013e78 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -1270,8 +1270,9 @@ err:
+ return ret;
+ }
+
+-int ucsi_resume(struct ucsi *ucsi)
++static void ucsi_resume_work(struct work_struct *work)
+ {
++ struct ucsi *ucsi = container_of(work, struct ucsi, resume_work);
+ struct ucsi_connector *con;
+ u64 command;
+ int ret;
+@@ -1279,15 +1280,21 @@ int ucsi_resume(struct ucsi *ucsi)
+ /* Restore UCSI notification enable mask after system resume */
+ command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
+ ret = ucsi_send_command(ucsi, command, NULL, 0);
+- if (ret < 0)
+- return ret;
++ if (ret < 0) {
++ dev_err(ucsi->dev, "failed to re-enable notifications (%d)\n", ret);
++ return;
++ }
+
+ for (con = ucsi->connector; con->port; con++) {
+ mutex_lock(&con->lock);
+- ucsi_check_connection(con);
++ ucsi_partner_task(con, ucsi_check_connection, 1, 0);
+ mutex_unlock(&con->lock);
+ }
++}
+
++int ucsi_resume(struct ucsi *ucsi)
++{
++ queue_work(system_long_wq, &ucsi->resume_work);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(ucsi_resume);
+@@ -1347,6 +1354,7 @@ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
+ if (!ucsi)
+ return ERR_PTR(-ENOMEM);
+
++ INIT_WORK(&ucsi->resume_work, ucsi_resume_work);
+ INIT_DELAYED_WORK(&ucsi->work, ucsi_init_work);
+ mutex_init(&ucsi->ppm_lock);
+ ucsi->dev = dev;
+@@ -1401,6 +1409,7 @@ void ucsi_unregister(struct ucsi *ucsi)
+
+ /* Make sure that we are not in the middle of driver initialization */
+ cancel_delayed_work_sync(&ucsi->work);
++ cancel_work_sync(&ucsi->resume_work);
+
+ /* Disable notifications */
+ ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index 8eb391e3e592c..c968474ee5473 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -287,6 +287,7 @@ struct ucsi {
+ struct ucsi_capability cap;
+ struct ucsi_connector *connector;
+
++ struct work_struct resume_work;
+ struct delayed_work work;
+ int work_count;
+ #define UCSI_ROLE_SWITCH_RETRY_PER_HZ 10
+diff --git a/drivers/usb/typec/wusb3801.c b/drivers/usb/typec/wusb3801.c
+index 3cc7a15ecbd31..a43a18d4b02ed 100644
+--- a/drivers/usb/typec/wusb3801.c
++++ b/drivers/usb/typec/wusb3801.c
+@@ -364,7 +364,7 @@ static int wusb3801_probe(struct i2c_client *client)
+ /* Initialize the hardware with the devicetree settings. */
+ ret = wusb3801_hw_init(wusb3801);
+ if (ret)
+- return ret;
++ goto err_put_connector;
+
+ wusb3801->cap.revision = USB_TYPEC_REV_1_2;
+ wusb3801->cap.accessory[0] = TYPEC_ACCESSORY_AUDIO;
+diff --git a/drivers/vfio/iova_bitmap.c b/drivers/vfio/iova_bitmap.c
+index 6631e8befe1b2..0f19d502f351b 100644
+--- a/drivers/vfio/iova_bitmap.c
++++ b/drivers/vfio/iova_bitmap.c
+@@ -295,11 +295,13 @@ void iova_bitmap_free(struct iova_bitmap *bitmap)
+ */
+ static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
+ {
+- unsigned long remaining;
++ unsigned long remaining, bytes;
++
++ bytes = (bitmap->mapped.npages << PAGE_SHIFT) - bitmap->mapped.pgoff;
+
+ remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
+ remaining = min_t(unsigned long, remaining,
+- (bitmap->mapped.npages << PAGE_SHIFT) / sizeof(*bitmap->bitmap));
++ bytes / sizeof(*bitmap->bitmap));
+
+ return remaining;
+ }
+@@ -394,29 +396,27 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+ * Set the bits corresponding to the range [iova .. iova+length-1] in
+ * the user bitmap.
+ *
+- * Return: The number of bits set.
+ */
+ void iova_bitmap_set(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length)
+ {
+ struct iova_bitmap_map *mapped = &bitmap->mapped;
+- unsigned long offset = (iova - mapped->iova) >> mapped->pgshift;
+- unsigned long nbits = max_t(unsigned long, 1, length >> mapped->pgshift);
+- unsigned long page_idx = offset / BITS_PER_PAGE;
+- unsigned long page_offset = mapped->pgoff;
+- void *kaddr;
+-
+- offset = offset % BITS_PER_PAGE;
++ unsigned long cur_bit = ((iova - mapped->iova) >>
++ mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
++ unsigned long last_bit = (((iova + length - 1) - mapped->iova) >>
++ mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
+
+ do {
+- unsigned long size = min(BITS_PER_PAGE - offset, nbits);
++ unsigned int page_idx = cur_bit / BITS_PER_PAGE;
++ unsigned int offset = cur_bit % BITS_PER_PAGE;
++ unsigned int nbits = min(BITS_PER_PAGE - offset,
++ last_bit - cur_bit + 1);
++ void *kaddr;
+
+ kaddr = kmap_local_page(mapped->pages[page_idx]);
+- bitmap_set(kaddr + page_offset, offset, size);
++ bitmap_set(kaddr, offset, nbits);
+ kunmap_local(kaddr);
+- page_offset = offset = 0;
+- nbits -= size;
+- page_idx++;
+- } while (nbits > 0);
++ cur_bit += nbits;
++ } while (cur_bit <= last_bit);
+ }
+ EXPORT_SYMBOL_GPL(iova_bitmap_set);
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
+index 55dc4f43c31e3..1a0a238ffa354 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -72,12 +72,11 @@ static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
+ const char **extra_dbg)
+ {
+ #ifdef CONFIG_ACPI
+- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct device *dev = vdev->device;
+ acpi_handle handle = ACPI_HANDLE(dev);
+ acpi_status acpi_ret;
+
+- acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, &buffer);
++ acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, NULL);
+ if (ACPI_FAILURE(acpi_ret)) {
+ if (extra_dbg)
+ *extra_dbg = acpi_format_exception(acpi_ret);
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index cfc55273dc5d1..974e862cd20d6 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -601,6 +601,7 @@ config FB_TGA
+ config FB_UVESA
+ tristate "Userspace VESA VGA graphics support"
+ depends on FB && CONNECTOR
++ depends on !UML
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+@@ -2217,7 +2218,6 @@ config FB_SSD1307
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_DEFERRED_IO
+- select PWM
+ select FB_BACKLIGHT
+ help
+ This driver implements support for the Solomon SSD1307
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index c0143d38df83a..14a7d404062c3 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -2450,7 +2450,8 @@ err_out:
+
+ if (userfont) {
+ p->userfont = old_userfont;
+- REFCOUNT(data)--;
++ if (--REFCOUNT(data) == 0)
++ kfree(data - FONT_EXTRA_WORDS * sizeof(int));
+ }
+
+ vc->vc_font.width = old_width;
+diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
+index 2398b3d48fedf..305f1587bd898 100644
+--- a/drivers/video/fbdev/ep93xx-fb.c
++++ b/drivers/video/fbdev/ep93xx-fb.c
+@@ -552,12 +552,14 @@ static int ep93xxfb_probe(struct platform_device *pdev)
+
+ err = register_framebuffer(info);
+ if (err)
+- goto failed_check;
++ goto failed_framebuffer;
+
+ dev_info(info->dev, "registered. Mode = %dx%d-%d\n",
+ info->var.xres, info->var.yres, info->var.bits_per_pixel);
+ return 0;
+
++failed_framebuffer:
++ clk_disable_unprepare(fbi->clk);
+ failed_check:
+ if (fbi->mach_info->teardown)
+ fbi->mach_info->teardown(pdev);
+diff --git a/drivers/video/fbdev/geode/Kconfig b/drivers/video/fbdev/geode/Kconfig
+index ac9c860592aaf..85bc14b6faf64 100644
+--- a/drivers/video/fbdev/geode/Kconfig
++++ b/drivers/video/fbdev/geode/Kconfig
+@@ -5,6 +5,7 @@
+ config FB_GEODE
+ bool "AMD Geode family framebuffer support"
+ depends on FB && PCI && (X86_32 || (X86 && COMPILE_TEST))
++ depends on !UML
+ help
+ Say 'Y' here to allow you to select framebuffer drivers for
+ the AMD Geode family of processors.
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
+index 072ce07ba9e05..4ff25dfc865d9 100644
+--- a/drivers/video/fbdev/hyperv_fb.c
++++ b/drivers/video/fbdev/hyperv_fb.c
+@@ -780,12 +780,18 @@ static void hvfb_ondemand_refresh_throttle(struct hvfb_par *par,
+ static int hvfb_on_panic(struct notifier_block *nb,
+ unsigned long e, void *p)
+ {
++ struct hv_device *hdev;
+ struct hvfb_par *par;
+ struct fb_info *info;
+
+ par = container_of(nb, struct hvfb_par, hvfb_panic_nb);
+- par->synchronous_fb = true;
+ info = par->info;
++ hdev = device_to_hv_device(info->device);
++
++ if (hv_ringbuffer_spinlock_busy(hdev->channel))
++ return NOTIFY_DONE;
++
++ par->synchronous_fb = true;
+ if (par->need_docopy)
+ hvfb_docopy(par, 0, dio_fb_size);
+ synthvid_update(info, 0, 0, INT_MAX, INT_MAX);
+diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
+index 7da715d31a933..7a8609c40ae93 100644
+--- a/drivers/video/fbdev/pm2fb.c
++++ b/drivers/video/fbdev/pm2fb.c
+@@ -1533,8 +1533,10 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ }
+
+ info = framebuffer_alloc(sizeof(struct pm2fb_par), &pdev->dev);
+- if (!info)
+- return -ENOMEM;
++ if (!info) {
++ err = -ENOMEM;
++ goto err_exit_disable;
++ }
+ default_par = info->par;
+
+ switch (pdev->device) {
+@@ -1715,6 +1717,8 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ release_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len);
+ err_exit_neither:
+ framebuffer_release(info);
++ err_exit_disable:
++ pci_disable_device(pdev);
+ return retval;
+ }
+
+@@ -1739,6 +1743,7 @@ static void pm2fb_remove(struct pci_dev *pdev)
+ fb_dealloc_cmap(&info->cmap);
+ kfree(info->pixmap.addr);
+ framebuffer_release(info);
++ pci_disable_device(pdev);
+ }
+
+ static const struct pci_device_id pm2fb_id_table[] = {
+diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
+index 00d789b6c0faf..0e3cabbec4b40 100644
+--- a/drivers/video/fbdev/uvesafb.c
++++ b/drivers/video/fbdev/uvesafb.c
+@@ -1758,6 +1758,7 @@ static int uvesafb_probe(struct platform_device *dev)
+ out_unmap:
+ iounmap(info->screen_base);
+ out_mem:
++ arch_phys_wc_del(par->mtrr_handle);
+ release_mem_region(info->fix.smem_start, info->fix.smem_len);
+ out_reg:
+ release_region(0x3c0, 32);
+diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
+index 82b36dbb5b1a9..33051e3a2561e 100644
+--- a/drivers/video/fbdev/vermilion/vermilion.c
++++ b/drivers/video/fbdev/vermilion/vermilion.c
+@@ -278,8 +278,10 @@ static int vmlfb_get_gpu(struct vml_par *par)
+
+ mutex_unlock(&vml_mutex);
+
+- if (pci_enable_device(par->gpu) < 0)
++ if (pci_enable_device(par->gpu) < 0) {
++ pci_dev_put(par->gpu);
+ return -ENODEV;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c
+index 2ee8fcae08dfb..b8cd04defc5e2 100644
+--- a/drivers/video/fbdev/via/via-core.c
++++ b/drivers/video/fbdev/via/via-core.c
+@@ -730,7 +730,14 @@ static int __init via_core_init(void)
+ return ret;
+ viafb_i2c_init();
+ viafb_gpio_init();
+- return pci_register_driver(&via_driver);
++ ret = pci_register_driver(&via_driver);
++ if (ret) {
++ viafb_gpio_exit();
++ viafb_i2c_exit();
++ return ret;
++ }
++
++ return 0;
+ }
+
+ static void __exit via_core_exit(void)
+diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
+index 1ea6d2e5b2187..99d6062afe72f 100644
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -800,3 +800,4 @@ MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION("1.0.0");
+ MODULE_DESCRIPTION("AMD SEV Guest Driver");
++MODULE_ALIAS("platform:sev-guest");
+diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
+index 34693f11385f6..e937b4dd28be7 100644
+--- a/drivers/watchdog/iTCO_wdt.c
++++ b/drivers/watchdog/iTCO_wdt.c
+@@ -423,14 +423,18 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
+ return time_left;
+ }
+
+-static void iTCO_wdt_set_running(struct iTCO_wdt_private *p)
++/* Returns true if the watchdog was running */
++static bool iTCO_wdt_set_running(struct iTCO_wdt_private *p)
+ {
+ u16 val;
+
+- /* Bit 11: TCO Timer Halt -> 0 = The TCO timer is * enabled */
++ /* Bit 11: TCO Timer Halt -> 0 = The TCO timer is enabled */
+ val = inw(TCO1_CNT(p));
+- if (!(val & BIT(11)))
++ if (!(val & BIT(11))) {
+ set_bit(WDOG_HW_RUNNING, &p->wddev.status);
++ return true;
++ }
++ return false;
+ }
+
+ /*
+@@ -518,9 +522,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
+ return -ENODEV; /* Cannot reset NO_REBOOT bit */
+ }
+
+- /* Set the NO_REBOOT bit to prevent later reboots, just for sure */
+- p->update_no_reboot_bit(p->no_reboot_priv, true);
+-
+ if (turn_SMI_watchdog_clear_off >= p->iTCO_version) {
+ /*
+ * Bit 13: TCO_EN -> 0
+@@ -572,7 +573,13 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
+ watchdog_set_drvdata(&p->wddev, p);
+ platform_set_drvdata(pdev, p);
+
+- iTCO_wdt_set_running(p);
++ if (!iTCO_wdt_set_running(p)) {
++ /*
++ * If the watchdog was not running set NO_REBOOT now to
++ * prevent later reboots.
++ */
++ p->update_no_reboot_bit(p->no_reboot_priv, true);
++ }
+
+ /* Check that the heartbeat value is within it's range;
+ if not reset to the default */
+diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
+index fae50a24630bd..1edf45ee9890d 100644
+--- a/drivers/xen/privcmd.c
++++ b/drivers/xen/privcmd.c
+@@ -760,7 +760,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
+ goto out;
+ }
+
+- pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
++ pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
+ if (!pfns) {
+ rc = -ENOMEM;
+ goto out;
+diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
+index 3ac5fcf98d0d6..daaf3810cc925 100644
+--- a/fs/afs/fs_probe.c
++++ b/fs/afs/fs_probe.c
+@@ -366,12 +366,15 @@ void afs_fs_probe_dispatcher(struct work_struct *work)
+ unsigned long nowj, timer_at, poll_at;
+ bool first_pass = true, set_timer = false;
+
+- if (!net->live)
++ if (!net->live) {
++ afs_dec_servers_outstanding(net);
+ return;
++ }
+
+ _enter("");
+
+ if (list_empty(&net->fs_probe_fast) && list_empty(&net->fs_probe_slow)) {
++ afs_dec_servers_outstanding(net);
+ _leave(" [none]");
+ return;
+ }
+diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
+index 08d0c8797828c..9ce5e1f41c26f 100644
+--- a/fs/binfmt_elf_fdpic.c
++++ b/fs/binfmt_elf_fdpic.c
+@@ -434,8 +434,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
+ current->mm->start_stack = current->mm->start_brk + stack_size;
+ #endif
+
+- if (create_elf_fdpic_tables(bprm, current->mm,
+- &exec_params, &interp_params) < 0)
++ retval = create_elf_fdpic_tables(bprm, current->mm, &exec_params,
++ &interp_params);
++ if (retval < 0)
+ goto error;
+
+ kdebug("- start_code %lx", current->mm->start_code);
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index e1eae7ea823ae..bb202ad369d53 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -44,10 +44,10 @@ static LIST_HEAD(entries);
+ static int enabled = 1;
+
+ enum {Enabled, Magic};
+-#define MISC_FMT_PRESERVE_ARGV0 (1 << 31)
+-#define MISC_FMT_OPEN_BINARY (1 << 30)
+-#define MISC_FMT_CREDENTIALS (1 << 29)
+-#define MISC_FMT_OPEN_FILE (1 << 28)
++#define MISC_FMT_PRESERVE_ARGV0 (1UL << 31)
++#define MISC_FMT_OPEN_BINARY (1UL << 30)
++#define MISC_FMT_CREDENTIALS (1UL << 29)
++#define MISC_FMT_OPEN_FILE (1UL << 28)
+
+ typedef struct {
+ struct list_head list;
+diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
+index 83cb0378096f2..3676580c2d97e 100644
+--- a/fs/btrfs/extent-io-tree.c
++++ b/fs/btrfs/extent-io-tree.c
+@@ -572,7 +572,7 @@ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
+ clear = 1;
+ again:
+- if (!prealloc && gfpflags_allow_blocking(mask)) {
++ if (!prealloc) {
+ /*
+ * Don't care for allocation failure here because we might end
+ * up not needing the pre-allocated extent state at all, which
+@@ -636,7 +636,8 @@ hit_next:
+
+ if (state->start < start) {
+ prealloc = alloc_extent_state_atomic(prealloc);
+- BUG_ON(!prealloc);
++ if (!prealloc)
++ goto search_again;
+ err = split_state(tree, state, prealloc, start);
+ if (err)
+ extent_io_tree_panic(tree, err);
+@@ -657,7 +658,8 @@ hit_next:
+ */
+ if (state->start <= end && state->end > end) {
+ prealloc = alloc_extent_state_atomic(prealloc);
+- BUG_ON(!prealloc);
++ if (!prealloc)
++ goto search_again;
+ err = split_state(tree, state, prealloc, end + 1);
+ if (err)
+ extent_io_tree_panic(tree, err);
+@@ -966,7 +968,7 @@ static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ else
+ ASSERT(failed_start == NULL);
+ again:
+- if (!prealloc && gfpflags_allow_blocking(mask)) {
++ if (!prealloc) {
+ /*
+ * Don't care for allocation failure here because we might end
+ * up not needing the pre-allocated extent state at all, which
+@@ -991,7 +993,8 @@ again:
+ state = tree_search_for_insert(tree, start, &p, &parent);
+ if (!state) {
+ prealloc = alloc_extent_state_atomic(prealloc);
+- BUG_ON(!prealloc);
++ if (!prealloc)
++ goto search_again;
+ prealloc->start = start;
+ prealloc->end = end;
+ insert_state_fast(tree, prealloc, p, parent, bits, changeset);
+@@ -1062,7 +1065,8 @@ hit_next:
+ }
+
+ prealloc = alloc_extent_state_atomic(prealloc);
+- BUG_ON(!prealloc);
++ if (!prealloc)
++ goto search_again;
+ err = split_state(tree, state, prealloc, start);
+ if (err)
+ extent_io_tree_panic(tree, err);
+@@ -1099,7 +1103,8 @@ hit_next:
+ this_end = last_start - 1;
+
+ prealloc = alloc_extent_state_atomic(prealloc);
+- BUG_ON(!prealloc);
++ if (!prealloc)
++ goto search_again;
+
+ /*
+ * Avoid to free 'prealloc' if it can be merged with the later
+@@ -1130,7 +1135,8 @@ hit_next:
+ }
+
+ prealloc = alloc_extent_state_atomic(prealloc);
+- BUG_ON(!prealloc);
++ if (!prealloc)
++ goto search_again;
+ err = split_state(tree, state, prealloc, end + 1);
+ if (err)
+ extent_io_tree_panic(tree, err);
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index d01631d478067..ed4e1c3705d0a 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -696,7 +696,10 @@ next_slot:
+ args->start - extent_offset,
+ 0, false);
+ ret = btrfs_inc_extent_ref(trans, &ref);
+- BUG_ON(ret); /* -ENOMEM */
++ if (ret) {
++ btrfs_abort_transaction(trans, ret);
++ break;
++ }
+ }
+ key.offset = args->start;
+ }
+@@ -783,7 +786,10 @@ delete_extent_item:
+ key.offset - extent_offset, 0,
+ false);
+ ret = btrfs_free_extent(trans, &ref);
+- BUG_ON(ret); /* -ENOMEM */
++ if (ret) {
++ btrfs_abort_transaction(trans, ret);
++ break;
++ }
+ args->bytes_found += extent_end - key.offset;
+ }
+
+diff --git a/fs/char_dev.c b/fs/char_dev.c
+index ba0ded7842a77..3f667292608c0 100644
+--- a/fs/char_dev.c
++++ b/fs/char_dev.c
+@@ -547,7 +547,7 @@ int cdev_device_add(struct cdev *cdev, struct device *dev)
+ }
+
+ rc = device_add(dev);
+- if (rc)
++ if (rc && dev->devt)
+ cdev_del(cdev);
+
+ return rc;
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 1420acf987f03..157d3c0e3cc76 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -13,6 +13,8 @@
+ #include <linux/in6.h>
+ #include <linux/inet.h>
+ #include <linux/slab.h>
++#include <linux/scatterlist.h>
++#include <linux/mm.h>
+ #include <linux/mempool.h>
+ #include <linux/workqueue.h>
+ #include <linux/utsname.h>
+@@ -2137,4 +2139,70 @@ static inline void move_cifs_info_to_smb2(struct smb2_file_all_info *dst, const
+ dst->FileNameLength = src->FileNameLength;
+ }
+
++static inline unsigned int cifs_get_num_sgs(const struct smb_rqst *rqst,
++ int num_rqst,
++ const u8 *sig)
++{
++ unsigned int len, skip;
++ unsigned int nents = 0;
++ unsigned long addr;
++ int i, j;
++
++ /* Assumes the first rqst has a transform header as the first iov.
++ * I.e.
++ * rqst[0].rq_iov[0] is transform header
++ * rqst[0].rq_iov[1+] data to be encrypted/decrypted
++ * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
++ */
++ for (i = 0; i < num_rqst; i++) {
++ /*
++ * The first rqst has a transform header where the
++ * first 20 bytes are not part of the encrypted blob.
++ */
++ for (j = 0; j < rqst[i].rq_nvec; j++) {
++ struct kvec *iov = &rqst[i].rq_iov[j];
++
++ skip = (i == 0) && (j == 0) ? 20 : 0;
++ addr = (unsigned long)iov->iov_base + skip;
++ if (unlikely(is_vmalloc_addr((void *)addr))) {
++ len = iov->iov_len - skip;
++ nents += DIV_ROUND_UP(offset_in_page(addr) + len,
++ PAGE_SIZE);
++ } else {
++ nents++;
++ }
++ }
++ nents += rqst[i].rq_npages;
++ }
++ nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE);
++ return nents;
++}
++
++/* We can not use the normal sg_set_buf() as we will sometimes pass a
++ * stack object as buf.
++ */
++static inline struct scatterlist *cifs_sg_set_buf(struct scatterlist *sg,
++ const void *buf,
++ unsigned int buflen)
++{
++ unsigned long addr = (unsigned long)buf;
++ unsigned int off = offset_in_page(addr);
++
++ addr &= PAGE_MASK;
++ if (unlikely(is_vmalloc_addr((void *)addr))) {
++ do {
++ unsigned int len = min_t(unsigned int, buflen, PAGE_SIZE - off);
++
++ sg_set_page(sg++, vmalloc_to_page((void *)addr), len, off);
++
++ off = 0;
++ addr += PAGE_SIZE;
++ buflen -= len;
++ } while (buflen);
++ } else {
++ sg_set_page(sg++, virt_to_page(addr), buflen, off);
++ }
++ return sg;
++}
++
+ #endif /* _CIFS_GLOB_H */
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index 83e83d8beabba..eb1a0de9dd553 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -600,8 +600,8 @@ int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
+ int cifs_alloc_hash(const char *name, struct shash_desc **sdesc);
+ void cifs_free_hash(struct shash_desc **sdesc);
+
+-extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
+- unsigned int *len, unsigned int *offset);
++void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
++ unsigned int *len, unsigned int *offset);
+ struct cifs_chan *
+ cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server);
+ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses);
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 3e68d8208cf5e..1cbecd64d697f 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -1136,8 +1136,8 @@ cifs_free_hash(struct shash_desc **sdesc)
+ * @len: Where to store the length for this page:
+ * @offset: Where to store the offset for this page
+ */
+-void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
+- unsigned int *len, unsigned int *offset)
++void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
++ unsigned int *len, unsigned int *offset)
+ {
+ *len = rqst->rq_pagesz;
+ *offset = (page == 0) ? rqst->rq_offset : 0;
+diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
+index ffbd9a99fc128..ba6cc50af390f 100644
+--- a/fs/cifs/smb2file.c
++++ b/fs/cifs/smb2file.c
+@@ -122,8 +122,8 @@ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32
+ struct smb2_hdr *hdr = err_iov.iov_base;
+
+ if (unlikely(!err_iov.iov_base || err_buftype == CIFS_NO_BUFFER))
+- rc = -ENOMEM;
+- else if (hdr->Status == STATUS_STOPPED_ON_SYMLINK) {
++ goto out;
++ if (hdr->Status == STATUS_STOPPED_ON_SYMLINK) {
+ rc = smb2_parse_symlink_response(oparms->cifs_sb, &err_iov,
+ &data->symlink_target);
+ if (!rc) {
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index bfaafd02fb1f2..b24e68b5ccd61 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -4204,69 +4204,82 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
+ memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
+ }
+
+-/* We can not use the normal sg_set_buf() as we will sometimes pass a
+- * stack object as buf.
+- */
+-static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
+- unsigned int buflen)
++static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst,
++ int num_rqst, const u8 *sig, u8 **iv,
++ struct aead_request **req, struct scatterlist **sgl,
++ unsigned int *num_sgs)
+ {
+- void *addr;
+- /*
+- * VMAP_STACK (at least) puts stack into the vmalloc address space
+- */
+- if (is_vmalloc_addr(buf))
+- addr = vmalloc_to_page(buf);
+- else
+- addr = virt_to_page(buf);
+- sg_set_page(sg, addr, buflen, offset_in_page(buf));
++ unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
++ unsigned int iv_size = crypto_aead_ivsize(tfm);
++ unsigned int len;
++ u8 *p;
++
++ *num_sgs = cifs_get_num_sgs(rqst, num_rqst, sig);
++
++ len = iv_size;
++ len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
++ len = ALIGN(len, crypto_tfm_ctx_alignment());
++ len += req_size;
++ len = ALIGN(len, __alignof__(struct scatterlist));
++ len += *num_sgs * sizeof(**sgl);
++
++ p = kmalloc(len, GFP_ATOMIC);
++ if (!p)
++ return NULL;
++
++ *iv = (u8 *)PTR_ALIGN(p, crypto_aead_alignmask(tfm) + 1);
++ *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
++ crypto_tfm_ctx_alignment());
++ *sgl = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
++ __alignof__(struct scatterlist));
++ return p;
+ }
+
+-/* Assumes the first rqst has a transform header as the first iov.
+- * I.e.
+- * rqst[0].rq_iov[0] is transform header
+- * rqst[0].rq_iov[1+] data to be encrypted/decrypted
+- * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
+- */
+-static struct scatterlist *
+-init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
++static void *smb2_get_aead_req(struct crypto_aead *tfm, const struct smb_rqst *rqst,
++ int num_rqst, const u8 *sig, u8 **iv,
++ struct aead_request **req, struct scatterlist **sgl)
+ {
+- unsigned int sg_len;
++ unsigned int off, len, skip;
+ struct scatterlist *sg;
+- unsigned int i;
+- unsigned int j;
+- unsigned int idx = 0;
+- int skip;
+-
+- sg_len = 1;
+- for (i = 0; i < num_rqst; i++)
+- sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
++ unsigned int num_sgs;
++ unsigned long addr;
++ int i, j;
++ void *p;
+
+- sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
+- if (!sg)
++ p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, sgl, &num_sgs);
++ if (!p)
+ return NULL;
+
+- sg_init_table(sg, sg_len);
++ sg_init_table(*sgl, num_sgs);
++ sg = *sgl;
++
++ /* Assumes the first rqst has a transform header as the first iov.
++ * I.e.
++ * rqst[0].rq_iov[0] is transform header
++ * rqst[0].rq_iov[1+] data to be encrypted/decrypted
++ * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
++ */
+ for (i = 0; i < num_rqst; i++) {
++ /*
++ * The first rqst has a transform header where the
++ * first 20 bytes are not part of the encrypted blob.
++ */
+ for (j = 0; j < rqst[i].rq_nvec; j++) {
+- /*
+- * The first rqst has a transform header where the
+- * first 20 bytes are not part of the encrypted blob
+- */
+- skip = (i == 0) && (j == 0) ? 20 : 0;
+- smb2_sg_set_buf(&sg[idx++],
+- rqst[i].rq_iov[j].iov_base + skip,
+- rqst[i].rq_iov[j].iov_len - skip);
+- }
++ struct kvec *iov = &rqst[i].rq_iov[j];
+
++ skip = (i == 0) && (j == 0) ? 20 : 0;
++ addr = (unsigned long)iov->iov_base + skip;
++ len = iov->iov_len - skip;
++ sg = cifs_sg_set_buf(sg, (void *)addr, len);
++ }
+ for (j = 0; j < rqst[i].rq_npages; j++) {
+- unsigned int len, offset;
+-
+- rqst_page_get_length(&rqst[i], j, &len, &offset);
+- sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
++ rqst_page_get_length(&rqst[i], j, &len, &off);
++ sg_set_page(sg++, rqst[i].rq_pages[j], len, off);
+ }
+ }
+- smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
+- return sg;
++ cifs_sg_set_buf(sg, sig, SMB2_SIGNATURE_SIZE);
++
++ return p;
+ }
+
+ static int
+@@ -4314,11 +4327,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ u8 sign[SMB2_SIGNATURE_SIZE] = {};
+ u8 key[SMB3_ENC_DEC_KEY_SIZE];
+ struct aead_request *req;
+- char *iv;
+- unsigned int iv_len;
++ u8 *iv;
+ DECLARE_CRYPTO_WAIT(wait);
+ struct crypto_aead *tfm;
+ unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
++ void *creq;
+
+ rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key);
+ if (rc) {
+@@ -4352,32 +4365,15 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ return rc;
+ }
+
+- req = aead_request_alloc(tfm, GFP_KERNEL);
+- if (!req) {
+- cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
++ creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg);
++ if (unlikely(!creq))
+ return -ENOMEM;
+- }
+
+ if (!enc) {
+ memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
+ crypt_len += SMB2_SIGNATURE_SIZE;
+ }
+
+- sg = init_sg(num_rqst, rqst, sign);
+- if (!sg) {
+- cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
+- rc = -ENOMEM;
+- goto free_req;
+- }
+-
+- iv_len = crypto_aead_ivsize(tfm);
+- iv = kzalloc(iv_len, GFP_KERNEL);
+- if (!iv) {
+- cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
+- rc = -ENOMEM;
+- goto free_sg;
+- }
+-
+ if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
+ (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
+ memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
+@@ -4386,6 +4382,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
+ }
+
++ aead_request_set_tfm(req, tfm);
+ aead_request_set_crypt(req, sg, sg, crypt_len, iv);
+ aead_request_set_ad(req, assoc_data_len);
+
+@@ -4398,11 +4395,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ if (!rc && enc)
+ memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
+
+- kfree_sensitive(iv);
+-free_sg:
+- kfree_sensitive(sg);
+-free_req:
+- kfree_sensitive(req);
++ kfree_sensitive(creq);
+ return rc;
+ }
+
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index d1f9d26322027..ec6519e1ca3bf 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -316,6 +316,7 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
+ return 0;
+
+ out_remove:
++ configfs_put(dentry->d_fsdata);
+ configfs_remove_dirent(dentry);
+ return PTR_ERR(inode);
+ }
+@@ -382,6 +383,7 @@ int configfs_create_link(struct configfs_dirent *target, struct dentry *parent,
+ return 0;
+
+ out_remove:
++ configfs_put(dentry->d_fsdata);
+ configfs_remove_dirent(dentry);
+ return PTR_ERR(inode);
+ }
+diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
+index ddb3fc258df94..b54f470e0d031 100644
+--- a/fs/debugfs/file.c
++++ b/fs/debugfs/file.c
+@@ -378,8 +378,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
+ }
+ EXPORT_SYMBOL_GPL(debugfs_attr_read);
+
+-ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
+- size_t len, loff_t *ppos)
++static ssize_t debugfs_attr_write_xsigned(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos, bool is_signed)
+ {
+ struct dentry *dentry = F_DENTRY(file);
+ ssize_t ret;
+@@ -387,12 +387,28 @@ ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
+ ret = debugfs_file_get(dentry);
+ if (unlikely(ret))
+ return ret;
+- ret = simple_attr_write(file, buf, len, ppos);
++ if (is_signed)
++ ret = simple_attr_write_signed(file, buf, len, ppos);
++ else
++ ret = simple_attr_write(file, buf, len, ppos);
+ debugfs_file_put(dentry);
+ return ret;
+ }
++
++ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos)
++{
++ return debugfs_attr_write_xsigned(file, buf, len, ppos, false);
++}
+ EXPORT_SYMBOL_GPL(debugfs_attr_write);
+
++ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos)
++{
++ return debugfs_attr_write_xsigned(file, buf, len, ppos, true);
++}
++EXPORT_SYMBOL_GPL(debugfs_attr_write_signed);
++
+ static struct dentry *debugfs_create_mode_unsafe(const char *name, umode_t mode,
+ struct dentry *parent, void *value,
+ const struct file_operations *fops,
+@@ -738,11 +754,11 @@ static int debugfs_atomic_t_get(void *data, u64 *val)
+ *val = atomic_read((atomic_t *)data);
+ return 0;
+ }
+-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get,
++DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t, debugfs_atomic_t_get,
+ debugfs_atomic_t_set, "%lld\n");
+-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_ro, debugfs_atomic_t_get, NULL,
++DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_ro, debugfs_atomic_t_get, NULL,
+ "%lld\n");
+-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set,
++DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_wo, NULL, debugfs_atomic_t_set,
+ "%lld\n");
+
+ /**
+diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
+index af5ed6b9c54dd..6a792a513d6b8 100644
+--- a/fs/erofs/fscache.c
++++ b/fs/erofs/fscache.c
+@@ -494,7 +494,8 @@ static int erofs_fscache_register_domain(struct super_block *sb)
+
+ static
+ struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb,
+- char *name, bool need_inode)
++ char *name,
++ unsigned int flags)
+ {
+ struct fscache_volume *volume = EROFS_SB(sb)->volume;
+ struct erofs_fscache *ctx;
+@@ -516,7 +517,7 @@ struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb,
+ fscache_use_cookie(cookie, false);
+ ctx->cookie = cookie;
+
+- if (need_inode) {
++ if (flags & EROFS_REG_COOKIE_NEED_INODE) {
+ struct inode *const inode = new_inode(sb);
+
+ if (!inode) {
+@@ -554,14 +555,15 @@ static void erofs_fscache_relinquish_cookie(struct erofs_fscache *ctx)
+
+ static
+ struct erofs_fscache *erofs_fscache_domain_init_cookie(struct super_block *sb,
+- char *name, bool need_inode)
++ char *name,
++ unsigned int flags)
+ {
+ int err;
+ struct inode *inode;
+ struct erofs_fscache *ctx;
+ struct erofs_domain *domain = EROFS_SB(sb)->domain;
+
+- ctx = erofs_fscache_acquire_cookie(sb, name, need_inode);
++ ctx = erofs_fscache_acquire_cookie(sb, name, flags);
+ if (IS_ERR(ctx))
+ return ctx;
+
+@@ -589,7 +591,8 @@ out:
+
+ static
+ struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
+- char *name, bool need_inode)
++ char *name,
++ unsigned int flags)
+ {
+ struct inode *inode;
+ struct erofs_fscache *ctx;
+@@ -602,23 +605,30 @@ struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
+ ctx = inode->i_private;
+ if (!ctx || ctx->domain != domain || strcmp(ctx->name, name))
+ continue;
+- igrab(inode);
++ if (!(flags & EROFS_REG_COOKIE_NEED_NOEXIST)) {
++ igrab(inode);
++ } else {
++ erofs_err(sb, "%s already exists in domain %s", name,
++ domain->domain_id);
++ ctx = ERR_PTR(-EEXIST);
++ }
+ spin_unlock(&psb->s_inode_list_lock);
+ mutex_unlock(&erofs_domain_cookies_lock);
+ return ctx;
+ }
+ spin_unlock(&psb->s_inode_list_lock);
+- ctx = erofs_fscache_domain_init_cookie(sb, name, need_inode);
++ ctx = erofs_fscache_domain_init_cookie(sb, name, flags);
+ mutex_unlock(&erofs_domain_cookies_lock);
+ return ctx;
+ }
+
+ struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
+- char *name, bool need_inode)
++ char *name,
++ unsigned int flags)
+ {
+ if (EROFS_SB(sb)->domain_id)
+- return erofs_domain_register_cookie(sb, name, need_inode);
+- return erofs_fscache_acquire_cookie(sb, name, need_inode);
++ return erofs_domain_register_cookie(sb, name, flags);
++ return erofs_fscache_acquire_cookie(sb, name, flags);
+ }
+
+ void erofs_fscache_unregister_cookie(struct erofs_fscache *ctx)
+@@ -647,6 +657,7 @@ int erofs_fscache_register_fs(struct super_block *sb)
+ int ret;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_fscache *fscache;
++ unsigned int flags;
+
+ if (sbi->domain_id)
+ ret = erofs_fscache_register_domain(sb);
+@@ -655,8 +666,20 @@ int erofs_fscache_register_fs(struct super_block *sb)
+ if (ret)
+ return ret;
+
+- /* acquired domain/volume will be relinquished in kill_sb() on error */
+- fscache = erofs_fscache_register_cookie(sb, sbi->fsid, true);
++ /*
++ * When shared domain is enabled, using NEED_NOEXIST to guarantee
++ * the primary data blob (aka fsid) is unique in the shared domain.
++ *
++ * For non-shared-domain case, fscache_acquire_volume() invoked by
++ * erofs_fscache_register_volume() has already guaranteed
++ * the uniqueness of primary data blob.
++ *
++ * Acquired domain/volume will be relinquished in kill_sb() on error.
++ */
++ flags = EROFS_REG_COOKIE_NEED_INODE;
++ if (sbi->domain_id)
++ flags |= EROFS_REG_COOKIE_NEED_NOEXIST;
++ fscache = erofs_fscache_register_cookie(sb, sbi->fsid, flags);
+ if (IS_ERR(fscache))
+ return PTR_ERR(fscache);
+
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index 05dc686277220..e51f27b6bde15 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -604,13 +604,18 @@ static inline int z_erofs_load_lzma_config(struct super_block *sb,
+ }
+ #endif /* !CONFIG_EROFS_FS_ZIP */
+
++/* flags for erofs_fscache_register_cookie() */
++#define EROFS_REG_COOKIE_NEED_INODE 1
++#define EROFS_REG_COOKIE_NEED_NOEXIST 2
++
+ /* fscache.c */
+ #ifdef CONFIG_EROFS_FS_ONDEMAND
+ int erofs_fscache_register_fs(struct super_block *sb);
+ void erofs_fscache_unregister_fs(struct super_block *sb);
+
+ struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
+- char *name, bool need_inode);
++ char *name,
++ unsigned int flags);
+ void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache);
+
+ extern const struct address_space_operations erofs_fscache_access_aops;
+@@ -623,7 +628,8 @@ static inline void erofs_fscache_unregister_fs(struct super_block *sb) {}
+
+ static inline
+ struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
+- char *name, bool need_inode)
++ char *name,
++ unsigned int flags)
+ {
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index 1c7dcca702b3e..481788c24a68b 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -245,7 +245,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
+ }
+
+ if (erofs_is_fscache_mode(sb)) {
+- fscache = erofs_fscache_register_cookie(sb, dif->path, false);
++ fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
+ if (IS_ERR(fscache))
+ return PTR_ERR(fscache);
+ dif->fscache = fscache;
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index b792d424d774c..cf4871834ebb2 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -488,7 +488,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+ struct erofs_workgroup *grp;
+ int err;
+
+- if (!(map->m_flags & EROFS_MAP_ENCODED)) {
++ if (!(map->m_flags & EROFS_MAP_ENCODED) ||
++ (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) {
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index 0bb66927e3d06..e6d5d7a18fb06 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -694,10 +694,15 @@ static int z_erofs_do_map_blocks(struct inode *inode,
+ map->m_pa = blknr_to_addr(m.pblk);
+ err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
+ if (err)
+- goto out;
++ goto unmap_out;
+ }
+
+ if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN) {
++ if (map->m_llen > map->m_plen) {
++ DBG_BUGON(1);
++ err = -EFSCORRUPTED;
++ goto unmap_out;
++ }
+ if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
+ map->m_algorithmformat =
+ Z_EROFS_COMPRESSION_INTERLACED;
+@@ -718,14 +723,12 @@ static int z_erofs_do_map_blocks(struct inode *inode,
+ if (!err)
+ map->m_flags |= EROFS_MAP_FULL_MAPPED;
+ }
++
+ unmap_out:
+ erofs_unmap_metabuf(&m.map->buf);
+-
+-out:
+ erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
+ __func__, map->m_la, map->m_pa,
+ map->m_llen, map->m_plen, map->m_flags);
+-
+ return err;
+ }
+
+diff --git a/fs/eventfd.c b/fs/eventfd.c
+index c0ffee99ad238..249ca6c0b7843 100644
+--- a/fs/eventfd.c
++++ b/fs/eventfd.c
+@@ -43,21 +43,7 @@ struct eventfd_ctx {
+ int id;
+ };
+
+-/**
+- * eventfd_signal - Adds @n to the eventfd counter.
+- * @ctx: [in] Pointer to the eventfd context.
+- * @n: [in] Value of the counter to be added to the eventfd internal counter.
+- * The value cannot be negative.
+- *
+- * This function is supposed to be called by the kernel in paths that do not
+- * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
+- * value, and we signal this as overflow condition by returning a EPOLLERR
+- * to poll(2).
+- *
+- * Returns the amount by which the counter was incremented. This will be less
+- * than @n if the counter has overflowed.
+- */
+-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
++__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask)
+ {
+ unsigned long flags;
+
+@@ -78,12 +64,31 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+ n = ULLONG_MAX - ctx->count;
+ ctx->count += n;
+ if (waitqueue_active(&ctx->wqh))
+- wake_up_locked_poll(&ctx->wqh, EPOLLIN);
++ wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
+ current->in_eventfd = 0;
+ spin_unlock_irqrestore(&ctx->wqh.lock, flags);
+
+ return n;
+ }
++
++/**
++ * eventfd_signal - Adds @n to the eventfd counter.
++ * @ctx: [in] Pointer to the eventfd context.
++ * @n: [in] Value of the counter to be added to the eventfd internal counter.
++ * The value cannot be negative.
++ *
++ * This function is supposed to be called by the kernel in paths that do not
++ * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
++ * value, and we signal this as overflow condition by returning a EPOLLERR
++ * to poll(2).
++ *
++ * Returns the amount by which the counter was incremented. This will be less
++ * than @n if the counter has overflowed.
++ */
++__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
++{
++ return eventfd_signal_mask(ctx, n, 0);
++}
+ EXPORT_SYMBOL_GPL(eventfd_signal);
+
+ static void eventfd_free_ctx(struct eventfd_ctx *ctx)
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 52954d4637b54..64659b1109733 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -491,7 +491,8 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
+ */
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+-static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
++static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
++ unsigned pollflags)
+ {
+ struct eventpoll *ep_src;
+ unsigned long flags;
+@@ -522,16 +523,17 @@ static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
+ }
+ spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
+ ep->nests = nests + 1;
+- wake_up_locked_poll(&ep->poll_wait, EPOLLIN);
++ wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags);
+ ep->nests = 0;
+ spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
+ }
+
+ #else
+
+-static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
++static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
++ unsigned pollflags)
+ {
+- wake_up_poll(&ep->poll_wait, EPOLLIN);
++ wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags);
+ }
+
+ #endif
+@@ -742,7 +744,7 @@ static void ep_free(struct eventpoll *ep)
+
+ /* We need to release all tasks waiting for these file */
+ if (waitqueue_active(&ep->poll_wait))
+- ep_poll_safewake(ep, NULL);
++ ep_poll_safewake(ep, NULL, 0);
+
+ /*
+ * We need to lock this because we could be hit by
+@@ -1208,7 +1210,7 @@ out_unlock:
+
+ /* We have to call this outside the lock */
+ if (pwake)
+- ep_poll_safewake(ep, epi);
++ ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);
+
+ if (!(epi->event.events & EPOLLEXCLUSIVE))
+ ewake = 1;
+@@ -1553,7 +1555,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
+
+ /* We have to call this outside the lock */
+ if (pwake)
+- ep_poll_safewake(ep, NULL);
++ ep_poll_safewake(ep, NULL, 0);
+
+ return 0;
+ }
+@@ -1629,7 +1631,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
+
+ /* We have to call this outside the lock */
+ if (pwake)
+- ep_poll_safewake(ep, NULL);
++ ep_poll_safewake(ep, NULL, 0);
+
+ return 0;
+ }
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index d315c2de136f2..74d3f2d2271f3 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -346,7 +346,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
+ if (!level)
+ level = F2FS_ZSTD_DEFAULT_CLEVEL;
+
+- params = zstd_get_params(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen);
++ params = zstd_get_params(level, cc->rlen);
+ workspace_size = zstd_cstream_workspace_bound(&params.cParams);
+
+ workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index e6355a5683b75..8b9f0b3c77232 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -2974,7 +2974,7 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
+ /* Flags that should be inherited by new inodes from their parent. */
+ #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
+ F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
+- F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)
++ F2FS_CASEFOLD_FL)
+
+ /* Flags that are appropriate for regular files (all but dir-specific ones). */
+ #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 82cda12582272..f96bbfa8b3991 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1915,6 +1915,10 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
+ if (!f2fs_disable_compressed_file(inode))
+ return -EINVAL;
+ } else {
++ /* try to convert inline_data to support compression */
++ int err = f2fs_convert_inline_inode(inode);
++ if (err)
++ return err;
+ if (!f2fs_may_compress(inode))
+ return -EINVAL;
+ if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 4546e01b2ee08..b3184d8b1ce89 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -96,16 +96,6 @@ static int gc_thread_func(void *data)
+ * invalidated soon after by user update or deletion.
+ * So, I'd like to wait some time to collect dirty segments.
+ */
+- if (sbi->gc_mode == GC_URGENT_HIGH) {
+- spin_lock(&sbi->gc_urgent_high_lock);
+- if (sbi->gc_urgent_high_remaining) {
+- sbi->gc_urgent_high_remaining--;
+- if (!sbi->gc_urgent_high_remaining)
+- sbi->gc_mode = GC_NORMAL;
+- }
+- spin_unlock(&sbi->gc_urgent_high_lock);
+- }
+-
+ if (sbi->gc_mode == GC_URGENT_HIGH ||
+ sbi->gc_mode == GC_URGENT_MID) {
+ wait_ms = gc_th->urgent_sleep_time;
+@@ -162,6 +152,15 @@ do_gc:
+ /* balancing f2fs's metadata periodically */
+ f2fs_balance_fs_bg(sbi, true);
+ next:
++ if (sbi->gc_mode == GC_URGENT_HIGH) {
++ spin_lock(&sbi->gc_urgent_high_lock);
++ if (sbi->gc_urgent_high_remaining) {
++ sbi->gc_urgent_high_remaining--;
++ if (!sbi->gc_urgent_high_remaining)
++ sbi->gc_mode = GC_NORMAL;
++ }
++ spin_unlock(&sbi->gc_urgent_high_lock);
++ }
+ sb_end_write(sbi->sb);
+
+ } while (!kthread_should_stop());
+@@ -1110,6 +1109,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ if (ofs_in_node >= max_addrs) {
+ f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%u, nid:%u, max:%u",
+ ofs_in_node, dni->ino, dni->nid, max_addrs);
++ f2fs_put_page(node_page, 1);
+ return false;
+ }
+
+@@ -1744,8 +1744,9 @@ freed:
+ get_valid_blocks(sbi, segno, false) == 0)
+ seg_freed++;
+
+- if (__is_large_section(sbi) && segno + 1 < end_segno)
+- sbi->next_victim_seg[gc_type] = segno + 1;
++ if (__is_large_section(sbi))
++ sbi->next_victim_seg[gc_type] =
++ (segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
+ skip:
+ f2fs_put_page(sum_page, 0);
+ }
+@@ -2133,8 +2134,6 @@ out_unlock:
+ if (err)
+ return err;
+
+- set_sbi_flag(sbi, SBI_IS_RESIZEFS);
+-
+ freeze_super(sbi->sb);
+ f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write(&sbi->cp_global_sem);
+@@ -2150,6 +2149,7 @@ out_unlock:
+ if (err)
+ goto out_err;
+
++ set_sbi_flag(sbi, SBI_IS_RESIZEFS);
+ err = free_segment_range(sbi, secs, false);
+ if (err)
+ goto recover_out;
+@@ -2173,6 +2173,7 @@ out_unlock:
+ f2fs_commit_super(sbi, false);
+ }
+ recover_out:
++ clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
+ if (err) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
+@@ -2185,6 +2186,5 @@ out_err:
+ f2fs_up_write(&sbi->cp_global_sem);
+ f2fs_up_write(&sbi->gc_lock);
+ thaw_super(sbi->sb);
+- clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
+ return err;
+ }
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index a389772fd212a..b6c14c9c33a08 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -22,8 +22,163 @@
+ #include "acl.h"
+ #include <trace/events/f2fs.h>
+
++static inline int is_extension_exist(const unsigned char *s, const char *sub,
++ bool tmp_ext)
++{
++ size_t slen = strlen(s);
++ size_t sublen = strlen(sub);
++ int i;
++
++ if (sublen == 1 && *sub == '*')
++ return 1;
++
++ /*
++ * filename format of multimedia file should be defined as:
++ * "filename + '.' + extension + (optional: '.' + temp extension)".
++ */
++ if (slen < sublen + 2)
++ return 0;
++
++ if (!tmp_ext) {
++ /* file has no temp extension */
++ if (s[slen - sublen - 1] != '.')
++ return 0;
++ return !strncasecmp(s + slen - sublen, sub, sublen);
++ }
++
++ for (i = 1; i < slen - sublen; i++) {
++ if (s[i] != '.')
++ continue;
++ if (!strncasecmp(s + i + 1, sub, sublen))
++ return 1;
++ }
++
++ return 0;
++}
++
++int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
++ bool hot, bool set)
++{
++ __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
++ int cold_count = le32_to_cpu(sbi->raw_super->extension_count);
++ int hot_count = sbi->raw_super->hot_ext_count;
++ int total_count = cold_count + hot_count;
++ int start, count;
++ int i;
++
++ if (set) {
++ if (total_count == F2FS_MAX_EXTENSION)
++ return -EINVAL;
++ } else {
++ if (!hot && !cold_count)
++ return -EINVAL;
++ if (hot && !hot_count)
++ return -EINVAL;
++ }
++
++ if (hot) {
++ start = cold_count;
++ count = total_count;
++ } else {
++ start = 0;
++ count = cold_count;
++ }
++
++ for (i = start; i < count; i++) {
++ if (strcmp(name, extlist[i]))
++ continue;
++
++ if (set)
++ return -EINVAL;
++
++ memcpy(extlist[i], extlist[i + 1],
++ F2FS_EXTENSION_LEN * (total_count - i - 1));
++ memset(extlist[total_count - 1], 0, F2FS_EXTENSION_LEN);
++ if (hot)
++ sbi->raw_super->hot_ext_count = hot_count - 1;
++ else
++ sbi->raw_super->extension_count =
++ cpu_to_le32(cold_count - 1);
++ return 0;
++ }
++
++ if (!set)
++ return -EINVAL;
++
++ if (hot) {
++ memcpy(extlist[count], name, strlen(name));
++ sbi->raw_super->hot_ext_count = hot_count + 1;
++ } else {
++ char buf[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];
++
++ memcpy(buf, &extlist[cold_count],
++ F2FS_EXTENSION_LEN * hot_count);
++ memset(extlist[cold_count], 0, F2FS_EXTENSION_LEN);
++ memcpy(extlist[cold_count], name, strlen(name));
++ memcpy(&extlist[cold_count + 1], buf,
++ F2FS_EXTENSION_LEN * hot_count);
++ sbi->raw_super->extension_count = cpu_to_le32(cold_count + 1);
++ }
++ return 0;
++}
++
++static void set_compress_new_inode(struct f2fs_sb_info *sbi, struct inode *dir,
++ struct inode *inode, const unsigned char *name)
++{
++ __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
++ unsigned char (*noext)[F2FS_EXTENSION_LEN] =
++ F2FS_OPTION(sbi).noextensions;
++ unsigned char (*ext)[F2FS_EXTENSION_LEN] = F2FS_OPTION(sbi).extensions;
++ unsigned char ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
++ unsigned char noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
++ int i, cold_count, hot_count;
++
++ if (!f2fs_sb_has_compression(sbi))
++ return;
++
++ if (S_ISDIR(inode->i_mode))
++ goto inherit_comp;
++
++ /* This name comes only from normal files. */
++ if (!name)
++ return;
++
++ /* Don't compress hot files. */
++ f2fs_down_read(&sbi->sb_lock);
++ cold_count = le32_to_cpu(sbi->raw_super->extension_count);
++ hot_count = sbi->raw_super->hot_ext_count;
++ for (i = cold_count; i < cold_count + hot_count; i++)
++ if (is_extension_exist(name, extlist[i], false))
++ break;
++ f2fs_up_read(&sbi->sb_lock);
++ if (i < (cold_count + hot_count))
++ return;
++
++ /* Don't compress unallowed extension. */
++ for (i = 0; i < noext_cnt; i++)
++ if (is_extension_exist(name, noext[i], false))
++ return;
++
++ /* Compress wanting extension. */
++ for (i = 0; i < ext_cnt; i++) {
++ if (is_extension_exist(name, ext[i], false)) {
++ set_compress_context(inode);
++ return;
++ }
++ }
++inherit_comp:
++ /* Inherit the {no-}compression flag in directory */
++ if (F2FS_I(dir)->i_flags & F2FS_NOCOMP_FL) {
++ F2FS_I(inode)->i_flags |= F2FS_NOCOMP_FL;
++ f2fs_mark_inode_dirty_sync(inode, true);
++ } else if (F2FS_I(dir)->i_flags & F2FS_COMPR_FL) {
++ set_compress_context(inode);
++ }
++}
++
+ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
+- struct inode *dir, umode_t mode)
++ struct inode *dir, umode_t mode,
++ const char *name)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+ nid_t ino;
+@@ -114,12 +269,8 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
+ if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL)
+ set_inode_flag(inode, FI_PROJ_INHERIT);
+
+- if (f2fs_sb_has_compression(sbi)) {
+- /* Inherit the compression flag in directory */
+- if ((F2FS_I(dir)->i_flags & F2FS_COMPR_FL) &&
+- f2fs_may_compress(inode))
+- set_compress_context(inode);
+- }
++ /* Check compression first. */
++ set_compress_new_inode(sbi, dir, inode, name);
+
+ /* Should enable inline_data after compression set */
+ if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
+@@ -153,40 +304,6 @@ fail_drop:
+ return ERR_PTR(err);
+ }
+
+-static inline int is_extension_exist(const unsigned char *s, const char *sub,
+- bool tmp_ext)
+-{
+- size_t slen = strlen(s);
+- size_t sublen = strlen(sub);
+- int i;
+-
+- if (sublen == 1 && *sub == '*')
+- return 1;
+-
+- /*
+- * filename format of multimedia file should be defined as:
+- * "filename + '.' + extension + (optional: '.' + temp extension)".
+- */
+- if (slen < sublen + 2)
+- return 0;
+-
+- if (!tmp_ext) {
+- /* file has no temp extension */
+- if (s[slen - sublen - 1] != '.')
+- return 0;
+- return !strncasecmp(s + slen - sublen, sub, sublen);
+- }
+-
+- for (i = 1; i < slen - sublen; i++) {
+- if (s[i] != '.')
+- continue;
+- if (!strncasecmp(s + i + 1, sub, sublen))
+- return 1;
+- }
+-
+- return 0;
+-}
+-
+ /*
+ * Set file's temperature for hot/cold data separation
+ */
+@@ -217,124 +334,6 @@ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *
+ file_set_hot(inode);
+ }
+
+-int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
+- bool hot, bool set)
+-{
+- __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
+- int cold_count = le32_to_cpu(sbi->raw_super->extension_count);
+- int hot_count = sbi->raw_super->hot_ext_count;
+- int total_count = cold_count + hot_count;
+- int start, count;
+- int i;
+-
+- if (set) {
+- if (total_count == F2FS_MAX_EXTENSION)
+- return -EINVAL;
+- } else {
+- if (!hot && !cold_count)
+- return -EINVAL;
+- if (hot && !hot_count)
+- return -EINVAL;
+- }
+-
+- if (hot) {
+- start = cold_count;
+- count = total_count;
+- } else {
+- start = 0;
+- count = cold_count;
+- }
+-
+- for (i = start; i < count; i++) {
+- if (strcmp(name, extlist[i]))
+- continue;
+-
+- if (set)
+- return -EINVAL;
+-
+- memcpy(extlist[i], extlist[i + 1],
+- F2FS_EXTENSION_LEN * (total_count - i - 1));
+- memset(extlist[total_count - 1], 0, F2FS_EXTENSION_LEN);
+- if (hot)
+- sbi->raw_super->hot_ext_count = hot_count - 1;
+- else
+- sbi->raw_super->extension_count =
+- cpu_to_le32(cold_count - 1);
+- return 0;
+- }
+-
+- if (!set)
+- return -EINVAL;
+-
+- if (hot) {
+- memcpy(extlist[count], name, strlen(name));
+- sbi->raw_super->hot_ext_count = hot_count + 1;
+- } else {
+- char buf[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];
+-
+- memcpy(buf, &extlist[cold_count],
+- F2FS_EXTENSION_LEN * hot_count);
+- memset(extlist[cold_count], 0, F2FS_EXTENSION_LEN);
+- memcpy(extlist[cold_count], name, strlen(name));
+- memcpy(&extlist[cold_count + 1], buf,
+- F2FS_EXTENSION_LEN * hot_count);
+- sbi->raw_super->extension_count = cpu_to_le32(cold_count + 1);
+- }
+- return 0;
+-}
+-
+-static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
+- const unsigned char *name)
+-{
+- __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
+- unsigned char (*noext)[F2FS_EXTENSION_LEN] = F2FS_OPTION(sbi).noextensions;
+- unsigned char (*ext)[F2FS_EXTENSION_LEN] = F2FS_OPTION(sbi).extensions;
+- unsigned char ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+- unsigned char noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
+- int i, cold_count, hot_count;
+-
+- if (!f2fs_sb_has_compression(sbi) ||
+- F2FS_I(inode)->i_flags & F2FS_NOCOMP_FL ||
+- !f2fs_may_compress(inode) ||
+- (!ext_cnt && !noext_cnt))
+- return;
+-
+- f2fs_down_read(&sbi->sb_lock);
+-
+- cold_count = le32_to_cpu(sbi->raw_super->extension_count);
+- hot_count = sbi->raw_super->hot_ext_count;
+-
+- for (i = cold_count; i < cold_count + hot_count; i++) {
+- if (is_extension_exist(name, extlist[i], false)) {
+- f2fs_up_read(&sbi->sb_lock);
+- return;
+- }
+- }
+-
+- f2fs_up_read(&sbi->sb_lock);
+-
+- for (i = 0; i < noext_cnt; i++) {
+- if (is_extension_exist(name, noext[i], false)) {
+- f2fs_disable_compressed_file(inode);
+- return;
+- }
+- }
+-
+- if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
+- return;
+-
+- for (i = 0; i < ext_cnt; i++) {
+- if (!is_extension_exist(name, ext[i], false))
+- continue;
+-
+- /* Do not use inline_data with compression */
+- stat_dec_inline_inode(inode);
+- clear_inode_flag(inode, FI_INLINE_DATA);
+- set_compress_context(inode);
+- return;
+- }
+-}
+-
+ static int f2fs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
+ {
+@@ -352,15 +351,13 @@ static int f2fs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ if (err)
+ return err;
+
+- inode = f2fs_new_inode(mnt_userns, dir, mode);
++ inode = f2fs_new_inode(mnt_userns, dir, mode, dentry->d_name.name);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ if (!test_opt(sbi, DISABLE_EXT_IDENTIFY))
+ set_file_temperature(sbi, inode, dentry->d_name.name);
+
+- set_compress_inode(sbi, inode, dentry->d_name.name);
+-
+ inode->i_op = &f2fs_file_inode_operations;
+ inode->i_fop = &f2fs_file_operations;
+ inode->i_mapping->a_ops = &f2fs_dblock_aops;
+@@ -689,7 +686,7 @@ static int f2fs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ if (err)
+ return err;
+
+- inode = f2fs_new_inode(mnt_userns, dir, S_IFLNK | S_IRWXUGO);
++ inode = f2fs_new_inode(mnt_userns, dir, S_IFLNK | S_IRWXUGO, NULL);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+@@ -760,7 +757,7 @@ static int f2fs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ if (err)
+ return err;
+
+- inode = f2fs_new_inode(mnt_userns, dir, S_IFDIR | mode);
++ inode = f2fs_new_inode(mnt_userns, dir, S_IFDIR | mode, NULL);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+@@ -817,7 +814,7 @@ static int f2fs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ if (err)
+ return err;
+
+- inode = f2fs_new_inode(mnt_userns, dir, mode);
++ inode = f2fs_new_inode(mnt_userns, dir, mode, NULL);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+@@ -856,7 +853,7 @@ static int __f2fs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+ if (err)
+ return err;
+
+- inode = f2fs_new_inode(mnt_userns, dir, mode);
++ inode = f2fs_new_inode(mnt_userns, dir, mode, NULL);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 983572f238969..b9ee5a1176a07 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -1360,8 +1360,7 @@ static int read_node_page(struct page *page, blk_opf_t op_flags)
+ return err;
+
+ /* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
+- if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR) ||
+- is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
++ if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
+ ClearPageUptodate(page);
+ return -ENOENT;
+ }
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index acf3d3fa43635..c1d0713666ee5 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -1170,7 +1170,7 @@ submit:
+
+ atomic_inc(&dcc->issued_discard);
+
+- f2fs_update_iostat(sbi, NULL, FS_DISCARD, 1);
++ f2fs_update_iostat(sbi, NULL, FS_DISCARD, len * F2FS_BLKSIZE);
+
+ lstart += len;
+ start += len;
+@@ -1448,7 +1448,7 @@ retry:
+ if (i + 1 < dpolicy->granularity)
+ break;
+
+- if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
++ if (i + 1 < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
+ return __issue_discard_cmd_orderly(sbi, dpolicy);
+
+ pend_list = &dcc->pend_list[i];
+@@ -2025,8 +2025,10 @@ int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
+
+ dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
+ "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
+- if (IS_ERR(dcc->f2fs_issue_discard))
++ if (IS_ERR(dcc->f2fs_issue_discard)) {
+ err = PTR_ERR(dcc->f2fs_issue_discard);
++ dcc->f2fs_issue_discard = NULL;
++ }
+
+ return err;
+ }
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 3834ead046200..67d51f5276061 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -4188,6 +4188,9 @@ try_onemore:
+ if (err)
+ goto free_bio_info;
+
++ spin_lock_init(&sbi->error_lock);
++ memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
++
+ init_f2fs_rwsem(&sbi->cp_rwsem);
+ init_f2fs_rwsem(&sbi->quota_sem);
+ init_waitqueue_head(&sbi->cp_wait);
+@@ -4255,9 +4258,6 @@ try_onemore:
+ goto free_devices;
+ }
+
+- spin_lock_init(&sbi->error_lock);
+- memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
+-
+ sbi->total_valid_node_count =
+ le32_to_cpu(sbi->ckpt->valid_node_count);
+ percpu_counter_set(&sbi->total_valid_inode_count,
+@@ -4523,9 +4523,9 @@ free_nm:
+ f2fs_destroy_node_manager(sbi);
+ free_sm:
+ f2fs_destroy_segment_manager(sbi);
+- f2fs_destroy_post_read_wq(sbi);
+ stop_ckpt_thread:
+ f2fs_stop_ckpt_thread(sbi);
++ f2fs_destroy_post_read_wq(sbi);
+ free_devices:
+ destroy_device_list(sbi);
+ kvfree(sbi->ckpt);
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index df335c258eb08..235a0948f6cc6 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -1039,6 +1039,7 @@ static void delete_work_func(struct work_struct *work)
+ if (gfs2_queue_delete_work(gl, 5 * HZ))
+ return;
+ }
++ goto out;
+ }
+
+ inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
+@@ -1051,6 +1052,7 @@ static void delete_work_func(struct work_struct *work)
+ d_prune_aliases(inode);
+ iput(inode);
+ }
++out:
+ gfs2_glock_put(gl);
+ }
+
+diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
+index c4526f16355d5..a0746be3c1de7 100644
+--- a/fs/hfs/inode.c
++++ b/fs/hfs/inode.c
+@@ -458,6 +458,8 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ /* panic? */
+ return -EIO;
+
++ if (HFS_I(main_inode)->cat_key.CName.len > HFS_NAMELEN)
++ return -EIO;
+ fd.search_key->cat = HFS_I(main_inode)->cat_key;
+ if (hfs_brec_find(&fd))
+ /* panic? */
+diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c
+index 39f5e343bf4d4..fdb0edb8a607d 100644
+--- a/fs/hfs/trans.c
++++ b/fs/hfs/trans.c
+@@ -109,7 +109,7 @@ void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, const struct qstr
+ if (nls_io) {
+ wchar_t ch;
+
+- while (srclen > 0) {
++ while (srclen > 0 && dstlen > 0) {
+ size = nls_io->char2uni(src, srclen, &ch);
+ if (size < 0) {
+ ch = '?';
+diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
+index a5db2e3b29801..6aa919e594834 100644
+--- a/fs/hfsplus/hfsplus_fs.h
++++ b/fs/hfsplus/hfsplus_fs.h
+@@ -198,6 +198,8 @@ struct hfsplus_sb_info {
+ #define HFSPLUS_SB_HFSX 3
+ #define HFSPLUS_SB_CASEFOLD 4
+ #define HFSPLUS_SB_NOBARRIER 5
++#define HFSPLUS_SB_UID 6
++#define HFSPLUS_SB_GID 7
+
+ static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb)
+ {
+diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
+index aeab83ed1c9c6..b675581aa9d0f 100644
+--- a/fs/hfsplus/inode.c
++++ b/fs/hfsplus/inode.c
+@@ -192,11 +192,11 @@ static void hfsplus_get_perms(struct inode *inode,
+ mode = be16_to_cpu(perms->mode);
+
+ i_uid_write(inode, be32_to_cpu(perms->owner));
+- if (!i_uid_read(inode) && !mode)
++ if ((test_bit(HFSPLUS_SB_UID, &sbi->flags)) || (!i_uid_read(inode) && !mode))
+ inode->i_uid = sbi->uid;
+
+ i_gid_write(inode, be32_to_cpu(perms->group));
+- if (!i_gid_read(inode) && !mode)
++ if ((test_bit(HFSPLUS_SB_GID, &sbi->flags)) || (!i_gid_read(inode) && !mode))
+ inode->i_gid = sbi->gid;
+
+ if (dir) {
+diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
+index 047e05c575601..c94a58762ad6d 100644
+--- a/fs/hfsplus/options.c
++++ b/fs/hfsplus/options.c
+@@ -140,6 +140,8 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
+ if (!uid_valid(sbi->uid)) {
+ pr_err("invalid uid specified\n");
+ return 0;
++ } else {
++ set_bit(HFSPLUS_SB_UID, &sbi->flags);
+ }
+ break;
+ case opt_gid:
+@@ -151,6 +153,8 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
+ if (!gid_valid(sbi->gid)) {
+ pr_err("invalid gid specified\n");
+ return 0;
++ } else {
++ set_bit(HFSPLUS_SB_GID, &sbi->flags);
+ }
+ break;
+ case opt_part:
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index df7772335dc0e..8eea709e36599 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -1377,7 +1377,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
+
+ case Opt_size:
+ /* memparse() will accept a K/M/G without a digit */
+- if (!isdigit(param->string[0]))
++ if (!param->string || !isdigit(param->string[0]))
+ goto bad_val;
+ ctx->max_size_opt = memparse(param->string, &rest);
+ ctx->max_val_type = SIZE_STD;
+@@ -1387,7 +1387,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
+
+ case Opt_nr_inodes:
+ /* memparse() will accept a K/M/G without a digit */
+- if (!isdigit(param->string[0]))
++ if (!param->string || !isdigit(param->string[0]))
+ goto bad_val;
+ ctx->nr_inodes = memparse(param->string, &rest);
+ return 0;
+@@ -1403,7 +1403,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
+
+ case Opt_min_size:
+ /* memparse() will accept a K/M/G without a digit */
+- if (!isdigit(param->string[0]))
++ if (!param->string || !isdigit(param->string[0]))
+ goto bad_val;
+ ctx->min_size_opt = memparse(param->string, &rest);
+ ctx->min_val_type = SIZE_STD;
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 6b838d3ae7c2e..765838578a722 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -155,7 +155,7 @@ int dbMount(struct inode *ipbmap)
+ struct bmap *bmp;
+ struct dbmap_disk *dbmp_le;
+ struct metapage *mp;
+- int i;
++ int i, err;
+
+ /*
+ * allocate/initialize the in-memory bmap descriptor
+@@ -170,8 +170,8 @@ int dbMount(struct inode *ipbmap)
+ BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
+ PSIZE, 0);
+ if (mp == NULL) {
+- kfree(bmp);
+- return -EIO;
++ err = -EIO;
++ goto err_kfree_bmp;
+ }
+
+ /* copy the on-disk bmap descriptor to its in-memory version. */
+@@ -181,9 +181,8 @@ int dbMount(struct inode *ipbmap)
+ bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
+ bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
+ if (!bmp->db_numag) {
+- release_metapage(mp);
+- kfree(bmp);
+- return -EINVAL;
++ err = -EINVAL;
++ goto err_release_metapage;
+ }
+
+ bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
+@@ -194,6 +193,16 @@ int dbMount(struct inode *ipbmap)
+ bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+ bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
+ bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
++ if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) {
++ err = -EINVAL;
++ goto err_release_metapage;
++ }
++
++ if (((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
++ err = -EINVAL;
++ goto err_release_metapage;
++ }
++
+ for (i = 0; i < MAXAG; i++)
+ bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
+ bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
+@@ -214,6 +223,12 @@ int dbMount(struct inode *ipbmap)
+ BMAP_LOCK_INIT(bmp);
+
+ return (0);
++
++err_release_metapage:
++ release_metapage(mp);
++err_kfree_bmp:
++ kfree(bmp);
++ return err;
+ }
+
+
+diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
+index 9db4f5789c0ec..4fbbf88435e69 100644
+--- a/fs/jfs/namei.c
++++ b/fs/jfs/namei.c
+@@ -946,7 +946,7 @@ static int jfs_symlink(struct user_namespace *mnt_userns, struct inode *dip,
+ if (ssize <= IDATASIZE) {
+ ip->i_op = &jfs_fast_symlink_inode_operations;
+
+- ip->i_link = JFS_IP(ip)->i_inline;
++ ip->i_link = JFS_IP(ip)->i_inline_all;
+ memcpy(ip->i_link, name, ssize);
+ ip->i_size = ssize - 1;
+
+diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
+index 3fa2139a0b309..92b1603b5abeb 100644
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -108,15 +108,17 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
+ entry->method = method;
+ entry->id = ksmbd_ipc_id_alloc();
+ if (entry->id < 0)
+- goto error;
++ goto free_entry;
+
+ resp = ksmbd_rpc_open(sess, entry->id);
+ if (!resp)
+- goto error;
++ goto free_id;
+
+ kvfree(resp);
+ return entry->id;
+-error:
++free_id:
++ ksmbd_rpc_id_free(entry->id);
++free_entry:
+ list_del(&entry->list);
+ kfree(entry);
+ return -EINVAL;
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 682d56345a1cf..aada4e7c87132 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -995,8 +995,8 @@ out:
+ EXPORT_SYMBOL_GPL(simple_attr_read);
+
+ /* interpret the buffer as a number to call the set function with */
+-ssize_t simple_attr_write(struct file *file, const char __user *buf,
+- size_t len, loff_t *ppos)
++static ssize_t simple_attr_write_xsigned(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos, bool is_signed)
+ {
+ struct simple_attr *attr;
+ unsigned long long val;
+@@ -1017,7 +1017,10 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
+ goto out;
+
+ attr->set_buf[size] = '\0';
+- ret = kstrtoull(attr->set_buf, 0, &val);
++ if (is_signed)
++ ret = kstrtoll(attr->set_buf, 0, &val);
++ else
++ ret = kstrtoull(attr->set_buf, 0, &val);
+ if (ret)
+ goto out;
+ ret = attr->set(attr->data, val);
+@@ -1027,8 +1030,21 @@ out:
+ mutex_unlock(&attr->mutex);
+ return ret;
+ }
++
++ssize_t simple_attr_write(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos)
++{
++ return simple_attr_write_xsigned(file, buf, len, ppos, false);
++}
+ EXPORT_SYMBOL_GPL(simple_attr_write);
+
++ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos)
++{
++ return simple_attr_write_xsigned(file, buf, len, ppos, true);
++}
++EXPORT_SYMBOL_GPL(simple_attr_write_signed);
++
+ /**
+ * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
+ * @sb: filesystem to do the file handle conversion on
+diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
+index e1c4617de7714..3515f17eaf3fb 100644
+--- a/fs/lockd/svcsubs.c
++++ b/fs/lockd/svcsubs.c
+@@ -176,7 +176,7 @@ nlm_delete_file(struct nlm_file *file)
+ }
+ }
+
+-static int nlm_unlock_files(struct nlm_file *file, fl_owner_t owner)
++static int nlm_unlock_files(struct nlm_file *file, const struct file_lock *fl)
+ {
+ struct file_lock lock;
+
+@@ -184,12 +184,15 @@ static int nlm_unlock_files(struct nlm_file *file, fl_owner_t owner)
+ lock.fl_type = F_UNLCK;
+ lock.fl_start = 0;
+ lock.fl_end = OFFSET_MAX;
+- lock.fl_owner = owner;
+- if (file->f_file[O_RDONLY] &&
+- vfs_lock_file(file->f_file[O_RDONLY], F_SETLK, &lock, NULL))
++ lock.fl_owner = fl->fl_owner;
++ lock.fl_pid = fl->fl_pid;
++ lock.fl_flags = FL_POSIX;
++
++ lock.fl_file = file->f_file[O_RDONLY];
++ if (lock.fl_file && vfs_lock_file(lock.fl_file, F_SETLK, &lock, NULL))
+ goto out_err;
+- if (file->f_file[O_WRONLY] &&
+- vfs_lock_file(file->f_file[O_WRONLY], F_SETLK, &lock, NULL))
++ lock.fl_file = file->f_file[O_WRONLY];
++ if (lock.fl_file && vfs_lock_file(lock.fl_file, F_SETLK, &lock, NULL))
+ goto out_err;
+ return 0;
+ out_err:
+@@ -226,7 +229,7 @@ again:
+ if (match(lockhost, host)) {
+
+ spin_unlock(&flctx->flc_lock);
+- if (nlm_unlock_files(file, fl->fl_owner))
++ if (nlm_unlock_files(file, fl))
+ return 1;
+ goto again;
+ }
+diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
+index 09833ec102fca..9bcd53d5c7d46 100644
+--- a/fs/nfs/fs_context.c
++++ b/fs/nfs/fs_context.c
+@@ -684,6 +684,8 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
+ return ret;
+ break;
+ case Opt_vers:
++ if (!param->string)
++ goto out_invalid_value;
+ trace_nfs_mount_assign(param->key, param->string);
+ ret = nfs_parse_version_string(fc, param->string);
+ if (ret < 0)
+@@ -696,6 +698,8 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
+ break;
+
+ case Opt_proto:
++ if (!param->string)
++ goto out_invalid_value;
+ trace_nfs_mount_assign(param->key, param->string);
+ protofamily = AF_INET;
+ switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
+@@ -732,6 +736,8 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
+ break;
+
+ case Opt_mountproto:
++ if (!param->string)
++ goto out_invalid_value;
+ trace_nfs_mount_assign(param->key, param->string);
+ mountfamily = AF_INET;
+ switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 647fc3f547cbe..ae7d4a8c728c2 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -739,12 +739,10 @@ unsigned long nfs_io_size(unsigned long iosize, enum xprt_transports proto)
+ iosize = NFS_DEF_FILE_IO_SIZE;
+ else if (iosize >= NFS_MAX_FILE_IO_SIZE)
+ iosize = NFS_MAX_FILE_IO_SIZE;
+- else
+- iosize = iosize & PAGE_MASK;
+
+- if (proto == XPRT_TRANSPORT_UDP)
++ if (proto == XPRT_TRANSPORT_UDP || iosize < PAGE_SIZE)
+ return nfs_block_bits(iosize, NULL);
+- return iosize;
++ return iosize & PAGE_MASK;
+ }
+
+ /*
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index 2f336ace75554..88a23af2bd5c9 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -147,7 +147,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
+ struct nfs_fs_context *ctx;
+ struct fs_context *fc;
+ struct vfsmount *mnt = ERR_PTR(-ENOMEM);
+- struct nfs_server *server = NFS_SERVER(d_inode(path->dentry));
++ struct nfs_server *server = NFS_SB(path->dentry->d_sb);
+ struct nfs_client *client = server->nfs_client;
+ int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout);
+ int ret;
+diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
+index fe1aeb0f048f2..2fd465cab631d 100644
+--- a/fs/nfs/nfs42xdr.c
++++ b/fs/nfs/nfs42xdr.c
+@@ -1142,7 +1142,7 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
+ if (!segs)
+ return -ENOMEM;
+
+- xdr_set_scratch_buffer(xdr, &scratch_buf, 32);
++ xdr_set_scratch_buffer(xdr, &scratch_buf, sizeof(scratch_buf));
+ status = -EIO;
+ for (i = 0; i < segments; i++) {
+ status = decode_read_plus_segment(xdr, &segs[i]);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 86ed5c0142c3d..e51044a5f550f 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -122,6 +122,11 @@ nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
+ if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
+ return NULL;
+
++ label->lfs = 0;
++ label->pi = 0;
++ label->len = 0;
++ label->label = NULL;
++
+ err = security_dentry_init_security(dentry, sattr->ia_mode,
+ &dentry->d_name, NULL,
+ (void **)&label->label, &label->len);
+@@ -2126,18 +2131,18 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
+ }
+
+ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
+- fmode_t fmode)
++ fmode_t fmode)
+ {
+ struct nfs4_state *newstate;
++ struct nfs_server *server = NFS_SB(opendata->dentry->d_sb);
++ int openflags = opendata->o_arg.open_flags;
+ int ret;
+
+ if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
+ return 0;
+- opendata->o_arg.open_flags = 0;
+ opendata->o_arg.fmode = fmode;
+- opendata->o_arg.share_access = nfs4_map_atomic_open_share(
+- NFS_SB(opendata->dentry->d_sb),
+- fmode, 0);
++ opendata->o_arg.share_access =
++ nfs4_map_atomic_open_share(server, fmode, openflags);
+ memset(&opendata->o_res, 0, sizeof(opendata->o_res));
+ memset(&opendata->c_res, 0, sizeof(opendata->c_res));
+ nfs4_init_opendata_res(opendata);
+@@ -2719,10 +2724,15 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s
+ struct nfs4_opendata *opendata;
+ int ret;
+
+- opendata = nfs4_open_recoverdata_alloc(ctx, state,
+- NFS4_OPEN_CLAIM_FH);
++ opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH);
+ if (IS_ERR(opendata))
+ return PTR_ERR(opendata);
++ /*
++ * We're not recovering a delegation, so ask for no delegation.
++ * Otherwise the recovery thread could deadlock with an outstanding
++ * delegation return.
++ */
++ opendata->o_arg.open_flags = O_DIRECT;
+ ret = nfs4_open_recover(opendata, state);
+ if (ret == -ESTALE)
+ d_drop(ctx->dentry);
+@@ -3796,7 +3806,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
+ int open_flags, struct iattr *attr, int *opened)
+ {
+ struct nfs4_state *state;
+- struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
++ struct nfs4_label l, *label;
+
+ label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
+
+@@ -4013,7 +4023,7 @@ static int _nfs4_discover_trunking(struct nfs_server *server,
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+- return -ENOMEM;
++ goto out_put_cred;
+ locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
+ if (!locations)
+ goto out_free;
+@@ -4035,6 +4045,8 @@ out_free_2:
+ kfree(locations);
+ out_free:
+ __free_page(page);
++out_put_cred:
++ put_cred(cred);
+ return status;
+ }
+
+@@ -4682,7 +4694,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+ int flags)
+ {
+ struct nfs_server *server = NFS_SERVER(dir);
+- struct nfs4_label l, *ilabel = NULL;
++ struct nfs4_label l, *ilabel;
+ struct nfs_open_context *ctx;
+ struct nfs4_state *state;
+ int status = 0;
+@@ -5033,7 +5045,7 @@ static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
+ struct nfs4_exception exception = {
+ .interruptible = true,
+ };
+- struct nfs4_label l, *label = NULL;
++ struct nfs4_label l, *label;
+ int err;
+
+ label = nfs4_label_init_security(dir, dentry, sattr, &l);
+@@ -5074,7 +5086,7 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
+ struct nfs4_exception exception = {
+ .interruptible = true,
+ };
+- struct nfs4_label l, *label = NULL;
++ struct nfs4_label l, *label;
+ int err;
+
+ label = nfs4_label_init_security(dir, dentry, sattr, &l);
+@@ -5193,7 +5205,7 @@ static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
+ struct nfs4_exception exception = {
+ .interruptible = true,
+ };
+- struct nfs4_label l, *label = NULL;
++ struct nfs4_label l, *label;
+ int err;
+
+ label = nfs4_label_init_security(dir, dentry, sattr, &l);
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index a2d2d5d1b0888..03087ef1c7b4a 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1230,6 +1230,8 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
+ if (IS_ERR(task)) {
+ printk(KERN_ERR "%s: kthread_run: %ld\n",
+ __func__, PTR_ERR(task));
++ if (!nfs_client_init_is_complete(clp))
++ nfs_mark_client_ready(clp, PTR_ERR(task));
+ nfs4_clear_state_manager_bit(clp);
+ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ nfs_put_client(clp);
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index acfe5f4bda480..deec76cf5afea 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -4234,19 +4234,17 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap,
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return -EIO;
++ bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+ if (len < NFS4_MAXLABELLEN) {
+- if (label) {
+- if (label->len) {
+- if (label->len < len)
+- return -ERANGE;
+- memcpy(label->label, p, len);
+- }
++ if (label && label->len) {
++ if (label->len < len)
++ return -ERANGE;
++ memcpy(label->label, p, len);
+ label->len = len;
+ label->pi = pi;
+ label->lfs = lfs;
+ status = NFS_ATTR_FATTR_V4_SECURITY_LABEL;
+ }
+- bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+ } else
+ printk(KERN_WARNING "%s: label too long (%u)!\n",
+ __func__, len);
+@@ -4755,12 +4753,10 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
+ if (status < 0)
+ goto xdr_error;
+
+- if (fattr->label) {
+- status = decode_attr_security_label(xdr, bitmap, fattr->label);
+- if (status < 0)
+- goto xdr_error;
+- fattr->valid |= status;
+- }
++ status = decode_attr_security_label(xdr, bitmap, fattr->label);
++ if (status < 0)
++ goto xdr_error;
++ fattr->valid |= status;
+
+ xdr_error:
+ dprintk("%s: xdr returned %d\n", __func__, -status);
+diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
+index 13e6e6897f6cf..65d4511b7af08 100644
+--- a/fs/nfsd/nfs2acl.c
++++ b/fs/nfsd/nfs2acl.c
+@@ -246,7 +246,6 @@ nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
+ struct nfsd3_getaclres *resp = rqstp->rq_resp;
+ struct dentry *dentry = resp->fh.fh_dentry;
+ struct inode *inode;
+- int w;
+
+ if (!svcxdr_encode_stat(xdr, resp->status))
+ return false;
+@@ -260,15 +259,6 @@ nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
+ if (xdr_stream_encode_u32(xdr, resp->mask) < 0)
+ return false;
+
+- rqstp->rq_res.page_len = w = nfsacl_size(
+- (resp->mask & NFS_ACL) ? resp->acl_access : NULL,
+- (resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
+- while (w > 0) {
+- if (!*(rqstp->rq_next_page++))
+- return true;
+- w -= PAGE_SIZE;
+- }
+-
+ if (!nfs_stream_encode_acl(xdr, inode, resp->acl_access,
+ resp->mask & NFS_ACL, 0))
+ return false;
+diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
+index 2fb9ee3564558..a34a22e272ad5 100644
+--- a/fs/nfsd/nfs3acl.c
++++ b/fs/nfsd/nfs3acl.c
+@@ -171,11 +171,7 @@ nfs3svc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
+ {
+ struct nfsd3_getaclres *resp = rqstp->rq_resp;
+ struct dentry *dentry = resp->fh.fh_dentry;
+- struct kvec *head = rqstp->rq_res.head;
+ struct inode *inode;
+- unsigned int base;
+- int n;
+- int w;
+
+ if (!svcxdr_encode_nfsstat3(xdr, resp->status))
+ return false;
+@@ -187,26 +183,12 @@ nfs3svc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
+ if (xdr_stream_encode_u32(xdr, resp->mask) < 0)
+ return false;
+
+- base = (char *)xdr->p - (char *)head->iov_base;
+-
+- rqstp->rq_res.page_len = w = nfsacl_size(
+- (resp->mask & NFS_ACL) ? resp->acl_access : NULL,
+- (resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
+- while (w > 0) {
+- if (!*(rqstp->rq_next_page++))
+- return false;
+- w -= PAGE_SIZE;
+- }
+-
+- n = nfsacl_encode(&rqstp->rq_res, base, inode,
+- resp->acl_access,
+- resp->mask & NFS_ACL, 0);
+- if (n > 0)
+- n = nfsacl_encode(&rqstp->rq_res, base + n, inode,
+- resp->acl_default,
+- resp->mask & NFS_DFACL,
+- NFS_ACL_DEFAULT);
+- if (n <= 0)
++ if (!nfs_stream_encode_acl(xdr, inode, resp->acl_access,
++ resp->mask & NFS_ACL, 0))
++ return false;
++ if (!nfs_stream_encode_acl(xdr, inode, resp->acl_default,
++ resp->mask & NFS_DFACL,
++ NFS_ACL_DEFAULT))
+ return false;
+ break;
+ default:
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index f0e69edf5f0f1..6253cbe5f81b4 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -916,7 +916,6 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
+ } else {
+ if (!conn->cb_xprt)
+ return -EINVAL;
+- clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
+ clp->cl_cb_session = ses;
+ args.bc_xprt = conn->cb_xprt;
+ args.prognumber = clp->cl_cb_session->se_cb_prog;
+@@ -936,6 +935,9 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
+ rpc_shutdown_client(client);
+ return -ENOMEM;
+ }
++
++ if (clp->cl_minorversion != 0)
++ clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
+ clp->cl_cb_client = client;
+ clp->cl_cb_cred = cred;
+ rcu_read_lock();
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 8beb2bc4c328f..34d1cd5883fbb 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1133,6 +1133,8 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 0, (time64_t)0);
+ if (!status)
+ status = nfserrno(attrs.na_labelerr);
++ if (!status)
++ status = nfserrno(attrs.na_aclerr);
+ out:
+ nfsd_attrs_free(&attrs);
+ fh_drop_write(&cstate->current_fh);
+@@ -1463,13 +1465,6 @@ out_err:
+ return status;
+ }
+
+-static void
+-nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
+-{
+- nfs_do_sb_deactive(ss_mnt->mnt_sb);
+- mntput(ss_mnt);
+-}
+-
+ /*
+ * Verify COPY destination stateid.
+ *
+@@ -1572,11 +1567,6 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct file *filp,
+ {
+ }
+
+-static void
+-nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
+-{
+-}
+-
+ static struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
+ struct nfs_fh *src_fh,
+ nfs4_stateid *stateid)
+@@ -1644,6 +1634,7 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
+ u64 src_pos = copy->cp_src_pos;
+ u64 dst_pos = copy->cp_dst_pos;
+ int status;
++ loff_t end;
+
+ /* See RFC 7862 p.67: */
+ if (bytes_total == 0)
+@@ -1663,8 +1654,8 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
+ /* for a non-zero asynchronous copy do a commit of data */
+ if (nfsd4_copy_is_async(copy) && copy->cp_res.wr_bytes_written > 0) {
+ since = READ_ONCE(dst->f_wb_err);
+- status = vfs_fsync_range(dst, copy->cp_dst_pos,
+- copy->cp_res.wr_bytes_written, 0);
++ end = copy->cp_dst_pos + copy->cp_res.wr_bytes_written - 1;
++ status = vfs_fsync_range(dst, copy->cp_dst_pos, end, 0);
+ if (!status)
+ status = filemap_check_wb_err(dst->f_mapping, since);
+ if (!status)
+@@ -1771,7 +1762,7 @@ static int nfsd4_do_async_copy(void *data)
+ default:
+ nfserr = nfserr_offload_denied;
+ }
+- nfsd4_interssc_disconnect(copy->ss_mnt);
++ /* ss_mnt will be unmounted by the laundromat */
+ goto do_callback;
+ }
+ nfserr = nfsd4_do_copy(copy, filp, copy->nf_dst->nf_file,
+@@ -1852,8 +1843,10 @@ out_err:
+ if (async_copy)
+ cleanup_async_copy(async_copy);
+ status = nfserrno(-ENOMEM);
+- if (nfsd4_ssc_is_inter(copy))
+- nfsd4_interssc_disconnect(copy->ss_mnt);
++ /*
++ * source's vfsmount of inter-copy will be unmounted
++ * by the laundromat
++ */
+ goto out;
+ }
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 836bd825ca4ad..52b5552d0d70e 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -675,15 +675,26 @@ find_any_file(struct nfs4_file *f)
+ return ret;
+ }
+
+-static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
++static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
+ {
+- struct nfsd_file *ret = NULL;
++ lockdep_assert_held(&f->fi_lock);
++
++ if (f->fi_fds[O_RDWR])
++ return f->fi_fds[O_RDWR];
++ if (f->fi_fds[O_WRONLY])
++ return f->fi_fds[O_WRONLY];
++ if (f->fi_fds[O_RDONLY])
++ return f->fi_fds[O_RDONLY];
++ return NULL;
++}
++
++static struct nfsd_file *find_deleg_file_locked(struct nfs4_file *f)
++{
++ lockdep_assert_held(&f->fi_lock);
+
+- spin_lock(&f->fi_lock);
+ if (f->fi_deleg_file)
+- ret = nfsd_file_get(f->fi_deleg_file);
+- spin_unlock(&f->fi_lock);
+- return ret;
++ return f->fi_deleg_file;
++ return NULL;
+ }
+
+ static atomic_long_t num_delegations;
+@@ -2613,9 +2624,11 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
+ ols = openlockstateid(st);
+ oo = ols->st_stateowner;
+ nf = st->sc_file;
+- file = find_any_file(nf);
++
++ spin_lock(&nf->fi_lock);
++ file = find_any_file_locked(nf);
+ if (!file)
+- return 0;
++ goto out;
+
+ seq_printf(s, "- ");
+ nfs4_show_stateid(s, &st->sc_stateid);
+@@ -2637,8 +2650,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
+ seq_printf(s, ", ");
+ nfs4_show_owner(s, oo);
+ seq_printf(s, " }\n");
+- nfsd_file_put(file);
+-
++out:
++ spin_unlock(&nf->fi_lock);
+ return 0;
+ }
+
+@@ -2652,9 +2665,10 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
+ ols = openlockstateid(st);
+ oo = ols->st_stateowner;
+ nf = st->sc_file;
+- file = find_any_file(nf);
++ spin_lock(&nf->fi_lock);
++ file = find_any_file_locked(nf);
+ if (!file)
+- return 0;
++ goto out;
+
+ seq_printf(s, "- ");
+ nfs4_show_stateid(s, &st->sc_stateid);
+@@ -2674,8 +2688,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
+ seq_printf(s, ", ");
+ nfs4_show_owner(s, oo);
+ seq_printf(s, " }\n");
+- nfsd_file_put(file);
+-
++out:
++ spin_unlock(&nf->fi_lock);
+ return 0;
+ }
+
+@@ -2687,9 +2701,10 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
+
+ ds = delegstateid(st);
+ nf = st->sc_file;
+- file = find_deleg_file(nf);
++ spin_lock(&nf->fi_lock);
++ file = find_deleg_file_locked(nf);
+ if (!file)
+- return 0;
++ goto out;
+
+ seq_printf(s, "- ");
+ nfs4_show_stateid(s, &st->sc_stateid);
+@@ -2705,8 +2720,8 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
+ seq_printf(s, ", ");
+ nfs4_show_fname(s, file);
+ seq_printf(s, " }\n");
+- nfsd_file_put(file);
+-
++out:
++ spin_unlock(&nf->fi_lock);
+ return 0;
+ }
+
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index c8b89b4f94e0e..2064e6473d304 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -13,6 +13,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/backing-dev.h>
+ #include <linux/random.h>
++#include <linux/log2.h>
+ #include <linux/crc32.h>
+ #include "nilfs.h"
+ #include "segment.h"
+@@ -192,6 +193,34 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
+ return ret;
+ }
+
++/**
++ * nilfs_get_blocksize - get block size from raw superblock data
++ * @sb: super block instance
++ * @sbp: superblock raw data buffer
++ * @blocksize: place to store block size
++ *
++ * nilfs_get_blocksize() calculates the block size from the block size
++ * exponent information written in @sbp and stores it in @blocksize,
++ * or aborts with an error message if it's too large.
++ *
++ * Return Value: On success, 0 is returned. If the block size is too
++ * large, -EINVAL is returned.
++ */
++static int nilfs_get_blocksize(struct super_block *sb,
++ struct nilfs_super_block *sbp, int *blocksize)
++{
++ unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size);
++
++ if (unlikely(shift_bits >
++ ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS)) {
++ nilfs_err(sb, "too large filesystem blocksize: 2 ^ %u KiB",
++ shift_bits);
++ return -EINVAL;
++ }
++ *blocksize = BLOCK_SIZE << shift_bits;
++ return 0;
++}
++
+ /**
+ * load_nilfs - load and recover the nilfs
+ * @nilfs: the_nilfs structure to be released
+@@ -245,11 +274,15 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
+ nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);
+
+ /* verify consistency between two super blocks */
+- blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size);
++ err = nilfs_get_blocksize(sb, sbp[0], &blocksize);
++ if (err)
++ goto scan_error;
++
+ if (blocksize != nilfs->ns_blocksize) {
+ nilfs_warn(sb,
+ "blocksize differs between two super blocks (%d != %d)",
+ blocksize, nilfs->ns_blocksize);
++ err = -EINVAL;
+ goto scan_error;
+ }
+
+@@ -443,11 +476,33 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
+ return crc == le32_to_cpu(sbp->s_sum);
+ }
+
+-static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
++/**
++ * nilfs_sb2_bad_offset - check the location of the second superblock
++ * @sbp: superblock raw data buffer
++ * @offset: byte offset of second superblock calculated from device size
++ *
++ * nilfs_sb2_bad_offset() checks if the position on the second
++ * superblock is valid or not based on the filesystem parameters
++ * stored in @sbp. If @offset points to a location within the segment
++ * area, or if the parameters themselves are not normal, it is
++ * determined to be invalid.
++ *
++ * Return Value: true if invalid, false if valid.
++ */
++static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
+ {
+- return offset < ((le64_to_cpu(sbp->s_nsegments) *
+- le32_to_cpu(sbp->s_blocks_per_segment)) <<
+- (le32_to_cpu(sbp->s_log_block_size) + 10));
++ unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size);
++ u32 blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
++ u64 nsegments = le64_to_cpu(sbp->s_nsegments);
++ u64 index;
++
++ if (blocks_per_segment < NILFS_SEG_MIN_BLOCKS ||
++ shift_bits > ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS)
++ return true;
++
++ index = offset >> (shift_bits + BLOCK_SIZE_BITS);
++ do_div(index, blocks_per_segment);
++ return index < nsegments;
+ }
+
+ static void nilfs_release_super_block(struct the_nilfs *nilfs)
+@@ -586,9 +641,11 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
+ if (err)
+ goto failed_sbh;
+
+- blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
+- if (blocksize < NILFS_MIN_BLOCK_SIZE ||
+- blocksize > NILFS_MAX_BLOCK_SIZE) {
++ err = nilfs_get_blocksize(sb, sbp, &blocksize);
++ if (err)
++ goto failed_sbh;
++
++ if (blocksize < NILFS_MIN_BLOCK_SIZE) {
+ nilfs_err(sb,
+ "couldn't mount because of unsupported filesystem blocksize %d",
+ blocksize);
+diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
+index 71f870d497aed..578c2bcfb1d93 100644
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -101,6 +101,10 @@ static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
+
+ asize = le32_to_cpu(attr->size);
+ run_off = le16_to_cpu(attr->nres.run_off);
++
++ if (run_off > asize)
++ return -EINVAL;
++
+ err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
+ vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
+ asize - run_off);
+@@ -1217,6 +1221,11 @@ int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ CLST svcn, evcn;
+ u16 ro;
+
++ if (!ni) {
++ /* Is record corrupted? */
++ return -ENOENT;
++ }
++
+ attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
+ if (!attr) {
+ /* Is record corrupted? */
+@@ -1232,6 +1241,10 @@ int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ }
+
+ ro = le16_to_cpu(attr->nres.run_off);
++
++ if (ro > le32_to_cpu(attr->size))
++ return -EINVAL;
++
+ err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
+ Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
+ if (err < 0)
+@@ -1901,6 +1914,11 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
+ u16 le_sz;
+ u16 roff = le16_to_cpu(attr->nres.run_off);
+
++ if (roff > le32_to_cpu(attr->size)) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
+ evcn1 - 1, svcn, Add2Ptr(attr, roff),
+ le32_to_cpu(attr->size) - roff);
+diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
+index bad6d8a849a24..c0c6bcbc8c05c 100644
+--- a/fs/ntfs3/attrlist.c
++++ b/fs/ntfs3/attrlist.c
+@@ -68,6 +68,11 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
+
+ run_init(&ni->attr_list.run);
+
++ if (run_off > le32_to_cpu(attr->size)) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ err = run_unpack_ex(&ni->attr_list.run, ni->mi.sbi, ni->mi.rno,
+ 0, le64_to_cpu(attr->nres.evcn), 0,
+ Add2Ptr(attr, run_off),
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
+index e92bbd754365e..45f95c1cb2584 100644
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -661,7 +661,7 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
+ if (!wnd->bits_last)
+ wnd->bits_last = wbits;
+
+- wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS);
++ wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
+ if (!wnd->free_bits)
+ return -ENOMEM;
+
+@@ -1424,7 +1424,7 @@ int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
+
+ down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
+
+- for (; iw < wnd->nbits; iw++, wbit = 0) {
++ for (; iw < wnd->nwnd; iw++, wbit = 0) {
+ CLST lcn_wnd = iw * wbits;
+ struct buffer_head *bh;
+
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index 381a38a06ec22..b1b476fb7229b 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -568,6 +568,12 @@ static int ni_repack(struct ntfs_inode *ni)
+ }
+
+ roff = le16_to_cpu(attr->nres.run_off);
++
++ if (roff > le32_to_cpu(attr->size)) {
++ err = -EINVAL;
++ break;
++ }
++
+ err = run_unpack(&run, sbi, ni->mi.rno, svcn, evcn, svcn,
+ Add2Ptr(attr, roff),
+ le32_to_cpu(attr->size) - roff);
+@@ -1589,6 +1595,9 @@ int ni_delete_all(struct ntfs_inode *ni)
+ asize = le32_to_cpu(attr->size);
+ roff = le16_to_cpu(attr->nres.run_off);
+
++ if (roff > asize)
++ return -EINVAL;
++
+ /* run==1 means unpack and deallocate. */
+ run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
+ Add2Ptr(attr, roff), asize - roff);
+@@ -2291,6 +2300,11 @@ remove_wof:
+ asize = le32_to_cpu(attr->size);
+ roff = le16_to_cpu(attr->nres.run_off);
+
++ if (roff > asize) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ /*run==1 Means unpack and deallocate. */
+ run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
+ Add2Ptr(attr, roff), asize - roff);
+diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
+index 0d611a6c5511f..c662d2a519072 100644
+--- a/fs/ntfs3/fslog.c
++++ b/fs/ntfs3/fslog.c
+@@ -1132,7 +1132,7 @@ static int read_log_page(struct ntfs_log *log, u32 vbo,
+ return -EINVAL;
+
+ if (!*buffer) {
+- to_free = kmalloc(bytes, GFP_NOFS);
++ to_free = kmalloc(log->page_size, GFP_NOFS);
+ if (!to_free)
+ return -ENOMEM;
+ *buffer = to_free;
+@@ -1180,10 +1180,7 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
+ struct restart_info *info)
+ {
+ u32 skip, vbo;
+- struct RESTART_HDR *r_page = kmalloc(DefaultLogPageSize, GFP_NOFS);
+-
+- if (!r_page)
+- return -ENOMEM;
++ struct RESTART_HDR *r_page = NULL;
+
+ /* Determine which restart area we are looking for. */
+ if (first) {
+@@ -1197,7 +1194,6 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
+ /* Loop continuously until we succeed. */
+ for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
+ bool usa_error;
+- u32 sys_page_size;
+ bool brst, bchk;
+ struct RESTART_AREA *ra;
+
+@@ -1251,24 +1247,6 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
+ goto check_result;
+ }
+
+- /* Read the entire restart area. */
+- sys_page_size = le32_to_cpu(r_page->sys_page_size);
+- if (DefaultLogPageSize != sys_page_size) {
+- kfree(r_page);
+- r_page = kzalloc(sys_page_size, GFP_NOFS);
+- if (!r_page)
+- return -ENOMEM;
+-
+- if (read_log_page(log, vbo,
+- (struct RECORD_PAGE_HDR **)&r_page,
+- &usa_error)) {
+- /* Ignore any errors. */
+- kfree(r_page);
+- r_page = NULL;
+- continue;
+- }
+- }
+-
+ if (is_client_area_valid(r_page, usa_error)) {
+ info->valid_page = true;
+ ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
+@@ -2727,6 +2705,9 @@ static inline bool check_attr(const struct MFT_REC *rec,
+ return false;
+ }
+
++ if (run_off > asize)
++ return false;
++
+ if (run_unpack(NULL, sbi, 0, svcn, evcn, svcn,
+ Add2Ptr(attr, run_off), asize - run_off) < 0) {
+ return false;
+@@ -4771,6 +4752,12 @@ fake_attr:
+ u16 roff = le16_to_cpu(attr->nres.run_off);
+ CLST svcn = le64_to_cpu(attr->nres.svcn);
+
++ if (roff > t32) {
++ kfree(oa->attr);
++ oa->attr = NULL;
++ goto fake_attr;
++ }
++
+ err = run_unpack(&oa->run0, sbi, inode->i_ino, svcn,
+ le64_to_cpu(attr->nres.evcn), svcn,
+ Add2Ptr(attr, roff), t32 - roff);
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 4ed15f64b17f6..b6e22bcb929ba 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -1849,9 +1849,10 @@ int ntfs_security_init(struct ntfs_sb_info *sbi)
+ goto out;
+ }
+
+- root_sdh = resident_data(attr);
++ root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT));
+ if (root_sdh->type != ATTR_ZERO ||
+- root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) {
++ root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
++ offsetof(struct INDEX_ROOT, ihdr) + root_sdh->ihdr.used > attr->res.data_size) {
+ err = -EINVAL;
+ goto out;
+ }
+@@ -1867,9 +1868,10 @@ int ntfs_security_init(struct ntfs_sb_info *sbi)
+ goto out;
+ }
+
+- root_sii = resident_data(attr);
++ root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT));
+ if (root_sii->type != ATTR_ZERO ||
+- root_sii->rule != NTFS_COLLATION_TYPE_UINT) {
++ root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
++ offsetof(struct INDEX_ROOT, ihdr) + root_sii->ihdr.used > attr->res.data_size) {
+ err = -EINVAL;
+ goto out;
+ }
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 440328147e7e3..c27b4fe575136 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -1017,6 +1017,12 @@ ok:
+ err = 0;
+ }
+
++ /* check for index header length */
++ if (offsetof(struct INDEX_BUFFER, ihdr) + ib->ihdr.used > bytes) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ in->index = ib;
+ *node = in;
+
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index d5a3afbbbfd8c..e352aa37330cd 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -129,6 +129,9 @@ next_attr:
+ rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
+ asize = le32_to_cpu(attr->size);
+
++ if (le16_to_cpu(attr->name_off) + attr->name_len > asize)
++ goto out;
++
+ switch (attr->type) {
+ case ATTR_STD:
+ if (attr->non_res ||
+@@ -364,7 +367,13 @@ next_attr:
+ attr_unpack_run:
+ roff = le16_to_cpu(attr->nres.run_off);
+
++ if (roff > asize) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ t64 = le64_to_cpu(attr->nres.svcn);
++
+ err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
+ t64, Add2Ptr(attr, roff), asize - roff);
+ if (err < 0)
+diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
+index 7d2fac5ee2156..af1e4b364ea8e 100644
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -220,6 +220,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ return NULL;
+ }
+
++ if (off + asize < off) {
++ /* overflow check */
++ return NULL;
++ }
++
+ attr = Add2Ptr(attr, asize);
+ off += asize;
+ }
+@@ -260,6 +265,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ if (t16 + t32 > asize)
+ return NULL;
+
++ if (attr->name_len &&
++ le16_to_cpu(attr->name_off) + sizeof(short) * attr->name_len > t16) {
++ return NULL;
++ }
++
+ return attr;
+ }
+
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index 47012c9bf505e..8e2fe0f69203b 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -672,7 +672,7 @@ static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
+ if (boot->sectors_per_clusters <= 0x80)
+ return boot->sectors_per_clusters;
+ if (boot->sectors_per_clusters >= 0xf4) /* limit shift to 2MB max */
+- return 1U << (0 - boot->sectors_per_clusters);
++ return 1U << -(s8)boot->sectors_per_clusters;
+ return -EINVAL;
+ }
+
+@@ -789,7 +789,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+ : (u32)boot->record_size
+ << sbi->cluster_bits;
+
+- if (record_size > MAXIMUM_BYTES_PER_MFT)
++ if (record_size > MAXIMUM_BYTES_PER_MFT || record_size < SECTOR_SIZE)
+ goto out;
+
+ sbi->record_bits = blksize_bits(record_size);
+@@ -1141,7 +1141,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
+ goto put_inode_out;
+ }
+ bytes = inode->i_size;
+- sbi->def_table = t = kmalloc(bytes, GFP_NOFS);
++ sbi->def_table = t = kmalloc(bytes, GFP_NOFS | __GFP_NOWARN);
+ if (!t) {
+ err = -ENOMEM;
+ goto put_inode_out;
+@@ -1260,9 +1260,9 @@ load_root:
+ ref.low = cpu_to_le32(MFT_REC_ROOT);
+ ref.seq = cpu_to_le16(MFT_REC_ROOT);
+ inode = ntfs_iget5(sb, &ref, &NAME_ROOT);
+- if (IS_ERR(inode)) {
++ if (IS_ERR(inode) || !inode->i_op) {
+ ntfs_err(sb, "Failed to load root.");
+- err = PTR_ERR(inode);
++ err = IS_ERR(inode) ? PTR_ERR(inode) : -EINVAL;
+ goto out;
+ }
+
+@@ -1281,6 +1281,7 @@ out:
+ * Free resources here.
+ * ntfs_fs_free will be called with fc->s_fs_info = NULL
+ */
++ put_mount_options(sbi->options);
+ put_ntfs(sbi);
+ sb->s_fs_info = NULL;
+
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index 7de8718c68a90..ea582b4fe1d9d 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -107,7 +107,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
+ return -EFBIG;
+
+ /* Allocate memory for packed Ea. */
+- ea_p = kmalloc(size + add_bytes, GFP_NOFS);
++ ea_p = kmalloc(size_add(size, add_bytes), GFP_NOFS);
+ if (!ea_p)
+ return -ENOMEM;
+
+diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
+index 126671e6caeda..3fb98b4569a28 100644
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -157,7 +157,7 @@ static void ocfs2_queue_replay_slots(struct ocfs2_super *osb,
+ replay_map->rm_state = REPLAY_DONE;
+ }
+
+-static void ocfs2_free_replay_slots(struct ocfs2_super *osb)
++void ocfs2_free_replay_slots(struct ocfs2_super *osb)
+ {
+ struct ocfs2_replay_map *replay_map = osb->replay_map;
+
+diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
+index 969d0aa287187..41c382f68529e 100644
+--- a/fs/ocfs2/journal.h
++++ b/fs/ocfs2/journal.h
+@@ -150,6 +150,7 @@ int ocfs2_recovery_init(struct ocfs2_super *osb);
+ void ocfs2_recovery_exit(struct ocfs2_super *osb);
+
+ int ocfs2_compute_replay_slots(struct ocfs2_super *osb);
++void ocfs2_free_replay_slots(struct ocfs2_super *osb);
+ /*
+ * Journal Control:
+ * Initialize, Load, Shutdown, Wipe a journal.
+diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
+index 317126261523b..a8d5ca98fa57c 100644
+--- a/fs/ocfs2/stackglue.c
++++ b/fs/ocfs2/stackglue.c
+@@ -669,6 +669,8 @@ static struct ctl_table_header *ocfs2_table_header;
+
+ static int __init ocfs2_stack_glue_init(void)
+ {
++ int ret;
++
+ strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB);
+
+ ocfs2_table_header = register_sysctl("fs/ocfs2/nm", ocfs2_nm_table);
+@@ -678,7 +680,11 @@ static int __init ocfs2_stack_glue_init(void)
+ return -ENOMEM; /* or something. */
+ }
+
+- return ocfs2_sysfs_init();
++ ret = ocfs2_sysfs_init();
++ if (ret)
++ unregister_sysctl_table(ocfs2_table_header);
++
++ return ret;
+ }
+
+ static void __exit ocfs2_stack_glue_exit(void)
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index 42c993e53924f..0b0e6a1321018 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -1159,6 +1159,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+ out_dismount:
+ atomic_set(&osb->vol_state, VOLUME_DISABLED);
+ wake_up(&osb->osb_mount_event);
++ ocfs2_free_replay_slots(osb);
+ ocfs2_dismount_volume(sb, 1);
+ goto out;
+
+@@ -1822,12 +1823,14 @@ static int ocfs2_mount_volume(struct super_block *sb)
+ status = ocfs2_truncate_log_init(osb);
+ if (status < 0) {
+ mlog_errno(status);
+- goto out_system_inodes;
++ goto out_check_volume;
+ }
+
+ ocfs2_super_unlock(osb, 1);
+ return 0;
+
++out_check_volume:
++ ocfs2_free_replay_slots(osb);
+ out_system_inodes:
+ if (osb->local_alloc_state == OCFS2_LA_ENABLED)
+ ocfs2_shutdown_local_alloc(osb);
+diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
+index 29eaa45443727..1b508f5433846 100644
+--- a/fs/orangefs/orangefs-debugfs.c
++++ b/fs/orangefs/orangefs-debugfs.c
+@@ -194,15 +194,10 @@ void orangefs_debugfs_init(int debug_mask)
+ */
+ static void orangefs_kernel_debug_init(void)
+ {
+- int rc = -ENOMEM;
+- char *k_buffer = NULL;
++ static char k_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { };
+
+ gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__);
+
+- k_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+- if (!k_buffer)
+- goto out;
+-
+ if (strlen(kernel_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) {
+ strcpy(k_buffer, kernel_debug_string);
+ strcat(k_buffer, "\n");
+@@ -213,15 +208,14 @@ static void orangefs_kernel_debug_init(void)
+
+ debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE, 0444, debug_dir, k_buffer,
+ &kernel_debug_fops);
+-
+-out:
+- gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+ }
+
+
+ void orangefs_debugfs_cleanup(void)
+ {
+ debugfs_remove_recursive(debug_dir);
++ kfree(debug_help_string);
++ debug_help_string = NULL;
+ }
+
+ /* open ORANGEFS_KMOD_DEBUG_HELP_FILE */
+@@ -297,18 +291,13 @@ static int help_show(struct seq_file *m, void *v)
+ /*
+ * initialize the client-debug file.
+ */
+-static int orangefs_client_debug_init(void)
++static void orangefs_client_debug_init(void)
+ {
+
+- int rc = -ENOMEM;
+- char *c_buffer = NULL;
++ static char c_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { };
+
+ gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__);
+
+- c_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+- if (!c_buffer)
+- goto out;
+-
+ if (strlen(client_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) {
+ strcpy(c_buffer, client_debug_string);
+ strcat(c_buffer, "\n");
+@@ -322,13 +311,6 @@ static int orangefs_client_debug_init(void)
+ debug_dir,
+ c_buffer,
+ &kernel_debug_fops);
+-
+- rc = 0;
+-
+-out:
+-
+- gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+- return rc;
+ }
+
+ /* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/
+@@ -671,6 +653,7 @@ int orangefs_prepare_debugfs_help_string(int at_boot)
+ memset(debug_help_string, 0, DEBUG_HELP_STRING_SIZE);
+ strlcat(debug_help_string, new, string_size);
+ mutex_unlock(&orangefs_help_file_lock);
++ kfree(new);
+ }
+
+ rc = 0;
+diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c
+index cd7297815f91e..5ab741c60b7e2 100644
+--- a/fs/orangefs/orangefs-mod.c
++++ b/fs/orangefs/orangefs-mod.c
+@@ -141,7 +141,7 @@ static int __init orangefs_init(void)
+ gossip_err("%s: could not initialize device subsystem %d!\n",
+ __func__,
+ ret);
+- goto cleanup_device;
++ goto cleanup_sysfs;
+ }
+
+ ret = register_filesystem(&orangefs_fs_type);
+@@ -152,11 +152,11 @@ static int __init orangefs_init(void)
+ goto out;
+ }
+
+- orangefs_sysfs_exit();
+-
+-cleanup_device:
+ orangefs_dev_cleanup();
+
++cleanup_sysfs:
++ orangefs_sysfs_exit();
++
+ sysfs_init_failed:
+ orangefs_debugfs_cleanup();
+
+diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
+index de80b62553bb1..be4ba03a01a0f 100644
+--- a/fs/orangefs/orangefs-sysfs.c
++++ b/fs/orangefs/orangefs-sysfs.c
+@@ -896,9 +896,18 @@ static struct attribute *orangefs_default_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(orangefs_default);
+
++static struct kobject *orangefs_obj;
++
++static void orangefs_obj_release(struct kobject *kobj)
++{
++ kfree(orangefs_obj);
++ orangefs_obj = NULL;
++}
++
+ static struct kobj_type orangefs_ktype = {
+ .sysfs_ops = &orangefs_sysfs_ops,
+ .default_groups = orangefs_default_groups,
++ .release = orangefs_obj_release,
+ };
+
+ static struct orangefs_attribute acache_hard_limit_attribute =
+@@ -934,9 +943,18 @@ static struct attribute *acache_orangefs_default_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(acache_orangefs_default);
+
++static struct kobject *acache_orangefs_obj;
++
++static void acache_orangefs_obj_release(struct kobject *kobj)
++{
++ kfree(acache_orangefs_obj);
++ acache_orangefs_obj = NULL;
++}
++
+ static struct kobj_type acache_orangefs_ktype = {
+ .sysfs_ops = &orangefs_sysfs_ops,
+ .default_groups = acache_orangefs_default_groups,
++ .release = acache_orangefs_obj_release,
+ };
+
+ static struct orangefs_attribute capcache_hard_limit_attribute =
+@@ -972,9 +990,18 @@ static struct attribute *capcache_orangefs_default_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(capcache_orangefs_default);
+
++static struct kobject *capcache_orangefs_obj;
++
++static void capcache_orangefs_obj_release(struct kobject *kobj)
++{
++ kfree(capcache_orangefs_obj);
++ capcache_orangefs_obj = NULL;
++}
++
+ static struct kobj_type capcache_orangefs_ktype = {
+ .sysfs_ops = &orangefs_sysfs_ops,
+ .default_groups = capcache_orangefs_default_groups,
++ .release = capcache_orangefs_obj_release,
+ };
+
+ static struct orangefs_attribute ccache_hard_limit_attribute =
+@@ -1010,9 +1037,18 @@ static struct attribute *ccache_orangefs_default_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(ccache_orangefs_default);
+
++static struct kobject *ccache_orangefs_obj;
++
++static void ccache_orangefs_obj_release(struct kobject *kobj)
++{
++ kfree(ccache_orangefs_obj);
++ ccache_orangefs_obj = NULL;
++}
++
+ static struct kobj_type ccache_orangefs_ktype = {
+ .sysfs_ops = &orangefs_sysfs_ops,
+ .default_groups = ccache_orangefs_default_groups,
++ .release = ccache_orangefs_obj_release,
+ };
+
+ static struct orangefs_attribute ncache_hard_limit_attribute =
+@@ -1048,9 +1084,18 @@ static struct attribute *ncache_orangefs_default_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(ncache_orangefs_default);
+
++static struct kobject *ncache_orangefs_obj;
++
++static void ncache_orangefs_obj_release(struct kobject *kobj)
++{
++ kfree(ncache_orangefs_obj);
++ ncache_orangefs_obj = NULL;
++}
++
+ static struct kobj_type ncache_orangefs_ktype = {
+ .sysfs_ops = &orangefs_sysfs_ops,
+ .default_groups = ncache_orangefs_default_groups,
++ .release = ncache_orangefs_obj_release,
+ };
+
+ static struct orangefs_attribute pc_acache_attribute =
+@@ -1079,9 +1124,18 @@ static struct attribute *pc_orangefs_default_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(pc_orangefs_default);
+
++static struct kobject *pc_orangefs_obj;
++
++static void pc_orangefs_obj_release(struct kobject *kobj)
++{
++ kfree(pc_orangefs_obj);
++ pc_orangefs_obj = NULL;
++}
++
+ static struct kobj_type pc_orangefs_ktype = {
+ .sysfs_ops = &orangefs_sysfs_ops,
+ .default_groups = pc_orangefs_default_groups,
++ .release = pc_orangefs_obj_release,
+ };
+
+ static struct orangefs_attribute stats_reads_attribute =
+@@ -1103,19 +1157,20 @@ static struct attribute *stats_orangefs_default_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(stats_orangefs_default);
+
++static struct kobject *stats_orangefs_obj;
++
++static void stats_orangefs_obj_release(struct kobject *kobj)
++{
++ kfree(stats_orangefs_obj);
++ stats_orangefs_obj = NULL;
++}
++
+ static struct kobj_type stats_orangefs_ktype = {
+ .sysfs_ops = &orangefs_sysfs_ops,
+ .default_groups = stats_orangefs_default_groups,
++ .release = stats_orangefs_obj_release,
+ };
+
+-static struct kobject *orangefs_obj;
+-static struct kobject *acache_orangefs_obj;
+-static struct kobject *capcache_orangefs_obj;
+-static struct kobject *ccache_orangefs_obj;
+-static struct kobject *ncache_orangefs_obj;
+-static struct kobject *pc_orangefs_obj;
+-static struct kobject *stats_orangefs_obj;
+-
+ int orangefs_sysfs_init(void)
+ {
+ int rc = -EINVAL;
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index 6b03457f72bb1..c3032cef391ef 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -592,28 +592,42 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
+ goto out_revert_creds;
+ }
+
+- err = -ENOMEM;
+- override_cred = prepare_creds();
+- if (override_cred) {
++ if (!attr->hardlink) {
++ err = -ENOMEM;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ goto out_revert_creds;
++ /*
++ * In the creation cases(create, mkdir, mknod, symlink),
++ * ovl should transfer current's fs{u,g}id to underlying
++ * fs. Because underlying fs want to initialize its new
++ * inode owner using current's fs{u,g}id. And in this
++ * case, the @inode is a new inode that is initialized
++ * in inode_init_owner() to current's fs{u,g}id. So use
++ * the inode's i_{u,g}id to override the cred's fs{u,g}id.
++ *
++ * But in the other hardlink case, ovl_link() does not
++ * create a new inode, so just use the ovl mounter's
++ * fs{u,g}id.
++ */
+ override_cred->fsuid = inode->i_uid;
+ override_cred->fsgid = inode->i_gid;
+- if (!attr->hardlink) {
+- err = security_dentry_create_files_as(dentry,
+- attr->mode, &dentry->d_name, old_cred,
+- override_cred);
+- if (err) {
+- put_cred(override_cred);
+- goto out_revert_creds;
+- }
++ err = security_dentry_create_files_as(dentry,
++ attr->mode, &dentry->d_name, old_cred,
++ override_cred);
++ if (err) {
++ put_cred(override_cred);
++ goto out_revert_creds;
+ }
+ put_cred(override_creds(override_cred));
+ put_cred(override_cred);
+-
+- if (!ovl_dentry_is_whiteout(dentry))
+- err = ovl_create_upper(dentry, inode, attr);
+- else
+- err = ovl_create_over_whiteout(dentry, inode, attr);
+ }
++
++ if (!ovl_dentry_is_whiteout(dentry))
++ err = ovl_create_upper(dentry, inode, attr);
++ else
++ err = ovl_create_over_whiteout(dentry, inode, attr);
++
+ out_revert_creds:
+ revert_creds(old_cred);
+ return err;
+diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
+index a1a22f58ba183..6011f955436ba 100644
+--- a/fs/overlayfs/file.c
++++ b/fs/overlayfs/file.c
+@@ -96,6 +96,7 @@ static int ovl_change_flags(struct file *file, unsigned int flags)
+
+ spin_lock(&file->f_lock);
+ file->f_flags = (file->f_flags & ~OVL_SETFL_MASK) | flags;
++ file->f_iocb_flags = iocb_flags(file);
+ spin_unlock(&file->f_lock);
+
+ return 0;
+@@ -517,9 +518,16 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
+ const struct cred *old_cred;
+ int ret;
+
++ inode_lock(inode);
++ /* Update mode */
++ ovl_copyattr(inode);
++ ret = file_remove_privs(file);
++ if (ret)
++ goto out_unlock;
++
+ ret = ovl_real_fdget(file, &real);
+ if (ret)
+- return ret;
++ goto out_unlock;
+
+ old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ ret = vfs_fallocate(real.file, mode, offset, len);
+@@ -530,6 +538,9 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
+
+ fdput(real);
+
++out_unlock:
++ inode_unlock(inode);
++
+ return ret;
+ }
+
+@@ -567,14 +578,23 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
+ const struct cred *old_cred;
+ loff_t ret;
+
++ inode_lock(inode_out);
++ if (op != OVL_DEDUPE) {
++ /* Update mode */
++ ovl_copyattr(inode_out);
++ ret = file_remove_privs(file_out);
++ if (ret)
++ goto out_unlock;
++ }
++
+ ret = ovl_real_fdget(file_out, &real_out);
+ if (ret)
+- return ret;
++ goto out_unlock;
+
+ ret = ovl_real_fdget(file_in, &real_in);
+ if (ret) {
+ fdput(real_out);
+- return ret;
++ goto out_unlock;
+ }
+
+ old_cred = ovl_override_creds(file_inode(file_out)->i_sb);
+@@ -603,6 +623,9 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
+ fdput(real_in);
+ fdput(real_out);
+
++out_unlock:
++ inode_unlock(inode_out);
++
+ return ret;
+ }
+
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index a29a8afe9b262..3d14a3f1465d1 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -139,11 +139,16 @@ static int ovl_dentry_revalidate_common(struct dentry *dentry,
+ unsigned int flags, bool weak)
+ {
+ struct ovl_entry *oe = dentry->d_fsdata;
++ struct inode *inode = d_inode_rcu(dentry);
+ struct dentry *upper;
+ unsigned int i;
+ int ret = 1;
+
+- upper = ovl_dentry_upper(dentry);
++ /* Careful in RCU mode */
++ if (!inode)
++ return -ECHILD;
++
++ upper = ovl_i_dentry_upper(inode);
+ if (upper)
+ ret = ovl_revalidate_real(upper, flags, weak);
+
+diff --git a/fs/pnode.c b/fs/pnode.c
+index 1106137c747a3..468e4e65a615d 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -244,7 +244,7 @@ static int propagate_one(struct mount *m)
+ }
+ do {
+ struct mount *parent = last_source->mnt_parent;
+- if (last_source == first_source)
++ if (peers(last_source, first_source))
+ break;
+ done = parent->mnt_master == p;
+ if (done && peers(n, parent))
+diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
+index 8adabde685f13..c49d554cc9ae9 100644
+--- a/fs/pstore/Kconfig
++++ b/fs/pstore/Kconfig
+@@ -126,6 +126,7 @@ config PSTORE_CONSOLE
+ config PSTORE_PMSG
+ bool "Log user space messages"
+ depends on PSTORE
++ select RT_MUTEXES
+ help
+ When the option is enabled, pstore will export a character
+ interface /dev/pmsg0 to log user space messages. On reboot
+diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c
+index d8542ec2f38c6..18cf94b597e05 100644
+--- a/fs/pstore/pmsg.c
++++ b/fs/pstore/pmsg.c
+@@ -7,9 +7,10 @@
+ #include <linux/device.h>
+ #include <linux/fs.h>
+ #include <linux/uaccess.h>
++#include <linux/rtmutex.h>
+ #include "internal.h"
+
+-static DEFINE_MUTEX(pmsg_lock);
++static DEFINE_RT_MUTEX(pmsg_lock);
+
+ static ssize_t write_pmsg(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+@@ -28,9 +29,9 @@ static ssize_t write_pmsg(struct file *file, const char __user *buf,
+ if (!access_ok(buf, count))
+ return -EFAULT;
+
+- mutex_lock(&pmsg_lock);
++ rt_mutex_lock(&pmsg_lock);
+ ret = psinfo->write_user(&record, buf);
+- mutex_unlock(&pmsg_lock);
++ rt_mutex_unlock(&pmsg_lock);
+ return ret ? ret : count;
+ }
+
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
+index fefe3d391d3af..f3fa3625d772c 100644
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -670,7 +670,7 @@ static int ramoops_parse_dt(struct platform_device *pdev,
+ field = value; \
+ }
+
+- parse_u32("mem-type", pdata->record_size, pdata->mem_type);
++ parse_u32("mem-type", pdata->mem_type, pdata->mem_type);
+ parse_u32("record-size", pdata->record_size, 0);
+ parse_u32("console-size", pdata->console_size, 0);
+ parse_u32("ftrace-size", pdata->ftrace_size, 0);
+@@ -735,6 +735,7 @@ static int ramoops_probe(struct platform_device *pdev)
+ /* Make sure we didn't get bogus platform data pointer. */
+ if (!pdata) {
+ pr_err("NULL platform data\n");
++ err = -EINVAL;
+ goto fail_out;
+ }
+
+@@ -742,6 +743,7 @@ static int ramoops_probe(struct platform_device *pdev)
+ !pdata->ftrace_size && !pdata->pmsg_size)) {
+ pr_err("The memory size and the record/console size must be "
+ "non-zero\n");
++ err = -EINVAL;
+ goto fail_out;
+ }
+
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index a89e33719fcf2..8bf09886e7e66 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -439,7 +439,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
+ phys_addr_t addr = page_start + i * PAGE_SIZE;
+ pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
+ }
+- vaddr = vmap(pages, page_count, VM_MAP, prot);
++ /*
++ * VM_IOREMAP used here to bypass this region during vread()
++ * and kmap_atomic() (i.e. kcore) to avoid __va() failures.
++ */
++ vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot);
+ kfree(pages);
+
+ /*
+diff --git a/fs/pstore/zone.c b/fs/pstore/zone.c
+index 017d0d4ad3295..2770746bb7aa1 100644
+--- a/fs/pstore/zone.c
++++ b/fs/pstore/zone.c
+@@ -761,7 +761,7 @@ static inline int notrace psz_kmsg_write_record(struct psz_context *cxt,
+ /* avoid destroying old data, allocate a new one */
+ len = zone->buffer_size + sizeof(*zone->buffer);
+ zone->oldbuf = zone->buffer;
+- zone->buffer = kzalloc(len, GFP_KERNEL);
++ zone->buffer = kzalloc(len, GFP_ATOMIC);
+ if (!zone->buffer) {
+ zone->buffer = zone->oldbuf;
+ return -ENOMEM;
+diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
+index 3d7a35d6a18bc..b916859992ec8 100644
+--- a/fs/reiserfs/namei.c
++++ b/fs/reiserfs/namei.c
+@@ -696,6 +696,7 @@ static int reiserfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+
+ out_failed:
+ reiserfs_write_unlock(dir->i_sb);
++ reiserfs_security_free(&security);
+ return retval;
+ }
+
+@@ -779,6 +780,7 @@ static int reiserfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+
+ out_failed:
+ reiserfs_write_unlock(dir->i_sb);
++ reiserfs_security_free(&security);
+ return retval;
+ }
+
+@@ -878,6 +880,7 @@ static int reiserfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ retval = journal_end(&th);
+ out_failed:
+ reiserfs_write_unlock(dir->i_sb);
++ reiserfs_security_free(&security);
+ return retval;
+ }
+
+@@ -1194,6 +1197,7 @@ static int reiserfs_symlink(struct user_namespace *mnt_userns,
+ retval = journal_end(&th);
+ out_failed:
+ reiserfs_write_unlock(parent_dir->i_sb);
++ reiserfs_security_free(&security);
+ return retval;
+ }
+
+diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
+index 8965c8e5e172b..857a65b057264 100644
+--- a/fs/reiserfs/xattr_security.c
++++ b/fs/reiserfs/xattr_security.c
+@@ -50,6 +50,7 @@ int reiserfs_security_init(struct inode *dir, struct inode *inode,
+ int error;
+
+ sec->name = NULL;
++ sec->value = NULL;
+
+ /* Don't add selinux attributes on xattrs - they'll never get used */
+ if (IS_PRIVATE(dir))
+@@ -95,7 +96,6 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th,
+
+ void reiserfs_security_free(struct reiserfs_security_handle *sec)
+ {
+- kfree(sec->name);
+ kfree(sec->value);
+ sec->name = NULL;
+ sec->value = NULL;
+diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
+index d4ec9bb97de95..3b8567564e7e4 100644
+--- a/fs/sysv/itree.c
++++ b/fs/sysv/itree.c
+@@ -438,7 +438,7 @@ static unsigned sysv_nblocks(struct super_block *s, loff_t size)
+ res += blocks;
+ direct = 1;
+ }
+- return blocks;
++ return res;
+ }
+
+ int sysv_getattr(struct user_namespace *mnt_userns, const struct path *path,
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index dce6ae9ae306c..f713d108f21d3 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -439,6 +439,12 @@ static int udf_get_block(struct inode *inode, sector_t block,
+ iinfo->i_next_alloc_goal++;
+ }
+
++ /*
++ * Block beyond EOF and prealloc extents? Just discard preallocation
++ * as it is not useful and complicates things.
++ */
++ if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents)
++ udf_discard_prealloc(inode);
+ udf_clear_extent_cache(inode);
+ phys = inode_getblk(inode, block, &err, &new);
+ if (!phys)
+@@ -488,8 +494,6 @@ static int udf_do_extend_file(struct inode *inode,
+ uint32_t add;
+ int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
+ struct super_block *sb = inode->i_sb;
+- struct kernel_lb_addr prealloc_loc = {};
+- uint32_t prealloc_len = 0;
+ struct udf_inode_info *iinfo;
+ int err;
+
+@@ -510,19 +514,6 @@ static int udf_do_extend_file(struct inode *inode,
+ ~(sb->s_blocksize - 1);
+ }
+
+- /* Last extent are just preallocated blocks? */
+- if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
+- EXT_NOT_RECORDED_ALLOCATED) {
+- /* Save the extent so that we can reattach it to the end */
+- prealloc_loc = last_ext->extLocation;
+- prealloc_len = last_ext->extLength;
+- /* Mark the extent as a hole */
+- last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
+- (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
+- last_ext->extLocation.logicalBlockNum = 0;
+- last_ext->extLocation.partitionReferenceNum = 0;
+- }
+-
+ /* Can we merge with the previous extent? */
+ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
+ EXT_NOT_RECORDED_NOT_ALLOCATED) {
+@@ -550,7 +541,7 @@ static int udf_do_extend_file(struct inode *inode,
+ * more extents, we may need to enter possible following
+ * empty indirect extent.
+ */
+- if (new_block_bytes || prealloc_len)
++ if (new_block_bytes)
+ udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
+ }
+
+@@ -584,17 +575,6 @@ static int udf_do_extend_file(struct inode *inode,
+ }
+
+ out:
+- /* Do we have some preallocated blocks saved? */
+- if (prealloc_len) {
+- err = udf_add_aext(inode, last_pos, &prealloc_loc,
+- prealloc_len, 1);
+- if (err)
+- return err;
+- last_ext->extLocation = prealloc_loc;
+- last_ext->extLength = prealloc_len;
+- count++;
+- }
+-
+ /* last_pos should point to the last written extent... */
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ last_pos->offset -= sizeof(struct short_ad);
+@@ -610,13 +590,17 @@ out:
+ static void udf_do_extend_final_block(struct inode *inode,
+ struct extent_position *last_pos,
+ struct kernel_long_ad *last_ext,
+- uint32_t final_block_len)
++ uint32_t new_elen)
+ {
+- struct super_block *sb = inode->i_sb;
+ uint32_t added_bytes;
+
+- added_bytes = final_block_len -
+- (last_ext->extLength & (sb->s_blocksize - 1));
++ /*
++ * Extent already large enough? It may be already rounded up to block
++ * size...
++ */
++ if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK))
++ return;
++ added_bytes = (last_ext->extLength & UDF_EXTENT_LENGTH_MASK) - new_elen;
+ last_ext->extLength += added_bytes;
+ UDF_I(inode)->i_lenExtents += added_bytes;
+
+@@ -633,12 +617,12 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ int8_t etype;
+ struct super_block *sb = inode->i_sb;
+ sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
+- unsigned long partial_final_block;
++ loff_t new_elen;
+ int adsize;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ struct kernel_long_ad extent;
+ int err = 0;
+- int within_final_block;
++ bool within_last_ext;
+
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ adsize = sizeof(struct short_ad);
+@@ -647,8 +631,17 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ else
+ BUG();
+
++ /*
++ * When creating hole in file, just don't bother with preserving
++ * preallocation. It likely won't be very useful anyway.
++ */
++ udf_discard_prealloc(inode);
++
+ etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
+- within_final_block = (etype != -1);
++ within_last_ext = (etype != -1);
++ /* We don't expect extents past EOF... */
++ WARN_ON_ONCE(within_last_ext &&
++ elen > ((loff_t)offset + 1) << inode->i_blkbits);
+
+ if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
+ (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
+@@ -664,19 +657,17 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ extent.extLength |= etype << 30;
+ }
+
+- partial_final_block = newsize & (sb->s_blocksize - 1);
++ new_elen = ((loff_t)offset << inode->i_blkbits) |
++ (newsize & (sb->s_blocksize - 1));
+
+ /* File has extent covering the new size (could happen when extending
+ * inside a block)?
+ */
+- if (within_final_block) {
++ if (within_last_ext) {
+ /* Extending file within the last file block */
+- udf_do_extend_final_block(inode, &epos, &extent,
+- partial_final_block);
++ udf_do_extend_final_block(inode, &epos, &extent, new_elen);
+ } else {
+- loff_t add = ((loff_t)offset << sb->s_blocksize_bits) |
+- partial_final_block;
+- err = udf_do_extend_file(inode, &epos, &extent, add);
++ err = udf_do_extend_file(inode, &epos, &extent, new_elen);
+ }
+
+ if (err < 0)
+@@ -777,10 +768,11 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ goto out_free;
+ }
+
+- /* Are we beyond EOF? */
++ /* Are we beyond EOF and preallocated extent? */
+ if (etype == -1) {
+ int ret;
+ loff_t hole_len;
++
+ isBeyondEOF = true;
+ if (count) {
+ if (c)
+diff --git a/fs/udf/namei.c b/fs/udf/namei.c
+index ae7bc13a5298a..7c95c549dd64e 100644
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -1091,8 +1091,9 @@ static int udf_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ return -EINVAL;
+
+ ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
+- if (IS_ERR(ofi)) {
+- retval = PTR_ERR(ofi);
++ if (!ofi || IS_ERR(ofi)) {
++ if (IS_ERR(ofi))
++ retval = PTR_ERR(ofi);
+ goto end_rename;
+ }
+
+@@ -1101,8 +1102,7 @@ static int udf_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+
+ brelse(ofibh.sbh);
+ tloc = lelb_to_cpu(ocfi.icb.extLocation);
+- if (!ofi || udf_get_lb_pblock(old_dir->i_sb, &tloc, 0)
+- != old_inode->i_ino)
++ if (udf_get_lb_pblock(old_dir->i_sb, &tloc, 0) != old_inode->i_ino)
+ goto end_rename;
+
+ nfi = udf_find_entry(new_dir, &new_dentry->d_name, &nfibh, &ncfi);
+diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
+index 532cda99644ee..036ebd892b852 100644
+--- a/fs/udf/truncate.c
++++ b/fs/udf/truncate.c
+@@ -120,60 +120,42 @@ void udf_truncate_tail_extent(struct inode *inode)
+
+ void udf_discard_prealloc(struct inode *inode)
+ {
+- struct extent_position epos = { NULL, 0, {0, 0} };
++ struct extent_position epos = {};
++ struct extent_position prev_epos = {};
+ struct kernel_lb_addr eloc;
+ uint32_t elen;
+ uint64_t lbcount = 0;
+ int8_t etype = -1, netype;
+- int adsize;
+ struct udf_inode_info *iinfo = UDF_I(inode);
++ int bsize = 1 << inode->i_blkbits;
+
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ||
+- inode->i_size == iinfo->i_lenExtents)
++ ALIGN(inode->i_size, bsize) == ALIGN(iinfo->i_lenExtents, bsize))
+ return;
+
+- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+- adsize = sizeof(struct short_ad);
+- else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
+- adsize = sizeof(struct long_ad);
+- else
+- adsize = 0;
+-
+ epos.block = iinfo->i_location;
+
+ /* Find the last extent in the file */
+- while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
+- etype = netype;
++ while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 0)) != -1) {
++ brelse(prev_epos.bh);
++ prev_epos = epos;
++ if (prev_epos.bh)
++ get_bh(prev_epos.bh);
++
++ etype = udf_next_aext(inode, &epos, &eloc, &elen, 1);
+ lbcount += elen;
+ }
+ if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
+- epos.offset -= adsize;
+ lbcount -= elen;
+- extent_trunc(inode, &epos, &eloc, etype, elen, 0);
+- if (!epos.bh) {
+- iinfo->i_lenAlloc =
+- epos.offset -
+- udf_file_entry_alloc_offset(inode);
+- mark_inode_dirty(inode);
+- } else {
+- struct allocExtDesc *aed =
+- (struct allocExtDesc *)(epos.bh->b_data);
+- aed->lengthAllocDescs =
+- cpu_to_le32(epos.offset -
+- sizeof(struct allocExtDesc));
+- if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
+- UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
+- udf_update_tag(epos.bh->b_data, epos.offset);
+- else
+- udf_update_tag(epos.bh->b_data,
+- sizeof(struct allocExtDesc));
+- mark_buffer_dirty_inode(epos.bh, inode);
+- }
++ udf_delete_aext(inode, prev_epos);
++ udf_free_blocks(inode->i_sb, inode, &eloc, 0,
++ DIV_ROUND_UP(elen, 1 << inode->i_blkbits));
+ }
+ /* This inode entry is in-memory only and thus we don't have to mark
+ * the inode dirty */
+ iinfo->i_lenExtents = lbcount;
+ brelse(epos.bh);
++ brelse(prev_epos.bh);
+ }
+
+ static void udf_update_alloc_ext_desc(struct inode *inode,
+diff --git a/fs/xattr.c b/fs/xattr.c
+index 61107b6bbed29..427b8cea1f968 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -1140,7 +1140,7 @@ static int xattr_list_one(char **buffer, ssize_t *remaining_size,
+ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
+ char *buffer, size_t size)
+ {
+- bool trusted = capable(CAP_SYS_ADMIN);
++ bool trusted = ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
+ struct simple_xattr *xattr;
+ ssize_t remaining_size = size;
+ int err = 0;
+diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
+index 56aee949c6fa2..4d830fc55a3df 100644
+--- a/include/drm/drm_connector.h
++++ b/include/drm/drm_connector.h
+@@ -656,6 +656,12 @@ struct drm_display_info {
+ * @mso_pixel_overlap: eDP MSO segment pixel overlap, 0-8 pixels.
+ */
+ u8 mso_pixel_overlap;
++
++ /**
++ * @max_dsc_bpp: Maximum DSC target bitrate, if it is set to 0 the
++ * monitor's default value is used instead.
++ */
++ u32 max_dsc_bpp;
+ };
+
+ int drm_display_info_set_bus_formats(struct drm_display_info *info,
+diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
+index 17a0310e8aaaf..b7d3f3843f1e6 100644
+--- a/include/drm/ttm/ttm_tt.h
++++ b/include/drm/ttm/ttm_tt.h
+@@ -88,7 +88,7 @@ struct ttm_tt {
+ #define TTM_TT_FLAG_EXTERNAL (1 << 2)
+ #define TTM_TT_FLAG_EXTERNAL_MAPPABLE (1 << 3)
+
+-#define TTM_TT_FLAG_PRIV_POPULATED (1 << 31)
++#define TTM_TT_FLAG_PRIV_POPULATED (1U << 31)
+ uint32_t page_flags;
+ /** @num_pages: Number of pages in the page array. */
+ uint32_t num_pages;
+diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h
+index 07b8a282c2682..04809edab33cf 100644
+--- a/include/dt-bindings/clock/imx8mn-clock.h
++++ b/include/dt-bindings/clock/imx8mn-clock.h
+@@ -16,40 +16,48 @@
+ #define IMX8MN_CLK_EXT4 7
+ #define IMX8MN_AUDIO_PLL1_REF_SEL 8
+ #define IMX8MN_AUDIO_PLL2_REF_SEL 9
+-#define IMX8MN_VIDEO_PLL1_REF_SEL 10
++#define IMX8MN_VIDEO_PLL_REF_SEL 10
++#define IMX8MN_VIDEO_PLL1_REF_SEL IMX8MN_VIDEO_PLL_REF_SEL
+ #define IMX8MN_DRAM_PLL_REF_SEL 11
+ #define IMX8MN_GPU_PLL_REF_SEL 12
+-#define IMX8MN_VPU_PLL_REF_SEL 13
++#define IMX8MN_M7_ALT_PLL_REF_SEL 13
++#define IMX8MN_VPU_PLL_REF_SEL IMX8MN_M7_ALT_PLL_REF_SEL
+ #define IMX8MN_ARM_PLL_REF_SEL 14
+ #define IMX8MN_SYS_PLL1_REF_SEL 15
+ #define IMX8MN_SYS_PLL2_REF_SEL 16
+ #define IMX8MN_SYS_PLL3_REF_SEL 17
+ #define IMX8MN_AUDIO_PLL1 18
+ #define IMX8MN_AUDIO_PLL2 19
+-#define IMX8MN_VIDEO_PLL1 20
++#define IMX8MN_VIDEO_PLL 20
++#define IMX8MN_VIDEO_PLL1 IMX8MN_VIDEO_PLL
+ #define IMX8MN_DRAM_PLL 21
+ #define IMX8MN_GPU_PLL 22
+-#define IMX8MN_VPU_PLL 23
++#define IMX8MN_M7_ALT_PLL 23
++#define IMX8MN_VPU_PLL IMX8MN_M7_ALT_PLL
+ #define IMX8MN_ARM_PLL 24
+ #define IMX8MN_SYS_PLL1 25
+ #define IMX8MN_SYS_PLL2 26
+ #define IMX8MN_SYS_PLL3 27
+ #define IMX8MN_AUDIO_PLL1_BYPASS 28
+ #define IMX8MN_AUDIO_PLL2_BYPASS 29
+-#define IMX8MN_VIDEO_PLL1_BYPASS 30
++#define IMX8MN_VIDEO_PLL_BYPASS 30
++#define IMX8MN_VIDEO_PLL1_BYPASS IMX8MN_VIDEO_PLL_BYPASS
+ #define IMX8MN_DRAM_PLL_BYPASS 31
+ #define IMX8MN_GPU_PLL_BYPASS 32
+-#define IMX8MN_VPU_PLL_BYPASS 33
++#define IMX8MN_M7_ALT_PLL_BYPASS 33
++#define IMX8MN_VPU_PLL_BYPASS IMX8MN_M7_ALT_PLL_BYPASS
+ #define IMX8MN_ARM_PLL_BYPASS 34
+ #define IMX8MN_SYS_PLL1_BYPASS 35
+ #define IMX8MN_SYS_PLL2_BYPASS 36
+ #define IMX8MN_SYS_PLL3_BYPASS 37
+ #define IMX8MN_AUDIO_PLL1_OUT 38
+ #define IMX8MN_AUDIO_PLL2_OUT 39
+-#define IMX8MN_VIDEO_PLL1_OUT 40
++#define IMX8MN_VIDEO_PLL_OUT 40
++#define IMX8MN_VIDEO_PLL1_OUT IMX8MN_VIDEO_PLL_OUT
+ #define IMX8MN_DRAM_PLL_OUT 41
+ #define IMX8MN_GPU_PLL_OUT 42
+-#define IMX8MN_VPU_PLL_OUT 43
++#define IMX8MN_M7_ALT_PLL_OUT 43
++#define IMX8MN_VPU_PLL_OUT IMX8MN_M7_ALT_PLL_OUT
+ #define IMX8MN_ARM_PLL_OUT 44
+ #define IMX8MN_SYS_PLL1_OUT 45
+ #define IMX8MN_SYS_PLL2_OUT 46
+diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
+index 9d5cc2ddde896..1417b7b1b7dfe 100644
+--- a/include/dt-bindings/clock/imx8mp-clock.h
++++ b/include/dt-bindings/clock/imx8mp-clock.h
+@@ -324,8 +324,9 @@
+ #define IMX8MP_CLK_CLKOUT2_SEL 317
+ #define IMX8MP_CLK_CLKOUT2_DIV 318
+ #define IMX8MP_CLK_CLKOUT2 319
++#define IMX8MP_CLK_USB_SUSP 320
+
+-#define IMX8MP_CLK_END 320
++#define IMX8MP_CLK_END 321
+
+ #define IMX8MP_CLK_AUDIOMIX_SAI1_IPG 0
+ #define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1 1
+diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
+index 2aea877d644f8..2b98720084285 100644
+--- a/include/linux/btf_ids.h
++++ b/include/linux/btf_ids.h
+@@ -204,7 +204,7 @@ extern struct btf_id_set8 name;
+
+ #else
+
+-#define BTF_ID_LIST(name) static u32 __maybe_unused name[5];
++#define BTF_ID_LIST(name) static u32 __maybe_unused name[16];
+ #define BTF_ID(prefix, name)
+ #define BTF_ID_FLAGS(prefix, name, ...)
+ #define BTF_ID_UNUSED
+diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
+index f60674692d365..ea2d919fd9c79 100644
+--- a/include/linux/debugfs.h
++++ b/include/linux/debugfs.h
+@@ -45,7 +45,7 @@ struct debugfs_u32_array {
+
+ extern struct dentry *arch_debugfs_dir;
+
+-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
++#define DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
+ static int __fops ## _open(struct inode *inode, struct file *file) \
+ { \
+ __simple_attr_check_format(__fmt, 0ull); \
+@@ -56,10 +56,16 @@ static const struct file_operations __fops = { \
+ .open = __fops ## _open, \
+ .release = simple_attr_release, \
+ .read = debugfs_attr_read, \
+- .write = debugfs_attr_write, \
++ .write = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write, \
+ .llseek = no_llseek, \
+ }
+
++#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
++ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
++
++#define DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
++ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
++
+ typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
+
+ #if defined(CONFIG_DEBUG_FS)
+@@ -102,6 +108,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos);
+ ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
++ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos);
+
+ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+ struct dentry *new_dir, const char *new_name);
+@@ -254,6 +262,13 @@ static inline ssize_t debugfs_attr_write(struct file *file,
+ return -ENODEV;
+ }
+
++static inline ssize_t debugfs_attr_write_signed(struct file *file,
++ const char __user *buf,
++ size_t len, loff_t *ppos)
++{
++ return -ENODEV;
++}
++
+ static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+ struct dentry *new_dir, char *new_name)
+ {
+diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
+index 30eb30d6909b0..36a486505b081 100644
+--- a/include/linux/eventfd.h
++++ b/include/linux/eventfd.h
+@@ -40,6 +40,7 @@ struct file *eventfd_fget(int fd);
+ struct eventfd_ctx *eventfd_ctx_fdget(int fd);
+ struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
+ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
++__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask);
+ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
+ __u64 *cnt);
+ void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
+@@ -61,7 +62,13 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
+ return ERR_PTR(-ENOSYS);
+ }
+
+-static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
++static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
++{
++ return -ENOSYS;
++}
++
++static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
++ unsigned mask)
+ {
+ return -ENOSYS;
+ }
+diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
+index 1067a8450826b..5001a11258e4d 100644
+--- a/include/linux/fortify-string.h
++++ b/include/linux/fortify-string.h
+@@ -18,7 +18,7 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("
+
+ #define __compiletime_strlen(p) \
+ ({ \
+- unsigned char *__p = (unsigned char *)(p); \
++ char *__p = (char *)(p); \
+ size_t __ret = SIZE_MAX; \
+ size_t __p_size = __member_size(p); \
+ if (__p_size != SIZE_MAX && \
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 59ae95ddb6793..6b115bce14b98 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3493,7 +3493,7 @@ void simple_transaction_set(struct file *file, size_t n);
+ * All attributes contain a text representation of a numeric value
+ * that are accessed with the get() and set() functions.
+ */
+-#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
++#define DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
+ static int __fops ## _open(struct inode *inode, struct file *file) \
+ { \
+ __simple_attr_check_format(__fmt, 0ull); \
+@@ -3504,10 +3504,16 @@ static const struct file_operations __fops = { \
+ .open = __fops ## _open, \
+ .release = simple_attr_release, \
+ .read = simple_attr_read, \
+- .write = simple_attr_write, \
++ .write = (__is_signed) ? simple_attr_write_signed : simple_attr_write, \
+ .llseek = generic_file_llseek, \
+ }
+
++#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
++ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
++
++#define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
++ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
++
+ static inline __printf(1, 2)
+ void __simple_attr_check_format(const char *fmt, ...)
+ {
+@@ -3522,6 +3528,8 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos);
+ ssize_t simple_attr_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
++ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos);
+
+ struct ctl_table;
+ int __init list_bdev_fs_names(char *buf, size_t size);
+diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
+index e230c7c46110a..c3618255b1504 100644
+--- a/include/linux/hisi_acc_qm.h
++++ b/include/linux/hisi_acc_qm.h
+@@ -384,14 +384,14 @@ struct hisi_qp {
+ static inline int q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device)
+ {
+- struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
+- device, NULL);
++ struct pci_dev *pdev;
+ u32 n, q_num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
++ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL);
+ if (!pdev) {
+ q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
+ pr_info("No device found currently, suppose queue number is %u\n",
+@@ -401,6 +401,8 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp,
+ q_num = QM_QNUM_V1;
+ else
+ q_num = QM_QNUM_V2;
++
++ pci_dev_put(pdev);
+ }
+
+ ret = kstrtou32(val, 10, &n);
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index 3b42264333ef8..646f1da9f27e0 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -1341,6 +1341,8 @@ struct hv_ring_buffer_debug_info {
+ int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
+ struct hv_ring_buffer_debug_info *debug_info);
+
++bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel);
++
+ /* Vmbus interface */
+ #define vmbus_driver_register(driver) \
+ __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index 79690938d9a2d..d3088666f3f44 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -4594,7 +4594,7 @@ static inline u8 ieee80211_mle_common_size(const u8 *data)
+ return 0;
+ }
+
+- return common + mle->variable[0];
++ return sizeof(*mle) + common + mle->variable[0];
+ }
+
+ /**
+diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
+index 515ca09764fe4..bcbefb7574751 100644
+--- a/include/linux/iio/imu/adis.h
++++ b/include/linux/iio/imu/adis.h
+@@ -402,9 +402,20 @@ static inline int adis_update_bits_base(struct adis *adis, unsigned int reg,
+ __adis_update_bits_base(adis, reg, mask, val, sizeof(val)); \
+ })
+
+-int adis_enable_irq(struct adis *adis, bool enable);
+ int __adis_check_status(struct adis *adis);
+ int __adis_initial_startup(struct adis *adis);
++int __adis_enable_irq(struct adis *adis, bool enable);
++
++static inline int adis_enable_irq(struct adis *adis, bool enable)
++{
++ int ret;
++
++ mutex_lock(&adis->state_lock);
++ ret = __adis_enable_irq(adis, enable);
++ mutex_unlock(&adis->state_lock);
++
++ return ret;
++}
+
+ static inline int adis_check_status(struct adis *adis)
+ {
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index eddf8ee270e74..ba2bd604359d4 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -171,31 +171,38 @@ static inline bool dev_xmit_complete(int rc)
+ * (unsigned long) so they can be read and written atomically.
+ */
+
++#define NET_DEV_STAT(FIELD) \
++ union { \
++ unsigned long FIELD; \
++ atomic_long_t __##FIELD; \
++ }
++
+ struct net_device_stats {
+- unsigned long rx_packets;
+- unsigned long tx_packets;
+- unsigned long rx_bytes;
+- unsigned long tx_bytes;
+- unsigned long rx_errors;
+- unsigned long tx_errors;
+- unsigned long rx_dropped;
+- unsigned long tx_dropped;
+- unsigned long multicast;
+- unsigned long collisions;
+- unsigned long rx_length_errors;
+- unsigned long rx_over_errors;
+- unsigned long rx_crc_errors;
+- unsigned long rx_frame_errors;
+- unsigned long rx_fifo_errors;
+- unsigned long rx_missed_errors;
+- unsigned long tx_aborted_errors;
+- unsigned long tx_carrier_errors;
+- unsigned long tx_fifo_errors;
+- unsigned long tx_heartbeat_errors;
+- unsigned long tx_window_errors;
+- unsigned long rx_compressed;
+- unsigned long tx_compressed;
++ NET_DEV_STAT(rx_packets);
++ NET_DEV_STAT(tx_packets);
++ NET_DEV_STAT(rx_bytes);
++ NET_DEV_STAT(tx_bytes);
++ NET_DEV_STAT(rx_errors);
++ NET_DEV_STAT(tx_errors);
++ NET_DEV_STAT(rx_dropped);
++ NET_DEV_STAT(tx_dropped);
++ NET_DEV_STAT(multicast);
++ NET_DEV_STAT(collisions);
++ NET_DEV_STAT(rx_length_errors);
++ NET_DEV_STAT(rx_over_errors);
++ NET_DEV_STAT(rx_crc_errors);
++ NET_DEV_STAT(rx_frame_errors);
++ NET_DEV_STAT(rx_fifo_errors);
++ NET_DEV_STAT(rx_missed_errors);
++ NET_DEV_STAT(tx_aborted_errors);
++ NET_DEV_STAT(tx_carrier_errors);
++ NET_DEV_STAT(tx_fifo_errors);
++ NET_DEV_STAT(tx_heartbeat_errors);
++ NET_DEV_STAT(tx_window_errors);
++ NET_DEV_STAT(rx_compressed);
++ NET_DEV_STAT(tx_compressed);
+ };
++#undef NET_DEV_STAT
+
+ /* per-cpu stats, allocated on demand.
+ * Try to fit them in a single cache line, for dev_get_stats() sake.
+@@ -5164,4 +5171,9 @@ extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
+
+ extern struct net_device *blackhole_netdev;
+
++/* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
++#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
++#define DEV_STATS_ADD(DEV, FIELD, VAL) \
++ atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
++
+ #endif /* _LINUX_NETDEVICE_H */
+diff --git a/include/linux/nvme.h b/include/linux/nvme.h
+index 050d7d0cd81b0..d9fbc5afeaf72 100644
+--- a/include/linux/nvme.h
++++ b/include/linux/nvme.h
+@@ -7,6 +7,7 @@
+ #ifndef _LINUX_NVME_H
+ #define _LINUX_NVME_H
+
++#include <linux/bits.h>
+ #include <linux/types.h>
+ #include <linux/uuid.h>
+
+@@ -639,7 +640,7 @@ enum {
+ NVME_CMD_EFFECTS_NCC = 1 << 2,
+ NVME_CMD_EFFECTS_NIC = 1 << 3,
+ NVME_CMD_EFFECTS_CCC = 1 << 4,
+- NVME_CMD_EFFECTS_CSE_MASK = 3 << 16,
++ NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16),
+ NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
+ };
+
+diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
+index 81d6e4ec2294b..0260f5ea98fe1 100644
+--- a/include/linux/proc_fs.h
++++ b/include/linux/proc_fs.h
+@@ -208,8 +208,10 @@ static inline void proc_remove(struct proc_dir_entry *de) {}
+ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; }
+
+ #define proc_create_net_data(name, mode, parent, ops, state_size, data) ({NULL;})
++#define proc_create_net_data_write(name, mode, parent, ops, write, state_size, data) ({NULL;})
+ #define proc_create_net(name, mode, parent, state_size, ops) ({NULL;})
+ #define proc_create_net_single(name, mode, parent, show, data) ({NULL;})
++#define proc_create_net_single_write(name, mode, parent, show, write, data) ({NULL;})
+
+ static inline struct pid *tgid_pidfd_to_pid(const struct file *file)
+ {
+diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
+index f9a7461e72b80..d3b4a3d4514ab 100644
+--- a/include/linux/regulator/driver.h
++++ b/include/linux/regulator/driver.h
+@@ -687,7 +687,8 @@ static inline int regulator_err2notif(int err)
+
+
+ struct regulator_dev *
+-regulator_register(const struct regulator_desc *regulator_desc,
++regulator_register(struct device *dev,
++ const struct regulator_desc *regulator_desc,
+ const struct regulator_config *config);
+ struct regulator_dev *
+ devm_regulator_register(struct device *dev,
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index 70d6cb94e5802..84f787416a54d 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -82,6 +82,7 @@ struct sk_psock {
+ u32 apply_bytes;
+ u32 cork_bytes;
+ u32 eval;
++ bool redir_ingress; /* undefined if sk_redir is null */
+ struct sk_msg *cork;
+ struct sk_psock_progs progs;
+ #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
+diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
+index 93884086f3924..adc80e29168ea 100644
+--- a/include/linux/timerqueue.h
++++ b/include/linux/timerqueue.h
+@@ -35,7 +35,7 @@ struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
+ {
+ struct rb_node *leftmost = rb_first_cached(&head->rb_root);
+
+- return rb_entry(leftmost, struct timerqueue_node, node);
++ return rb_entry_safe(leftmost, struct timerqueue_node, node);
+ }
+
+ static inline void timerqueue_init(struct timerqueue_node *node)
+diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h
+index 2f6b0861322ae..ac60c9fcfe9a6 100644
+--- a/include/media/dvbdev.h
++++ b/include/media/dvbdev.h
+@@ -126,6 +126,7 @@ struct dvb_adapter {
+ * struct dvb_device - represents a DVB device node
+ *
+ * @list_head: List head with all DVB devices
++ * @ref: reference counter
+ * @fops: pointer to struct file_operations
+ * @adapter: pointer to the adapter that holds this device node
+ * @type: type of the device, as defined by &enum dvb_device_type.
+@@ -156,6 +157,7 @@ struct dvb_adapter {
+ */
+ struct dvb_device {
+ struct list_head list_head;
++ struct kref ref;
+ const struct file_operations *fops;
+ struct dvb_adapter *adapter;
+ enum dvb_device_type type;
+@@ -187,6 +189,20 @@ struct dvb_device {
+ void *priv;
+ };
+
++/**
++ * dvb_device_get - Increase dvb_device reference
++ *
++ * @dvbdev: pointer to struct dvb_device
++ */
++struct dvb_device *dvb_device_get(struct dvb_device *dvbdev);
++
++/**
++ * dvb_device_put - Decrease dvb_device reference
++ *
++ * @dvbdev: pointer to struct dvb_device
++ */
++void dvb_device_put(struct dvb_device *dvbdev);
++
+ /**
+ * dvb_register_adapter - Registers a new DVB adapter
+ *
+@@ -231,29 +247,17 @@ int dvb_register_device(struct dvb_adapter *adap,
+ /**
+ * dvb_remove_device - Remove a registered DVB device
+ *
+- * This does not free memory. To do that, call dvb_free_device().
++ * This does not free memory. dvb_free_device() will do that when
++ * reference counter is empty
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+ void dvb_remove_device(struct dvb_device *dvbdev);
+
+-/**
+- * dvb_free_device - Free memory occupied by a DVB device.
+- *
+- * Call dvb_unregister_device() before calling this function.
+- *
+- * @dvbdev: pointer to struct dvb_device
+- */
+-void dvb_free_device(struct dvb_device *dvbdev);
+
+ /**
+ * dvb_unregister_device - Unregisters a DVB device
+ *
+- * This is a combination of dvb_remove_device() and dvb_free_device().
+- * Using this function is usually a mistake, and is often an indicator
+- * for a use-after-free bug (when a userspace process keeps a file
+- * handle to a detached device).
+- *
+ * @dvbdev: pointer to struct dvb_device
+ */
+ void dvb_unregister_device(struct dvb_device *dvbdev);
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index 684f1cd287301..7a381fcef939d 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -274,6 +274,26 @@ enum {
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN,
++
++ /*
++ * When this quirk is set, the HCI_OP_LE_SET_EXT_SCAN_ENABLE command is
++ * disabled. This is required for some Broadcom controllers which
++ * erroneously claim to support extended scanning.
++ *
++ * This quirk can be set before hci_register_dev is called or
++ * during the hdev->setup vendor callback.
++ */
++ HCI_QUIRK_BROKEN_EXT_SCAN,
++
++ /*
++ * When this quirk is set, the HCI_OP_GET_MWS_TRANSPORT_CONFIG command is
++ * disabled. This is required for some Broadcom controllers which
++ * erroneously claim to support MWS Transport Layer Configuration.
++ *
++ * This quirk can be set before hci_register_dev is called or
++ * during the hdev->setup vendor callback.
++ */
++ HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG,
+ };
+
+ /* HCI device flags */
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index c54bc71254afa..7f585e5dd71b8 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -1689,7 +1689,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
+
+ /* Use ext scanning if set ext scan param and ext scan enable is supported */
+ #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
+- ((dev)->commands[37] & 0x40))
++ ((dev)->commands[37] & 0x40) && \
++ !test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks))
++
+ /* Use ext create connection if command is supported */
+ #define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
+
+@@ -1717,6 +1719,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
+ ((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL)
+ #define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER)
+
++#define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \
++ (!test_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &(dev)->quirks)))
++
+ /* ----- HCI protocols ----- */
+ #define HCI_PROTO_DEFER 0x01
+
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 00b479ce6b99c..d67fda89cd0fa 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -356,9 +356,8 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ struct net *net)
+ {
+- /* TODO : stats should be SMP safe */
+- dev->stats.rx_packets++;
+- dev->stats.rx_bytes += skb->len;
++ DEV_STATS_INC(dev, rx_packets);
++ DEV_STATS_ADD(dev, rx_bytes, skb->len);
+ __skb_tunnel_rx(skb, dev, net);
+ }
+
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index ff1804a0c4692..1fca6a88114ad 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -351,11 +351,11 @@ struct ip_vs_seq {
+
+ /* counters per cpu */
+ struct ip_vs_counters {
+- __u64 conns; /* connections scheduled */
+- __u64 inpkts; /* incoming packets */
+- __u64 outpkts; /* outgoing packets */
+- __u64 inbytes; /* incoming bytes */
+- __u64 outbytes; /* outgoing bytes */
++ u64_stats_t conns; /* connections scheduled */
++ u64_stats_t inpkts; /* incoming packets */
++ u64_stats_t outpkts; /* outgoing packets */
++ u64_stats_t inbytes; /* incoming bytes */
++ u64_stats_t outbytes; /* outgoing bytes */
+ };
+ /* Stats per cpu */
+ struct ip_vs_cpu_stats {
+diff --git a/include/net/mrp.h b/include/net/mrp.h
+index 92cd3fb6cf9da..b28915ffea284 100644
+--- a/include/net/mrp.h
++++ b/include/net/mrp.h
+@@ -124,6 +124,7 @@ struct mrp_applicant {
+ struct sk_buff *pdu;
+ struct rb_root mad;
+ struct rcu_head rcu;
++ bool active;
+ };
+
+ struct mrp_port {
+diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
+index efc9085c68927..6ec140b0a61bf 100644
+--- a/include/net/sock_reuseport.h
++++ b/include/net/sock_reuseport.h
+@@ -16,6 +16,7 @@ struct sock_reuseport {
+ u16 max_socks; /* length of socks */
+ u16 num_socks; /* elements in socks */
+ u16 num_closed_socks; /* closed elements in socks */
++ u16 incoming_cpu;
+ /* The last synq overflow event timestamp of this
+ * reuse->socks[] group.
+ */
+@@ -58,5 +59,6 @@ static inline bool reuseport_has_conns(struct sock *sk)
+ }
+
+ void reuseport_has_conns_set(struct sock *sk);
++void reuseport_update_incoming_cpu(struct sock *sk, int val);
+
+ #endif /* _SOCK_REUSEPORT_H */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 14d45661a84d8..5b70b241ce71b 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -2291,8 +2291,8 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+ void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
+ #endif /* CONFIG_BPF_SYSCALL */
+
+-int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
+- int flags);
++int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
++ struct sk_msg *msg, u32 bytes, int flags);
+ #endif /* CONFIG_NET_SOCK_MSG */
+
+ #if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
+diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
+index 25ec8c181688d..eba23daf2c290 100644
+--- a/include/sound/hda_codec.h
++++ b/include/sound/hda_codec.h
+@@ -258,6 +258,7 @@ struct hda_codec {
+ unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
+ unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
+ unsigned int forced_resume:1; /* forced resume for jack */
++ unsigned int no_stream_clean_at_suspend:1; /* do not clean streams at suspend */
+
+ #ifdef CONFIG_PM
+ unsigned long power_on_acct;
+diff --git a/include/sound/pcm.h b/include/sound/pcm.h
+index 7b1a022910e8e..27040b472a4f6 100644
+--- a/include/sound/pcm.h
++++ b/include/sound/pcm.h
+@@ -106,24 +106,24 @@ struct snd_pcm_ops {
+ #define SNDRV_PCM_POS_XRUN ((snd_pcm_uframes_t)-1)
+
+ /* If you change this don't forget to change rates[] table in pcm_native.c */
+-#define SNDRV_PCM_RATE_5512 (1<<0) /* 5512Hz */
+-#define SNDRV_PCM_RATE_8000 (1<<1) /* 8000Hz */
+-#define SNDRV_PCM_RATE_11025 (1<<2) /* 11025Hz */
+-#define SNDRV_PCM_RATE_16000 (1<<3) /* 16000Hz */
+-#define SNDRV_PCM_RATE_22050 (1<<4) /* 22050Hz */
+-#define SNDRV_PCM_RATE_32000 (1<<5) /* 32000Hz */
+-#define SNDRV_PCM_RATE_44100 (1<<6) /* 44100Hz */
+-#define SNDRV_PCM_RATE_48000 (1<<7) /* 48000Hz */
+-#define SNDRV_PCM_RATE_64000 (1<<8) /* 64000Hz */
+-#define SNDRV_PCM_RATE_88200 (1<<9) /* 88200Hz */
+-#define SNDRV_PCM_RATE_96000 (1<<10) /* 96000Hz */
+-#define SNDRV_PCM_RATE_176400 (1<<11) /* 176400Hz */
+-#define SNDRV_PCM_RATE_192000 (1<<12) /* 192000Hz */
+-#define SNDRV_PCM_RATE_352800 (1<<13) /* 352800Hz */
+-#define SNDRV_PCM_RATE_384000 (1<<14) /* 384000Hz */
+-
+-#define SNDRV_PCM_RATE_CONTINUOUS (1<<30) /* continuous range */
+-#define SNDRV_PCM_RATE_KNOT (1<<31) /* supports more non-continuos rates */
++#define SNDRV_PCM_RATE_5512 (1U<<0) /* 5512Hz */
++#define SNDRV_PCM_RATE_8000 (1U<<1) /* 8000Hz */
++#define SNDRV_PCM_RATE_11025 (1U<<2) /* 11025Hz */
++#define SNDRV_PCM_RATE_16000 (1U<<3) /* 16000Hz */
++#define SNDRV_PCM_RATE_22050 (1U<<4) /* 22050Hz */
++#define SNDRV_PCM_RATE_32000 (1U<<5) /* 32000Hz */
++#define SNDRV_PCM_RATE_44100 (1U<<6) /* 44100Hz */
++#define SNDRV_PCM_RATE_48000 (1U<<7) /* 48000Hz */
++#define SNDRV_PCM_RATE_64000 (1U<<8) /* 64000Hz */
++#define SNDRV_PCM_RATE_88200 (1U<<9) /* 88200Hz */
++#define SNDRV_PCM_RATE_96000 (1U<<10) /* 96000Hz */
++#define SNDRV_PCM_RATE_176400 (1U<<11) /* 176400Hz */
++#define SNDRV_PCM_RATE_192000 (1U<<12) /* 192000Hz */
++#define SNDRV_PCM_RATE_352800 (1U<<13) /* 352800Hz */
++#define SNDRV_PCM_RATE_384000 (1U<<14) /* 384000Hz */
++
++#define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous range */
++#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuos rates */
+
+ #define SNDRV_PCM_RATE_8000_44100 (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\
+ SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\
+diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
+index c6b372401c278..ff57e7f9914cc 100644
+--- a/include/trace/events/f2fs.h
++++ b/include/trace/events/f2fs.h
+@@ -322,7 +322,7 @@ TRACE_EVENT(f2fs_unlink_enter,
+ __field(ino_t, ino)
+ __field(loff_t, size)
+ __field(blkcnt_t, blocks)
+- __field(const char *, name)
++ __string(name, dentry->d_name.name)
+ ),
+
+ TP_fast_assign(
+@@ -330,7 +330,7 @@ TRACE_EVENT(f2fs_unlink_enter,
+ __entry->ino = dir->i_ino;
+ __entry->size = dir->i_size;
+ __entry->blocks = dir->i_blocks;
+- __entry->name = dentry->d_name.name;
++ __assign_str(name, dentry->d_name.name);
+ ),
+
+ TP_printk("dev = (%d,%d), dir ino = %lu, i_size = %lld, "
+@@ -338,7 +338,7 @@ TRACE_EVENT(f2fs_unlink_enter,
+ show_dev_ino(__entry),
+ __entry->size,
+ (unsigned long long)__entry->blocks,
+- __entry->name)
++ __get_str(name))
+ );
+
+ DEFINE_EVENT(f2fs__inode_exit, f2fs_unlink_exit,
+@@ -940,25 +940,29 @@ TRACE_EVENT(f2fs_direct_IO_enter,
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+- __field(struct kiocb *, iocb)
++ __field(loff_t, ki_pos)
++ __field(int, ki_flags)
++ __field(u16, ki_ioprio)
+ __field(unsigned long, len)
+ __field(int, rw)
+ ),
+
+ TP_fast_assign(
+- __entry->dev = inode->i_sb->s_dev;
+- __entry->ino = inode->i_ino;
+- __entry->iocb = iocb;
+- __entry->len = len;
+- __entry->rw = rw;
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->ki_pos = iocb->ki_pos;
++ __entry->ki_flags = iocb->ki_flags;
++ __entry->ki_ioprio = iocb->ki_ioprio;
++ __entry->len = len;
++ __entry->rw = rw;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu ki_flags = %x ki_ioprio = %x rw = %d",
+ show_dev_ino(__entry),
+- __entry->iocb->ki_pos,
++ __entry->ki_pos,
+ __entry->len,
+- __entry->iocb->ki_flags,
+- __entry->iocb->ki_ioprio,
++ __entry->ki_flags,
++ __entry->ki_ioprio,
+ __entry->rw)
+ );
+
+@@ -1407,19 +1411,19 @@ TRACE_EVENT(f2fs_write_checkpoint,
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, reason)
+- __field(char *, msg)
++ __string(dest_msg, msg)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->reason = reason;
+- __entry->msg = msg;
++ __assign_str(dest_msg, msg);
+ ),
+
+ TP_printk("dev = (%d,%d), checkpoint for %s, state = %s",
+ show_dev(__entry->dev),
+ show_cpreason(__entry->reason),
+- __entry->msg)
++ __get_str(dest_msg))
+ );
+
+ DECLARE_EVENT_CLASS(f2fs_discard,
+diff --git a/include/trace/events/ib_mad.h b/include/trace/events/ib_mad.h
+index 59363a083ecb9..d92691c78cff6 100644
+--- a/include/trace/events/ib_mad.h
++++ b/include/trace/events/ib_mad.h
+@@ -49,7 +49,6 @@ DECLARE_EVENT_CLASS(ib_mad_send_template,
+ __field(int, retries_left)
+ __field(int, max_retries)
+ __field(int, retry)
+- __field(u16, pkey)
+ ),
+
+ TP_fast_assign(
+@@ -89,7 +88,7 @@ DECLARE_EVENT_CLASS(ib_mad_send_template,
+ "hdr : base_ver 0x%x class 0x%x class_ver 0x%x " \
+ "method 0x%x status 0x%x class_specific 0x%x tid 0x%llx " \
+ "attr_id 0x%x attr_mod 0x%x => dlid 0x%08x sl %d "\
+- "pkey 0x%x rpqn 0x%x rqpkey 0x%x",
++ "rpqn 0x%x rqpkey 0x%x",
+ __entry->dev_index, __entry->port_num, __entry->qp_num,
+ __entry->agent_priv, be64_to_cpu(__entry->wrtid),
+ __entry->retries_left, __entry->max_retries,
+@@ -100,7 +99,7 @@ DECLARE_EVENT_CLASS(ib_mad_send_template,
+ be16_to_cpu(__entry->class_specific),
+ be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id),
+ be32_to_cpu(__entry->attr_mod),
+- be32_to_cpu(__entry->dlid), __entry->sl, __entry->pkey,
++ be32_to_cpu(__entry->dlid), __entry->sl,
+ __entry->rqpn, __entry->rqkey
+ )
+ );
+@@ -204,7 +203,6 @@ TRACE_EVENT(ib_mad_recv_done_handler,
+ __field(u16, wc_status)
+ __field(u32, slid)
+ __field(u32, dev_index)
+- __field(u16, pkey)
+ ),
+
+ TP_fast_assign(
+@@ -224,9 +222,6 @@ TRACE_EVENT(ib_mad_recv_done_handler,
+ __entry->slid = wc->slid;
+ __entry->src_qp = wc->src_qp;
+ __entry->sl = wc->sl;
+- ib_query_pkey(qp_info->port_priv->device,
+- qp_info->port_priv->port_num,
+- wc->pkey_index, &__entry->pkey);
+ __entry->wc_status = wc->status;
+ ),
+
+@@ -234,7 +229,7 @@ TRACE_EVENT(ib_mad_recv_done_handler,
+ "base_ver 0x%02x class 0x%02x class_ver 0x%02x " \
+ "method 0x%02x status 0x%04x class_specific 0x%04x " \
+ "tid 0x%016llx attr_id 0x%04x attr_mod 0x%08x " \
+- "slid 0x%08x src QP%d, sl %d pkey 0x%04x",
++ "slid 0x%08x src QP%d, sl %d",
+ __entry->dev_index, __entry->port_num, __entry->qp_num,
+ __entry->wc_status,
+ __entry->length,
+@@ -244,7 +239,7 @@ TRACE_EVENT(ib_mad_recv_done_handler,
+ be16_to_cpu(__entry->class_specific),
+ be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id),
+ be32_to_cpu(__entry->attr_mod),
+- __entry->slid, __entry->src_qp, __entry->sl, __entry->pkey
++ __entry->slid, __entry->src_qp, __entry->sl
+ )
+ );
+
+diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
+index 8a3432d0f0dcb..e687658843b1c 100644
+--- a/include/uapi/linux/eventpoll.h
++++ b/include/uapi/linux/eventpoll.h
+@@ -41,6 +41,12 @@
+ #define EPOLLMSG (__force __poll_t)0x00000400
+ #define EPOLLRDHUP (__force __poll_t)0x00002000
+
++/*
++ * Internal flag - wakeup generated by io_uring, used to detect recursion back
++ * into the io_uring poll handler.
++ */
++#define EPOLL_URING_WAKE ((__force __poll_t)(1U << 27))
++
+ /* Set exclusive wakeup mode for the target file descriptor */
+ #define EPOLLEXCLUSIVE ((__force __poll_t)(1U << 28))
+
+diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
+index 2b9e7feba3f32..1d553bedbdb51 100644
+--- a/include/uapi/linux/idxd.h
++++ b/include/uapi/linux/idxd.h
+@@ -295,7 +295,7 @@ struct dsa_completion_record {
+ };
+
+ uint32_t delta_rec_size;
+- uint32_t crc_val;
++ uint64_t crc_val;
+
+ /* DIF check & strip */
+ struct {
+diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
+index 2df3225b562fa..9d4c4078e8d00 100644
+--- a/include/uapi/linux/io_uring.h
++++ b/include/uapi/linux/io_uring.h
+@@ -296,10 +296,28 @@ enum io_uring_op {
+ *
+ * IORING_RECVSEND_FIXED_BUF Use registered buffers, the index is stored in
+ * the buf_index field.
++ *
++ * IORING_SEND_ZC_REPORT_USAGE
++ * If set, SEND[MSG]_ZC should report
++ * the zerocopy usage in cqe.res
++ * for the IORING_CQE_F_NOTIF cqe.
++ * 0 is reported if zerocopy was actually possible.
++ * IORING_NOTIF_USAGE_ZC_COPIED if data was copied
++ * (at least partially).
+ */
+ #define IORING_RECVSEND_POLL_FIRST (1U << 0)
+ #define IORING_RECV_MULTISHOT (1U << 1)
+ #define IORING_RECVSEND_FIXED_BUF (1U << 2)
++#define IORING_SEND_ZC_REPORT_USAGE (1U << 3)
++
++/*
++ * cqe.res for IORING_CQE_F_NOTIF if
++ * IORING_SEND_ZC_REPORT_USAGE was requested
++ *
++ * It should be treated as a flag, all other
++ * bits of cqe.res should be treated as reserved!
++ */
++#define IORING_NOTIF_USAGE_ZC_COPIED (1U << 31)
+
+ /*
+ * accept flags stored in sqe->ioprio
+diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
+index 0723a9cce747c..01717181339eb 100644
+--- a/include/uapi/linux/swab.h
++++ b/include/uapi/linux/swab.h
+@@ -3,7 +3,7 @@
+ #define _UAPI_LINUX_SWAB_H
+
+ #include <linux/types.h>
+-#include <linux/compiler.h>
++#include <linux/stddef.h>
+ #include <asm/bitsperlong.h>
+ #include <asm/swab.h>
+
+diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
+index f6fde06db4b4e..745790ce3c261 100644
+--- a/include/uapi/rdma/hns-abi.h
++++ b/include/uapi/rdma/hns-abi.h
+@@ -85,11 +85,26 @@ struct hns_roce_ib_create_qp_resp {
+ __aligned_u64 dwqe_mmap_key;
+ };
+
++enum {
++ HNS_ROCE_EXSGE_FLAGS = 1 << 0,
++};
++
++enum {
++ HNS_ROCE_RSP_EXSGE_FLAGS = 1 << 0,
++};
++
+ struct hns_roce_ib_alloc_ucontext_resp {
+ __u32 qp_tab_size;
+ __u32 cqe_size;
+ __u32 srq_tab_size;
+ __u32 reserved;
++ __u32 config;
++ __u32 max_inline_data;
++};
++
++struct hns_roce_ib_alloc_ucontext {
++ __u32 config;
++ __u32 reserved;
+ };
+
+ struct hns_roce_ib_alloc_pd_resp {
+diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
+index 6d4a2c60808dd..00d2703e8fca5 100644
+--- a/include/uapi/sound/asequencer.h
++++ b/include/uapi/sound/asequencer.h
+@@ -328,10 +328,10 @@ typedef int __bitwise snd_seq_client_type_t;
+ #define KERNEL_CLIENT ((__force snd_seq_client_type_t) 2)
+
+ /* event filter flags */
+-#define SNDRV_SEQ_FILTER_BROADCAST (1<<0) /* accept broadcast messages */
+-#define SNDRV_SEQ_FILTER_MULTICAST (1<<1) /* accept multicast messages */
+-#define SNDRV_SEQ_FILTER_BOUNCE (1<<2) /* accept bounce event in error */
+-#define SNDRV_SEQ_FILTER_USE_EVENT (1<<31) /* use event filter */
++#define SNDRV_SEQ_FILTER_BROADCAST (1U<<0) /* accept broadcast messages */
++#define SNDRV_SEQ_FILTER_MULTICAST (1U<<1) /* accept multicast messages */
++#define SNDRV_SEQ_FILTER_BOUNCE (1U<<2) /* accept bounce event in error */
++#define SNDRV_SEQ_FILTER_USE_EVENT (1U<<31) /* use event filter */
+
+ struct snd_seq_client_info {
+ int client; /* client number to inquire */
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 61cd7ffd0f6aa..71f1cabb9f3d4 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -495,7 +495,7 @@ static void io_eventfd_ops(struct rcu_head *rcu)
+ int ops = atomic_xchg(&ev_fd->ops, 0);
+
+ if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
+- eventfd_signal(ev_fd->cq_ev_fd, 1);
++ eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
+
+ /* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
+ * ordering in a race but if references are 0 we know we have to free
+@@ -531,7 +531,7 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
+ goto out;
+
+ if (likely(eventfd_signal_allowed())) {
+- eventfd_signal(ev_fd->cq_ev_fd, 1);
++ eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
+ } else {
+ atomic_inc(&ev_fd->refs);
+ if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
+@@ -1757,7 +1757,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+ return ret;
+
+ /* If the op doesn't have a file, we're not polling for it */
+- if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file)
++ if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
+ io_iopoll_req_issued(req, issue_flags);
+
+ return 0;
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 50bc3af449534..4334cd30c423d 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -4,6 +4,7 @@
+ #include <linux/errno.h>
+ #include <linux/lockdep.h>
+ #include <linux/io_uring_types.h>
++#include <uapi/linux/eventpoll.h>
+ #include "io-wq.h"
+ #include "slist.h"
+ #include "filetable.h"
+@@ -207,12 +208,18 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
+ static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
+ {
+ /*
+- * wake_up_all() may seem excessive, but io_wake_function() and
+- * io_should_wake() handle the termination of the loop and only
+- * wake as many waiters as we need to.
++ * Trigger waitqueue handler on all waiters on our waitqueue. This
++ * won't necessarily wake up all the tasks, io_should_wake() will make
++ * that decision.
++ *
++ * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
++ * set in the mask so that if we recurse back into our own poll
++ * waitqueue handlers, we know we have a dependency between eventfd or
++ * epoll and should terminate multishot poll at that point.
+ */
+ if (waitqueue_active(&ctx->cq_wait))
+- wake_up_all(&ctx->cq_wait);
++ __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
++ poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
+ }
+
+ static inline void io_cqring_wake(struct io_ring_ctx *ctx)
+diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
+index 90d2fc6fd80e4..a49ccab262d53 100644
+--- a/io_uring/msg_ring.c
++++ b/io_uring/msg_ring.c
+@@ -164,12 +164,10 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
+ }
+
+ done:
++ if (ret == -EAGAIN)
++ return -EAGAIN;
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+- /* put file to avoid an attempt to IOPOLL the req */
+- if (!(req->flags & REQ_F_FIXED_FILE))
+- io_put_file(req->file);
+- req->file = NULL;
+ return IOU_OK;
+ }
+diff --git a/io_uring/net.c b/io_uring/net.c
+index ab83da7e80f04..bdd2b4e370b35 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -479,6 +479,7 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
+ if (req->flags & REQ_F_BUFFER_SELECT) {
+ compat_ssize_t clen;
+
++ iomsg->free_iov = NULL;
+ if (msg.msg_iovlen == 0) {
+ sr->len = 0;
+ } else if (msg.msg_iovlen > 1) {
+@@ -805,10 +806,10 @@ retry_multishot:
+ goto retry_multishot;
+
+ if (mshot_finished) {
+- io_netmsg_recycle(req, issue_flags);
+ /* fast path, check for non-NULL to avoid function call */
+ if (kmsg->free_iov)
+ kfree(kmsg->free_iov);
++ io_netmsg_recycle(req, issue_flags);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ }
+
+@@ -937,7 +938,8 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+
+ zc->flags = READ_ONCE(sqe->ioprio);
+ if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
+- IORING_RECVSEND_FIXED_BUF))
++ IORING_RECVSEND_FIXED_BUF |
++ IORING_SEND_ZC_REPORT_USAGE))
+ return -EINVAL;
+ notif = zc->notif = io_alloc_notif(ctx);
+ if (!notif)
+@@ -955,6 +957,9 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ req->imu = READ_ONCE(ctx->user_bufs[idx]);
+ io_req_set_rsrc_node(notif, ctx, 0);
+ }
++ if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
++ io_notif_to_data(notif)->zc_report = true;
++ }
+
+ if (req->opcode == IORING_OP_SEND_ZC) {
+ if (READ_ONCE(sqe->__pad3[0]))
+diff --git a/io_uring/notif.c b/io_uring/notif.c
+index e37c6569d82e8..4bfef10161fa0 100644
+--- a/io_uring/notif.c
++++ b/io_uring/notif.c
+@@ -18,6 +18,10 @@ static void __io_notif_complete_tw(struct io_kiocb *notif, bool *locked)
+ __io_unaccount_mem(ctx->user, nd->account_pages);
+ nd->account_pages = 0;
+ }
++
++ if (nd->zc_report && (nd->zc_copied || !nd->zc_used))
++ notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;
++
+ io_req_task_complete(notif, locked);
+ }
+
+@@ -28,6 +32,13 @@ static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
+ struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
+ struct io_kiocb *notif = cmd_to_io_kiocb(nd);
+
++ if (nd->zc_report) {
++ if (success && !nd->zc_used && skb)
++ WRITE_ONCE(nd->zc_used, true);
++ else if (!success && !nd->zc_copied)
++ WRITE_ONCE(nd->zc_copied, true);
++ }
++
+ if (refcount_dec_and_test(&uarg->refcnt)) {
+ notif->io_task_work.func = __io_notif_complete_tw;
+ io_req_task_work_add(notif);
+@@ -55,6 +66,7 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
+ nd->account_pages = 0;
+ nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
+ nd->uarg.callback = io_uring_tx_zerocopy_callback;
++ nd->zc_report = nd->zc_used = nd->zc_copied = false;
+ refcount_set(&nd->uarg.refcnt, 1);
+ return notif;
+ }
+diff --git a/io_uring/notif.h b/io_uring/notif.h
+index 5b4d710c8ca54..4ae696273c781 100644
+--- a/io_uring/notif.h
++++ b/io_uring/notif.h
+@@ -13,6 +13,9 @@ struct io_notif_data {
+ struct file *file;
+ struct ubuf_info uarg;
+ unsigned long account_pages;
++ bool zc_report;
++ bool zc_used;
++ bool zc_copied;
+ };
+
+ void io_notif_flush(struct io_kiocb *notif);
+diff --git a/io_uring/opdef.c b/io_uring/opdef.c
+index 83dc0f9ad3b2f..04dd2c983fce4 100644
+--- a/io_uring/opdef.c
++++ b/io_uring/opdef.c
+@@ -63,6 +63,7 @@ const struct io_op_def io_op_defs[] = {
+ .audit_skip = 1,
+ .ioprio = 1,
+ .iopoll = 1,
++ .iopoll_queue = 1,
+ .async_size = sizeof(struct io_async_rw),
+ .name = "READV",
+ .prep = io_prep_rw,
+@@ -80,6 +81,7 @@ const struct io_op_def io_op_defs[] = {
+ .audit_skip = 1,
+ .ioprio = 1,
+ .iopoll = 1,
++ .iopoll_queue = 1,
+ .async_size = sizeof(struct io_async_rw),
+ .name = "WRITEV",
+ .prep = io_prep_rw,
+@@ -103,6 +105,7 @@ const struct io_op_def io_op_defs[] = {
+ .audit_skip = 1,
+ .ioprio = 1,
+ .iopoll = 1,
++ .iopoll_queue = 1,
+ .async_size = sizeof(struct io_async_rw),
+ .name = "READ_FIXED",
+ .prep = io_prep_rw,
+@@ -118,6 +121,7 @@ const struct io_op_def io_op_defs[] = {
+ .audit_skip = 1,
+ .ioprio = 1,
+ .iopoll = 1,
++ .iopoll_queue = 1,
+ .async_size = sizeof(struct io_async_rw),
+ .name = "WRITE_FIXED",
+ .prep = io_prep_rw,
+@@ -277,6 +281,7 @@ const struct io_op_def io_op_defs[] = {
+ .audit_skip = 1,
+ .ioprio = 1,
+ .iopoll = 1,
++ .iopoll_queue = 1,
+ .async_size = sizeof(struct io_async_rw),
+ .name = "READ",
+ .prep = io_prep_rw,
+@@ -292,6 +297,7 @@ const struct io_op_def io_op_defs[] = {
+ .audit_skip = 1,
+ .ioprio = 1,
+ .iopoll = 1,
++ .iopoll_queue = 1,
+ .async_size = sizeof(struct io_async_rw),
+ .name = "WRITE",
+ .prep = io_prep_rw,
+@@ -481,6 +487,7 @@ const struct io_op_def io_op_defs[] = {
+ .plug = 1,
+ .name = "URING_CMD",
+ .iopoll = 1,
++ .iopoll_queue = 1,
+ .async_size = uring_cmd_pdu_size(1),
+ .prep = io_uring_cmd_prep,
+ .issue = io_uring_cmd,
+diff --git a/io_uring/opdef.h b/io_uring/opdef.h
+index 3efe06d25473a..df7e13d9bfba7 100644
+--- a/io_uring/opdef.h
++++ b/io_uring/opdef.h
+@@ -25,6 +25,8 @@ struct io_op_def {
+ unsigned ioprio : 1;
+ /* supports iopoll */
+ unsigned iopoll : 1;
++ /* have to be put into the iopoll list */
++ unsigned iopoll_queue : 1;
+ /* opcode specific path will handle ->async_data allocation if needed */
+ unsigned manual_alloc : 1;
+ /* size of async data needed, if any */
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index d9bf1767867e6..fded1445a803b 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -429,6 +429,14 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ return 0;
+
+ if (io_poll_get_ownership(req)) {
++ /*
++ * If we trigger a multishot poll off our own wakeup path,
++ * disable multishot as there is a circular dependency between
++ * CQ posting and triggering the event.
++ */
++ if (mask & EPOLL_URING_WAKE)
++ poll->events |= EPOLLONESHOT;
++
+ /* optional, saves extra locking for removal in tw handler */
+ if (mask && poll->events & EPOLLONESHOT) {
+ list_del_init(&poll->wait.entry);
+diff --git a/io_uring/timeout.c b/io_uring/timeout.c
+index e8a8c20994805..06200fe73a044 100644
+--- a/io_uring/timeout.c
++++ b/io_uring/timeout.c
+@@ -72,10 +72,12 @@ static bool io_kill_timeout(struct io_kiocb *req, int status)
+ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
+ __must_hold(&ctx->completion_lock)
+ {
+- u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
++ u32 seq;
+ struct io_timeout *timeout, *tmp;
+
+ spin_lock_irq(&ctx->timeout_lock);
++ seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
++
+ list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
+ struct io_kiocb *req = cmd_to_io_kiocb(timeout);
+ u32 events_needed, events_got;
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index 467a194b8a2ec..d09aa1c1e3e65 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -1726,7 +1726,8 @@ static int __init init_mqueue_fs(void)
+
+ if (!setup_mq_sysctls(&init_ipc_ns)) {
+ pr_warn("sysctl registration failed\n");
+- return -ENOMEM;
++ error = -ENOMEM;
++ goto out_kmem;
+ }
+
+ error = register_filesystem(&mqueue_fs_type);
+@@ -1744,8 +1745,9 @@ static int __init init_mqueue_fs(void)
+ out_filesystem:
+ unregister_filesystem(&mqueue_fs_type);
+ out_sysctl:
+- kmem_cache_destroy(mqueue_inode_cachep);
+ retire_mq_sysctls(&init_ipc_ns);
++out_kmem:
++ kmem_cache_destroy(mqueue_inode_cachep);
+ return error;
+ }
+
+diff --git a/kernel/Makefile b/kernel/Makefile
+index d754e0be1176d..ebc692242b68b 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -41,9 +41,6 @@ UBSAN_SANITIZE_kcov.o := n
+ KMSAN_SANITIZE_kcov.o := n
+ CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) -fno-stack-protector
+
+-# Don't instrument error handlers
+-CFLAGS_REMOVE_cfi.o := $(CC_FLAGS_CFI)
+-
+ obj-y += sched/
+ obj-y += locking/
+ obj-y += power/
+diff --git a/kernel/acct.c b/kernel/acct.c
+index 62200d799b9b0..034a26daabb2e 100644
+--- a/kernel/acct.c
++++ b/kernel/acct.c
+@@ -350,6 +350,8 @@ static comp_t encode_comp_t(unsigned long value)
+ exp++;
+ }
+
++ if (exp > (((comp_t) ~0U) >> MANTSIZE))
++ return (comp_t) ~0U;
+ /*
+ * Clean it up and polish it off.
+ */
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 35c07afac924e..efdbba2a0230e 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -4481,6 +4481,11 @@ static int btf_func_proto_check(struct btf_verifier_env *env,
+ break;
+ }
+
++ if (btf_type_is_resolve_source_only(arg_type)) {
++ btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
++ return -EINVAL;
++ }
++
+ if (args[i].name_off &&
+ (!btf_name_offset_valid(btf, args[i].name_off) ||
+ !btf_name_valid_identifier(btf, args[i].name_off))) {
+diff --git a/kernel/bpf/cgroup_iter.c b/kernel/bpf/cgroup_iter.c
+index 9fcf09f2ef00f..c187a9e62bdbb 100644
+--- a/kernel/bpf/cgroup_iter.c
++++ b/kernel/bpf/cgroup_iter.c
+@@ -164,16 +164,30 @@ static int cgroup_iter_seq_init(void *priv, struct bpf_iter_aux_info *aux)
+ struct cgroup_iter_priv *p = (struct cgroup_iter_priv *)priv;
+ struct cgroup *cgrp = aux->cgroup.start;
+
++ /* bpf_iter_attach_cgroup() has already acquired an extra reference
++ * for the start cgroup, but the reference may be released after
++ * cgroup_iter_seq_init(), so acquire another reference for the
++ * start cgroup.
++ */
+ p->start_css = &cgrp->self;
++ css_get(p->start_css);
+ p->terminate = false;
+ p->visited_all = false;
+ p->order = aux->cgroup.order;
+ return 0;
+ }
+
++static void cgroup_iter_seq_fini(void *priv)
++{
++ struct cgroup_iter_priv *p = (struct cgroup_iter_priv *)priv;
++
++ css_put(p->start_css);
++}
++
+ static const struct bpf_iter_seq_info cgroup_iter_seq_info = {
+ .seq_ops = &cgroup_iter_seq_ops,
+ .init_seq_private = cgroup_iter_seq_init,
++ .fini_seq_private = cgroup_iter_seq_fini,
+ .seq_priv_size = sizeof(struct cgroup_iter_priv),
+ };
+
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 7b373a5e861f4..439ed7e5a82b8 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -3504,9 +3504,9 @@ static int bpf_prog_attach(const union bpf_attr *attr)
+ case BPF_PROG_TYPE_LSM:
+ if (ptype == BPF_PROG_TYPE_LSM &&
+ prog->expected_attach_type != BPF_LSM_CGROUP)
+- return -EINVAL;
+-
+- ret = cgroup_bpf_prog_attach(attr, ptype, prog);
++ ret = -EINVAL;
++ else
++ ret = cgroup_bpf_prog_attach(attr, ptype, prog);
+ break;
+ default:
+ ret = -EINVAL;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 264b3dc714cc4..242fe307032f1 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1008,9 +1008,9 @@ static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
+ return NULL;
+
+- if (ksize(dst) < bytes) {
++ if (ksize(dst) < ksize(src)) {
+ kfree(dst);
+- dst = kmalloc_track_caller(bytes, flags);
++ dst = kmalloc_track_caller(kmalloc_size_roundup(bytes), flags);
+ if (!dst)
+ return NULL;
+ }
+@@ -1027,12 +1027,14 @@ out:
+ */
+ static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
+ {
++ size_t alloc_size;
+ void *new_arr;
+
+ if (!new_n || old_n == new_n)
+ goto out;
+
+- new_arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
++ alloc_size = kmalloc_size_roundup(size_mul(new_n, size));
++ new_arr = krealloc(arr, alloc_size, GFP_KERNEL);
+ if (!new_arr) {
+ kfree(arr);
+ return NULL;
+@@ -2504,9 +2506,11 @@ static int push_jmp_history(struct bpf_verifier_env *env,
+ {
+ u32 cnt = cur->jmp_history_cnt;
+ struct bpf_idx_pair *p;
++ size_t alloc_size;
+
+ cnt++;
+- p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
++ alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
++ p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
+ if (!p)
+ return -ENOMEM;
+ p[cnt - 1].idx = env->insn_idx;
+@@ -2768,7 +2772,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
+ }
+ }
+
+-static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
++static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno,
+ int spi)
+ {
+ struct bpf_verifier_state *st = env->cur_state;
+@@ -2785,7 +2789,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
+ if (!env->bpf_capable)
+ return 0;
+
+- func = st->frame[st->curframe];
++ func = st->frame[frame];
+ if (regno >= 0) {
+ reg = &func->regs[regno];
+ if (reg->type != SCALAR_VALUE) {
+@@ -2866,7 +2870,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
+ break;
+
+ new_marks = false;
+- func = st->frame[st->curframe];
++ func = st->frame[frame];
+ bitmap_from_u64(mask, reg_mask);
+ for_each_set_bit(i, mask, 32) {
+ reg = &func->regs[i];
+@@ -2932,12 +2936,17 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
+
+ int mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ {
+- return __mark_chain_precision(env, regno, -1);
++ return __mark_chain_precision(env, env->cur_state->curframe, regno, -1);
+ }
+
+-static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
++static int mark_chain_precision_frame(struct bpf_verifier_env *env, int frame, int regno)
+ {
+- return __mark_chain_precision(env, -1, spi);
++ return __mark_chain_precision(env, frame, regno, -1);
++}
++
++static int mark_chain_precision_stack_frame(struct bpf_verifier_env *env, int frame, int spi)
++{
++ return __mark_chain_precision(env, frame, -1, spi);
+ }
+
+ static bool is_spillable_regtype(enum bpf_reg_type type)
+@@ -3186,14 +3195,17 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
+ stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
+ mark_stack_slot_scratched(env, spi);
+
+- if (!env->allow_ptr_leaks
+- && *stype != NOT_INIT
+- && *stype != SCALAR_VALUE) {
+- /* Reject the write if there's are spilled pointers in
+- * range. If we didn't reject here, the ptr status
+- * would be erased below (even though not all slots are
+- * actually overwritten), possibly opening the door to
+- * leaks.
++ if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) {
++ /* Reject the write if range we may write to has not
++ * been initialized beforehand. If we didn't reject
++ * here, the ptr status would be erased below (even
++ * though not all slots are actually overwritten),
++ * possibly opening the door to leaks.
++ *
++ * We do however catch STACK_INVALID case below, and
++ * only allow reading possibly uninitialized memory
++ * later for CAP_PERFMON, as the write may not happen to
++ * that slot.
+ */
+ verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
+ insn_idx, i);
+@@ -5159,10 +5171,6 @@ static int check_stack_range_initialized(
+ goto mark;
+ }
+
+- if (is_spilled_reg(&state->stack[spi]) &&
+- base_type(state->stack[spi].spilled_ptr.type) == PTR_TO_BTF_ID)
+- goto mark;
+-
+ if (is_spilled_reg(&state->stack[spi]) &&
+ (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
+ env->allow_ptr_leaks)) {
+@@ -5193,6 +5201,11 @@ mark:
+ mark_reg_read(env, &state->stack[spi].spilled_ptr,
+ state->stack[spi].spilled_ptr.parent,
+ REG_LIVE_READ64);
++ /* We do not set REG_LIVE_WRITTEN for stack slot, as we can not
++ * be sure that whether stack slot is written to or not. Hence,
++ * we must still conservatively propagate reads upwards even if
++ * helper may write to the entire memory range.
++ */
+ }
+ return update_stack_depth(env, state, min_off);
+ }
+@@ -9211,6 +9224,11 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+ return err;
+ return adjust_ptr_min_max_vals(env, insn,
+ dst_reg, src_reg);
++ } else if (dst_reg->precise) {
++ /* if dst_reg is precise, src_reg should be precise as well */
++ err = mark_chain_precision(env, insn->src_reg);
++ if (err)
++ return err;
+ }
+ } else {
+ /* Pretend the src is a reg with a known value, since we only
+@@ -11847,34 +11865,36 @@ static int propagate_precision(struct bpf_verifier_env *env,
+ {
+ struct bpf_reg_state *state_reg;
+ struct bpf_func_state *state;
+- int i, err = 0;
++ int i, err = 0, fr;
+
+- state = old->frame[old->curframe];
+- state_reg = state->regs;
+- for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
+- if (state_reg->type != SCALAR_VALUE ||
+- !state_reg->precise)
+- continue;
+- if (env->log.level & BPF_LOG_LEVEL2)
+- verbose(env, "propagating r%d\n", i);
+- err = mark_chain_precision(env, i);
+- if (err < 0)
+- return err;
+- }
++ for (fr = old->curframe; fr >= 0; fr--) {
++ state = old->frame[fr];
++ state_reg = state->regs;
++ for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
++ if (state_reg->type != SCALAR_VALUE ||
++ !state_reg->precise)
++ continue;
++ if (env->log.level & BPF_LOG_LEVEL2)
++ verbose(env, "frame %d: propagating r%d\n", i, fr);
++ err = mark_chain_precision_frame(env, fr, i);
++ if (err < 0)
++ return err;
++ }
+
+- for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+- if (!is_spilled_reg(&state->stack[i]))
+- continue;
+- state_reg = &state->stack[i].spilled_ptr;
+- if (state_reg->type != SCALAR_VALUE ||
+- !state_reg->precise)
+- continue;
+- if (env->log.level & BPF_LOG_LEVEL2)
+- verbose(env, "propagating fp%d\n",
+- (-i - 1) * BPF_REG_SIZE);
+- err = mark_chain_precision_stack(env, i);
+- if (err < 0)
+- return err;
++ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
++ if (!is_spilled_reg(&state->stack[i]))
++ continue;
++ state_reg = &state->stack[i].spilled_ptr;
++ if (state_reg->type != SCALAR_VALUE ||
++ !state_reg->precise)
++ continue;
++ if (env->log.level & BPF_LOG_LEVEL2)
++ verbose(env, "frame %d: propagating fp%d\n",
++ (-i - 1) * BPF_REG_SIZE, fr);
++ err = mark_chain_precision_stack_frame(env, fr, i);
++ if (err < 0)
++ return err;
++ }
+ }
+ return 0;
+ }
+@@ -13386,6 +13406,10 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
+ if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
+ continue;
+
++ /* Zero-extension is done by the caller. */
++ if (bpf_pseudo_kfunc_call(&insn))
++ continue;
++
+ if (WARN_ON(load_reg == -1)) {
+ verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
+ return -EFAULT;
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index bbad5e375d3ba..98a7a7b1471b7 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -663,21 +663,51 @@ static bool cpuhp_next_state(bool bringup,
+ return true;
+ }
+
+-static int cpuhp_invoke_callback_range(bool bringup,
+- unsigned int cpu,
+- struct cpuhp_cpu_state *st,
+- enum cpuhp_state target)
++static int __cpuhp_invoke_callback_range(bool bringup,
++ unsigned int cpu,
++ struct cpuhp_cpu_state *st,
++ enum cpuhp_state target,
++ bool nofail)
+ {
+ enum cpuhp_state state;
+- int err = 0;
++ int ret = 0;
+
+ while (cpuhp_next_state(bringup, &state, st, target)) {
++ int err;
++
+ err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
+- if (err)
++ if (!err)
++ continue;
++
++ if (nofail) {
++ pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
++ cpu, bringup ? "UP" : "DOWN",
++ cpuhp_get_step(st->state)->name,
++ st->state, err);
++ ret = -1;
++ } else {
++ ret = err;
+ break;
++ }
+ }
+
+- return err;
++ return ret;
++}
++
++static inline int cpuhp_invoke_callback_range(bool bringup,
++ unsigned int cpu,
++ struct cpuhp_cpu_state *st,
++ enum cpuhp_state target)
++{
++ return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
++}
++
++static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
++ unsigned int cpu,
++ struct cpuhp_cpu_state *st,
++ enum cpuhp_state target)
++{
++ __cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
+ }
+
+ static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
+@@ -999,7 +1029,6 @@ static int take_cpu_down(void *_param)
+ struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
+ enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
+ int err, cpu = smp_processor_id();
+- int ret;
+
+ /* Ensure this CPU doesn't handle any more interrupts. */
+ err = __cpu_disable();
+@@ -1012,13 +1041,10 @@ static int take_cpu_down(void *_param)
+ */
+ WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
+
+- /* Invoke the former CPU_DYING callbacks */
+- ret = cpuhp_invoke_callback_range(false, cpu, st, target);
+-
+ /*
+- * DYING must not fail!
++ * Invoke the former CPU_DYING callbacks. DYING must not fail!
+ */
+- WARN_ON_ONCE(ret);
++ cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
+
+ /* Give up timekeeping duties */
+ tick_handover_do_timer();
+@@ -1296,16 +1322,14 @@ void notify_cpu_starting(unsigned int cpu)
+ {
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+ enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
+- int ret;
+
+ rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
+ cpumask_set_cpu(cpu, &cpus_booted_once_mask);
+- ret = cpuhp_invoke_callback_range(true, cpu, st, target);
+
+ /*
+ * STARTING must not fail!
+ */
+- WARN_ON_ONCE(ret);
++ cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
+ }
+
+ /*
+@@ -2326,8 +2350,10 @@ static ssize_t target_store(struct device *dev, struct device_attribute *attr,
+
+ if (st->state < target)
+ ret = cpu_up(dev->id, target);
+- else
++ else if (st->state > target)
+ ret = cpu_down(dev->id, target);
++ else if (WARN_ON(st->target != target))
++ st->target = target;
+ out:
+ unlock_device_hotplug();
+ return ret ? ret : count;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 7f04f995c9754..732b392fc5c63 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -11193,13 +11193,15 @@ static int pmu_dev_alloc(struct pmu *pmu)
+
+ pmu->dev->groups = pmu->attr_groups;
+ device_initialize(pmu->dev);
+- ret = dev_set_name(pmu->dev, "%s", pmu->name);
+- if (ret)
+- goto free_dev;
+
+ dev_set_drvdata(pmu->dev, pmu);
+ pmu->dev->bus = &pmu_bus;
+ pmu->dev->release = pmu_dev_release;
++
++ ret = dev_set_name(pmu->dev, "%s", pmu->name);
++ if (ret)
++ goto free_dev;
++
+ ret = device_add(pmu->dev);
+ if (ret)
+ goto free_dev;
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 08969f5aa38d5..844dfdc8c639c 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -535,6 +535,9 @@ void put_task_stack(struct task_struct *tsk)
+
+ void free_task(struct task_struct *tsk)
+ {
++#ifdef CONFIG_SECCOMP
++ WARN_ON_ONCE(tsk->seccomp.filter);
++#endif
+ release_user_cpus_ptr(tsk);
+ scs_release(tsk);
+
+@@ -2406,12 +2409,6 @@ static __latent_entropy struct task_struct *copy_process(
+
+ spin_lock(&current->sighand->siglock);
+
+- /*
+- * Copy seccomp details explicitly here, in case they were changed
+- * before holding sighand lock.
+- */
+- copy_seccomp(p);
+-
+ rv_task_fork(p);
+
+ rseq_fork(p, clone_flags);
+@@ -2428,6 +2425,14 @@ static __latent_entropy struct task_struct *copy_process(
+ goto bad_fork_cancel_cgroup;
+ }
+
++ /* No more failure paths after this point. */
++
++ /*
++ * Copy seccomp details explicitly here, in case they were changed
++ * before holding sighand lock.
++ */
++ copy_seccomp(p);
++
+ init_task_pid_links(p);
+ if (likely(p->pid)) {
+ ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
+diff --git a/kernel/futex/core.c b/kernel/futex/core.c
+index b22ef1efe7511..514e4582b8634 100644
+--- a/kernel/futex/core.c
++++ b/kernel/futex/core.c
+@@ -638,6 +638,7 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
+ bool pi, bool pending_op)
+ {
+ u32 uval, nval, mval;
++ pid_t owner;
+ int err;
+
+ /* Futex address must be 32bit aligned */
+@@ -659,6 +660,10 @@ retry:
+ * 2. A woken up waiter is killed before it can acquire the
+ * futex in user space.
+ *
++ * In the second case, the wake up notification could be generated
++ * by the unlock path in user space after setting the futex value
++ * to zero or by the kernel after setting the OWNER_DIED bit below.
++ *
+ * In both cases the TID validation below prevents a wakeup of
+ * potential waiters which can cause these waiters to block
+ * forever.
+@@ -667,24 +672,27 @@ retry:
+ *
+ * 1) task->robust_list->list_op_pending != NULL
+ * @pending_op == true
+- * 2) User space futex value == 0
++ * 2) The owner part of user space futex value == 0
+ * 3) Regular futex: @pi == false
+ *
+ * If these conditions are met, it is safe to attempt waking up a
+ * potential waiter without touching the user space futex value and
+- * trying to set the OWNER_DIED bit. The user space futex value is
+- * uncontended and the rest of the user space mutex state is
+- * consistent, so a woken waiter will just take over the
+- * uncontended futex. Setting the OWNER_DIED bit would create
+- * inconsistent state and malfunction of the user space owner died
+- * handling.
++ * trying to set the OWNER_DIED bit. If the futex value is zero,
++ * the rest of the user space mutex state is consistent, so a woken
++ * waiter will just take over the uncontended futex. Setting the
++ * OWNER_DIED bit would create inconsistent state and malfunction
++ * of the user space owner died handling. Otherwise, the OWNER_DIED
++ * bit is already set, and the woken waiter is expected to deal with
++ * this.
+ */
+- if (pending_op && !pi && !uval) {
++ owner = uval & FUTEX_TID_MASK;
++
++ if (pending_op && !pi && !owner) {
+ futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
+ return 0;
+ }
+
+- if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
++ if (owner != task_pid_vnr(curr))
+ return 0;
+
+ /*
+diff --git a/kernel/futex/syscalls.c b/kernel/futex/syscalls.c
+index 086a22d1adb78..a8074079b09e8 100644
+--- a/kernel/futex/syscalls.c
++++ b/kernel/futex/syscalls.c
+@@ -286,19 +286,22 @@ SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters,
+ }
+
+ futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL);
+- if (!futexv)
+- return -ENOMEM;
++ if (!futexv) {
++ ret = -ENOMEM;
++ goto destroy_timer;
++ }
+
+ ret = futex_parse_waitv(futexv, waiters, nr_futexes);
+ if (!ret)
+ ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL);
+
++ kfree(futexv);
++
++destroy_timer:
+ if (timeout) {
+ hrtimer_cancel(&to.timer);
+ destroy_hrtimer_on_stack(&to.timer);
+ }
+-
+- kfree(futexv);
+ return ret;
+ }
+
+diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
+index 7971e989e425b..74a4ef1da9ad7 100644
+--- a/kernel/gcov/gcc_4_7.c
++++ b/kernel/gcov/gcc_4_7.c
+@@ -82,6 +82,7 @@ struct gcov_fn_info {
+ * @version: gcov version magic indicating the gcc version used for compilation
+ * @next: list head for a singly-linked list
+ * @stamp: uniquifying time stamp
++ * @checksum: unique object checksum
+ * @filename: name of the associated gcov data file
+ * @merge: merge functions (null for unused counter type)
+ * @n_functions: number of instrumented functions
+@@ -94,6 +95,10 @@ struct gcov_info {
+ unsigned int version;
+ struct gcov_info *next;
+ unsigned int stamp;
++ /* Since GCC 12.1 a checksum field is added. */
++#if (__GNUC__ >= 12)
++ unsigned int checksum;
++#endif
+ const char *filename;
+ void (*merge[GCOV_COUNTERS])(gcov_type *, unsigned int);
+ unsigned int n_functions;
+diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
+index f09c60393e559..5fdc0b5575797 100644
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -52,6 +52,7 @@ enum {
+ * IRQS_PENDING - irq is pending and replayed later
+ * IRQS_SUSPENDED - irq is suspended
+ * IRQS_NMI - irq line is used to deliver NMIs
++ * IRQS_SYSFS - descriptor has been added to sysfs
+ */
+ enum {
+ IRQS_AUTODETECT = 0x00000001,
+@@ -64,6 +65,7 @@ enum {
+ IRQS_SUSPENDED = 0x00000800,
+ IRQS_TIMINGS = 0x00001000,
+ IRQS_NMI = 0x00002000,
++ IRQS_SYSFS = 0x00004000,
+ };
+
+ #include "debug.h"
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index a91f9001103ce..fd09962744014 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -288,22 +288,25 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
+ if (irq_kobj_base) {
+ /*
+ * Continue even in case of failure as this is nothing
+- * crucial.
++ * crucial and failures in the late irq_sysfs_init()
++ * cannot be rolled back.
+ */
+ if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
+ pr_warn("Failed to add kobject for irq %d\n", irq);
++ else
++ desc->istate |= IRQS_SYSFS;
+ }
+ }
+
+ static void irq_sysfs_del(struct irq_desc *desc)
+ {
+ /*
+- * If irq_sysfs_init() has not yet been invoked (early boot), then
+- * irq_kobj_base is NULL and the descriptor was never added.
+- * kobject_del() complains about a object with no parent, so make
+- * it conditional.
++ * Only invoke kobject_del() when kobject_add() was successfully
++ * invoked for the descriptor. This covers both early boot, where
++ * sysfs is not initialized yet, and the case of a failed
++ * kobject_add() invocation.
+ */
+- if (irq_kobj_base)
++ if (desc->istate & IRQS_SYSFS)
+ kobject_del(&desc->kobj);
+ }
+
+diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
+index fe12dfe254ecf..54d077e1a2dc7 100644
+--- a/kernel/kcsan/core.c
++++ b/kernel/kcsan/core.c
+@@ -14,10 +14,12 @@
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/list.h>
++#include <linux/minmax.h>
+ #include <linux/moduleparam.h>
+ #include <linux/percpu.h>
+ #include <linux/preempt.h>
+ #include <linux/sched.h>
++#include <linux/string.h>
+ #include <linux/uaccess.h>
+
+ #include "encoding.h"
+@@ -1308,3 +1310,51 @@ noinline void __tsan_atomic_signal_fence(int memorder)
+ }
+ }
+ EXPORT_SYMBOL(__tsan_atomic_signal_fence);
++
++#ifdef __HAVE_ARCH_MEMSET
++void *__tsan_memset(void *s, int c, size_t count);
++noinline void *__tsan_memset(void *s, int c, size_t count)
++{
++ /*
++ * Instead of not setting up watchpoints where accessed size is greater
++ * than MAX_ENCODABLE_SIZE, truncate checked size to MAX_ENCODABLE_SIZE.
++ */
++ size_t check_len = min_t(size_t, count, MAX_ENCODABLE_SIZE);
++
++ check_access(s, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
++ return memset(s, c, count);
++}
++#else
++void *__tsan_memset(void *s, int c, size_t count) __alias(memset);
++#endif
++EXPORT_SYMBOL(__tsan_memset);
++
++#ifdef __HAVE_ARCH_MEMMOVE
++void *__tsan_memmove(void *dst, const void *src, size_t len);
++noinline void *__tsan_memmove(void *dst, const void *src, size_t len)
++{
++ size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE);
++
++ check_access(dst, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
++ check_access(src, check_len, 0, _RET_IP_);
++ return memmove(dst, src, len);
++}
++#else
++void *__tsan_memmove(void *dst, const void *src, size_t len) __alias(memmove);
++#endif
++EXPORT_SYMBOL(__tsan_memmove);
++
++#ifdef __HAVE_ARCH_MEMCPY
++void *__tsan_memcpy(void *dst, const void *src, size_t len);
++noinline void *__tsan_memcpy(void *dst, const void *src, size_t len)
++{
++ size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE);
++
++ check_access(dst, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
++ check_access(src, check_len, 0, _RET_IP_);
++ return memcpy(dst, src, len);
++}
++#else
++void *__tsan_memcpy(void *dst, const void *src, size_t len) __alias(memcpy);
++#endif
++EXPORT_SYMBOL(__tsan_memcpy);
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 3050631e528d9..1c18ecf9f98b1 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2213,13 +2213,9 @@ int register_kretprobe(struct kretprobe *rp)
+ rp->kp.post_handler = NULL;
+
+ /* Pre-allocate memory for max kretprobe instances */
+- if (rp->maxactive <= 0) {
+-#ifdef CONFIG_PREEMPTION
++ if (rp->maxactive <= 0)
+ rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
+-#else
+- rp->maxactive = num_possible_cpus();
+-#endif
+- }
++
+ #ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler);
+ if (!rp->rh)
+@@ -2364,6 +2360,14 @@ static void kill_kprobe(struct kprobe *p)
+
+ lockdep_assert_held(&kprobe_mutex);
+
++ /*
++ * The module is going away. We should disarm the kprobe which
++ * is using ftrace, because ftrace framework is still available at
++ * 'MODULE_STATE_GOING' notification.
++ */
++ if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
++ disarm_kprobe_ftrace(p);
++
+ p->flags |= KPROBE_FLAG_GONE;
+ if (kprobe_aggrprobe(p)) {
+ /*
+@@ -2380,14 +2384,6 @@ static void kill_kprobe(struct kprobe *p)
+ * the original probed function (which will be freed soon) any more.
+ */
+ arch_remove_kprobe(p);
+-
+- /*
+- * The module is going away. We should disarm the kprobe which
+- * is using ftrace, because ftrace framework is still available at
+- * 'MODULE_STATE_GOING' notification.
+- */
+- if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
+- disarm_kprobe_ftrace(p);
+ }
+
+ /* Disable one kprobe */
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 7779ee8abc2a0..010cf4e6d0b8f 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -89,15 +89,31 @@ static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
+ * set this bit before looking at the lock.
+ */
+
+-static __always_inline void
+-rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
++static __always_inline struct task_struct *
++rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
+ {
+ unsigned long val = (unsigned long)owner;
+
+ if (rt_mutex_has_waiters(lock))
+ val |= RT_MUTEX_HAS_WAITERS;
+
+- WRITE_ONCE(lock->owner, (struct task_struct *)val);
++ return (struct task_struct *)val;
++}
++
++static __always_inline void
++rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
++{
++ /*
++ * lock->wait_lock is held but explicit acquire semantics are needed
++ * for a new lock owner so WRITE_ONCE is insufficient.
++ */
++ xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
++}
++
++static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
++{
++ /* lock->wait_lock is held so the unlock provides release semantics. */
++ WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
+ }
+
+ static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
+@@ -106,7 +122,8 @@ static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
+ ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
+ }
+
+-static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
++static __always_inline void
++fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
+ {
+ unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+@@ -172,8 +189,21 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
+ * still set.
+ */
+ owner = READ_ONCE(*p);
+- if (owner & RT_MUTEX_HAS_WAITERS)
+- WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
++ if (owner & RT_MUTEX_HAS_WAITERS) {
++ /*
++ * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
++ * why xchg_acquire() is used for updating owner for
++ * locking and WRITE_ONCE() for unlocking.
++ *
++ * WRITE_ONCE() would work for the acquire case too, but
++ * in case that the lock acquisition failed it might
++ * force other lockers into the slow path unnecessarily.
++ */
++ if (acquire_lock)
++ xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
++ else
++ WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
++ }
+ }
+
+ /*
+@@ -208,6 +238,13 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
+ owner = *p;
+ } while (cmpxchg_relaxed(p, owner,
+ owner | RT_MUTEX_HAS_WAITERS) != owner);
++
++ /*
++ * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
++ * operations in the event of contention. Ensure the successful
++ * cmpxchg is visible.
++ */
++ smp_mb__after_atomic();
+ }
+
+ /*
+@@ -1243,7 +1280,7 @@ static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
+ * try_to_take_rt_mutex() sets the lock waiters bit
+ * unconditionally. Clean this up.
+ */
+- fixup_rt_mutex_waiters(lock);
++ fixup_rt_mutex_waiters(lock, true);
+
+ return ret;
+ }
+@@ -1604,7 +1641,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
+ * try_to_take_rt_mutex() sets the waiter bit
+ * unconditionally. We might have to fix that up.
+ */
+- fixup_rt_mutex_waiters(lock);
++ fixup_rt_mutex_waiters(lock, true);
+
+ trace_contention_end(lock, ret);
+
+@@ -1719,7 +1756,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
+ * try_to_take_rt_mutex() sets the waiter bit unconditionally.
+ * We might have to fix that up:
+ */
+- fixup_rt_mutex_waiters(lock);
++ fixup_rt_mutex_waiters(lock, true);
+ debug_rt_mutex_free_waiter(&waiter);
+
+ trace_contention_end(lock, 0);
+diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
+index 900220941caac..cb9fdff76a8a3 100644
+--- a/kernel/locking/rtmutex_api.c
++++ b/kernel/locking/rtmutex_api.c
+@@ -267,7 +267,7 @@ void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
+ void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
+ {
+ debug_rt_mutex_proxy_unlock(lock);
+- rt_mutex_set_owner(lock, NULL);
++ rt_mutex_clear_owner(lock);
+ }
+
+ /**
+@@ -382,7 +382,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
+ * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+ * have to fix that up.
+ */
+- fixup_rt_mutex_waiters(lock);
++ fixup_rt_mutex_waiters(lock, true);
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+ return ret;
+@@ -438,7 +438,7 @@ bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
+ * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+ * have to fix that up.
+ */
+- fixup_rt_mutex_waiters(lock);
++ fixup_rt_mutex_waiters(lock, false);
+
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c
+index c033572d83f0e..720e719253cd1 100644
+--- a/kernel/module/decompress.c
++++ b/kernel/module/decompress.c
+@@ -114,8 +114,8 @@ static ssize_t module_gzip_decompress(struct load_info *info,
+ do {
+ struct page *page = module_get_next_page(info);
+
+- if (!page) {
+- retval = -ENOMEM;
++ if (IS_ERR(page)) {
++ retval = PTR_ERR(page);
+ goto out_inflate_end;
+ }
+
+@@ -173,8 +173,8 @@ static ssize_t module_xz_decompress(struct load_info *info,
+ do {
+ struct page *page = module_get_next_page(info);
+
+- if (!page) {
+- retval = -ENOMEM;
++ if (IS_ERR(page)) {
++ retval = PTR_ERR(page);
+ goto out;
+ }
+
+diff --git a/kernel/padata.c b/kernel/padata.c
+index e5819bb8bd1dc..de90af5fcbe6b 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -207,14 +207,16 @@ int padata_do_parallel(struct padata_shell *ps,
+ pw = padata_work_alloc();
+ spin_unlock(&padata_works_lock);
+
++ if (!pw) {
++ /* Maximum works limit exceeded, run in the current task. */
++ padata->parallel(padata);
++ }
++
+ rcu_read_unlock_bh();
+
+ if (pw) {
+ padata_work_init(pw, padata_parallel_worker, padata, 0);
+ queue_work(pinst->parallel_wq, &pw->pw_work);
+- } else {
+- /* Maximum works limit exceeded, run in the current task. */
+- padata->parallel(padata);
+ }
+
+ return 0;
+@@ -388,13 +390,16 @@ void padata_do_serial(struct padata_priv *padata)
+ int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
+ struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
+ struct padata_priv *cur;
++ struct list_head *pos;
+
+ spin_lock(&reorder->lock);
+ /* Sort in ascending order of sequence number. */
+- list_for_each_entry_reverse(cur, &reorder->list, list)
++ list_for_each_prev(pos, &reorder->list) {
++ cur = list_entry(pos, struct padata_priv, list);
+ if (cur->seq_nr < padata->seq_nr)
+ break;
+- list_add(&padata->list, &cur->list);
++ }
++ list_add(&padata->list, pos);
+ spin_unlock(&reorder->lock);
+
+ /*
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index 2a406753af904..c20ca5fb9adc8 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -1723,8 +1723,8 @@ static unsigned long minimum_image_size(unsigned long saveable)
+ * /sys/power/reserved_size, respectively). To make this happen, we compute the
+ * total number of available page frames and allocate at least
+ *
+- * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
+- * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
++ * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
++ * - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
+ *
+ * of them, which corresponds to the maximum size of a hibernation image.
+ *
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 93416afebd59c..14d9384fba056 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -2418,7 +2418,7 @@ void rcu_force_quiescent_state(void)
+ struct rcu_node *rnp_old = NULL;
+
+ /* Funnel through hierarchy to reduce memory contention. */
+- rnp = __this_cpu_read(rcu_data.mynode);
++ rnp = raw_cpu_read(rcu_data.mynode);
+ for (; rnp != NULL; rnp = rnp->parent) {
+ ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
+ !raw_spin_trylock(&rnp->fqslock);
+diff --git a/kernel/relay.c b/kernel/relay.c
+index d7edc934c56d5..88bcb09f0a1f2 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -148,13 +148,13 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
+ {
+ struct rchan_buf *buf;
+
+- if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
++ if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t))
+ return NULL;
+
+ buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+- buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t *),
++ buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t),
+ GFP_KERNEL);
+ if (!buf->padding)
+ goto free_buf;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index daff72f003858..535af9fbea7b8 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1392,7 +1392,7 @@ static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
+ if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
+ return;
+
+- WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
++ uclamp_rq_set(rq, clamp_id, clamp_value);
+ }
+
+ static inline
+@@ -1543,8 +1543,8 @@ static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
+ if (bucket->tasks == 1 || uc_se->value > bucket->value)
+ bucket->value = uc_se->value;
+
+- if (uc_se->value > READ_ONCE(uc_rq->value))
+- WRITE_ONCE(uc_rq->value, uc_se->value);
++ if (uc_se->value > uclamp_rq_get(rq, clamp_id))
++ uclamp_rq_set(rq, clamp_id, uc_se->value);
+ }
+
+ /*
+@@ -1610,7 +1610,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
+ if (likely(bucket->tasks))
+ return;
+
+- rq_clamp = READ_ONCE(uc_rq->value);
++ rq_clamp = uclamp_rq_get(rq, clamp_id);
+ /*
+ * Defensive programming: this should never happen. If it happens,
+ * e.g. due to future modification, warn and fixup the expected value.
+@@ -1618,7 +1618,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
+ SCHED_WARN_ON(bucket->value > rq_clamp);
+ if (bucket->value >= rq_clamp) {
+ bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
+- WRITE_ONCE(uc_rq->value, bkt_clamp);
++ uclamp_rq_set(rq, clamp_id, bkt_clamp);
+ }
+ }
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index e4a0b8bd941c7..0f32acb05055f 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4280,14 +4280,16 @@ static inline unsigned long task_util_est(struct task_struct *p)
+ }
+
+ #ifdef CONFIG_UCLAMP_TASK
+-static inline unsigned long uclamp_task_util(struct task_struct *p)
++static inline unsigned long uclamp_task_util(struct task_struct *p,
++ unsigned long uclamp_min,
++ unsigned long uclamp_max)
+ {
+- return clamp(task_util_est(p),
+- uclamp_eff_value(p, UCLAMP_MIN),
+- uclamp_eff_value(p, UCLAMP_MAX));
++ return clamp(task_util_est(p), uclamp_min, uclamp_max);
+ }
+ #else
+-static inline unsigned long uclamp_task_util(struct task_struct *p)
++static inline unsigned long uclamp_task_util(struct task_struct *p,
++ unsigned long uclamp_min,
++ unsigned long uclamp_max)
+ {
+ return task_util_est(p);
+ }
+@@ -4426,10 +4428,135 @@ done:
+ trace_sched_util_est_se_tp(&p->se);
+ }
+
+-static inline int task_fits_capacity(struct task_struct *p,
+- unsigned long capacity)
++static inline int util_fits_cpu(unsigned long util,
++ unsigned long uclamp_min,
++ unsigned long uclamp_max,
++ int cpu)
+ {
+- return fits_capacity(uclamp_task_util(p), capacity);
++ unsigned long capacity_orig, capacity_orig_thermal;
++ unsigned long capacity = capacity_of(cpu);
++ bool fits, uclamp_max_fits;
++
++ /*
++ * Check if the real util fits without any uclamp boost/cap applied.
++ */
++ fits = fits_capacity(util, capacity);
++
++ if (!uclamp_is_used())
++ return fits;
++
++ /*
++ * We must use capacity_orig_of() for comparing against uclamp_min and
++ * uclamp_max. We only care about capacity pressure (by using
++ * capacity_of()) for comparing against the real util.
++ *
++ * If a task is boosted to 1024 for example, we don't want a tiny
++ * pressure to skew the check whether it fits a CPU or not.
++ *
++ * Similarly if a task is capped to capacity_orig_of(little_cpu), it
++ * should fit a little cpu even if there's some pressure.
++ *
++ * Only exception is for thermal pressure since it has a direct impact
++ * on available OPP of the system.
++ *
++ * We honour it for uclamp_min only as a drop in performance level
++ * could result in not getting the requested minimum performance level.
++ *
++ * For uclamp_max, we can tolerate a drop in performance level as the
++ * goal is to cap the task. So it's okay if it's getting less.
++ *
++ * In case of capacity inversion, which is not handled yet, we should
++ * honour the inverted capacity for both uclamp_min and uclamp_max all
++ * the time.
++ */
++ capacity_orig = capacity_orig_of(cpu);
++ capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
++
++ /*
++ * We want to force a task to fit a cpu as implied by uclamp_max.
++ * But we do have some corner cases to cater for..
++ *
++ *
++ * C=z
++ * | ___
++ * | C=y | |
++ * |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max
++ * | C=x | | | |
++ * | ___ | | | |
++ * | | | | | | | (util somewhere in this region)
++ * | | | | | | |
++ * | | | | | | |
++ * +----------------------------------------
++ * cpu0 cpu1 cpu2
++ *
++ * In the above example if a task is capped to a specific performance
++ * point, y, then when:
++ *
++ * * util = 80% of x then it does not fit on cpu0 and should migrate
++ * to cpu1
++ * * util = 80% of y then it is forced to fit on cpu1 to honour
++ * uclamp_max request.
++ *
++ * which is what we're enforcing here. A task always fits if
++ * uclamp_max <= capacity_orig. But when uclamp_max > capacity_orig,
++ * the normal upmigration rules should withhold still.
++ *
++ * Only exception is when we are on max capacity, then we need to be
++ * careful not to block overutilized state. This is so because:
++ *
++ * 1. There's no concept of capping at max_capacity! We can't go
++ * beyond this performance level anyway.
++ * 2. The system is being saturated when we're operating near
++ * max capacity, it doesn't make sense to block overutilized.
++ */
++ uclamp_max_fits = (capacity_orig == SCHED_CAPACITY_SCALE) && (uclamp_max == SCHED_CAPACITY_SCALE);
++ uclamp_max_fits = !uclamp_max_fits && (uclamp_max <= capacity_orig);
++ fits = fits || uclamp_max_fits;
++
++ /*
++ *
++ * C=z
++ * | ___ (region a, capped, util >= uclamp_max)
++ * | C=y | |
++ * |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max
++ * | C=x | | | |
++ * | ___ | | | | (region b, uclamp_min <= util <= uclamp_max)
++ * |_ _ _|_ _|_ _ _ _| _ | _ _ _| _ | _ _ _ _ _ uclamp_min
++ * | | | | | | |
++ * | | | | | | | (region c, boosted, util < uclamp_min)
++ * +----------------------------------------
++ * cpu0 cpu1 cpu2
++ *
++ * a) If util > uclamp_max, then we're capped, we don't care about
++ * actual fitness value here. We only care if uclamp_max fits
++ * capacity without taking margin/pressure into account.
++ * See comment above.
++ *
++ * b) If uclamp_min <= util <= uclamp_max, then the normal
++ * fits_capacity() rules apply. Except we need to ensure that we
++ * enforce we remain within uclamp_max, see comment above.
++ *
++ * c) If util < uclamp_min, then we are boosted. Same as (b) but we
++ * need to take into account the boosted value fits the CPU without
++ * taking margin/pressure into account.
++ *
++ * Cases (a) and (b) are handled in the 'fits' variable already. We
++ * just need to consider an extra check for case (c) after ensuring we
++ * handle the case uclamp_min > uclamp_max.
++ */
++ uclamp_min = min(uclamp_min, uclamp_max);
++ if (util < uclamp_min && capacity_orig != SCHED_CAPACITY_SCALE)
++ fits = fits && (uclamp_min <= capacity_orig_thermal);
++
++ return fits;
++}
++
++static inline int task_fits_cpu(struct task_struct *p, int cpu)
++{
++ unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN);
++ unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX);
++ unsigned long util = task_util_est(p);
++ return util_fits_cpu(util, uclamp_min, uclamp_max, cpu);
+ }
+
+ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+@@ -4442,7 +4569,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+ return;
+ }
+
+- if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
++ if (task_fits_cpu(p, cpu_of(rq))) {
+ rq->misfit_task_load = 0;
+ return;
+ }
+@@ -5862,7 +5989,10 @@ static inline void hrtick_update(struct rq *rq)
+ #ifdef CONFIG_SMP
+ static inline bool cpu_overutilized(int cpu)
+ {
+- return !fits_capacity(cpu_util_cfs(cpu), capacity_of(cpu));
++ unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
++ unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
++
++ return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
+ }
+
+ static inline void update_overutilized_status(struct rq *rq)
+@@ -6654,21 +6784,23 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
+ static int
+ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
+ {
+- unsigned long task_util, best_cap = 0;
++ unsigned long task_util, util_min, util_max, best_cap = 0;
+ int cpu, best_cpu = -1;
+ struct cpumask *cpus;
+
+ cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
+ cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
+
+- task_util = uclamp_task_util(p);
++ task_util = task_util_est(p);
++ util_min = uclamp_eff_value(p, UCLAMP_MIN);
++ util_max = uclamp_eff_value(p, UCLAMP_MAX);
+
+ for_each_cpu_wrap(cpu, cpus, target) {
+ unsigned long cpu_cap = capacity_of(cpu);
+
+ if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
+ continue;
+- if (fits_capacity(task_util, cpu_cap))
++ if (util_fits_cpu(task_util, util_min, util_max, cpu))
+ return cpu;
+
+ if (cpu_cap > best_cap) {
+@@ -6680,10 +6812,13 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
+ return best_cpu;
+ }
+
+-static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
++static inline bool asym_fits_cpu(unsigned long util,
++ unsigned long util_min,
++ unsigned long util_max,
++ int cpu)
+ {
+ if (sched_asym_cpucap_active())
+- return fits_capacity(task_util, capacity_of(cpu));
++ return util_fits_cpu(util, util_min, util_max, cpu);
+
+ return true;
+ }
+@@ -6695,7 +6830,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ {
+ bool has_idle_core = false;
+ struct sched_domain *sd;
+- unsigned long task_util;
++ unsigned long task_util, util_min, util_max;
+ int i, recent_used_cpu;
+
+ /*
+@@ -6704,7 +6839,9 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ */
+ if (sched_asym_cpucap_active()) {
+ sync_entity_load_avg(&p->se);
+- task_util = uclamp_task_util(p);
++ task_util = task_util_est(p);
++ util_min = uclamp_eff_value(p, UCLAMP_MIN);
++ util_max = uclamp_eff_value(p, UCLAMP_MAX);
+ }
+
+ /*
+@@ -6713,7 +6850,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ lockdep_assert_irqs_disabled();
+
+ if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
+- asym_fits_capacity(task_util, target))
++ asym_fits_cpu(task_util, util_min, util_max, target))
+ return target;
+
+ /*
+@@ -6721,7 +6858,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ */
+ if (prev != target && cpus_share_cache(prev, target) &&
+ (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
+- asym_fits_capacity(task_util, prev))
++ asym_fits_cpu(task_util, util_min, util_max, prev))
+ return prev;
+
+ /*
+@@ -6736,7 +6873,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ in_task() &&
+ prev == smp_processor_id() &&
+ this_rq()->nr_running <= 1 &&
+- asym_fits_capacity(task_util, prev)) {
++ asym_fits_cpu(task_util, util_min, util_max, prev)) {
+ return prev;
+ }
+
+@@ -6748,7 +6885,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ cpus_share_cache(recent_used_cpu, target) &&
+ (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
+ cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
+- asym_fits_capacity(task_util, recent_used_cpu)) {
++ asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
+ return recent_used_cpu;
+ }
+
+@@ -7044,6 +7181,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ {
+ struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
+ unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
++ unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0;
++ unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024;
+ struct root_domain *rd = this_rq()->rd;
+ int cpu, best_energy_cpu, target = -1;
+ struct sched_domain *sd;
+@@ -7068,7 +7207,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ target = prev_cpu;
+
+ sync_entity_load_avg(&p->se);
+- if (!task_util_est(p))
++ if (!uclamp_task_util(p, p_util_min, p_util_max))
+ goto unlock;
+
+ eenv_task_busy_time(&eenv, p, prev_cpu);
+@@ -7076,6 +7215,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ for (; pd; pd = pd->next) {
+ unsigned long cpu_cap, cpu_thermal_cap, util;
+ unsigned long cur_delta, max_spare_cap = 0;
++ unsigned long rq_util_min, rq_util_max;
++ unsigned long util_min, util_max;
+ bool compute_prev_delta = false;
+ int max_spare_cap_cpu = -1;
+ unsigned long base_energy;
+@@ -7112,8 +7253,26 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ * much capacity we can get out of the CPU; this is
+ * aligned with sched_cpu_util().
+ */
+- util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
+- if (!fits_capacity(util, cpu_cap))
++ if (uclamp_is_used()) {
++ if (uclamp_rq_is_idle(cpu_rq(cpu))) {
++ util_min = p_util_min;
++ util_max = p_util_max;
++ } else {
++ /*
++ * Open code uclamp_rq_util_with() except for
++ * the clamp() part. Ie: apply max aggregation
++ * only. util_fits_cpu() logic requires to
++ * operate on non clamped util but must use the
++ * max-aggregated uclamp_{min, max}.
++ */
++ rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
++ rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
++
++ util_min = max(rq_util_min, p_util_min);
++ util_max = max(rq_util_max, p_util_max);
++ }
++ }
++ if (!util_fits_cpu(util, util_min, util_max, cpu))
+ continue;
+
+ lsub_positive(&cpu_cap, util);
+@@ -8276,7 +8435,7 @@ static int detach_tasks(struct lb_env *env)
+
+ case migrate_misfit:
+ /* This is not a misfit task */
+- if (task_fits_capacity(p, capacity_of(env->src_cpu)))
++ if (task_fits_cpu(p, env->src_cpu))
+ goto next;
+
+ env->imbalance = 0;
+@@ -9281,6 +9440,10 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
+
+ memset(sgs, 0, sizeof(*sgs));
+
++ /* Assume that task can't fit any CPU of the group */
++ if (sd->flags & SD_ASYM_CPUCAPACITY)
++ sgs->group_misfit_task_load = 1;
++
+ for_each_cpu(i, sched_group_span(group)) {
+ struct rq *rq = cpu_rq(i);
+ unsigned int local;
+@@ -9300,12 +9463,12 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
+ if (!nr_running && idle_cpu_without(i, p))
+ sgs->idle_cpus++;
+
+- }
++ /* Check if task fits in the CPU */
++ if (sd->flags & SD_ASYM_CPUCAPACITY &&
++ sgs->group_misfit_task_load &&
++ task_fits_cpu(p, i))
++ sgs->group_misfit_task_load = 0;
+
+- /* Check if task fits in the group */
+- if (sd->flags & SD_ASYM_CPUCAPACITY &&
+- !task_fits_capacity(p, group->sgc->max_capacity)) {
+- sgs->group_misfit_task_load = 1;
+ }
+
+ sgs->group_capacity = group->sgc->capacity;
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index ee2ecc081422e..7f40d87e8f509 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -539,10 +539,12 @@ static u64 update_triggers(struct psi_group *group, u64 now)
+
+ /* Calculate growth since last update */
+ growth = window_update(&t->win, now, total[t->state]);
+- if (growth < t->threshold)
+- continue;
++ if (!t->pending_event) {
++ if (growth < t->threshold)
++ continue;
+
+- t->pending_event = true;
++ t->pending_event = true;
++ }
+ }
+ /* Limit event signaling to once per window */
+ if (now < t->last_event_time + t->win.size)
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index a4a20046e586e..d6d488e8eb554 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -2979,6 +2979,23 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
+ #ifdef CONFIG_UCLAMP_TASK
+ unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
+
++static inline unsigned long uclamp_rq_get(struct rq *rq,
++ enum uclamp_id clamp_id)
++{
++ return READ_ONCE(rq->uclamp[clamp_id].value);
++}
++
++static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
++ unsigned int value)
++{
++ WRITE_ONCE(rq->uclamp[clamp_id].value, value);
++}
++
++static inline bool uclamp_rq_is_idle(struct rq *rq)
++{
++ return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
++}
++
+ /**
+ * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
+ * @rq: The rq to clamp against. Must not be NULL.
+@@ -3014,12 +3031,12 @@ unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
+ * Ignore last runnable task's max clamp, as this task will
+ * reset it. Similarly, no need to read the rq's min clamp.
+ */
+- if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
++ if (uclamp_rq_is_idle(rq))
+ goto out;
+ }
+
+- min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
+- max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
++ min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN));
++ max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX));
+ out:
+ /*
+ * Since CPU's {min,max}_util clamps are MAX aggregated considering
+@@ -3060,6 +3077,15 @@ static inline bool uclamp_is_used(void)
+ return static_branch_likely(&sched_uclamp_used);
+ }
+ #else /* CONFIG_UCLAMP_TASK */
++static inline unsigned long uclamp_eff_value(struct task_struct *p,
++ enum uclamp_id clamp_id)
++{
++ if (clamp_id == UCLAMP_MIN)
++ return 0;
++
++ return SCHED_CAPACITY_SCALE;
++}
++
+ static inline
+ unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
+ struct task_struct *p)
+@@ -3073,6 +3099,25 @@ static inline bool uclamp_is_used(void)
+ {
+ return false;
+ }
++
++static inline unsigned long uclamp_rq_get(struct rq *rq,
++ enum uclamp_id clamp_id)
++{
++ if (clamp_id == UCLAMP_MIN)
++ return 0;
++
++ return SCHED_CAPACITY_SCALE;
++}
++
++static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
++ unsigned int value)
++{
++}
++
++static inline bool uclamp_rq_is_idle(struct rq *rq)
++{
++ return false;
++}
+ #endif /* CONFIG_UCLAMP_TASK */
+
+ #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index a995ea1ef849a..a66cff5a18579 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -1548,7 +1548,8 @@ blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
+
+ static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
+ {
+- if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
++ if ((iter->ent->type != TRACE_BLK) ||
++ !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
+ return TRACE_TYPE_UNHANDLED;
+
+ return print_one_line(iter, true);
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 1c82478e8dffe..b6e5724a9ea35 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -6438,7 +6438,7 @@ enable:
+ if (se)
+ se->ref++;
+ out:
+- if (ret == 0)
++ if (ret == 0 && glob[0])
+ hist_err_clear();
+
+ return ret;
+diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
+index 539b08ae70207..9cb53182bb31c 100644
+--- a/kernel/trace/trace_events_user.c
++++ b/kernel/trace/trace_events_user.c
+@@ -1359,6 +1359,7 @@ put_user_lock:
+ put_user:
+ user_event_destroy_fields(user);
+ user_event_destroy_validators(user);
++ kfree(user->call.print_fmt);
+ kfree(user);
+ return ret;
+ }
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index 337d797a71416..6f8e5dd1dcd0c 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -437,6 +437,7 @@ static int object_cpu_offline(unsigned int cpu)
+ struct debug_percpu_free *percpu_pool;
+ struct hlist_node *tmp;
+ struct debug_obj *obj;
++ unsigned long flags;
+
+ /* Remote access is safe as the CPU is dead already */
+ percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
+@@ -444,6 +445,12 @@ static int object_cpu_offline(unsigned int cpu)
+ hlist_del(&obj->node);
+ kmem_cache_free(obj_cache, obj);
+ }
++
++ raw_spin_lock_irqsave(&pool_lock, flags);
++ obj_pool_used -= percpu_pool->obj_free;
++ debug_objects_freed += percpu_pool->obj_free;
++ raw_spin_unlock_irqrestore(&pool_lock, flags);
++
+ percpu_pool->obj_free = 0;
+
+ return 0;
+@@ -1318,6 +1325,8 @@ static int __init debug_objects_replace_static_objects(void)
+ hlist_add_head(&obj->node, &objects);
+ }
+
++ debug_objects_allocated += i;
++
+ /*
+ * debug_objects_mem_init() is now called early that only one CPU is up
+ * and interrupts have been disabled, so it is safe to replace the
+@@ -1386,6 +1395,7 @@ void __init debug_objects_mem_init(void)
+ debug_objects_enabled = 0;
+ kmem_cache_destroy(obj_cache);
+ pr_warn("out of memory.\n");
++ return;
+ } else
+ debug_objects_selftest();
+
+diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
+index 5f4b07b56cd9c..9738664386088 100644
+--- a/lib/fonts/fonts.c
++++ b/lib/fonts/fonts.c
+@@ -135,8 +135,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
+ if (res > 20)
+ c += 20 - res;
+
+- if ((font_w & (1 << (f->width - 1))) &&
+- (font_h & (1 << (f->height - 1))))
++ if ((font_w & (1U << (f->width - 1))) &&
++ (font_h & (1U << (f->height - 1))))
+ c += 1000;
+
+ if (c > cc) {
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index df352f6ccc240..fe21bf276d91c 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -2989,7 +2989,9 @@ static int mas_spanning_rebalance(struct ma_state *mas,
+ mast->free = &free;
+ mast->destroy = &destroy;
+ l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
+- if (!(mast->orig_l->min && mast->orig_r->max == ULONG_MAX) &&
++
++ /* Check if this is not root and has sufficient data. */
++ if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
+ unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
+ mast_spanning_rebalance(mast);
+
+diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
+index 21016b32d3131..2b24ea6c94979 100644
+--- a/lib/notifier-error-inject.c
++++ b/lib/notifier-error-inject.c
+@@ -15,7 +15,7 @@ static int debugfs_errno_get(void *data, u64 *val)
+ return 0;
+ }
+
+-DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set,
++DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops_errno, debugfs_errno_get, debugfs_errno_set,
+ "%lld\n");
+
+ static struct dentry *debugfs_create_errno(const char *name, umode_t mode,
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index c82b65947ce68..1c5a2adb16ef5 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -1491,6 +1491,7 @@ static int __init test_firmware_init(void)
+
+ rc = misc_register(&test_fw_misc_device);
+ if (rc) {
++ __test_firmware_config_free();
+ kfree(test_fw_config);
+ pr_err("could not register misc device: %d\n", rc);
+ return rc;
+diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
+index f425f169ef089..497fc93ccf9ec 100644
+--- a/lib/test_maple_tree.c
++++ b/lib/test_maple_tree.c
+@@ -2498,6 +2498,25 @@ static noinline void check_dup(struct maple_tree *mt)
+ }
+ }
+
++static noinline void check_bnode_min_spanning(struct maple_tree *mt)
++{
++ int i = 50;
++ MA_STATE(mas, mt, 0, 0);
++
++ mt_set_non_kernel(9999);
++ mas_lock(&mas);
++ do {
++ mas_set_range(&mas, i*10, i*10+9);
++ mas_store(&mas, check_bnode_min_spanning);
++ } while (i--);
++
++ mas_set_range(&mas, 240, 509);
++ mas_store(&mas, NULL);
++ mas_unlock(&mas);
++ mas_destroy(&mas);
++ mt_set_non_kernel(0);
++}
++
+ static DEFINE_MTREE(tree);
+ static int maple_tree_seed(void)
+ {
+@@ -2742,6 +2761,10 @@ static int maple_tree_seed(void)
+ check_dup(&tree);
+ mtree_destroy(&tree);
+
++ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
++ check_bnode_min_spanning(&tree);
++ mtree_destroy(&tree);
++
+ #if defined(BENCH)
+ skip:
+ #endif
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 1f6da31dd9a50..ca1603524bbe0 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1344,7 +1344,7 @@ move_freelist_tail(struct list_head *freelist, struct page *freepage)
+ }
+
+ static void
+-fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
++fast_isolate_around(struct compact_control *cc, unsigned long pfn)
+ {
+ unsigned long start_pfn, end_pfn;
+ struct page *page;
+@@ -1365,21 +1365,13 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long
+ if (!page)
+ return;
+
+- /* Scan before */
+- if (start_pfn != pfn) {
+- isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
+- if (cc->nr_freepages >= cc->nr_migratepages)
+- return;
+- }
+-
+- /* Scan after */
+- start_pfn = pfn + nr_isolated;
+- if (start_pfn < end_pfn)
+- isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
++ isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
+
+ /* Skip this pageblock in the future as it's full or nearly full */
+ if (cc->nr_freepages < cc->nr_migratepages)
+ set_pageblock_skip(page);
++
++ return;
+ }
+
+ /* Search orders in round-robin fashion */
+@@ -1556,7 +1548,7 @@ fast_isolate_freepages(struct compact_control *cc)
+ return cc->free_pfn;
+
+ low_pfn = page_to_pfn(page);
+- fast_isolate_around(cc, low_pfn, nr_isolated);
++ fast_isolate_around(cc, low_pfn);
+ return low_pfn;
+ }
+
+diff --git a/mm/gup.c b/mm/gup.c
+index 3b7bc2c1fd44c..eb8d7baf9e4d3 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1065,6 +1065,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
+ if (!(vm_flags & VM_WRITE)) {
+ if (!(gup_flags & FOLL_FORCE))
+ return -EFAULT;
++ /* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
++ if (is_vm_hugetlb_page(vma))
++ return -EFAULT;
+ /*
+ * We used to let the write,force case do COW in a
+ * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
+diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
+index 35f6b6e6a908c..3807502766a3e 100644
+--- a/mm/kmsan/hooks.c
++++ b/mm/kmsan/hooks.c
+@@ -260,6 +260,7 @@ void kmsan_handle_urb(const struct urb *urb, bool is_out)
+ urb->transfer_buffer_length,
+ /*checked*/ false);
+ }
++EXPORT_SYMBOL_GPL(kmsan_handle_urb);
+
+ static void kmsan_handle_dma_page(const void *addr, size_t size,
+ enum dma_data_direction dir)
+diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
+index 9a29ea2dbfb9b..1328636cbd6cd 100644
+--- a/mm/kmsan/kmsan_test.c
++++ b/mm/kmsan/kmsan_test.c
+@@ -22,6 +22,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/string.h>
+ #include <linux/tracepoint.h>
++#include <linux/vmalloc.h>
+ #include <trace/events/printk.h>
+
+ static DEFINE_PER_CPU(int, per_cpu_var);
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 61aa9aedb7289..02c8a712282f1 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1540,6 +1540,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
+ * the home node for vmas we already updated before.
+ */
+ if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) {
++ mpol_put(new);
+ err = -EOPNOTSUPP;
+ break;
+ }
+diff --git a/mm/mremap.c b/mm/mremap.c
+index e465ffe279bb0..fe587c5d65913 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -1016,7 +1016,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+ long pages = (new_len - old_len) >> PAGE_SHIFT;
+ unsigned long extension_start = addr + old_len;
+ unsigned long extension_end = addr + new_len;
+- pgoff_t extension_pgoff = vma->vm_pgoff + (old_len >> PAGE_SHIFT);
++ pgoff_t extension_pgoff = vma->vm_pgoff +
++ ((extension_start - vma->vm_start) >> PAGE_SHIFT);
+
+ if (vma->vm_flags & VM_ACCOUNT) {
+ if (security_vm_enough_memory_mm(mm, pages)) {
+diff --git a/net/802/mrp.c b/net/802/mrp.c
+index 155f74d8b14f4..6c927d4b35f06 100644
+--- a/net/802/mrp.c
++++ b/net/802/mrp.c
+@@ -606,7 +606,10 @@ static void mrp_join_timer(struct timer_list *t)
+ spin_unlock(&app->lock);
+
+ mrp_queue_xmit(app);
+- mrp_join_timer_arm(app);
++ spin_lock(&app->lock);
++ if (likely(app->active))
++ mrp_join_timer_arm(app);
++ spin_unlock(&app->lock);
+ }
+
+ static void mrp_periodic_timer_arm(struct mrp_applicant *app)
+@@ -620,11 +623,12 @@ static void mrp_periodic_timer(struct timer_list *t)
+ struct mrp_applicant *app = from_timer(app, t, periodic_timer);
+
+ spin_lock(&app->lock);
+- mrp_mad_event(app, MRP_EVENT_PERIODIC);
+- mrp_pdu_queue(app);
++ if (likely(app->active)) {
++ mrp_mad_event(app, MRP_EVENT_PERIODIC);
++ mrp_pdu_queue(app);
++ mrp_periodic_timer_arm(app);
++ }
+ spin_unlock(&app->lock);
+-
+- mrp_periodic_timer_arm(app);
+ }
+
+ static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
+@@ -872,6 +876,7 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
+ app->dev = dev;
+ app->app = appl;
+ app->mad = RB_ROOT;
++ app->active = true;
+ spin_lock_init(&app->lock);
+ skb_queue_head_init(&app->queue);
+ rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
+@@ -900,6 +905,9 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
+
+ RCU_INIT_POINTER(port->applicants[appl->type], NULL);
+
++ spin_lock_bh(&app->lock);
++ app->active = false;
++ spin_unlock_bh(&app->lock);
+ /* Delete timer and generate a final TX event to flush out
+ * all pending messages before the applicant is gone.
+ */
+diff --git a/net/9p/client.c b/net/9p/client.c
+index aaa37b07e30a5..b554f8357f967 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -297,6 +297,11 @@ p9_tag_alloc(struct p9_client *c, int8_t type, uint t_size, uint r_size,
+ p9pdu_reset(&req->rc);
+ req->t_err = 0;
+ req->status = REQ_STATUS_ALLOC;
++ /* refcount needs to be set to 0 before inserting into the idr
++ * so p9_tag_lookup does not accept a request that is not fully
++ * initialized. refcount_set to 2 below will mark request ready.
++ */
++ refcount_set(&req->refcount, 0);
+ init_waitqueue_head(&req->wq);
+ INIT_LIST_HEAD(&req->req_list);
+
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index a6c12863a2532..8aab2e882958c 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -1881,7 +1881,7 @@ static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
+ continue;
+
+ /* Check if all CIS(s) belonging to a CIG are ready */
+- if (conn->link->state != BT_CONNECTED ||
++ if (!conn->link || conn->link->state != BT_CONNECTED ||
+ conn->state != BT_CONNECT) {
+ cmd.cp.num_cis = 0;
+ break;
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index d97fac4f71303..b65c3aabcd536 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2660,7 +2660,7 @@ int hci_register_dev(struct hci_dev *hdev)
+
+ error = hci_register_suspend_notifier(hdev);
+ if (error)
+- goto err_wqueue;
++ BT_WARN("register suspend notifier failed error:%d\n", error);
+
+ queue_work(hdev->req_workqueue, &hdev->power_on);
+
+@@ -3985,7 +3985,7 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+ *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
+ else
+ *req_complete = bt_cb(skb)->hci.req_complete;
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ }
+ spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
+ }
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 1fc693122a47a..3a68d9bc43b8f 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -4261,7 +4261,7 @@ static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
+ /* Get MWS transport configuration if the HCI command is supported */
+ static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
+ {
+- if (!(hdev->commands[30] & 0x08))
++ if (!mws_transport_config_capable(hdev))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
+diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
+index 469a0c95b6e8a..53a796ac078c3 100644
+--- a/net/bluetooth/lib.c
++++ b/net/bluetooth/lib.c
+@@ -170,7 +170,7 @@ __u8 bt_status(int err)
+ case -EMLINK:
+ return 0x09;
+
+- case EALREADY:
++ case -EALREADY:
+ return 0x0b;
+
+ case -EBUSY:
+@@ -191,7 +191,7 @@ __u8 bt_status(int err)
+ case -ECONNABORTED:
+ return 0x16;
+
+- case ELOOP:
++ case -ELOOP:
+ return 0x17;
+
+ case -EPROTONOSUPPORT:
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index a92e7e485feba..0dd30a3beb776 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -8859,7 +8859,7 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
+ * extra parameters we don't know about will be ignored in this request.
+ */
+ if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
+- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
++ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ flags = __le32_to_cpu(cp->flags);
+diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
+index 7324764384b67..8d6fce9005bdd 100644
+--- a/net/bluetooth/rfcomm/core.c
++++ b/net/bluetooth/rfcomm/core.c
+@@ -590,7 +590,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
+
+ ret = rfcomm_dlc_send_frag(d, frag);
+ if (ret < 0) {
+- kfree_skb(frag);
++ dev_kfree_skb_irq(frag);
+ goto unlock;
+ }
+
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
+index fcb3e6c5e03c0..6094ef7cffcd2 100644
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -980,9 +980,6 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
+ {
+ struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
+
+- if (!skb->len)
+- return -EINVAL;
+-
+ if (!__skb)
+ return 0;
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 3be256051e99b..70e06853ba255 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10379,24 +10379,16 @@ void netdev_run_todo(void)
+ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+ const struct net_device_stats *netdev_stats)
+ {
+-#if BITS_PER_LONG == 64
+- BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
+- memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
+- /* zero out counters that only exist in rtnl_link_stats64 */
+- memset((char *)stats64 + sizeof(*netdev_stats), 0,
+- sizeof(*stats64) - sizeof(*netdev_stats));
+-#else
+- size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
+- const unsigned long *src = (const unsigned long *)netdev_stats;
++ size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
++ const atomic_long_t *src = (atomic_long_t *)netdev_stats;
+ u64 *dst = (u64 *)stats64;
+
+ BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
+ for (i = 0; i < n; i++)
+- dst[i] = src[i];
++ dst[i] = atomic_long_read(&src[i]);
+ /* zero out counters that only exist in rtnl_link_stats64 */
+ memset((char *)stats64 + n * sizeof(u64), 0,
+ sizeof(*stats64) - n * sizeof(u64));
+-#endif
+ }
+ EXPORT_SYMBOL(netdev_stats_to_stats64);
+
+diff --git a/net/core/devlink.c b/net/core/devlink.c
+index 89baa7c0938b9..2aa77d4b80d0a 100644
+--- a/net/core/devlink.c
++++ b/net/core/devlink.c
+@@ -1505,10 +1505,13 @@ static int devlink_nl_cmd_get_dumpit(struct sk_buff *msg,
+ continue;
+ }
+
++ devl_lock(devlink);
+ err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI);
++ devl_unlock(devlink);
+ devlink_put(devlink);
++
+ if (err)
+ goto out;
+ idx++;
+@@ -11435,8 +11438,10 @@ void devl_region_destroy(struct devlink_region *region)
+ devl_assert_locked(devlink);
+
+ /* Free all snapshots of region */
++ mutex_lock(&region->snapshot_lock);
+ list_for_each_entry_safe(snapshot, ts, &region->snapshot_list, list)
+ devlink_region_snapshot_del(region, snapshot);
++ mutex_unlock(&region->snapshot_lock);
+
+ list_del(&region->list);
+ mutex_destroy(&region->snapshot_lock);
+diff --git a/net/core/filter.c b/net/core/filter.c
+index bb0136e7a8e42..a368edd9057c7 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -80,6 +80,7 @@
+ #include <net/tls.h>
+ #include <net/xdp.h>
+ #include <net/mptcp.h>
++#include <net/netfilter/nf_conntrack_bpf.h>
+
+ static const struct bpf_func_proto *
+ bpf_sk_base_func_proto(enum bpf_func_id func_id);
+@@ -2124,8 +2125,17 @@ static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
+ {
+ unsigned int mlen = skb_network_offset(skb);
+
++ if (unlikely(skb->len <= mlen)) {
++ kfree_skb(skb);
++ return -ERANGE;
++ }
++
+ if (mlen) {
+ __skb_pull(skb, mlen);
++ if (unlikely(!skb->len)) {
++ kfree_skb(skb);
++ return -ERANGE;
++ }
+
+ /* At ingress, the mac header has already been pulled once.
+ * At egress, skb_pospull_rcsum has to be done in case that
+@@ -2145,7 +2155,7 @@ static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
+ u32 flags)
+ {
+ /* Verify that a link layer header is carried */
+- if (unlikely(skb->mac_header >= skb->network_header)) {
++ if (unlikely(skb->mac_header >= skb->network_header || skb->len == 0)) {
+ kfree_skb(skb);
+ return -ERANGE;
+ }
+@@ -7983,6 +7993,19 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+ default:
+ return bpf_sk_base_func_proto(func_id);
+ }
++
++#if IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)
++ /* The nf_conn___init type is used in the NF_CONNTRACK kfuncs. The
++ * kfuncs are defined in two different modules, and we want to be able
++ * to use them interchangeably with the same BTF type ID. Because modules
++ * can't de-duplicate BTF IDs between each other, we need the type to be
++ * referenced in the vmlinux BTF or the verifier will get confused about
++ * the different types. So we add this dummy type reference which will
++ * be included in vmlinux BTF, allowing both modules to refer to the
++ * same type ID.
++ */
++ BTF_TYPE_EMIT(struct nf_conn___init);
++#endif
+ }
+
+ const struct bpf_func_proto bpf_sock_map_update_proto __weak;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 88fa40571d0c7..759bede0b3dd6 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2416,6 +2416,9 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
+ insp = list;
+ } else {
+ /* Eaten partially. */
++ if (skb_is_gso(skb) && !list->head_frag &&
++ skb_headlen(list))
++ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+
+ if (skb_shared(list)) {
+ /* Sucks! We need to fork list. :-( */
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index e6b9ced3eda82..53d0251788aa2 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -886,13 +886,16 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
+ ret = sk_psock_map_verd(ret, msg->sk_redir);
+ psock->apply_bytes = msg->apply_bytes;
+ if (ret == __SK_REDIRECT) {
+- if (psock->sk_redir)
++ if (psock->sk_redir) {
+ sock_put(psock->sk_redir);
+- psock->sk_redir = msg->sk_redir;
+- if (!psock->sk_redir) {
++ psock->sk_redir = NULL;
++ }
++ if (!msg->sk_redir) {
+ ret = __SK_DROP;
+ goto out;
+ }
++ psock->redir_ingress = sk_msg_to_ingress(msg);
++ psock->sk_redir = msg->sk_redir;
+ sock_hold(psock->sk_redir);
+ }
+ out:
+diff --git a/net/core/sock.c b/net/core/sock.c
+index a3ba0358c77c0..30407b2dd2ac4 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1436,7 +1436,7 @@ set_sndbuf:
+ break;
+ }
+ case SO_INCOMING_CPU:
+- WRITE_ONCE(sk->sk_incoming_cpu, val);
++ reuseport_update_incoming_cpu(sk, val);
+ break;
+
+ case SO_CNX_ADVICE:
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 81beb16ab1ebf..22fa2c5bc6ec9 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -349,11 +349,13 @@ static void sock_map_free(struct bpf_map *map)
+
+ sk = xchg(psk, NULL);
+ if (sk) {
++ sock_hold(sk);
+ lock_sock(sk);
+ rcu_read_lock();
+ sock_map_unref(sk, psk);
+ rcu_read_unlock();
+ release_sock(sk);
++ sock_put(sk);
+ }
+ }
+
+diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
+index fb90e1e00773b..5a165286e4d8e 100644
+--- a/net/core/sock_reuseport.c
++++ b/net/core/sock_reuseport.c
+@@ -37,6 +37,70 @@ void reuseport_has_conns_set(struct sock *sk)
+ }
+ EXPORT_SYMBOL(reuseport_has_conns_set);
+
++static void __reuseport_get_incoming_cpu(struct sock_reuseport *reuse)
++{
++ /* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */
++ WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu + 1);
++}
++
++static void __reuseport_put_incoming_cpu(struct sock_reuseport *reuse)
++{
++ /* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */
++ WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu - 1);
++}
++
++static void reuseport_get_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)
++{
++ if (sk->sk_incoming_cpu >= 0)
++ __reuseport_get_incoming_cpu(reuse);
++}
++
++static void reuseport_put_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)
++{
++ if (sk->sk_incoming_cpu >= 0)
++ __reuseport_put_incoming_cpu(reuse);
++}
++
++void reuseport_update_incoming_cpu(struct sock *sk, int val)
++{
++ struct sock_reuseport *reuse;
++ int old_sk_incoming_cpu;
++
++ if (unlikely(!rcu_access_pointer(sk->sk_reuseport_cb))) {
++	/* Paired with READ_ONCE() in sk_incoming_cpu_update()
++ * and compute_score().
++ */
++ WRITE_ONCE(sk->sk_incoming_cpu, val);
++ return;
++ }
++
++ spin_lock_bh(&reuseport_lock);
++
++ /* This must be done under reuseport_lock to avoid a race with
++ * reuseport_grow(), which accesses sk->sk_incoming_cpu without
++ * lock_sock() when detaching a shutdown()ed sk.
++ *
++ * Paired with READ_ONCE() in reuseport_select_sock_by_hash().
++ */
++ old_sk_incoming_cpu = sk->sk_incoming_cpu;
++ WRITE_ONCE(sk->sk_incoming_cpu, val);
++
++ reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
++ lockdep_is_held(&reuseport_lock));
++
++ /* reuseport_grow() has detached a closed sk. */
++ if (!reuse)
++ goto out;
++
++ if (old_sk_incoming_cpu < 0 && val >= 0)
++ __reuseport_get_incoming_cpu(reuse);
++ else if (old_sk_incoming_cpu >= 0 && val < 0)
++ __reuseport_put_incoming_cpu(reuse);
++
++out:
++ spin_unlock_bh(&reuseport_lock);
++}
++
+ static int reuseport_sock_index(struct sock *sk,
+ const struct sock_reuseport *reuse,
+ bool closed)
+@@ -64,6 +128,7 @@ static void __reuseport_add_sock(struct sock *sk,
+ /* paired with smp_rmb() in reuseport_(select|migrate)_sock() */
+ smp_wmb();
+ reuse->num_socks++;
++ reuseport_get_incoming_cpu(sk, reuse);
+ }
+
+ static bool __reuseport_detach_sock(struct sock *sk,
+@@ -76,6 +141,7 @@ static bool __reuseport_detach_sock(struct sock *sk,
+
+ reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
+ reuse->num_socks--;
++ reuseport_put_incoming_cpu(sk, reuse);
+
+ return true;
+ }
+@@ -86,6 +152,7 @@ static void __reuseport_add_closed_sock(struct sock *sk,
+ reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk;
+ /* paired with READ_ONCE() in inet_csk_bind_conflict() */
+ WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1);
++ reuseport_get_incoming_cpu(sk, reuse);
+ }
+
+ static bool __reuseport_detach_closed_sock(struct sock *sk,
+@@ -99,6 +166,7 @@ static bool __reuseport_detach_closed_sock(struct sock *sk,
+ reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
+ /* paired with READ_ONCE() in inet_csk_bind_conflict() */
+ WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1);
++ reuseport_put_incoming_cpu(sk, reuse);
+
+ return true;
+ }
+@@ -166,6 +234,7 @@ int reuseport_alloc(struct sock *sk, bool bind_inany)
+ reuse->bind_inany = bind_inany;
+ reuse->socks[0] = sk;
+ reuse->num_socks = 1;
++ reuseport_get_incoming_cpu(sk, reuse);
+ rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
+
+ out:
+@@ -209,6 +278,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
+ more_reuse->reuseport_id = reuse->reuseport_id;
+ more_reuse->bind_inany = reuse->bind_inany;
+ more_reuse->has_conns = reuse->has_conns;
++ more_reuse->incoming_cpu = reuse->incoming_cpu;
+
+ memcpy(more_reuse->socks, reuse->socks,
+ reuse->num_socks * sizeof(struct sock *));
+@@ -458,18 +528,32 @@ static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
+ static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse,
+ u32 hash, u16 num_socks)
+ {
++ struct sock *first_valid_sk = NULL;
+ int i, j;
+
+ i = j = reciprocal_scale(hash, num_socks);
+- while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
++ do {
++ struct sock *sk = reuse->socks[i];
++
++ if (sk->sk_state != TCP_ESTABLISHED) {
++ /* Paired with WRITE_ONCE() in __reuseport_(get|put)_incoming_cpu(). */
++ if (!READ_ONCE(reuse->incoming_cpu))
++ return sk;
++
++ /* Paired with WRITE_ONCE() in reuseport_update_incoming_cpu(). */
++ if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
++ return sk;
++
++ if (!first_valid_sk)
++ first_valid_sk = sk;
++ }
++
+ i++;
+ if (i >= num_socks)
+ i = 0;
+- if (i == j)
+- return NULL;
+- }
++ } while (i != j);
+
+- return reuse->socks[i];
++ return first_valid_sk;
+ }
+
+ /**
+diff --git a/net/core/stream.c b/net/core/stream.c
+index 75fded8495f5b..516895f482356 100644
+--- a/net/core/stream.c
++++ b/net/core/stream.c
+@@ -196,6 +196,12 @@ void sk_stream_kill_queues(struct sock *sk)
+ /* First the read buffer. */
+ __skb_queue_purge(&sk->sk_receive_queue);
+
++ /* Next, the error queue.
++ * We need to use queue lock, because other threads might
++ * add packets to the queue without socket lock being held.
++ */
++ skb_queue_purge(&sk->sk_error_queue);
++
+ /* Next, the write queue. */
+ WARN_ON_ONCE(!skb_queue_empty(&sk->sk_write_queue));
+
+diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
+index 34e5ec5d3e236..89371b16416e2 100644
+--- a/net/dsa/tag_8021q.c
++++ b/net/dsa/tag_8021q.c
+@@ -398,6 +398,7 @@ static void dsa_tag_8021q_teardown(struct dsa_switch *ds)
+ int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto)
+ {
+ struct dsa_8021q_context *ctx;
++ int err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+@@ -410,7 +411,15 @@ int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto)
+
+ ds->tag_8021q_ctx = ctx;
+
+- return dsa_tag_8021q_setup(ds);
++ err = dsa_tag_8021q_setup(ds);
++ if (err)
++ goto err_free;
++
++ return 0;
++
++err_free:
++ kfree(ctx);
++ return err;
+ }
+ EXPORT_SYMBOL_GPL(dsa_tag_8021q_register);
+
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 57e7238a4136b..81fe2422fe58a 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -2008,7 +2008,8 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
+ } else {
+ /* Driver expects to be called at twice the frequency in rc */
+ int n = rc * 2, interval = HZ / n;
+- u64 count = n * id.data, i = 0;
++ u64 count = mul_u32_u32(n, id.data);
++ u64 i = 0;
+
+ do {
+ rtnl_lock();
+diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
+index de476a4176314..1a195efc79cd1 100644
+--- a/net/hsr/hsr_debugfs.c
++++ b/net/hsr/hsr_debugfs.c
+@@ -9,7 +9,6 @@
+ #include <linux/module.h>
+ #include <linux/errno.h>
+ #include <linux/debugfs.h>
+-#include <linux/jhash.h>
+ #include "hsr_main.h"
+ #include "hsr_framereg.h"
+
+@@ -21,7 +20,6 @@ hsr_node_table_show(struct seq_file *sfp, void *data)
+ {
+ struct hsr_priv *priv = (struct hsr_priv *)sfp->private;
+ struct hsr_node *node;
+- int i;
+
+ seq_printf(sfp, "Node Table entries for (%s) device\n",
+ (priv->prot_version == PRP_V1 ? "PRP" : "HSR"));
+@@ -33,28 +31,22 @@ hsr_node_table_show(struct seq_file *sfp, void *data)
+ seq_puts(sfp, "DAN-H\n");
+
+ rcu_read_lock();
+-
+- for (i = 0 ; i < priv->hash_buckets; i++) {
+- hlist_for_each_entry_rcu(node, &priv->node_db[i], mac_list) {
+- /* skip self node */
+- if (hsr_addr_is_self(priv, node->macaddress_A))
+- continue;
+- seq_printf(sfp, "%pM ", &node->macaddress_A[0]);
+- seq_printf(sfp, "%pM ", &node->macaddress_B[0]);
+- seq_printf(sfp, "%10lx, ",
+- node->time_in[HSR_PT_SLAVE_A]);
+- seq_printf(sfp, "%10lx, ",
+- node->time_in[HSR_PT_SLAVE_B]);
+- seq_printf(sfp, "%14x, ", node->addr_B_port);
+-
+- if (priv->prot_version == PRP_V1)
+- seq_printf(sfp, "%5x, %5x, %5x\n",
+- node->san_a, node->san_b,
+- (node->san_a == 0 &&
+- node->san_b == 0));
+- else
+- seq_printf(sfp, "%5x\n", 1);
+- }
++ list_for_each_entry_rcu(node, &priv->node_db, mac_list) {
++ /* skip self node */
++ if (hsr_addr_is_self(priv, node->macaddress_A))
++ continue;
++ seq_printf(sfp, "%pM ", &node->macaddress_A[0]);
++ seq_printf(sfp, "%pM ", &node->macaddress_B[0]);
++ seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_A]);
++ seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_B]);
++ seq_printf(sfp, "%14x, ", node->addr_B_port);
++
++ if (priv->prot_version == PRP_V1)
++ seq_printf(sfp, "%5x, %5x, %5x\n",
++ node->san_a, node->san_b,
++ (node->san_a == 0 && node->san_b == 0));
++ else
++ seq_printf(sfp, "%5x\n", 1);
+ }
+ rcu_read_unlock();
+ return 0;
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 6ffef47e9be55..b1e86a7265b32 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -219,7 +219,9 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ skb->dev = master->dev;
+ skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
++ spin_lock_bh(&hsr->seqnr_lock);
+ hsr_forward_skb(skb, master);
++ spin_unlock_bh(&hsr->seqnr_lock);
+ } else {
+ dev_core_stats_tx_dropped_inc(dev);
+ dev_kfree_skb_any(skb);
+@@ -278,7 +280,6 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
+ __u8 type = HSR_TLV_LIFE_CHECK;
+ struct hsr_sup_payload *hsr_sp;
+ struct hsr_sup_tag *hsr_stag;
+- unsigned long irqflags;
+ struct sk_buff *skb;
+
+ *interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+@@ -299,7 +300,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
+ set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version);
+
+ /* From HSRv1 on we have separate supervision sequence numbers. */
+- spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
++ spin_lock_bh(&hsr->seqnr_lock);
+ if (hsr->prot_version > 0) {
+ hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
+ hsr->sup_sequence_nr++;
+@@ -307,7 +308,6 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
+ hsr_stag->sequence_nr = htons(hsr->sequence_nr);
+ hsr->sequence_nr++;
+ }
+- spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
+
+ hsr_stag->tlv.HSR_TLV_type = type;
+ /* TODO: Why 12 in HSRv0? */
+@@ -318,11 +318,13 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
+ hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
+ ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);
+
+- if (skb_put_padto(skb, ETH_ZLEN))
++ if (skb_put_padto(skb, ETH_ZLEN)) {
++ spin_unlock_bh(&hsr->seqnr_lock);
+ return;
++ }
+
+ hsr_forward_skb(skb, master);
+-
++ spin_unlock_bh(&hsr->seqnr_lock);
+ return;
+ }
+
+@@ -332,7 +334,6 @@ static void send_prp_supervision_frame(struct hsr_port *master,
+ struct hsr_priv *hsr = master->hsr;
+ struct hsr_sup_payload *hsr_sp;
+ struct hsr_sup_tag *hsr_stag;
+- unsigned long irqflags;
+ struct sk_buff *skb;
+
+ skb = hsr_init_skb(master);
+@@ -347,7 +348,7 @@ static void send_prp_supervision_frame(struct hsr_port *master,
+ set_hsr_stag_HSR_ver(hsr_stag, (hsr->prot_version ? 1 : 0));
+
+ /* From HSRv1 on we have separate supervision sequence numbers. */
+- spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
++ spin_lock_bh(&hsr->seqnr_lock);
+ hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
+ hsr->sup_sequence_nr++;
+ hsr_stag->tlv.HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD;
+@@ -358,13 +359,12 @@ static void send_prp_supervision_frame(struct hsr_port *master,
+ ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);
+
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+- spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
++ spin_unlock_bh(&hsr->seqnr_lock);
+ return;
+ }
+
+- spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
+-
+ hsr_forward_skb(skb, master);
++ spin_unlock_bh(&hsr->seqnr_lock);
+ }
+
+ /* Announce (supervision frame) timer function
+@@ -444,7 +444,7 @@ void hsr_dev_setup(struct net_device *dev)
+ dev->header_ops = &hsr_header_ops;
+ dev->netdev_ops = &hsr_device_ops;
+ SET_NETDEV_DEVTYPE(dev, &hsr_type);
+- dev->priv_flags |= IFF_NO_QUEUE;
++ dev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
+
+ dev->needs_free_netdev = true;
+
+@@ -485,16 +485,12 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ {
+ bool unregister = false;
+ struct hsr_priv *hsr;
+- int res, i;
++ int res;
+
+ hsr = netdev_priv(hsr_dev);
+ INIT_LIST_HEAD(&hsr->ports);
+- INIT_HLIST_HEAD(&hsr->self_node_db);
+- hsr->hash_buckets = HSR_HSIZE;
+- get_random_bytes(&hsr->hash_seed, sizeof(hsr->hash_seed));
+- for (i = 0; i < hsr->hash_buckets; i++)
+- INIT_HLIST_HEAD(&hsr->node_db[i]);
+-
++ INIT_LIST_HEAD(&hsr->node_db);
++ INIT_LIST_HEAD(&hsr->self_node_db);
+ spin_lock_init(&hsr->list_lock);
+
+ eth_hw_addr_set(hsr_dev, slave[0]->dev_addr);
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index 56bb27d67a2ee..629daacc96071 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -500,7 +500,6 @@ static void handle_std_frame(struct sk_buff *skb,
+ {
+ struct hsr_port *port = frame->port_rcv;
+ struct hsr_priv *hsr = port->hsr;
+- unsigned long irqflags;
+
+ frame->skb_hsr = NULL;
+ frame->skb_prp = NULL;
+@@ -510,10 +509,9 @@ static void handle_std_frame(struct sk_buff *skb,
+ frame->is_from_san = true;
+ } else {
+ /* Sequence nr for the master node */
+- spin_lock_irqsave(&hsr->seqnr_lock, irqflags);
++ lockdep_assert_held(&hsr->seqnr_lock);
+ frame->sequence_nr = hsr->sequence_nr;
+ hsr->sequence_nr++;
+- spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags);
+ }
+ }
+
+@@ -571,23 +569,20 @@ static int fill_frame_info(struct hsr_frame_info *frame,
+ struct ethhdr *ethhdr;
+ __be16 proto;
+ int ret;
+- u32 hash;
+
+ /* Check if skb contains ethhdr */
+ if (skb->mac_len < sizeof(struct ethhdr))
+ return -EINVAL;
+
+ memset(frame, 0, sizeof(*frame));
+-
+- ethhdr = (struct ethhdr *)skb_mac_header(skb);
+- hash = hsr_mac_hash(port->hsr, ethhdr->h_source);
+ frame->is_supervision = is_supervision_frame(port->hsr, skb);
+- frame->node_src = hsr_get_node(port, &hsr->node_db[hash], skb,
++ frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
+ frame->is_supervision,
+ port->type);
+ if (!frame->node_src)
+ return -1; /* Unknown node and !is_supervision, or no mem */
+
++ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ frame->is_vlan = false;
+ proto = ethhdr->h_proto;
+
+@@ -617,11 +612,13 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
+ {
+ struct hsr_frame_info frame;
+
++ rcu_read_lock();
+ if (fill_frame_info(&frame, skb, port) < 0)
+ goto out_drop;
+
+ hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
+ hsr_forward_do(&frame);
++ rcu_read_unlock();
+ /* Gets called for ingress frames as well as egress from master port.
+ * So check and increment stats for master port only here.
+ */
+@@ -636,6 +633,7 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
+ return;
+
+ out_drop:
++ rcu_read_unlock();
+ port->dev->stats.tx_dropped++;
+ kfree_skb(skb);
+ }
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index 584e217887997..39a6088080e93 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -15,37 +15,10 @@
+ #include <linux/etherdevice.h>
+ #include <linux/slab.h>
+ #include <linux/rculist.h>
+-#include <linux/jhash.h>
+ #include "hsr_main.h"
+ #include "hsr_framereg.h"
+ #include "hsr_netlink.h"
+
+-#ifdef CONFIG_LOCKDEP
+-int lockdep_hsr_is_held(spinlock_t *lock)
+-{
+- return lockdep_is_held(lock);
+-}
+-#endif
+-
+-u32 hsr_mac_hash(struct hsr_priv *hsr, const unsigned char *addr)
+-{
+- u32 hash = jhash(addr, ETH_ALEN, hsr->hash_seed);
+-
+- return reciprocal_scale(hash, hsr->hash_buckets);
+-}
+-
+-struct hsr_node *hsr_node_get_first(struct hlist_head *head, spinlock_t *lock)
+-{
+- struct hlist_node *first;
+-
+- first = rcu_dereference_bh_check(hlist_first_rcu(head),
+- lockdep_hsr_is_held(lock));
+- if (first)
+- return hlist_entry(first, struct hsr_node, mac_list);
+-
+- return NULL;
+-}
+-
+ /* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
+ * false otherwise.
+ */
+@@ -67,7 +40,8 @@ bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
+ {
+ struct hsr_node *node;
+
+- node = hsr_node_get_first(&hsr->self_node_db, &hsr->list_lock);
++ node = list_first_or_null_rcu(&hsr->self_node_db, struct hsr_node,
++ mac_list);
+ if (!node) {
+ WARN_ONCE(1, "HSR: No self node\n");
+ return false;
+@@ -83,12 +57,12 @@ bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
+
+ /* Search for mac entry. Caller must hold rcu read lock.
+ */
+-static struct hsr_node *find_node_by_addr_A(struct hlist_head *node_db,
++static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
+ const unsigned char addr[ETH_ALEN])
+ {
+ struct hsr_node *node;
+
+- hlist_for_each_entry_rcu(node, node_db, mac_list) {
++ list_for_each_entry_rcu(node, node_db, mac_list) {
+ if (ether_addr_equal(node->macaddress_A, addr))
+ return node;
+ }
+@@ -103,7 +77,7 @@ int hsr_create_self_node(struct hsr_priv *hsr,
+ const unsigned char addr_a[ETH_ALEN],
+ const unsigned char addr_b[ETH_ALEN])
+ {
+- struct hlist_head *self_node_db = &hsr->self_node_db;
++ struct list_head *self_node_db = &hsr->self_node_db;
+ struct hsr_node *node, *oldnode;
+
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+@@ -114,13 +88,14 @@ int hsr_create_self_node(struct hsr_priv *hsr,
+ ether_addr_copy(node->macaddress_B, addr_b);
+
+ spin_lock_bh(&hsr->list_lock);
+- oldnode = hsr_node_get_first(self_node_db, &hsr->list_lock);
++ oldnode = list_first_or_null_rcu(self_node_db,
++ struct hsr_node, mac_list);
+ if (oldnode) {
+- hlist_replace_rcu(&oldnode->mac_list, &node->mac_list);
++ list_replace_rcu(&oldnode->mac_list, &node->mac_list);
+ spin_unlock_bh(&hsr->list_lock);
+ kfree_rcu(oldnode, rcu_head);
+ } else {
+- hlist_add_tail_rcu(&node->mac_list, self_node_db);
++ list_add_tail_rcu(&node->mac_list, self_node_db);
+ spin_unlock_bh(&hsr->list_lock);
+ }
+
+@@ -129,25 +104,25 @@ int hsr_create_self_node(struct hsr_priv *hsr,
+
+ void hsr_del_self_node(struct hsr_priv *hsr)
+ {
+- struct hlist_head *self_node_db = &hsr->self_node_db;
++ struct list_head *self_node_db = &hsr->self_node_db;
+ struct hsr_node *node;
+
+ spin_lock_bh(&hsr->list_lock);
+- node = hsr_node_get_first(self_node_db, &hsr->list_lock);
++ node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
+ if (node) {
+- hlist_del_rcu(&node->mac_list);
++ list_del_rcu(&node->mac_list);
+ kfree_rcu(node, rcu_head);
+ }
+ spin_unlock_bh(&hsr->list_lock);
+ }
+
+-void hsr_del_nodes(struct hlist_head *node_db)
++void hsr_del_nodes(struct list_head *node_db)
+ {
+ struct hsr_node *node;
+- struct hlist_node *tmp;
++ struct hsr_node *tmp;
+
+- hlist_for_each_entry_safe(node, tmp, node_db, mac_list)
+- kfree_rcu(node, rcu_head);
++ list_for_each_entry_safe(node, tmp, node_db, mac_list)
++ kfree(node);
+ }
+
+ void prp_handle_san_frame(bool san, enum hsr_port_type port,
+@@ -168,7 +143,7 @@ void prp_handle_san_frame(bool san, enum hsr_port_type port,
+ * originating from the newly added node.
+ */
+ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
+- struct hlist_head *node_db,
++ struct list_head *node_db,
+ unsigned char addr[],
+ u16 seq_out, bool san,
+ enum hsr_port_type rx_port)
+@@ -182,6 +157,7 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
+ return NULL;
+
+ ether_addr_copy(new_node->macaddress_A, addr);
++ spin_lock_init(&new_node->seq_out_lock);
+
+ /* We are only interested in time diffs here, so use current jiffies
+ * as initialization. (0 could trigger an spurious ring error warning).
+@@ -198,14 +174,14 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
+ hsr->proto_ops->handle_san_frame(san, rx_port, new_node);
+
+ spin_lock_bh(&hsr->list_lock);
+- hlist_for_each_entry_rcu(node, node_db, mac_list,
+- lockdep_hsr_is_held(&hsr->list_lock)) {
++ list_for_each_entry_rcu(node, node_db, mac_list,
++ lockdep_is_held(&hsr->list_lock)) {
+ if (ether_addr_equal(node->macaddress_A, addr))
+ goto out;
+ if (ether_addr_equal(node->macaddress_B, addr))
+ goto out;
+ }
+- hlist_add_tail_rcu(&new_node->mac_list, node_db);
++ list_add_tail_rcu(&new_node->mac_list, node_db);
+ spin_unlock_bh(&hsr->list_lock);
+ return new_node;
+ out:
+@@ -225,7 +201,7 @@ void prp_update_san_info(struct hsr_node *node, bool is_sup)
+
+ /* Get the hsr_node from which 'skb' was sent.
+ */
+-struct hsr_node *hsr_get_node(struct hsr_port *port, struct hlist_head *node_db,
++struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
+ struct sk_buff *skb, bool is_sup,
+ enum hsr_port_type rx_port)
+ {
+@@ -241,7 +217,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct hlist_head *node_db,
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+- hlist_for_each_entry_rcu(node, node_db, mac_list) {
++ list_for_each_entry_rcu(node, node_db, mac_list) {
+ if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
+ if (hsr->proto_ops->update_san_info)
+ hsr->proto_ops->update_san_info(node, is_sup);
+@@ -291,12 +267,11 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame)
+ struct hsr_sup_tlv *hsr_sup_tlv;
+ struct hsr_node *node_real;
+ struct sk_buff *skb = NULL;
+- struct hlist_head *node_db;
++ struct list_head *node_db;
+ struct ethhdr *ethhdr;
+ int i;
+ unsigned int pull_size = 0;
+ unsigned int total_pull_size = 0;
+- u32 hash;
+
+ /* Here either frame->skb_hsr or frame->skb_prp should be
+ * valid as supervision frame always will have protocol
+@@ -334,13 +309,11 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame)
+ hsr_sp = (struct hsr_sup_payload *)skb->data;
+
+ /* Merge node_curr (registered on macaddress_B) into node_real */
+- node_db = port_rcv->hsr->node_db;
+- hash = hsr_mac_hash(hsr, hsr_sp->macaddress_A);
+- node_real = find_node_by_addr_A(&node_db[hash], hsr_sp->macaddress_A);
++ node_db = &port_rcv->hsr->node_db;
++ node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
+ if (!node_real)
+ /* No frame received from AddrA of this node yet */
+- node_real = hsr_add_node(hsr, &node_db[hash],
+- hsr_sp->macaddress_A,
++ node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
+ HSR_SEQNR_START - 1, true,
+ port_rcv->type);
+ if (!node_real)
+@@ -374,14 +347,14 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame)
+ hsr_sp = (struct hsr_sup_payload *)skb->data;
+
+ /* Check if redbox mac and node mac are equal. */
+- if (!ether_addr_equal(node_real->macaddress_A,
+- hsr_sp->macaddress_A)) {
++ if (!ether_addr_equal(node_real->macaddress_A, hsr_sp->macaddress_A)) {
+ /* This is a redbox supervision frame for a VDAN! */
+ goto done;
+ }
+ }
+
+ ether_addr_copy(node_real->macaddress_B, ethhdr->h_source);
++ spin_lock_bh(&node_real->seq_out_lock);
+ for (i = 0; i < HSR_PT_PORTS; i++) {
+ if (!node_curr->time_in_stale[i] &&
+ time_after(node_curr->time_in[i], node_real->time_in[i])) {
+@@ -392,12 +365,16 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame)
+ if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
+ node_real->seq_out[i] = node_curr->seq_out[i];
+ }
++ spin_unlock_bh(&node_real->seq_out_lock);
+ node_real->addr_B_port = port_rcv->type;
+
+ spin_lock_bh(&hsr->list_lock);
+- hlist_del_rcu(&node_curr->mac_list);
++ if (!node_curr->removed) {
++ list_del_rcu(&node_curr->mac_list);
++ node_curr->removed = true;
++ kfree_rcu(node_curr, rcu_head);
++ }
+ spin_unlock_bh(&hsr->list_lock);
+- kfree_rcu(node_curr, rcu_head);
+
+ done:
+ /* Push back here */
+@@ -433,7 +410,6 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
+ struct hsr_port *port)
+ {
+ struct hsr_node *node_dst;
+- u32 hash;
+
+ if (!skb_mac_header_was_set(skb)) {
+ WARN_ONCE(1, "%s: Mac header not set\n", __func__);
+@@ -443,8 +419,7 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
+ if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
+ return;
+
+- hash = hsr_mac_hash(port->hsr, eth_hdr(skb)->h_dest);
+- node_dst = find_node_by_addr_A(&port->hsr->node_db[hash],
++ node_dst = find_node_by_addr_A(&port->hsr->node_db,
+ eth_hdr(skb)->h_dest);
+ if (!node_dst) {
+ if (net_ratelimit())
+@@ -484,13 +459,17 @@ void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
+ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
+ u16 sequence_nr)
+ {
++ spin_lock_bh(&node->seq_out_lock);
+ if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) &&
+ time_is_after_jiffies(node->time_out[port->type] +
+- msecs_to_jiffies(HSR_ENTRY_FORGET_TIME)))
++ msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) {
++ spin_unlock_bh(&node->seq_out_lock);
+ return 1;
++ }
+
+ node->time_out[port->type] = jiffies;
+ node->seq_out[port->type] = sequence_nr;
++ spin_unlock_bh(&node->seq_out_lock);
+ return 0;
+ }
+
+@@ -520,71 +499,60 @@ static struct hsr_port *get_late_port(struct hsr_priv *hsr,
+ void hsr_prune_nodes(struct timer_list *t)
+ {
+ struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
+- struct hlist_node *tmp;
+ struct hsr_node *node;
++ struct hsr_node *tmp;
+ struct hsr_port *port;
+ unsigned long timestamp;
+ unsigned long time_a, time_b;
+- int i;
+
+ spin_lock_bh(&hsr->list_lock);
++ list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
++ /* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
++ * nor time_in[HSR_PT_SLAVE_B], will ever be updated for
++ * the master port. Thus the master node will be repeatedly
++ * pruned leading to packet loss.
++ */
++ if (hsr_addr_is_self(hsr, node->macaddress_A))
++ continue;
++
++ /* Shorthand */
++ time_a = node->time_in[HSR_PT_SLAVE_A];
++ time_b = node->time_in[HSR_PT_SLAVE_B];
++
++ /* Check for timestamps old enough to risk wrap-around */
++ if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
++ node->time_in_stale[HSR_PT_SLAVE_A] = true;
++ if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
++ node->time_in_stale[HSR_PT_SLAVE_B] = true;
++
++ /* Get age of newest frame from node.
++ * At least one time_in is OK here; nodes get pruned long
++ * before both time_ins can get stale
++ */
++ timestamp = time_a;
++ if (node->time_in_stale[HSR_PT_SLAVE_A] ||
++ (!node->time_in_stale[HSR_PT_SLAVE_B] &&
++ time_after(time_b, time_a)))
++ timestamp = time_b;
++
++ /* Warn of ring error only as long as we get frames at all */
++ if (time_is_after_jiffies(timestamp +
++ msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
++ rcu_read_lock();
++ port = get_late_port(hsr, node);
++ if (port)
++ hsr_nl_ringerror(hsr, node->macaddress_A, port);
++ rcu_read_unlock();
++ }
+
+- for (i = 0; i < hsr->hash_buckets; i++) {
+- hlist_for_each_entry_safe(node, tmp, &hsr->node_db[i],
+- mac_list) {
+- /* Don't prune own node.
+- * Neither time_in[HSR_PT_SLAVE_A]
+- * nor time_in[HSR_PT_SLAVE_B], will ever be updated
+- * for the master port. Thus the master node will be
+- * repeatedly pruned leading to packet loss.
+- */
+- if (hsr_addr_is_self(hsr, node->macaddress_A))
+- continue;
+-
+- /* Shorthand */
+- time_a = node->time_in[HSR_PT_SLAVE_A];
+- time_b = node->time_in[HSR_PT_SLAVE_B];
+-
+- /* Check for timestamps old enough to
+- * risk wrap-around
+- */
+- if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
+- node->time_in_stale[HSR_PT_SLAVE_A] = true;
+- if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
+- node->time_in_stale[HSR_PT_SLAVE_B] = true;
+-
+- /* Get age of newest frame from node.
+- * At least one time_in is OK here; nodes get pruned
+- * long before both time_ins can get stale
+- */
+- timestamp = time_a;
+- if (node->time_in_stale[HSR_PT_SLAVE_A] ||
+- (!node->time_in_stale[HSR_PT_SLAVE_B] &&
+- time_after(time_b, time_a)))
+- timestamp = time_b;
+-
+- /* Warn of ring error only as long as we get
+- * frames at all
+- */
+- if (time_is_after_jiffies(timestamp +
+- msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
+- rcu_read_lock();
+- port = get_late_port(hsr, node);
+- if (port)
+- hsr_nl_ringerror(hsr,
+- node->macaddress_A,
+- port);
+- rcu_read_unlock();
+- }
+-
+- /* Prune old entries */
+- if (time_is_before_jiffies(timestamp +
+- msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
+- hsr_nl_nodedown(hsr, node->macaddress_A);
+- hlist_del_rcu(&node->mac_list);
+- /* Note that we need to free this
+- * entry later:
+- */
++ /* Prune old entries */
++ if (time_is_before_jiffies(timestamp +
++ msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
++ hsr_nl_nodedown(hsr, node->macaddress_A);
++ if (!node->removed) {
++ list_del_rcu(&node->mac_list);
++ node->removed = true;
++ /* Note that we need to free this entry later: */
+ kfree_rcu(node, rcu_head);
+ }
+ }
+@@ -600,20 +568,17 @@ void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
+ unsigned char addr[ETH_ALEN])
+ {
+ struct hsr_node *node;
+- u32 hash;
+-
+- hash = hsr_mac_hash(hsr, addr);
+
+ if (!_pos) {
+- node = hsr_node_get_first(&hsr->node_db[hash],
+- &hsr->list_lock);
++ node = list_first_or_null_rcu(&hsr->node_db,
++ struct hsr_node, mac_list);
+ if (node)
+ ether_addr_copy(addr, node->macaddress_A);
+ return node;
+ }
+
+ node = _pos;
+- hlist_for_each_entry_continue_rcu(node, mac_list) {
++ list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
+ ether_addr_copy(addr, node->macaddress_A);
+ return node;
+ }
+@@ -633,11 +598,8 @@ int hsr_get_node_data(struct hsr_priv *hsr,
+ struct hsr_node *node;
+ struct hsr_port *port;
+ unsigned long tdiff;
+- u32 hash;
+-
+- hash = hsr_mac_hash(hsr, addr);
+
+- node = find_node_by_addr_A(&hsr->node_db[hash], addr);
++ node = find_node_by_addr_A(&hsr->node_db, addr);
+ if (!node)
+ return -ENOENT;
+
+diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
+index f3762e9e42b54..b23556251d621 100644
+--- a/net/hsr/hsr_framereg.h
++++ b/net/hsr/hsr_framereg.h
+@@ -28,17 +28,9 @@ struct hsr_frame_info {
+ bool is_from_san;
+ };
+
+-#ifdef CONFIG_LOCKDEP
+-int lockdep_hsr_is_held(spinlock_t *lock);
+-#else
+-#define lockdep_hsr_is_held(lock) 1
+-#endif
+-
+-u32 hsr_mac_hash(struct hsr_priv *hsr, const unsigned char *addr);
+-struct hsr_node *hsr_node_get_first(struct hlist_head *head, spinlock_t *lock);
+ void hsr_del_self_node(struct hsr_priv *hsr);
+-void hsr_del_nodes(struct hlist_head *node_db);
+-struct hsr_node *hsr_get_node(struct hsr_port *port, struct hlist_head *node_db,
++void hsr_del_nodes(struct list_head *node_db);
++struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
+ struct sk_buff *skb, bool is_sup,
+ enum hsr_port_type rx_port);
+ void hsr_handle_sup_frame(struct hsr_frame_info *frame);
+@@ -76,7 +68,9 @@ void prp_handle_san_frame(bool san, enum hsr_port_type port,
+ void prp_update_san_info(struct hsr_node *node, bool is_sup);
+
+ struct hsr_node {
+- struct hlist_node mac_list;
++ struct list_head mac_list;
++ /* Protect R/W access to seq_out */
++ spinlock_t seq_out_lock;
+ unsigned char macaddress_A[ETH_ALEN];
+ unsigned char macaddress_B[ETH_ALEN];
+ /* Local slave through which AddrB frames are received from this node */
+@@ -88,6 +82,7 @@ struct hsr_node {
+ bool san_a;
+ bool san_b;
+ u16 seq_out[HSR_PT_PORTS];
++ bool removed;
+ struct rcu_head rcu_head;
+ };
+
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index b158ba409f9a4..16ae9fb09ccd2 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -47,9 +47,6 @@
+
+ #define HSR_V1_SUP_LSDUSIZE 52
+
+-#define HSR_HSIZE_SHIFT 8
+-#define HSR_HSIZE BIT(HSR_HSIZE_SHIFT)
+-
+ /* The helper functions below assumes that 'path' occupies the 4 most
+ * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or
+ * equivalently, the 4 most significant bits of HSR tag byte 14).
+@@ -188,8 +185,8 @@ struct hsr_proto_ops {
+ struct hsr_priv {
+ struct rcu_head rcu_head;
+ struct list_head ports;
+- struct hlist_head node_db[HSR_HSIZE]; /* Known HSR nodes */
+- struct hlist_head self_node_db; /* MACs of slaves */
++ struct list_head node_db; /* Known HSR nodes */
++ struct list_head self_node_db; /* MACs of slaves */
+ struct timer_list announce_timer; /* Supervision frame dispatch */
+ struct timer_list prune_timer;
+ int announce_count;
+@@ -199,8 +196,6 @@ struct hsr_priv {
+ spinlock_t seqnr_lock; /* locking for sequence_nr */
+ spinlock_t list_lock; /* locking for node list */
+ struct hsr_proto_ops *proto_ops;
+- u32 hash_buckets;
+- u32 hash_seed;
+ #define PRP_LAN_ID 0x5 /* 0x1010 for A and 0x1011 for B. Bit 0 is set
+ * based on SLAVE_A or SLAVE_B
+ */
+diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
+index 7174a90929002..78fe40eb9f012 100644
+--- a/net/hsr/hsr_netlink.c
++++ b/net/hsr/hsr_netlink.c
+@@ -105,7 +105,6 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
+ static void hsr_dellink(struct net_device *dev, struct list_head *head)
+ {
+ struct hsr_priv *hsr = netdev_priv(dev);
+- int i;
+
+ del_timer_sync(&hsr->prune_timer);
+ del_timer_sync(&hsr->announce_timer);
+@@ -114,8 +113,7 @@ static void hsr_dellink(struct net_device *dev, struct list_head *head)
+ hsr_del_ports(hsr);
+
+ hsr_del_self_node(hsr);
+- for (i = 0; i < hsr->hash_buckets; i++)
+- hsr_del_nodes(&hsr->node_db[i]);
++ hsr_del_nodes(&hsr->node_db);
+
+ unregister_netdevice_queue(dev, head);
+ }
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 0da6794113308..92d4237862518 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -522,9 +522,9 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
+ /* Make sure we are allowed to bind here. */
+ if (snum || !(inet->bind_address_no_port ||
+ (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
+- if (sk->sk_prot->get_port(sk, snum)) {
++ err = sk->sk_prot->get_port(sk, snum);
++ if (err) {
+ inet->inet_saddr = inet->inet_rcv_saddr = 0;
+- err = -EADDRINUSE;
+ goto out_release_sock;
+ }
+ if (!(flags & BIND_FROM_BPF)) {
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 4e84ed21d16fe..4a34bc7cb15ed 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -471,11 +471,11 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
+ bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
+ bool found_port = false, check_bind_conflict = true;
+ bool bhash_created = false, bhash2_created = false;
++ int ret = -EADDRINUSE, port = snum, l3mdev;
+ struct inet_bind_hashbucket *head, *head2;
+ struct inet_bind2_bucket *tb2 = NULL;
+ struct inet_bind_bucket *tb = NULL;
+ bool head2_lock_acquired = false;
+- int ret = 1, port = snum, l3mdev;
+ struct net *net = sock_net(sk);
+
+ l3mdev = inet_sk_bound_l3mdev(sk);
+@@ -1186,7 +1186,7 @@ int inet_csk_listen_start(struct sock *sk)
+ {
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+- int err = -EADDRINUSE;
++ int err;
+
+ reqsk_queue_alloc(&icsk->icsk_accept_queue);
+
+@@ -1202,7 +1202,8 @@ int inet_csk_listen_start(struct sock *sk)
+ * after validation is complete.
+ */
+ inet_sk_state_store(sk, TCP_LISTEN);
+- if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
++ err = sk->sk_prot->get_port(sk, inet->inet_num);
++ if (!err) {
+ inet->inet_sport = htons(inet->inet_num);
+
+ sk_dst_reset(sk);
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 04b4ec07bb06c..409ec2a1f95b0 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -143,7 +143,7 @@ next_port:
+
+ fail:
+ spin_unlock(&ping_table.lock);
+- return 1;
++ return -EADDRINUSE;
+ }
+ EXPORT_SYMBOL_GPL(ping_get_port);
+
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index cf9c3e8f7ccbf..94aad3870c5fc 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -45,8 +45,11 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
+ tmp->sg.end = i;
+ if (apply) {
+ apply_bytes -= size;
+- if (!apply_bytes)
++ if (!apply_bytes) {
++ if (sge->length)
++ sk_msg_iter_var_prev(i);
+ break;
++ }
+ }
+ } while (i != msg->sg.end);
+
+@@ -131,10 +134,9 @@ static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
+ return ret;
+ }
+
+-int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
+- u32 bytes, int flags)
++int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
++ struct sk_msg *msg, u32 bytes, int flags)
+ {
+- bool ingress = sk_msg_to_ingress(msg);
+ struct sk_psock *psock = sk_psock_get(sk);
+ int ret;
+
+@@ -276,10 +278,10 @@ msg_bytes_ready:
+ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
+ struct sk_msg *msg, int *copied, int flags)
+ {
+- bool cork = false, enospc = sk_msg_full(msg);
++ bool cork = false, enospc = sk_msg_full(msg), redir_ingress;
+ struct sock *sk_redir;
+ u32 tosend, origsize, sent, delta = 0;
+- u32 eval = __SK_NONE;
++ u32 eval;
+ int ret;
+
+ more_data:
+@@ -310,6 +312,7 @@ more_data:
+ tosend = msg->sg.size;
+ if (psock->apply_bytes && psock->apply_bytes < tosend)
+ tosend = psock->apply_bytes;
++ eval = __SK_NONE;
+
+ switch (psock->eval) {
+ case __SK_PASS:
+@@ -321,6 +324,7 @@ more_data:
+ sk_msg_apply_bytes(psock, tosend);
+ break;
+ case __SK_REDIRECT:
++ redir_ingress = psock->redir_ingress;
+ sk_redir = psock->sk_redir;
+ sk_msg_apply_bytes(psock, tosend);
+ if (!psock->apply_bytes) {
+@@ -337,7 +341,8 @@ more_data:
+ release_sock(sk);
+
+ origsize = msg->sg.size;
+- ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
++ ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
++ msg, tosend, flags);
+ sent = origsize - msg->sg.size;
+
+ if (eval == __SK_REDIRECT)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 6a320a614e547..2eaf47e23b221 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -232,16 +232,16 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
+ int udp_lib_get_port(struct sock *sk, unsigned short snum,
+ unsigned int hash2_nulladdr)
+ {
+- struct udp_hslot *hslot, *hslot2;
+ struct udp_table *udptable = sk->sk_prot->h.udp_table;
+- int error = 1;
++ struct udp_hslot *hslot, *hslot2;
+ struct net *net = sock_net(sk);
++ int error = -EADDRINUSE;
+
+ if (!snum) {
++ DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
++ unsigned short first, last;
+ int low, high, remaining;
+ unsigned int rand;
+- unsigned short first, last;
+- DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
+
+ inet_get_local_port_range(net, &low, &high);
+ remaining = (high - low) + 1;
+@@ -2518,10 +2518,13 @@ static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
+ __be16 rmt_port, __be32 rmt_addr,
+ int dif, int sdif)
+ {
+- struct sock *sk, *result;
+ unsigned short hnum = ntohs(loc_port);
+- unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
+- struct udp_hslot *hslot = &udp_table.hash[slot];
++ struct sock *sk, *result;
++ struct udp_hslot *hslot;
++ unsigned int slot;
++
++ slot = udp_hashfn(net, hnum, udp_table.mask);
++ hslot = &udp_table.hash[slot];
+
+ /* Do not bother scanning a too big list */
+ if (hslot->count > 10)
+@@ -2549,14 +2552,18 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
+ __be16 rmt_port, __be32 rmt_addr,
+ int dif, int sdif)
+ {
+- unsigned short hnum = ntohs(loc_port);
+- unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
+- unsigned int slot2 = hash2 & udp_table.mask;
+- struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
+ INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
+- const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
++ unsigned short hnum = ntohs(loc_port);
++ unsigned int hash2, slot2;
++ struct udp_hslot *hslot2;
++ __portpair ports;
+ struct sock *sk;
+
++ hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
++ slot2 = hash2 & udp_table.mask;
++ hslot2 = &udp_table.hash2[slot2];
++ ports = INET_COMBINED_PORTS(rmt_port, hnum);
++
+ udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
+ if (inet_match(net, sk, acookie, ports, dif, sdif))
+ return sk;
+@@ -2957,10 +2964,10 @@ EXPORT_SYMBOL(udp_prot);
+
+ static struct sock *udp_get_first(struct seq_file *seq, int start)
+ {
+- struct sock *sk;
+- struct udp_seq_afinfo *afinfo;
+ struct udp_iter_state *state = seq->private;
+ struct net *net = seq_file_net(seq);
++ struct udp_seq_afinfo *afinfo;
++ struct sock *sk;
+
+ if (state->bpf_seq_afinfo)
+ afinfo = state->bpf_seq_afinfo;
+@@ -2991,9 +2998,9 @@ found:
+
+ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
+ {
+- struct udp_seq_afinfo *afinfo;
+ struct udp_iter_state *state = seq->private;
+ struct net *net = seq_file_net(seq);
++ struct udp_seq_afinfo *afinfo;
+
+ if (state->bpf_seq_afinfo)
+ afinfo = state->bpf_seq_afinfo;
+@@ -3049,8 +3056,8 @@ EXPORT_SYMBOL(udp_seq_next);
+
+ void udp_seq_stop(struct seq_file *seq, void *v)
+ {
+- struct udp_seq_afinfo *afinfo;
+ struct udp_iter_state *state = seq->private;
++ struct udp_seq_afinfo *afinfo;
+
+ if (state->bpf_seq_afinfo)
+ afinfo = state->bpf_seq_afinfo;
+diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
+index 8242c8947340e..5f8104cf082d0 100644
+--- a/net/ipv4/udp_tunnel_core.c
++++ b/net/ipv4/udp_tunnel_core.c
+@@ -176,6 +176,7 @@ EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
+ void udp_tunnel_sock_release(struct socket *sock)
+ {
+ rcu_assign_sk_user_data(sock->sk, NULL);
++ synchronize_rcu();
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+ sock_release(sock);
+ }
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 0241910049825..7b0cd54da452b 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -409,10 +409,10 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
+ /* Make sure we are allowed to bind here. */
+ if (snum || !(inet->bind_address_no_port ||
+ (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
+- if (sk->sk_prot->get_port(sk, snum)) {
++ err = sk->sk_prot->get_port(sk, snum);
++ if (err) {
+ sk->sk_ipv6only = saved_ipv6only;
+ inet_reset_saddr(sk);
+- err = -EADDRINUSE;
+ goto out;
+ }
+ if (!(flags & BIND_FROM_BPF)) {
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 5ecb56522f9d6..ba28aeb7cade0 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -42,24 +42,29 @@ static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
+ {
+ struct inet_sock *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
++ int oif = sk->sk_bound_dev_if;
+
+ memset(fl6, 0, sizeof(*fl6));
+ fl6->flowi6_proto = sk->sk_protocol;
+ fl6->daddr = sk->sk_v6_daddr;
+ fl6->saddr = np->saddr;
+- fl6->flowi6_oif = sk->sk_bound_dev_if;
+ fl6->flowi6_mark = sk->sk_mark;
+ fl6->fl6_dport = inet->inet_dport;
+ fl6->fl6_sport = inet->inet_sport;
+ fl6->flowlabel = np->flow_label;
+ fl6->flowi6_uid = sk->sk_uid;
+
+- if (!fl6->flowi6_oif)
+- fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
++ if (!oif)
++ oif = np->sticky_pktinfo.ipi6_ifindex;
+
+- if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
+- fl6->flowi6_oif = np->mcast_oif;
++ if (!oif) {
++ if (ipv6_addr_is_multicast(&fl6->daddr))
++ oif = np->mcast_oif;
++ else
++ oif = np->ucast_oif;
++ }
+
++ fl6->flowi6_oif = oif;
+ security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
+ }
+
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 5703d3cbea9ba..70d81bba50939 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -694,7 +694,7 @@ static int ipip6_rcv(struct sk_buff *skb)
+ skb->dev = tunnel->dev;
+
+ if (packet_is_spoofed(skb, iph, tunnel)) {
+- tunnel->dev->stats.rx_errors++;
++ DEV_STATS_INC(tunnel->dev, rx_errors);
+ goto out;
+ }
+
+@@ -714,8 +714,8 @@ static int ipip6_rcv(struct sk_buff *skb)
+ net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+ &iph->saddr, iph->tos);
+ if (err > 1) {
+- ++tunnel->dev->stats.rx_frame_errors;
+- ++tunnel->dev->stats.rx_errors;
++ DEV_STATS_INC(tunnel->dev, rx_frame_errors);
++ DEV_STATS_INC(tunnel->dev, rx_errors);
+ goto out;
+ }
+ }
+@@ -942,7 +942,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
+ if (!rt) {
+ rt = ip_route_output_flow(tunnel->net, &fl4, NULL);
+ if (IS_ERR(rt)) {
+- dev->stats.tx_carrier_errors++;
++ DEV_STATS_INC(dev, tx_carrier_errors);
+ goto tx_error_icmp;
+ }
+ dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst, fl4.saddr);
+@@ -950,14 +950,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
+
+ if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
+ ip_rt_put(rt);
+- dev->stats.tx_carrier_errors++;
++ DEV_STATS_INC(dev, tx_carrier_errors);
+ goto tx_error_icmp;
+ }
+ tdev = rt->dst.dev;
+
+ if (tdev == dev) {
+ ip_rt_put(rt);
+- dev->stats.collisions++;
++ DEV_STATS_INC(dev, collisions);
+ goto tx_error;
+ }
+
+@@ -970,7 +970,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
+ mtu = dst_mtu(&rt->dst) - t_hlen;
+
+ if (mtu < IPV4_MIN_MTU) {
+- dev->stats.collisions++;
++ DEV_STATS_INC(dev, collisions);
+ ip_rt_put(rt);
+ goto tx_error;
+ }
+@@ -1009,7 +1009,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
+ struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+ if (!new_skb) {
+ ip_rt_put(rt);
+- dev->stats.tx_dropped++;
++ DEV_STATS_INC(dev, tx_dropped);
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+@@ -1039,7 +1039,7 @@ tx_error_icmp:
+ dst_link_failure(skb);
+ tx_error:
+ kfree_skb(skb);
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ return NETDEV_TX_OK;
+ }
+
+@@ -1058,7 +1058,7 @@ static netdev_tx_t sit_tunnel_xmit__(struct sk_buff *skb,
+ return NETDEV_TX_OK;
+ tx_error:
+ kfree_skb(skb);
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ return NETDEV_TX_OK;
+ }
+
+@@ -1087,7 +1087,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
+ return NETDEV_TX_OK;
+
+ tx_err:
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index bc65e5b7195b3..98a64e8d9bdaa 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1063,12 +1063,16 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
+ int dif, int sdif)
+ {
+ unsigned short hnum = ntohs(loc_port);
+- unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
+- unsigned int slot2 = hash2 & udp_table.mask;
+- struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
+- const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
++ unsigned int hash2, slot2;
++ struct udp_hslot *hslot2;
++ __portpair ports;
+ struct sock *sk;
+
++ hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
++ slot2 = hash2 & udp_table.mask;
++ hslot2 = &udp_table.hash2[slot2];
++ ports = INET_COMBINED_PORTS(rmt_port, hnum);
++
+ udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
+ if (sk->sk_state == TCP_ESTABLISHED &&
+ inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 687b4c878d4ad..8c8ef87997a8a 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -576,7 +576,7 @@ static struct ieee80211_key *
+ ieee80211_lookup_key(struct ieee80211_sub_if_data *sdata, int link_id,
+ u8 key_idx, bool pairwise, const u8 *mac_addr)
+ {
+- struct ieee80211_local *local = sdata->local;
++ struct ieee80211_local *local __maybe_unused = sdata->local;
+ struct ieee80211_link_data *link = &sdata->deflink;
+ struct ieee80211_key *key;
+
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index a842f2e1c2309..de7b8a4d4bbbb 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -390,6 +390,7 @@ struct ieee80211_mgd_auth_data {
+ bool done, waiting;
+ bool peer_confirmed;
+ bool timeout_started;
++ int link_id;
+
+ u8 ap_addr[ETH_ALEN] __aligned(2);
+
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index dd9ac1f7d2ea6..46f08ec5ed760 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -2258,6 +2258,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+
+ ret = cfg80211_register_netdevice(ndev);
+ if (ret) {
++ ieee80211_if_free(ndev);
+ free_netdev(ndev);
+ return ret;
+ }
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index d8484cd870de5..0125b3e6175b7 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -5033,6 +5033,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_rx_assoc_resp resp = {
+ .uapsd_queues = -1,
+ };
++ u8 ap_mld_addr[ETH_ALEN] __aligned(2);
+ unsigned int link_id;
+
+ sdata_assert_lock(sdata);
+@@ -5199,6 +5200,11 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
+ resp.uapsd_queues |= ieee80211_ac_to_qos_mask[ac];
+ }
+
++ if (sdata->vif.valid_links) {
++ ether_addr_copy(ap_mld_addr, sdata->vif.cfg.ap_addr);
++ resp.ap_mld_addr = ap_mld_addr;
++ }
++
+ ieee80211_destroy_assoc_data(sdata,
+ status_code == WLAN_STATUS_SUCCESS ?
+ ASSOC_SUCCESS :
+@@ -5208,8 +5214,6 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
+ resp.len = len;
+ resp.req_ies = ifmgd->assoc_req_ies;
+ resp.req_ies_len = ifmgd->assoc_req_ies_len;
+- if (sdata->vif.valid_links)
+- resp.ap_mld_addr = sdata->vif.cfg.ap_addr;
+ cfg80211_rx_assoc_resp(sdata->dev, &resp);
+ notify_driver:
+ drv_mgd_complete_tx(sdata->local, sdata, &info);
+@@ -6640,6 +6644,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
+ req->ap_mld_addr ?: req->bss->bssid,
+ ETH_ALEN);
+ auth_data->bss = req->bss;
++ auth_data->link_id = req->link_id;
+
+ if (req->auth_data_len >= 4) {
+ if (req->auth_type == NL80211_AUTHTYPE_SAE) {
+@@ -6658,7 +6663,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
+ * removal and re-addition of the STA entry in
+ * ieee80211_prep_connection().
+ */
+- cont_auth = ifmgd->auth_data && req->bss == ifmgd->auth_data->bss;
++ cont_auth = ifmgd->auth_data && req->bss == ifmgd->auth_data->bss &&
++ ifmgd->auth_data->link_id == req->link_id;
+
+ if (req->ie && req->ie_len) {
+ memcpy(&auth_data->data[auth_data->data_len],
+@@ -6982,7 +6988,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
+
+ /* keep sta info, bssid if matching */
+ match = ether_addr_equal(ifmgd->auth_data->ap_addr,
+- assoc_data->ap_addr);
++ assoc_data->ap_addr) &&
++ ifmgd->auth_data->link_id == req->link_id;
+ ieee80211_destroy_auth_data(sdata, match);
+ }
+
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 874f2a4d831d0..cc10ee1ff8e93 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -2973,7 +2973,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
+
+ if (pre_conf_link_id != link_id &&
+ link_id != IEEE80211_LINK_UNSPECIFIED) {
+-#ifdef CPTCFG_MAC80211_VERBOSE_DEBUG
++#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ net_info_ratelimited("%s: dropped frame to %pM with bad link ID request (%d vs. %d)\n",
+ sdata->name, hdr.addr1,
+ pre_conf_link_id, link_id);
+diff --git a/net/mctp/device.c b/net/mctp/device.c
+index 99a3bda8852f8..acb97b2574289 100644
+--- a/net/mctp/device.c
++++ b/net/mctp/device.c
+@@ -429,12 +429,6 @@ static void mctp_unregister(struct net_device *dev)
+ struct mctp_dev *mdev;
+
+ mdev = mctp_dev_get_rtnl(dev);
+- if (mdev && !mctp_known(dev)) {
+- // Sanity check, should match what was set in mctp_register
+- netdev_warn(dev, "%s: BUG mctp_ptr set for unknown type %d",
+- __func__, dev->type);
+- return;
+- }
+ if (!mdev)
+ return;
+
+@@ -451,14 +445,8 @@ static int mctp_register(struct net_device *dev)
+ struct mctp_dev *mdev;
+
+ /* Already registered? */
+- mdev = rtnl_dereference(dev->mctp_ptr);
+-
+- if (mdev) {
+- if (!mctp_known(dev))
+- netdev_warn(dev, "%s: BUG mctp_ptr set for unknown type %d",
+- __func__, dev->type);
++ if (rtnl_dereference(dev->mctp_ptr))
+ return 0;
+- }
+
+ /* only register specific types */
+ if (!mctp_known(dev))
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 51ad557a525b5..b5ae419661b80 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -132,21 +132,21 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
+
+ s = this_cpu_ptr(dest->stats.cpustats);
+ u64_stats_update_begin(&s->syncp);
+- s->cnt.inpkts++;
+- s->cnt.inbytes += skb->len;
++ u64_stats_inc(&s->cnt.inpkts);
++ u64_stats_add(&s->cnt.inbytes, skb->len);
+ u64_stats_update_end(&s->syncp);
+
+ svc = rcu_dereference(dest->svc);
+ s = this_cpu_ptr(svc->stats.cpustats);
+ u64_stats_update_begin(&s->syncp);
+- s->cnt.inpkts++;
+- s->cnt.inbytes += skb->len;
++ u64_stats_inc(&s->cnt.inpkts);
++ u64_stats_add(&s->cnt.inbytes, skb->len);
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(ipvs->tot_stats.cpustats);
+ u64_stats_update_begin(&s->syncp);
+- s->cnt.inpkts++;
+- s->cnt.inbytes += skb->len;
++ u64_stats_inc(&s->cnt.inpkts);
++ u64_stats_add(&s->cnt.inbytes, skb->len);
+ u64_stats_update_end(&s->syncp);
+
+ local_bh_enable();
+@@ -168,21 +168,21 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
+
+ s = this_cpu_ptr(dest->stats.cpustats);
+ u64_stats_update_begin(&s->syncp);
+- s->cnt.outpkts++;
+- s->cnt.outbytes += skb->len;
++ u64_stats_inc(&s->cnt.outpkts);
++ u64_stats_add(&s->cnt.outbytes, skb->len);
+ u64_stats_update_end(&s->syncp);
+
+ svc = rcu_dereference(dest->svc);
+ s = this_cpu_ptr(svc->stats.cpustats);
+ u64_stats_update_begin(&s->syncp);
+- s->cnt.outpkts++;
+- s->cnt.outbytes += skb->len;
++ u64_stats_inc(&s->cnt.outpkts);
++ u64_stats_add(&s->cnt.outbytes, skb->len);
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(ipvs->tot_stats.cpustats);
+ u64_stats_update_begin(&s->syncp);
+- s->cnt.outpkts++;
+- s->cnt.outbytes += skb->len;
++ u64_stats_inc(&s->cnt.outpkts);
++ u64_stats_add(&s->cnt.outbytes, skb->len);
+ u64_stats_update_end(&s->syncp);
+
+ local_bh_enable();
+@@ -200,17 +200,17 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
+
+ s = this_cpu_ptr(cp->dest->stats.cpustats);
+ u64_stats_update_begin(&s->syncp);
+- s->cnt.conns++;
++ u64_stats_inc(&s->cnt.conns);
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(svc->stats.cpustats);
+ u64_stats_update_begin(&s->syncp);
+- s->cnt.conns++;
++ u64_stats_inc(&s->cnt.conns);
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(ipvs->tot_stats.cpustats);
+ u64_stats_update_begin(&s->syncp);
+- s->cnt.conns++;
++ u64_stats_inc(&s->cnt.conns);
+ u64_stats_update_end(&s->syncp);
+
+ local_bh_enable();
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 988222fff9f02..03af6a2ffd567 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -2297,11 +2297,11 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
+
+ do {
+ start = u64_stats_fetch_begin_irq(&u->syncp);
+- conns = u->cnt.conns;
+- inpkts = u->cnt.inpkts;
+- outpkts = u->cnt.outpkts;
+- inbytes = u->cnt.inbytes;
+- outbytes = u->cnt.outbytes;
++ conns = u64_stats_read(&u->cnt.conns);
++ inpkts = u64_stats_read(&u->cnt.inpkts);
++ outpkts = u64_stats_read(&u->cnt.outpkts);
++ inbytes = u64_stats_read(&u->cnt.inbytes);
++ outbytes = u64_stats_read(&u->cnt.outbytes);
+ } while (u64_stats_fetch_retry_irq(&u->syncp, start));
+
+ seq_printf(seq, "%3X %8LX %8LX %8LX %16LX %16LX\n",
+diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
+index 9a1a7af6a186a..f53150d82a92d 100644
+--- a/net/netfilter/ipvs/ip_vs_est.c
++++ b/net/netfilter/ipvs/ip_vs_est.c
+@@ -67,11 +67,11 @@ static void ip_vs_read_cpu_stats(struct ip_vs_kstats *sum,
+ if (add) {
+ do {
+ start = u64_stats_fetch_begin(&s->syncp);
+- conns = s->cnt.conns;
+- inpkts = s->cnt.inpkts;
+- outpkts = s->cnt.outpkts;
+- inbytes = s->cnt.inbytes;
+- outbytes = s->cnt.outbytes;
++ conns = u64_stats_read(&s->cnt.conns);
++ inpkts = u64_stats_read(&s->cnt.inpkts);
++ outpkts = u64_stats_read(&s->cnt.outpkts);
++ inbytes = u64_stats_read(&s->cnt.inbytes);
++ outbytes = u64_stats_read(&s->cnt.outbytes);
+ } while (u64_stats_fetch_retry(&s->syncp, start));
+ sum->conns += conns;
+ sum->inpkts += inpkts;
+@@ -82,11 +82,11 @@ static void ip_vs_read_cpu_stats(struct ip_vs_kstats *sum,
+ add = true;
+ do {
+ start = u64_stats_fetch_begin(&s->syncp);
+- sum->conns = s->cnt.conns;
+- sum->inpkts = s->cnt.inpkts;
+- sum->outpkts = s->cnt.outpkts;
+- sum->inbytes = s->cnt.inbytes;
+- sum->outbytes = s->cnt.outbytes;
++ sum->conns = u64_stats_read(&s->cnt.conns);
++ sum->inpkts = u64_stats_read(&s->cnt.inpkts);
++ sum->outpkts = u64_stats_read(&s->cnt.outpkts);
++ sum->inbytes = u64_stats_read(&s->cnt.inbytes);
++ sum->outbytes = u64_stats_read(&s->cnt.outbytes);
+ } while (u64_stats_fetch_retry(&s->syncp, start));
+ }
+ }
+diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
+index 61e3b05cf02c3..1020d67600a95 100644
+--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
++++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
+@@ -129,6 +129,56 @@ static void icmpv6_error_log(const struct sk_buff *skb,
+ nf_l4proto_log_invalid(skb, state, IPPROTO_ICMPV6, "%s", msg);
+ }
+
++static noinline_for_stack int
++nf_conntrack_icmpv6_redirect(struct nf_conn *tmpl, struct sk_buff *skb,
++ unsigned int dataoff,
++ const struct nf_hook_state *state)
++{
++ u8 hl = ipv6_hdr(skb)->hop_limit;
++ union nf_inet_addr outer_daddr;
++ union {
++ struct nd_opt_hdr nd_opt;
++ struct rd_msg rd_msg;
++ } tmp;
++ const struct nd_opt_hdr *nd_opt;
++ const struct rd_msg *rd_msg;
++
++ rd_msg = skb_header_pointer(skb, dataoff, sizeof(*rd_msg), &tmp.rd_msg);
++ if (!rd_msg) {
++ icmpv6_error_log(skb, state, "short redirect");
++ return -NF_ACCEPT;
++ }
++
++ if (rd_msg->icmph.icmp6_code != 0)
++ return NF_ACCEPT;
++
++ if (hl != 255 || !(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
++ icmpv6_error_log(skb, state, "invalid saddr or hoplimit for redirect");
++ return -NF_ACCEPT;
++ }
++
++ dataoff += sizeof(*rd_msg);
++
++ /* warning: rd_msg no longer usable after this call */
++ nd_opt = skb_header_pointer(skb, dataoff, sizeof(*nd_opt), &tmp.nd_opt);
++ if (!nd_opt || nd_opt->nd_opt_len == 0) {
++ icmpv6_error_log(skb, state, "redirect without options");
++ return -NF_ACCEPT;
++ }
++
++ /* We could call ndisc_parse_options(), but it would need
++ * skb_linearize() and a bit more work.
++ */
++ if (nd_opt->nd_opt_type != ND_OPT_REDIRECT_HDR)
++ return NF_ACCEPT;
++
++ memcpy(&outer_daddr.ip6, &ipv6_hdr(skb)->daddr,
++ sizeof(outer_daddr.ip6));
++ dataoff += 8;
++ return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
++ IPPROTO_ICMPV6, &outer_daddr);
++}
++
+ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+@@ -159,6 +209,9 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
+ return NF_ACCEPT;
+ }
+
++ if (icmp6h->icmp6_type == NDISC_REDIRECT)
++ return nf_conntrack_icmpv6_redirect(tmpl, skb, dataoff, state);
++
+ /* is not error message ? */
+ if (icmp6h->icmp6_type >= 128)
+ return NF_ACCEPT;
+diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
+index 0fdcdb2c9ae43..4d9b99abe37d6 100644
+--- a/net/netfilter/nf_flow_table_offload.c
++++ b/net/netfilter/nf_flow_table_offload.c
+@@ -383,12 +383,12 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
+ const __be32 *addr, const __be32 *mask)
+ {
+ struct flow_action_entry *entry;
+- int i, j;
++ int i;
+
+- for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) {
++ for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) {
+ entry = flow_action_entry_next(flow_rule);
+ flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
+- offset + i, &addr[j], mask);
++ offset + i * sizeof(u32), &addr[i], mask);
+ }
+ }
+
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 8b84869eb2ac7..fa0f1952d7637 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -948,6 +948,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ struct sw_flow_mask mask;
+ struct sk_buff *reply;
+ struct datapath *dp;
++ struct sw_flow_key *key;
+ struct sw_flow_actions *acts;
+ struct sw_flow_match match;
+ u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
+@@ -975,24 +976,26 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ }
+
+ /* Extract key. */
+- ovs_match_init(&match, &new_flow->key, false, &mask);
++ key = kzalloc(sizeof(*key), GFP_KERNEL);
++ if (!key) {
++ error = -ENOMEM;
++ goto err_kfree_key;
++ }
++
++ ovs_match_init(&match, key, false, &mask);
+ error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
+ a[OVS_FLOW_ATTR_MASK], log);
+ if (error)
+ goto err_kfree_flow;
+
++ ovs_flow_mask_key(&new_flow->key, key, true, &mask);
++
+ /* Extract flow identifier. */
+ error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
+- &new_flow->key, log);
++ key, log);
+ if (error)
+ goto err_kfree_flow;
+
+- /* unmasked key is needed to match when ufid is not used. */
+- if (ovs_identifier_is_key(&new_flow->id))
+- match.key = new_flow->id.unmasked_key;
+-
+- ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
+-
+ /* Validate actions. */
+ error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
+ &new_flow->key, &acts, log);
+@@ -1019,7 +1022,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ if (ovs_identifier_is_ufid(&new_flow->id))
+ flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
+ if (!flow)
+- flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
++ flow = ovs_flow_tbl_lookup(&dp->table, key);
+ if (likely(!flow)) {
+ rcu_assign_pointer(new_flow->sf_acts, acts);
+
+@@ -1089,6 +1092,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+
+ if (reply)
+ ovs_notify(&dp_flow_genl_family, reply, info);
++
++ kfree(key);
+ return 0;
+
+ err_unlock_ovs:
+@@ -1098,6 +1103,8 @@ err_kfree_acts:
+ ovs_nla_free_flow_actions(acts);
+ err_kfree_flow:
+ ovs_flow_free(new_flow, false);
++err_kfree_key:
++ kfree(key);
+ error:
+ return error;
+ }
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 4a07ab094a84e..ead5418c126e3 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -2309,7 +2309,7 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size)
+
+ WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
+
+- sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
++ sfa = kmalloc(kmalloc_size_roundup(sizeof(*sfa) + size), GFP_KERNEL);
+ if (!sfa)
+ return ERR_PTR(-ENOMEM);
+
+diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
+index 9683617db7049..08c117bc083ec 100644
+--- a/net/rxrpc/output.c
++++ b/net/rxrpc/output.c
+@@ -93,7 +93,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
+ *_hard_ack = hard_ack;
+ *_top = top;
+
+- pkt->ack.bufferSpace = htons(8);
++ pkt->ack.bufferSpace = htons(0);
+ pkt->ack.maxSkew = htons(0);
+ pkt->ack.firstPacket = htonl(hard_ack + 1);
+ pkt->ack.previousPacket = htonl(call->ackr_highest_seq);
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 3c3a626459deb..d4e4e94f4f987 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -716,7 +716,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ if (call->tx_total_len != -1 ||
+ call->tx_pending ||
+ call->tx_top != 0)
+- goto error_put;
++ goto out_put_unlock;
+ call->tx_total_len = p.call.tx_total_len;
+ }
+ }
+diff --git a/net/sched/ematch.c b/net/sched/ematch.c
+index 4ce6813618515..5c1235e6076ae 100644
+--- a/net/sched/ematch.c
++++ b/net/sched/ematch.c
+@@ -255,6 +255,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
+ * the value carried.
+ */
+ if (em_hdr->flags & TCF_EM_SIMPLE) {
++ if (em->ops->datalen > 0)
++ goto errout;
+ if (data_len < sizeof(u32))
+ goto errout;
+ em->data = *(u32 *) data;
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
+index b46a416787ec3..43ebf090029d7 100644
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -84,17 +84,18 @@ static struct ctl_table sctp_table[] = {
+ { /* sentinel */ }
+ };
+
++/* The following index defines are used in sctp_sysctl_net_register().
++ * If you add new items to the sctp_net_table, please ensure that
++ * the index values of these defines hold the same meaning indicated by
++ * their macro names when they appear in sctp_net_table.
++ */
++#define SCTP_RTO_MIN_IDX 0
++#define SCTP_RTO_MAX_IDX 1
++#define SCTP_PF_RETRANS_IDX 2
++#define SCTP_PS_RETRANS_IDX 3
++
+ static struct ctl_table sctp_net_table[] = {
+- {
+- .procname = "rto_initial",
+- .data = &init_net.sctp.rto_initial,
+- .maxlen = sizeof(unsigned int),
+- .mode = 0644,
+- .proc_handler = proc_dointvec_minmax,
+- .extra1 = SYSCTL_ONE,
+- .extra2 = &timer_max
+- },
+- {
++ [SCTP_RTO_MIN_IDX] = {
+ .procname = "rto_min",
+ .data = &init_net.sctp.rto_min,
+ .maxlen = sizeof(unsigned int),
+@@ -103,7 +104,7 @@ static struct ctl_table sctp_net_table[] = {
+ .extra1 = SYSCTL_ONE,
+ .extra2 = &init_net.sctp.rto_max
+ },
+- {
++ [SCTP_RTO_MAX_IDX] = {
+ .procname = "rto_max",
+ .data = &init_net.sctp.rto_max,
+ .maxlen = sizeof(unsigned int),
+@@ -112,6 +113,33 @@ static struct ctl_table sctp_net_table[] = {
+ .extra1 = &init_net.sctp.rto_min,
+ .extra2 = &timer_max
+ },
++ [SCTP_PF_RETRANS_IDX] = {
++ .procname = "pf_retrans",
++ .data = &init_net.sctp.pf_retrans,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = &init_net.sctp.ps_retrans,
++ },
++ [SCTP_PS_RETRANS_IDX] = {
++ .procname = "ps_retrans",
++ .data = &init_net.sctp.ps_retrans,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &init_net.sctp.pf_retrans,
++ .extra2 = &ps_retrans_max,
++ },
++ {
++ .procname = "rto_initial",
++ .data = &init_net.sctp.rto_initial,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = SYSCTL_ONE,
++ .extra2 = &timer_max
++ },
+ {
+ .procname = "rto_alpha_exp_divisor",
+ .data = &init_net.sctp.rto_alpha,
+@@ -207,24 +235,6 @@ static struct ctl_table sctp_net_table[] = {
+ .extra1 = SYSCTL_ONE,
+ .extra2 = SYSCTL_INT_MAX,
+ },
+- {
+- .procname = "pf_retrans",
+- .data = &init_net.sctp.pf_retrans,
+- .maxlen = sizeof(int),
+- .mode = 0644,
+- .proc_handler = proc_dointvec_minmax,
+- .extra1 = SYSCTL_ZERO,
+- .extra2 = &init_net.sctp.ps_retrans,
+- },
+- {
+- .procname = "ps_retrans",
+- .data = &init_net.sctp.ps_retrans,
+- .maxlen = sizeof(int),
+- .mode = 0644,
+- .proc_handler = proc_dointvec_minmax,
+- .extra1 = &init_net.sctp.pf_retrans,
+- .extra2 = &ps_retrans_max,
+- },
+ {
+ .procname = "sndbuf_policy",
+ .data = &init_net.sctp.sndbuf_policy,
+@@ -586,6 +596,11 @@ int sctp_sysctl_net_register(struct net *net)
+ for (i = 0; table[i].data; i++)
+ table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+
++ table[SCTP_RTO_MIN_IDX].extra2 = &net->sctp.rto_max;
++ table[SCTP_RTO_MAX_IDX].extra1 = &net->sctp.rto_min;
++ table[SCTP_PF_RETRANS_IDX].extra2 = &net->sctp.ps_retrans;
++ table[SCTP_PS_RETRANS_IDX].extra1 = &net->sctp.pf_retrans;
++
+ net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
+ if (net->sctp.sysctl_header == NULL) {
+ kfree(table);
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index bcd74dddbe2db..9a5db285d4ae5 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1162,18 +1162,23 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
+ return res;
+
+ inlen = svc_getnl(argv);
+- if (inlen > (argv->iov_len + rqstp->rq_arg.page_len))
++ if (inlen > (argv->iov_len + rqstp->rq_arg.page_len)) {
++ kfree(in_handle->data);
+ return SVC_DENIED;
++ }
+
+ pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
+ in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
+- if (!in_token->pages)
++ if (!in_token->pages) {
++ kfree(in_handle->data);
+ return SVC_DENIED;
++ }
+ in_token->page_base = 0;
+ in_token->page_len = inlen;
+ for (i = 0; i < pages; i++) {
+ in_token->pages[i] = alloc_page(GFP_KERNEL);
+ if (!in_token->pages[i]) {
++ kfree(in_handle->data);
+ gss_free_in_token_pages(in_token);
+ return SVC_DENIED;
+ }
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 993acf38af870..0b0b9f1eed469 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1442,7 +1442,7 @@ static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
+ break;
+ default:
+ err = -EAFNOSUPPORT;
+- goto out;
++ goto out_release;
+ }
+ if (err < 0) {
+ dprintk("RPC: can't bind UDP socket (%d)\n", err);
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 44b87e4274b42..b098fde373abf 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -831,7 +831,7 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt,
+ return req;
+
+ out3:
+- kfree(req->rl_sendbuf);
++ rpcrdma_regbuf_free(req->rl_sendbuf);
+ out2:
+ kfree(req);
+ out1:
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 264cf367e2656..9ed9786341259 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -792,7 +792,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
+ struct sk_psock *psock;
+ struct sock *sk_redir;
+ struct tls_rec *rec;
+- bool enospc, policy;
++ bool enospc, policy, redir_ingress;
+ int err = 0, send;
+ u32 delta = 0;
+
+@@ -837,6 +837,7 @@ more_data:
+ }
+ break;
+ case __SK_REDIRECT:
++ redir_ingress = psock->redir_ingress;
+ sk_redir = psock->sk_redir;
+ memcpy(&msg_redir, msg, sizeof(*msg));
+ if (msg->apply_bytes < send)
+@@ -846,7 +847,8 @@ more_data:
+ sk_msg_return_zero(sk, msg, send);
+ msg->sg.size -= send;
+ release_sock(sk);
+- err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
++ err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
++ &msg_redir, send, flags);
+ lock_sock(sk);
+ if (err < 0) {
+ *copied -= sk_msg_free_nocharge(sk, &msg_redir);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index b3545fc680979..f0c2293f1d3b8 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1999,13 +1999,20 @@ restart_locked:
+ unix_state_lock(sk);
+
+ err = 0;
+- if (unix_peer(sk) == other) {
++ if (sk->sk_type == SOCK_SEQPACKET) {
++ /* We are here only when racing with unix_release_sock()
++ * is clearing @other. Never change state to TCP_CLOSE
++ * unlike SOCK_DGRAM wants.
++ */
++ unix_state_unlock(sk);
++ err = -EPIPE;
++ } else if (unix_peer(sk) == other) {
+ unix_peer(sk) = NULL;
+ unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+
++ sk->sk_state = TCP_CLOSE;
+ unix_state_unlock(sk);
+
+- sk->sk_state = TCP_CLOSE;
+ unix_dgram_disconnected(sk, other);
+ sock_put(other);
+ err = -ECONNREFUSED;
+@@ -3738,6 +3745,7 @@ static int __init af_unix_init(void)
+ rc = proto_register(&unix_stream_proto, 1);
+ if (rc != 0) {
+ pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
++ proto_unregister(&unix_dgram_proto);
+ goto out;
+ }
+
+diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
+index 842c94286d316..36eb16a40745d 100644
+--- a/net/vmw_vsock/vmci_transport.c
++++ b/net/vmw_vsock/vmci_transport.c
+@@ -1711,7 +1711,11 @@ static int vmci_transport_dgram_enqueue(
+ if (!dg)
+ return -ENOMEM;
+
+- memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
++ err = memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
++ if (err) {
++ kfree(dg);
++ return err;
++ }
+
+ dg->dst = vmci_make_handle(remote_addr->svm_cid,
+ remote_addr->svm_port);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 597c522365146..d2321c6833985 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -3868,6 +3868,9 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
+ struct cfg80211_chan_def chandef = {};
+ int ret;
+
++ if (!link)
++ goto nla_put_failure;
++
+ if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id))
+ goto nla_put_failure;
+ if (nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN,
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index c3d950d294329..4f3f31244e8ba 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -4311,8 +4311,10 @@ static int __init regulatory_init_db(void)
+ return -EINVAL;
+
+ err = load_builtin_regdb_keys();
+- if (err)
++ if (err) {
++ platform_device_unregister(reg_pdev);
+ return err;
++ }
+
+ /* We always try to get an update for the static regdomain */
+ err = regulatory_hint_core(cfg80211_world_regdom->alpha2);
+diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c
+index ac370e638fa3d..281dc964de8da 100644
+--- a/samples/bpf/xdp1_user.c
++++ b/samples/bpf/xdp1_user.c
+@@ -51,7 +51,7 @@ static void poll_stats(int map_fd, int interval)
+
+ sleep(interval);
+
+- while (bpf_map_get_next_key(map_fd, &key, &key) != -1) {
++ while (bpf_map_get_next_key(map_fd, &key, &key) == 0) {
+ __u64 sum = 0;
+
+ assert(bpf_map_lookup_elem(map_fd, &key, values) == 0);
+diff --git a/samples/bpf/xdp2_kern.c b/samples/bpf/xdp2_kern.c
+index 3332ba6bb95fb..67804ecf7ce37 100644
+--- a/samples/bpf/xdp2_kern.c
++++ b/samples/bpf/xdp2_kern.c
+@@ -112,6 +112,10 @@ int xdp_prog1(struct xdp_md *ctx)
+
+ if (ipproto == IPPROTO_UDP) {
+ swap_src_dst_mac(data);
++
++ if (bpf_xdp_store_bytes(ctx, 0, pkt, sizeof(pkt)))
++ return rc;
++
+ rc = XDP_TX;
+ }
+
+diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c
+index 9ec93d90e8a5a..4eb7aa11cfbb2 100644
+--- a/samples/vfio-mdev/mdpy-fb.c
++++ b/samples/vfio-mdev/mdpy-fb.c
+@@ -109,7 +109,7 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
+
+ ret = pci_request_regions(pdev, "mdpy-fb");
+ if (ret < 0)
+- return ret;
++ goto err_disable_dev;
+
+ pci_read_config_dword(pdev, MDPY_FORMAT_OFFSET, &format);
+ pci_read_config_dword(pdev, MDPY_WIDTH_OFFSET, &width);
+@@ -191,6 +191,9 @@ err_release_fb:
+ err_release_regions:
+ pci_release_regions(pdev);
+
++err_disable_dev:
++ pci_disable_device(pdev);
++
+ return ret;
+ }
+
+@@ -199,7 +202,10 @@ static void mdpy_fb_remove(struct pci_dev *pdev)
+ struct fb_info *info = pci_get_drvdata(pdev);
+
+ unregister_framebuffer(info);
++ iounmap(info->screen_base);
+ framebuffer_release(info);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
+ }
+
+ static struct pci_device_id mdpy_fb_pci_table[] = {
+diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
+index d766b7d0ffd13..53baa95cb644f 100644
+--- a/security/Kconfig.hardening
++++ b/security/Kconfig.hardening
+@@ -257,6 +257,9 @@ config INIT_ON_FREE_DEFAULT_ON
+
+ config CC_HAS_ZERO_CALL_USED_REGS
+ def_bool $(cc-option,-fzero-call-used-regs=used-gpr)
++ # https://github.com/ClangBuiltLinux/linux/issues/1766
++ # https://github.com/llvm/llvm-project/issues/59242
++ depends on !CC_IS_CLANG || CLANG_VERSION > 150006
+
+ config ZERO_CALL_USED_REGS
+ bool "Enable register zeroing on function exit"
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index d066ccc219e2d..7160e7aa58b94 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -868,8 +868,10 @@ static struct multi_transaction *multi_transaction_new(struct file *file,
+ if (!t)
+ return ERR_PTR(-ENOMEM);
+ kref_init(&t->count);
+- if (copy_from_user(t->data, buf, size))
++ if (copy_from_user(t->data, buf, size)) {
++ put_multi_transaction(t);
+ return ERR_PTR(-EFAULT);
++ }
+
+ return t;
+ }
+diff --git a/security/apparmor/label.c b/security/apparmor/label.c
+index 0f36ee9074381..a67c5897ee254 100644
+--- a/security/apparmor/label.c
++++ b/security/apparmor/label.c
+@@ -197,15 +197,18 @@ static bool vec_is_stale(struct aa_profile **vec, int n)
+ return false;
+ }
+
+-static long union_vec_flags(struct aa_profile **vec, int n, long mask)
++static long accum_vec_flags(struct aa_profile **vec, int n)
+ {
+- long u = 0;
++ long u = FLAG_UNCONFINED;
+ int i;
+
+ AA_BUG(!vec);
+
+ for (i = 0; i < n; i++) {
+- u |= vec[i]->label.flags & mask;
++ u |= vec[i]->label.flags & (FLAG_DEBUG1 | FLAG_DEBUG2 |
++ FLAG_STALE);
++ if (!(u & vec[i]->label.flags & FLAG_UNCONFINED))
++ u &= ~FLAG_UNCONFINED;
+ }
+
+ return u;
+@@ -1097,8 +1100,7 @@ static struct aa_label *label_merge_insert(struct aa_label *new,
+ else if (k == b->size)
+ return aa_get_label(b);
+ }
+- new->flags |= union_vec_flags(new->vec, new->size, FLAG_UNCONFINED |
+- FLAG_DEBUG1 | FLAG_DEBUG2);
++ new->flags |= accum_vec_flags(new->vec, new->size);
+ ls = labels_set(new);
+ write_lock_irqsave(&ls->lock, flags);
+ label = __label_insert(labels_set(new), new, false);
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index f56070270c69d..1e2f40db15c58 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -1194,10 +1194,10 @@ static int apparmor_inet_conn_request(const struct sock *sk, struct sk_buff *skb
+ #endif
+
+ /*
+- * The cred blob is a pointer to, not an instance of, an aa_task_ctx.
++ * The cred blob is a pointer to, not an instance of, an aa_label.
+ */
+ struct lsm_blob_sizes apparmor_blob_sizes __lsm_ro_after_init = {
+- .lbs_cred = sizeof(struct aa_task_ctx *),
++ .lbs_cred = sizeof(struct aa_label *),
+ .lbs_file = sizeof(struct aa_file_ctx),
+ .lbs_task = sizeof(struct aa_task_ctx),
+ };
+diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
+index 499c0209b6a46..fbdfcef91c616 100644
+--- a/security/apparmor/policy.c
++++ b/security/apparmor/policy.c
+@@ -1170,7 +1170,7 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
+
+ if (!name) {
+ /* remove namespace - can only happen if fqname[0] == ':' */
+- mutex_lock_nested(&ns->parent->lock, ns->level);
++ mutex_lock_nested(&ns->parent->lock, ns->parent->level);
+ __aa_bump_ns_revision(ns);
+ __aa_remove_ns(ns);
+ mutex_unlock(&ns->parent->lock);
+diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c
+index 43beaad083feb..78700d94b4533 100644
+--- a/security/apparmor/policy_ns.c
++++ b/security/apparmor/policy_ns.c
+@@ -134,7 +134,7 @@ static struct aa_ns *alloc_ns(const char *prefix, const char *name)
+ return ns;
+
+ fail_unconfined:
+- kfree_sensitive(ns->base.hname);
++ aa_policy_destroy(&ns->base);
+ fail_ns:
+ kfree_sensitive(ns);
+ return NULL;
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index 55d31bac4f35b..9d26bbb901338 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -972,7 +972,7 @@ static int verify_header(struct aa_ext *e, int required, const char **ns)
+ * if not specified use previous version
+ * Mask off everything that is not kernel abi version
+ */
+- if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
++ if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v8)) {
+ audit_iface(NULL, NULL, NULL, "unsupported interface version",
+ e, error);
+ return error;
+diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
+index 8a82a6c7f48a4..f2193c531f4a4 100644
+--- a/security/integrity/digsig.c
++++ b/security/integrity/digsig.c
+@@ -126,6 +126,7 @@ int __init integrity_init_keyring(const unsigned int id)
+ {
+ struct key_restriction *restriction;
+ key_perm_t perm;
++ int ret;
+
+ perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW
+ | KEY_USR_READ | KEY_USR_SEARCH;
+@@ -154,7 +155,10 @@ int __init integrity_init_keyring(const unsigned int id)
+ perm |= KEY_USR_WRITE;
+
+ out:
+- return __integrity_init_keyring(id, perm, restriction);
++ ret = __integrity_init_keyring(id, perm, restriction);
++ if (ret)
++ kfree(restriction);
++ return ret;
+ }
+
+ static int __init integrity_add_key(const unsigned int id, const void *data,
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index a8802b8da946b..2edff7f58c25c 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -398,12 +398,6 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+
+ nentry->lsm[i].type = entry->lsm[i].type;
+ nentry->lsm[i].args_p = entry->lsm[i].args_p;
+- /*
+- * Remove the reference from entry so that the associated
+- * memory will not be freed during a later call to
+- * ima_lsm_free_rule(entry).
+- */
+- entry->lsm[i].args_p = NULL;
+
+ ima_filter_rule_init(nentry->lsm[i].type, Audit_equal,
+ nentry->lsm[i].args_p,
+@@ -417,6 +411,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+
+ static int ima_lsm_update_rule(struct ima_rule_entry *entry)
+ {
++ int i;
+ struct ima_rule_entry *nentry;
+
+ nentry = ima_lsm_copy_rule(entry);
+@@ -431,7 +426,8 @@ static int ima_lsm_update_rule(struct ima_rule_entry *entry)
+ * references and the entry itself. All other memory references will now
+ * be owned by nentry.
+ */
+- ima_lsm_free_rule(entry);
++ for (i = 0; i < MAX_LSM_RULES; i++)
++ ima_filter_rule_free(entry->lsm[i].rule);
+ kfree(entry);
+
+ return 0;
+@@ -549,6 +545,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
+ const char *func_data)
+ {
+ int i;
++ bool result = false;
++ struct ima_rule_entry *lsm_rule = rule;
++ bool rule_reinitialized = false;
+
+ if ((rule->flags & IMA_FUNC) &&
+ (rule->func != func && func != POST_SETATTR))
+@@ -610,35 +609,55 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
+ int rc = 0;
+ u32 osid;
+
+- if (!rule->lsm[i].rule) {
+- if (!rule->lsm[i].args_p)
++ if (!lsm_rule->lsm[i].rule) {
++ if (!lsm_rule->lsm[i].args_p)
+ continue;
+ else
+ return false;
+ }
++
++retry:
+ switch (i) {
+ case LSM_OBJ_USER:
+ case LSM_OBJ_ROLE:
+ case LSM_OBJ_TYPE:
+ security_inode_getsecid(inode, &osid);
+- rc = ima_filter_rule_match(osid, rule->lsm[i].type,
++ rc = ima_filter_rule_match(osid, lsm_rule->lsm[i].type,
+ Audit_equal,
+- rule->lsm[i].rule);
++ lsm_rule->lsm[i].rule);
+ break;
+ case LSM_SUBJ_USER:
+ case LSM_SUBJ_ROLE:
+ case LSM_SUBJ_TYPE:
+- rc = ima_filter_rule_match(secid, rule->lsm[i].type,
++ rc = ima_filter_rule_match(secid, lsm_rule->lsm[i].type,
+ Audit_equal,
+- rule->lsm[i].rule);
++ lsm_rule->lsm[i].rule);
+ break;
+ default:
+ break;
+ }
+- if (!rc)
+- return false;
++
++ if (rc == -ESTALE && !rule_reinitialized) {
++ lsm_rule = ima_lsm_copy_rule(rule);
++ if (lsm_rule) {
++ rule_reinitialized = true;
++ goto retry;
++ }
++ }
++ if (!rc) {
++ result = false;
++ goto out;
++ }
+ }
+- return true;
++ result = true;
++
++out:
++ if (rule_reinitialized) {
++ for (i = 0; i < MAX_LSM_RULES; i++)
++ ima_filter_rule_free(lsm_rule->lsm[i].rule);
++ kfree(lsm_rule);
++ }
++ return result;
+ }
+
+ /*
+diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
+index c25079faa2088..195ac18f09275 100644
+--- a/security/integrity/ima/ima_template.c
++++ b/security/integrity/ima/ima_template.c
+@@ -245,11 +245,11 @@ int template_desc_init_fields(const char *template_fmt,
+ }
+
+ if (fields && num_fields) {
+- *fields = kmalloc_array(i, sizeof(*fields), GFP_KERNEL);
++ *fields = kmalloc_array(i, sizeof(**fields), GFP_KERNEL);
+ if (*fields == NULL)
+ return -ENOMEM;
+
+- memcpy(*fields, found_fields, i * sizeof(*fields));
++ memcpy(*fields, found_fields, i * sizeof(**fields));
+ *num_fields = i;
+ }
+
+diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
+index e05cfc2e49aeb..1e313982af02a 100644
+--- a/security/keys/encrypted-keys/encrypted.c
++++ b/security/keys/encrypted-keys/encrypted.c
+@@ -627,7 +627,7 @@ static struct encrypted_key_payload *encrypted_key_alloc(struct key *key,
+ pr_err("encrypted key: instantiation of keys using provided decrypted data is disabled since CONFIG_USER_DECRYPTED_DATA is set to false\n");
+ return ERR_PTR(-EINVAL);
+ }
+- if (strlen(decrypted_data) != decrypted_datalen) {
++ if (strlen(decrypted_data) != decrypted_datalen * 2) {
+ pr_err("encrypted key: decrypted data provided does not match decrypted data length provided\n");
+ return ERR_PTR(-EINVAL);
+ }
+@@ -791,8 +791,8 @@ static int encrypted_init(struct encrypted_key_payload *epayload,
+ ret = encrypted_key_decrypt(epayload, format, hex_encoded_iv);
+ } else if (decrypted_data) {
+ get_random_bytes(epayload->iv, ivsize);
+- memcpy(epayload->decrypted_data, decrypted_data,
+- epayload->decrypted_datalen);
++ ret = hex2bin(epayload->decrypted_data, decrypted_data,
++ epayload->decrypted_datalen);
+ } else {
+ get_random_bytes(epayload->iv, ivsize);
+ get_random_bytes(epayload->decrypted_data, epayload->decrypted_datalen);
+diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
+index de41621f4998e..110a5ab2b46bc 100644
+--- a/security/loadpin/loadpin.c
++++ b/security/loadpin/loadpin.c
+@@ -122,21 +122,11 @@ static void loadpin_sb_free_security(struct super_block *mnt_sb)
+ }
+ }
+
+-static int loadpin_read_file(struct file *file, enum kernel_read_file_id id,
+- bool contents)
++static int loadpin_check(struct file *file, enum kernel_read_file_id id)
+ {
+ struct super_block *load_root;
+ const char *origin = kernel_read_file_id_str(id);
+
+- /*
+- * If we will not know that we'll be seeing the full contents
+- * then we cannot trust a load will be complete and unchanged
+- * off disk. Treat all contents=false hooks as if there were
+- * no associated file struct.
+- */
+- if (!contents)
+- file = NULL;
+-
+ /* If the file id is excluded, ignore the pinning. */
+ if ((unsigned int)id < ARRAY_SIZE(ignore_read_file_id) &&
+ ignore_read_file_id[id]) {
+@@ -192,9 +182,25 @@ static int loadpin_read_file(struct file *file, enum kernel_read_file_id id,
+ return 0;
+ }
+
++static int loadpin_read_file(struct file *file, enum kernel_read_file_id id,
++ bool contents)
++{
++ /*
++ * LoadPin only cares about the _origin_ of a file, not its
++ * contents, so we can ignore the "are full contents available"
++ * argument here.
++ */
++ return loadpin_check(file, id);
++}
++
+ static int loadpin_load_data(enum kernel_load_data_id id, bool contents)
+ {
+- return loadpin_read_file(NULL, (enum kernel_read_file_id) id, contents);
++ /*
++ * LoadPin only cares about the _origin_ of a file, not its
++ * contents, so a NULL file is passed, and we can ignore the
++ * state of "contents".
++ */
++ return loadpin_check(NULL, (enum kernel_read_file_id) id);
+ }
+
+ static struct security_hook_list loadpin_hooks[] __lsm_ro_after_init = {
+diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
+index ba095558b6d16..7268304009ada 100644
+--- a/sound/core/memalloc.c
++++ b/sound/core/memalloc.c
+@@ -720,7 +720,6 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
+ struct snd_dma_sg_fallback {
+ size_t count;
+ struct page **pages;
+- dma_addr_t *addrs;
+ };
+
+ static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
+@@ -732,38 +731,49 @@ static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
+ for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
+ do_free_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
+ kvfree(sgbuf->pages);
+- kvfree(sgbuf->addrs);
+ kfree(sgbuf);
+ }
+
+ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
+ {
+ struct snd_dma_sg_fallback *sgbuf;
+- struct page **pages;
+- size_t i, count;
++ struct page **pagep, *curp;
++ size_t chunk, npages;
++ dma_addr_t addr;
+ void *p;
+ bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
+
+ sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
+ if (!sgbuf)
+ return NULL;
+- count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+- pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
+- if (!pages)
+- goto error;
+- sgbuf->pages = pages;
+- sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
+- if (!sgbuf->addrs)
++ size = PAGE_ALIGN(size);
++ sgbuf->count = size >> PAGE_SHIFT;
++ sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
++ if (!sgbuf->pages)
+ goto error;
+
+- for (i = 0; i < count; sgbuf->count++, i++) {
+- p = do_alloc_pages(dmab->dev.dev, PAGE_SIZE, &sgbuf->addrs[i], wc);
+- if (!p)
+- goto error;
+- sgbuf->pages[i] = virt_to_page(p);
++ pagep = sgbuf->pages;
++ chunk = size;
++ while (size > 0) {
++ chunk = min(size, chunk);
++ p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
++ if (!p) {
++ if (chunk <= PAGE_SIZE)
++ goto error;
++ chunk >>= 1;
++ chunk = PAGE_SIZE << get_order(chunk);
++ continue;
++ }
++
++ size -= chunk;
++ /* fill pages */
++ npages = chunk >> PAGE_SHIFT;
++ curp = virt_to_page(p);
++ while (npages--)
++ *pagep++ = curp++;
+ }
+
+- p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
++ p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
+ if (!p)
+ goto error;
+ dmab->private_data = sgbuf;
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 33769ca78cc8f..9238abbfb2d62 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -1432,8 +1432,10 @@ static int snd_pcm_do_start(struct snd_pcm_substream *substream,
+ static void snd_pcm_undo_start(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
+ {
+- if (substream->runtime->trigger_master == substream)
++ if (substream->runtime->trigger_master == substream) {
+ substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
++ substream->runtime->stop_operating = true;
++ }
+ }
+
+ static void snd_pcm_post_start(struct snd_pcm_substream *substream,
+diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
+index d3bc9e8c407dc..f0d34cf70c3e0 100644
+--- a/sound/drivers/mts64.c
++++ b/sound/drivers/mts64.c
+@@ -815,6 +815,9 @@ static void snd_mts64_interrupt(void *private)
+ u8 status, data;
+ struct snd_rawmidi_substream *substream;
+
++ if (!mts)
++ return;
++
+ spin_lock(&mts->lock);
+ ret = mts64_read(mts->pardev->port);
+ data = ret & 0x00ff;
+diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
+index bb31b7fe867d6..477a5b4b50bcb 100644
+--- a/sound/pci/asihpi/hpioctl.c
++++ b/sound/pci/asihpi/hpioctl.c
+@@ -361,7 +361,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
+ pci_dev->device, pci_dev->subsystem_vendor,
+ pci_dev->subsystem_device, pci_dev->devfn);
+
+- if (pci_enable_device(pci_dev) < 0) {
++ if (pcim_enable_device(pci_dev) < 0) {
+ dev_err(&pci_dev->dev,
+ "pci_enable_device failed, disabling device\n");
+ return -EIO;
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index b4d1e658c5560..edd653ece70d7 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2886,7 +2886,8 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec)
+ snd_hdac_enter_pm(&codec->core);
+ if (codec->patch_ops.suspend)
+ codec->patch_ops.suspend(codec);
+- hda_cleanup_all_streams(codec);
++ if (!codec->no_stream_clean_at_suspend)
++ hda_cleanup_all_streams(codec);
+ state = hda_set_power_state(codec, AC_PWRST_D3);
+ update_power_acct(codec, true);
+ snd_hdac_leave_pm(&codec->core);
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 21edf7a619f07..386dd9d9143f9 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -167,6 +167,7 @@ struct hdmi_spec {
+ struct hdmi_ops ops;
+
+ bool dyn_pin_out;
++ bool static_pcm_mapping;
+ /* hdmi interrupt trigger control flag for Nvidia codec */
+ bool hdmi_intr_trig_ctrl;
+ bool nv_dp_workaround; /* workaround DP audio infoframe for Nvidia */
+@@ -1525,13 +1526,16 @@ static void update_eld(struct hda_codec *codec,
+ */
+ pcm_jack = pin_idx_to_pcm_jack(codec, per_pin);
+
+- if (eld->eld_valid) {
+- hdmi_attach_hda_pcm(spec, per_pin);
+- hdmi_pcm_setup_pin(spec, per_pin);
+- } else {
+- hdmi_pcm_reset_pin(spec, per_pin);
+- hdmi_detach_hda_pcm(spec, per_pin);
++ if (!spec->static_pcm_mapping) {
++ if (eld->eld_valid) {
++ hdmi_attach_hda_pcm(spec, per_pin);
++ hdmi_pcm_setup_pin(spec, per_pin);
++ } else {
++ hdmi_pcm_reset_pin(spec, per_pin);
++ hdmi_detach_hda_pcm(spec, per_pin);
++ }
+ }
++
+ /* if pcm_idx == -1, it means this is in monitor connection event
+ * we can get the correct pcm_idx now.
+ */
+@@ -1738,6 +1742,7 @@ static void silent_stream_enable(struct hda_codec *codec,
+
+ switch (spec->silent_stream_type) {
+ case SILENT_STREAM_KAE:
++ silent_stream_enable_i915(codec, per_pin);
+ silent_stream_set_kae(codec, per_pin, true);
+ break;
+ case SILENT_STREAM_I915:
+@@ -1975,6 +1980,7 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
+ static const struct snd_pci_quirk force_connect_list[] = {
+ SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
+ SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
++ SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
+ SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
+ SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
+ {}
+@@ -2279,8 +2285,8 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
+ struct hdmi_spec *spec = codec->spec;
+ int idx, pcm_num;
+
+- /* limit the PCM devices to the codec converters */
+- pcm_num = spec->num_cvts;
++ /* limit the PCM devices to the codec converters or available PINs */
++ pcm_num = min(spec->num_cvts, spec->num_pins);
+ codec_dbg(codec, "hdmi: pcm_num set to %d\n", pcm_num);
+
+ for (idx = 0; idx < pcm_num; idx++) {
+@@ -2377,6 +2383,11 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
+ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+ struct hdmi_eld *pin_eld = &per_pin->sink_eld;
+
++ if (spec->static_pcm_mapping) {
++ hdmi_attach_hda_pcm(spec, per_pin);
++ hdmi_pcm_setup_pin(spec, per_pin);
++ }
++
+ pin_eld->eld_valid = false;
+ hdmi_present_sense(per_pin, 0);
+ }
+@@ -2878,9 +2889,33 @@ static int i915_hsw_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
+ hda_nid_t pin_nid, int dev_id, u32 stream_tag,
+ int format)
+ {
++ struct hdmi_spec *spec = codec->spec;
++ int pin_idx = pin_id_to_pin_index(codec, pin_nid, dev_id);
++ struct hdmi_spec_per_pin *per_pin;
++ int res;
++
++ if (pin_idx < 0)
++ per_pin = NULL;
++ else
++ per_pin = get_pin(spec, pin_idx);
++
+ haswell_verify_D0(codec, cvt_nid, pin_nid);
+- return hdmi_setup_stream(codec, cvt_nid, pin_nid, dev_id,
+- stream_tag, format);
++
++ if (spec->silent_stream_type == SILENT_STREAM_KAE && per_pin && per_pin->silent_stream) {
++ silent_stream_set_kae(codec, per_pin, false);
++ /* wait for pending transfers in codec to clear */
++ usleep_range(100, 200);
++ }
++
++ res = hdmi_setup_stream(codec, cvt_nid, pin_nid, dev_id,
++ stream_tag, format);
++
++ if (spec->silent_stream_type == SILENT_STREAM_KAE && per_pin && per_pin->silent_stream) {
++ usleep_range(100, 200);
++ silent_stream_set_kae(codec, per_pin, true);
++ }
++
++ return res;
+ }
+
+ /* pin_cvt_fixup ops override for HSW+ and VLV+ */
+@@ -2900,6 +2935,88 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
+ }
+ }
+
++#ifdef CONFIG_PM
++static int i915_adlp_hdmi_suspend(struct hda_codec *codec)
++{
++ struct hdmi_spec *spec = codec->spec;
++ bool silent_streams = false;
++ int pin_idx, res;
++
++ res = generic_hdmi_suspend(codec);
++
++ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
++ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
++
++ if (per_pin->silent_stream) {
++ silent_streams = true;
++ break;
++ }
++ }
++
++ if (silent_streams && spec->silent_stream_type == SILENT_STREAM_KAE) {
++ /*
++ * stream-id should remain programmed when codec goes
++ * to runtime suspend
++ */
++ codec->no_stream_clean_at_suspend = 1;
++
++ /*
++ * the system might go to S3, in which case keep-alive
++ * must be reprogrammed upon resume
++ */
++ codec->forced_resume = 1;
++
++ codec_dbg(codec, "HDMI: KAE active at suspend\n");
++ } else {
++ codec->no_stream_clean_at_suspend = 0;
++ codec->forced_resume = 0;
++ }
++
++ return res;
++}
++
++static int i915_adlp_hdmi_resume(struct hda_codec *codec)
++{
++ struct hdmi_spec *spec = codec->spec;
++ int pin_idx, res;
++
++ res = generic_hdmi_resume(codec);
++
++ /* KAE not programmed at suspend, nothing to do here */
++ if (!codec->no_stream_clean_at_suspend)
++ return res;
++
++ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
++ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
++
++ /*
++ * If system was in suspend with monitor connected,
++ * the codec setting may have been lost. Re-enable
++ * keep-alive.
++ */
++ if (per_pin->silent_stream) {
++ unsigned int param;
++
++ param = snd_hda_codec_read(codec, per_pin->cvt_nid, 0,
++ AC_VERB_GET_CONV, 0);
++ if (!param) {
++ codec_dbg(codec, "HDMI: KAE: restore stream id\n");
++ silent_stream_enable_i915(codec, per_pin);
++ }
++
++ param = snd_hda_codec_read(codec, per_pin->cvt_nid, 0,
++ AC_VERB_GET_DIGI_CONVERT_1, 0);
++ if (!(param & (AC_DIG3_KAE << 16))) {
++ codec_dbg(codec, "HDMI: KAE: restore DIG3_KAE\n");
++ silent_stream_set_kae(codec, per_pin, true);
++ }
++ }
++ }
++
++ return res;
++}
++#endif
++
+ /* precondition and allocation for Intel codecs */
+ static int alloc_intel_hdmi(struct hda_codec *codec)
+ {
+@@ -3030,8 +3147,14 @@ static int patch_i915_adlp_hdmi(struct hda_codec *codec)
+ if (!res) {
+ spec = codec->spec;
+
+- if (spec->silent_stream_type)
++ if (spec->silent_stream_type) {
+ spec->silent_stream_type = SILENT_STREAM_KAE;
++
++#ifdef CONFIG_PM
++ codec->patch_ops.resume = i915_adlp_hdmi_resume;
++ codec->patch_ops.suspend = i915_adlp_hdmi_suspend;
++#endif
++ }
+ }
+
+ return res;
+@@ -4305,6 +4428,8 @@ static int patch_atihdmi(struct hda_codec *codec)
+
+ spec = codec->spec;
+
++ spec->static_pcm_mapping = true;
++
+ spec->ops.pin_get_eld = atihdmi_pin_get_eld;
+ spec->ops.pin_setup_infoframe = atihdmi_pin_setup_infoframe;
+ spec->ops.pin_hbr_setup = atihdmi_pin_hbr_setup;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e5c0363856664..f5f640851fdcb 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9354,6 +9354,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -10960,6 +10962,17 @@ static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
+ }
+ }
+
++static void alc897_fixup_lenovo_headset_mode(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ struct alc_spec *spec = codec->spec;
++
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
++ spec->gen.hp_automute_hook = alc897_hp_automute_hook;
++ }
++}
++
+ static const struct coef_fw alc668_coefs[] = {
+ WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0),
+ WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80),
+@@ -11043,6 +11056,8 @@ enum {
+ ALC897_FIXUP_LENOVO_HEADSET_MIC,
+ ALC897_FIXUP_HEADSET_MIC_PIN,
+ ALC897_FIXUP_HP_HSMIC_VERB,
++ ALC897_FIXUP_LENOVO_HEADSET_MODE,
++ ALC897_FIXUP_HEADSET_MIC_PIN2,
+ };
+
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -11469,6 +11484,19 @@ static const struct hda_fixup alc662_fixups[] = {
+ { }
+ },
+ },
++ [ALC897_FIXUP_LENOVO_HEADSET_MODE] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc897_fixup_lenovo_headset_mode,
++ },
++ [ALC897_FIXUP_HEADSET_MIC_PIN2] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x1a, 0x01a11140 }, /* use as headset mic, without its own jack detect */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE
++ },
+ };
+
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -11521,6 +11549,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
++ SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2),
+ SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
+diff --git a/sound/soc/amd/acp/acp-platform.c b/sound/soc/amd/acp/acp-platform.c
+index 85a81add4ef9f..447612a7a7627 100644
+--- a/sound/soc/amd/acp/acp-platform.c
++++ b/sound/soc/amd/acp/acp-platform.c
+@@ -184,10 +184,6 @@ static int acp_dma_open(struct snd_soc_component *component, struct snd_pcm_subs
+
+ stream->substream = substream;
+
+- spin_lock_irq(&adata->acp_lock);
+- list_add_tail(&stream->list, &adata->stream_list);
+- spin_unlock_irq(&adata->acp_lock);
+-
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ runtime->hw = acp_pcm_hardware_playback;
+ else
+@@ -203,6 +199,10 @@ static int acp_dma_open(struct snd_soc_component *component, struct snd_pcm_subs
+
+ writel(1, ACP_EXTERNAL_INTR_ENB(adata));
+
++ spin_lock_irq(&adata->acp_lock);
++ list_add_tail(&stream->list, &adata->stream_list);
++ spin_unlock_irq(&adata->acp_lock);
++
+ return ret;
+ }
+
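
The reordering above follows a common driver pattern: a stream is added to the shared list only after every field the interrupt path reads has been set up, so the IRQ handler can never observe a half-initialised entry. The sketch below is a minimal userspace analogue of that "initialise fully, then publish" rule using pthreads; the names (published, irq_like_worker) are invented for illustration and are not the driver's API.

/* Minimal pthread analogue (invented names) of the reordering above: a record
 * becomes visible to the interrupt/worker path only after every field it will
 * read has been initialised, so the handler never sees a half-built stream. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct stream { int rate; int channels; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct stream *published;	/* stands in for the driver's stream list */

static void *irq_like_worker(void *arg)
{
	pthread_mutex_lock(&list_lock);
	if (published)		/* sees either nothing or a complete stream */
		printf("worker: %d Hz, %d ch\n", published->rate, published->channels);
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct stream *s = calloc(1, sizeof(*s));

	if (!s)
		return 1;

	/* finish all initialisation first ... */
	s->rate = 48000;
	s->channels = 2;

	/* ... and only then publish the stream where the worker can find it */
	pthread_mutex_lock(&list_lock);
	published = s;
	pthread_mutex_unlock(&list_lock);

	pthread_create(&t, NULL, irq_like_worker, NULL);
	pthread_join(&t, NULL);
	free(s);
	return 0;
}
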
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index d9715bea965e1..1f0b5527c5949 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -213,6 +213,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "TIMI"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 14 2022"),
++ }
++ },
+ {}
+ };
+
+diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
+index 767463e82665c..89059a673cf09 100644
+--- a/sound/soc/codecs/pcm512x.c
++++ b/sound/soc/codecs/pcm512x.c
+@@ -1634,7 +1634,7 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
+ if (val > 6) {
+ dev_err(dev, "Invalid pll-in\n");
+ ret = -EINVAL;
+- goto err_clk;
++ goto err_pm;
+ }
+ pcm512x->pll_in = val;
+ }
+@@ -1643,7 +1643,7 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
+ if (val > 6) {
+ dev_err(dev, "Invalid pll-out\n");
+ ret = -EINVAL;
+- goto err_clk;
++ goto err_pm;
+ }
+ pcm512x->pll_out = val;
+ }
+@@ -1652,12 +1652,12 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
+ dev_err(dev,
+ "Error: both pll-in and pll-out, or none\n");
+ ret = -EINVAL;
+- goto err_clk;
++ goto err_pm;
+ }
+ if (pcm512x->pll_in && pcm512x->pll_in == pcm512x->pll_out) {
+ dev_err(dev, "Error: pll-in == pll-out\n");
+ ret = -EINVAL;
+- goto err_clk;
++ goto err_pm;
+ }
+ }
+ #endif
+diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
+index a2ce52dafea84..cea26f3a02b6a 100644
+--- a/sound/soc/codecs/rt298.c
++++ b/sound/soc/codecs/rt298.c
+@@ -1166,6 +1166,13 @@ static const struct dmi_system_id force_combo_jack_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Geminilake")
+ }
+ },
++ {
++ .ident = "Intel Kabylake R RVP",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
++ }
++ },
+ { }
+ };
+
+diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
+index ebac6caeb40ad..a230f441559a6 100644
+--- a/sound/soc/codecs/rt5670.c
++++ b/sound/soc/codecs/rt5670.c
+@@ -3311,8 +3311,6 @@ static int rt5670_i2c_probe(struct i2c_client *i2c)
+ if (ret < 0)
+ goto err;
+
+- pm_runtime_put(&i2c->dev);
+-
+ return 0;
+ err:
+ pm_runtime_disable(&i2c->dev);
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index d3cfd3788f2ab..8fe9a75d12357 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -3853,7 +3853,12 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
+ } else {
+ dev_dbg(component->dev, "Jack not detected\n");
+
++ /* Release wm8994->accdet_lock to avoid deadlock:
++ * cancel_delayed_work_sync() takes wm8994->mic_work internal
++ * lock and wm1811_mic_work takes wm8994->accdet_lock */
++ mutex_unlock(&wm8994->accdet_lock);
+ cancel_delayed_work_sync(&wm8994->mic_work);
++ mutex_lock(&wm8994->accdet_lock);
+
+ snd_soc_component_update_bits(component, WM8958_MICBIAS2,
+ WM8958_MICB2_DISCH, WM8958_MICB2_DISCH);
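
The comment added above describes a classic AB-BA deadlock: the jack-detect path holds accdet_lock and wants to cancel mic_work synchronously, while mic_work itself takes accdet_lock. Dropping the lock around cancel_delayed_work_sync() breaks the cycle. Below is a minimal userspace analogue of that lock-drop pattern with pthreads (all names invented, not kernel code); build with cc demo.c -lpthread.

/* Thread A holds `lock` and wants to wait for a worker to finish, but the
 * worker itself takes `lock`.  Waiting while still holding the lock would
 * deadlock, so the waiter drops it around the join and re-takes it after. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
	pthread_mutex_lock(&lock);	/* like wm1811_mic_work taking accdet_lock */
	puts("worker: did its job under the lock");
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&lock);
	pthread_create(&t, NULL, worker, NULL);

	/* like cancel_delayed_work_sync(): wait for the worker to finish,
	 * but only after releasing the lock the worker needs */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	pthread_mutex_lock(&lock);

	puts("main: state consistent again under the lock");
	pthread_mutex_unlock(&lock);
	return 0;
}
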
+diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
+index c7b10bbfba7ea..0ddb6362fcc52 100644
+--- a/sound/soc/codecs/wsa883x.c
++++ b/sound/soc/codecs/wsa883x.c
+@@ -7,7 +7,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+-#include <linux/gpio.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -1392,7 +1392,7 @@ static int wsa883x_probe(struct sdw_slave *pdev,
+ }
+
+ wsa883x->sd_n = devm_gpiod_get_optional(&pdev->dev, "powerdown",
+- GPIOD_FLAGS_BIT_NONEXCLUSIVE);
++ GPIOD_FLAGS_BIT_NONEXCLUSIVE | GPIOD_OUT_HIGH);
+ if (IS_ERR(wsa883x->sd_n)) {
+ dev_err(&pdev->dev, "Shutdown Control GPIO not found\n");
+ ret = PTR_ERR(wsa883x->sd_n);
+@@ -1411,7 +1411,7 @@ static int wsa883x_probe(struct sdw_slave *pdev,
+ pdev->prop.simple_clk_stop_capable = true;
+ pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop;
+ pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
+- gpiod_direction_output(wsa883x->sd_n, 1);
++ gpiod_direction_output(wsa883x->sd_n, 0);
+
+ wsa883x->regmap = devm_regmap_init_sdw(pdev, &wsa883x_regmap_config);
+ if (IS_ERR(wsa883x->regmap)) {
+diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
+index fe7cf972d44ce..5daa824a4ffcf 100644
+--- a/sound/soc/generic/audio-graph-card.c
++++ b/sound/soc/generic/audio-graph-card.c
+@@ -485,8 +485,10 @@ static int __graph_for_each_link(struct asoc_simple_priv *priv,
+ of_node_put(codec_ep);
+ of_node_put(codec_port);
+
+- if (ret < 0)
++ if (ret < 0) {
++ of_node_put(cpu_ep);
+ return ret;
++ }
+
+ codec_port_old = codec_port;
+ }
+diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
+index d2ca710ac3fa4..ac799de4f7fda 100644
+--- a/sound/soc/intel/Kconfig
++++ b/sound/soc/intel/Kconfig
+@@ -177,7 +177,7 @@ config SND_SOC_INTEL_SKYLAKE_COMMON
+ select SND_HDA_DSP_LOADER
+ select SND_SOC_TOPOLOGY
+ select SND_SOC_INTEL_SST
+- select SND_SOC_HDAC_HDA if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
++ select SND_SOC_HDAC_HDA
+ select SND_SOC_ACPI_INTEL_MATCH
+ select SND_INTEL_DSP_CONFIG
+ help
+diff --git a/sound/soc/intel/avs/boards/rt298.c b/sound/soc/intel/avs/boards/rt298.c
+index b28d36872dcba..58c9d9edecf0a 100644
+--- a/sound/soc/intel/avs/boards/rt298.c
++++ b/sound/soc/intel/avs/boards/rt298.c
+@@ -6,6 +6,7 @@
+ // Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+ //
+
++#include <linux/dmi.h>
+ #include <linux/module.h>
+ #include <sound/jack.h>
+ #include <sound/pcm.h>
+@@ -14,6 +15,16 @@
+ #include <sound/soc-acpi.h>
+ #include "../../../codecs/rt298.h"
+
++static const struct dmi_system_id kblr_dmi_table[] = {
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_BOARD_NAME, "Kabylake R DDR4 RVP"),
++ },
++ },
++ {}
++};
++
+ static const struct snd_kcontrol_new card_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Mic Jack"),
+@@ -96,9 +107,15 @@ avs_rt298_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_param
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ unsigned int clk_freq;
+ int ret;
+
+- ret = snd_soc_dai_set_sysclk(codec_dai, RT298_SCLK_S_PLL, 19200000, SND_SOC_CLOCK_IN);
++ if (dmi_first_match(kblr_dmi_table))
++ clk_freq = 24000000;
++ else
++ clk_freq = 19200000;
++
++ ret = snd_soc_dai_set_sysclk(codec_dai, RT298_SCLK_S_PLL, clk_freq, SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_err(rtd->dev, "Set codec sysclk failed: %d\n", ret);
+
+@@ -139,7 +156,10 @@ static int avs_create_dai_link(struct device *dev, const char *platform_name, in
+ dl->platforms = platform;
+ dl->num_platforms = 1;
+ dl->id = 0;
+- dl->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
++ if (dmi_first_match(kblr_dmi_table))
++ dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
++ else
++ dl->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
+ dl->init = avs_rt298_codec_init;
+ dl->be_hw_params_fixup = avs_rt298_be_fixup;
+ dl->ops = &avs_rt298_ops;
+diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c
+index bb0719c58ca49..4f93639ce4887 100644
+--- a/sound/soc/intel/avs/core.c
++++ b/sound/soc/intel/avs/core.c
+@@ -440,7 +440,7 @@ static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ if (bus->mlcap)
+ snd_hdac_ext_bus_get_ml_capabilities(bus);
+
+- if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
++ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+diff --git a/sound/soc/intel/avs/ipc.c b/sound/soc/intel/avs/ipc.c
+index 020d85c7520de..306f0dc4eaf58 100644
+--- a/sound/soc/intel/avs/ipc.c
++++ b/sound/soc/intel/avs/ipc.c
+@@ -123,7 +123,10 @@ static void avs_dsp_recovery(struct avs_dev *adev)
+ if (!substream || !substream->runtime)
+ continue;
+
++ /* No need for _irq() as we are in nonatomic context. */
++ snd_pcm_stream_lock(substream);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
++ snd_pcm_stream_unlock(substream);
+ }
+ }
+ }
+@@ -192,7 +195,8 @@ static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header)
+ /* update size in case of LARGE_CONFIG_GET */
+ if (msg.msg_target == AVS_MOD_MSG &&
+ msg.global_msg_type == AVS_MOD_LARGE_CONFIG_GET)
+- ipc->rx.size = msg.ext.large_config.data_off_size;
++ ipc->rx.size = min_t(u32, AVS_MAILBOX_SIZE,
++ msg.ext.large_config.data_off_size);
+
+ memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size);
+ trace_avs_msg_payload(ipc->rx.data, ipc->rx.size);
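
The min_t() added above caps a length reported by the DSP firmware at AVS_MAILBOX_SIZE before memcpy_fromio(), so a bogus reply can no longer overrun the receive buffer. The userspace sketch below shows the same clamp-before-copy idea; MAILBOX_SIZE, copy_reply() and the sample length are assumptions made up for the example, not the driver's API.

/* Clamp a length reported by external hardware/firmware to the real size of
 * the destination buffer before copying, as the min_t() above does. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAILBOX_SIZE 4096u

static uint8_t mailbox[MAILBOX_SIZE];

static size_t copy_reply(uint8_t *dst, size_t dst_size, uint32_t reported_len)
{
	/* never trust reported_len: a buggy device could claim more bytes
	 * than either the mailbox or the reply buffer can hold */
	size_t n = reported_len;

	if (n > MAILBOX_SIZE)
		n = MAILBOX_SIZE;
	if (n > dst_size)
		n = dst_size;

	memcpy(dst, mailbox, n);
	return n;
}

int main(void)
{
	uint8_t reply[256];
	size_t n = copy_reply(reply, sizeof(reply), 100000); /* bogus length */

	printf("copied %zu bytes (capped at %zu)\n", n, sizeof(reply));
	return 0;
}
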
+diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c
+index 70713e4b07dc1..773e5d1d87d46 100644
+--- a/sound/soc/intel/boards/sof_es8336.c
++++ b/sound/soc/intel/boards/sof_es8336.c
+@@ -783,7 +783,7 @@ static int sof_es8336_remove(struct platform_device *pdev)
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
+
+- cancel_delayed_work(&priv->pcm_pop_work);
++ cancel_delayed_work_sync(&priv->pcm_pop_work);
+ gpiod_put(priv->gpio_speakers);
+ device_remove_software_node(priv->codec_dev);
+ put_device(priv->codec_dev);
+diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
+index 3312b57e3c0cb..7f058acd221f0 100644
+--- a/sound/soc/intel/skylake/skl.c
++++ b/sound/soc/intel/skylake/skl.c
+@@ -1116,7 +1116,10 @@ static void skl_shutdown(struct pci_dev *pci)
+ if (!skl->init_done)
+ return;
+
+- snd_hdac_stop_streams_and_chip(bus);
++ snd_hdac_stop_streams(bus);
++ snd_hdac_ext_bus_link_power_down_all(bus);
++ skl_dsp_sleep(skl->dsp);
++
+ list_for_each_entry(s, &bus->stream_list, list) {
+ stream = stream_to_hdac_ext_stream(s);
+ snd_hdac_ext_stream_decouple(bus, stream, false);
+diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c
+index d884bb7c0fc74..1c28b41e43112 100644
+--- a/sound/soc/mediatek/common/mtk-btcvsd.c
++++ b/sound/soc/mediatek/common/mtk-btcvsd.c
+@@ -1038,11 +1038,9 @@ static int mtk_pcm_btcvsd_copy(struct snd_soc_component *component,
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+- mtk_btcvsd_snd_write(bt, buf, count);
++ return mtk_btcvsd_snd_write(bt, buf, count);
+ else
+- mtk_btcvsd_snd_read(bt, buf, count);
+-
+- return 0;
++ return mtk_btcvsd_snd_read(bt, buf, count);
+ }
+
+ /* kcontrol */
+diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+index dcaeeeb8aac70..bc155dd937e0b 100644
+--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
++++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+@@ -1070,16 +1070,6 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
+
+ afe->dev = &pdev->dev;
+
+- irq_id = platform_get_irq(pdev, 0);
+- if (irq_id <= 0)
+- return irq_id < 0 ? irq_id : -ENXIO;
+- ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
+- 0, "Afe_ISR_Handle", (void *)afe);
+- if (ret) {
+- dev_err(afe->dev, "could not request_irq\n");
+- return ret;
+- }
+-
+ afe->base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(afe->base_addr))
+ return PTR_ERR(afe->base_addr);
+@@ -1185,6 +1175,16 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
+ if (ret)
+ goto err_cleanup_components;
+
++ irq_id = platform_get_irq(pdev, 0);
++ if (irq_id <= 0)
++ return irq_id < 0 ? irq_id : -ENXIO;
++ ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
++ 0, "Afe_ISR_Handle", (void *)afe);
++ if (ret) {
++ dev_err(afe->dev, "could not request_irq\n");
++ goto err_pm_disable;
++ }
++
+ dev_info(&pdev->dev, "MT8173 AFE driver initialized.\n");
+ return 0;
+
+diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+index 12f40c81b101e..f803f121659de 100644
+--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
++++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+@@ -200,14 +200,16 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
+ if (!mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out;
+ }
+ mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node =
+ of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1);
+ if (!mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out;
+ }
+ mt8173_rt5650_rt5514_codec_conf[0].dlc.of_node =
+ mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node;
+@@ -216,6 +218,7 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+
++out:
+ of_node_put(platform_node);
+ return ret;
+ }
+diff --git a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
+index a860852236779..48c14be5e3db7 100644
+--- a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
++++ b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
+@@ -677,8 +677,10 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
+ }
+
+ card = (struct snd_soc_card *)of_device_get_match_data(&pdev->dev);
+- if (!card)
++ if (!card) {
++ of_node_put(platform_node);
+ return -EINVAL;
++ }
+ card->dev = &pdev->dev;
+
+ ec_codec = of_parse_phandle(pdev->dev.of_node, "mediatek,ec-codec", 0);
+@@ -767,8 +769,10 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+- if (!priv)
+- return -ENOMEM;
++ if (!priv) {
++ ret = -ENOMEM;
++ goto out;
++ }
+
+ snd_soc_card_set_drvdata(card, priv);
+
+@@ -776,7 +780,8 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
+ if (IS_ERR(priv->pinctrl)) {
+ dev_err(&pdev->dev, "%s devm_pinctrl_get failed\n",
+ __func__);
+- return PTR_ERR(priv->pinctrl);
++ ret = PTR_ERR(priv->pinctrl);
++ goto out;
+ }
+
+ for (i = 0; i < PIN_STATE_MAX; i++) {
+@@ -809,6 +814,7 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+
++out:
+ of_node_put(platform_node);
+ of_node_put(ec_codec);
+ of_node_put(hdmi_codec);
+diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-da7219-max98357.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-da7219-max98357.c
+index cfca6bdee8345..90ec0d0a83927 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-mt6366-da7219-max98357.c
++++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-da7219-max98357.c
+@@ -192,7 +192,7 @@ static int mt8186_mt6366_da7219_max98357_hdmi_init(struct snd_soc_pcm_runtime *r
+ struct mt8186_mt6366_da7219_max98357_priv *priv = soc_card_data->mach_priv;
+ int ret;
+
+- ret = mt8186_dai_i2s_set_share(afe, "I2S3", "I2S2");
++ ret = mt8186_dai_i2s_set_share(afe, "I2S2", "I2S3");
+ if (ret) {
+ dev_err(rtd->dev, "Failed to set up shared clocks\n");
+ return ret;
+diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+index 2414c5b77233c..60fa55d0c91f0 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
++++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+@@ -168,7 +168,7 @@ static int mt8186_mt6366_rt1019_rt5682s_hdmi_init(struct snd_soc_pcm_runtime *rt
+ struct mt8186_mt6366_rt1019_rt5682s_priv *priv = soc_card_data->mach_priv;
+ int ret;
+
+- ret = mt8186_dai_i2s_set_share(afe, "I2S3", "I2S2");
++ ret = mt8186_dai_i2s_set_share(afe, "I2S2", "I2S3");
+ if (ret) {
+ dev_err(rtd->dev, "Failed to set up shared clocks\n");
+ return ret;
+diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
+index 5d520e18e512f..99b245e3079a2 100644
+--- a/sound/soc/pxa/mmp-pcm.c
++++ b/sound/soc/pxa/mmp-pcm.c
+@@ -98,7 +98,7 @@ static bool filter(struct dma_chan *chan, void *param)
+
+ devname = kasprintf(GFP_KERNEL, "%s.%d", dma_data->dma_res->name,
+ dma_data->ssp_id);
+- if ((strcmp(dev_name(chan->device->dev), devname) == 0) &&
++ if (devname && (strcmp(dev_name(chan->device->dev), devname) == 0) &&
+ (chan->chan_id == dma_data->dma_res->start)) {
+ found = true;
+ }
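
kasprintf() returns NULL on allocation failure, so the filter above now checks devname before handing it to strcmp(). A rough userspace equivalent using asprintf() (names and device strings invented for the example) looks like this:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int channel_matches(const char *chan_dev, const char *base, int id)
{
	char *devname = NULL;
	int match;

	if (asprintf(&devname, "%s.%d", base, id) < 0)
		return 0;	/* allocation failed: simply report no match */

	match = devname && strcmp(chan_dev, devname) == 0;
	free(devname);
	return match;
}

int main(void)
{
	printf("%d\n", channel_matches("mmp-sspa-dai.0", "mmp-sspa-dai", 0));
	printf("%d\n", channel_matches("mmp-sspa-dai.1", "mmp-sspa-dai", 0));
	return 0;
}
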
+diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
+index 8c7398bc1ca89..96a6d4731e6fd 100644
+--- a/sound/soc/qcom/Kconfig
++++ b/sound/soc/qcom/Kconfig
+@@ -2,6 +2,7 @@
+ menuconfig SND_SOC_QCOM
+ tristate "ASoC support for QCOM platforms"
+ depends on ARCH_QCOM || COMPILE_TEST
++ imply SND_SOC_QCOM_COMMON
+ help
+ Say Y or M if you want to add support to use audio devices
+ in Qualcomm Technologies SOC-based platforms.
+@@ -59,13 +60,14 @@ config SND_SOC_STORM
+ config SND_SOC_APQ8016_SBC
+ tristate "SoC Audio support for APQ8016 SBC platforms"
+ select SND_SOC_LPASS_APQ8016
+- select SND_SOC_QCOM_COMMON
++ depends on SND_SOC_QCOM_COMMON
+ help
+ Support for Qualcomm Technologies LPASS audio block in
+ APQ8016 SOC-based systems.
+ Say Y if you want to use audio devices on MI2S.
+
+ config SND_SOC_QCOM_COMMON
++ depends on SOUNDWIRE
+ tristate
+
+ config SND_SOC_QDSP6_COMMON
+@@ -142,7 +144,7 @@ config SND_SOC_MSM8996
+ depends on QCOM_APR
+ depends on COMMON_CLK
+ select SND_SOC_QDSP6
+- select SND_SOC_QCOM_COMMON
++ depends on SND_SOC_QCOM_COMMON
+ help
+ Support for Qualcomm Technologies LPASS audio block in
+ APQ8096 SoC-based systems.
+@@ -153,7 +155,7 @@ config SND_SOC_SDM845
+ depends on QCOM_APR && I2C && SOUNDWIRE
+ depends on COMMON_CLK
+ select SND_SOC_QDSP6
+- select SND_SOC_QCOM_COMMON
++ depends on SND_SOC_QCOM_COMMON
+ select SND_SOC_RT5663
+ select SND_SOC_MAX98927
+ imply SND_SOC_CROS_EC_CODEC
+@@ -167,7 +169,7 @@ config SND_SOC_SM8250
+ depends on QCOM_APR && SOUNDWIRE
+ depends on COMMON_CLK
+ select SND_SOC_QDSP6
+- select SND_SOC_QCOM_COMMON
++ depends on SND_SOC_QCOM_COMMON
+ help
+ To add support for audio on Qualcomm Technologies Inc.
+ SM8250 SoC-based systems.
+@@ -178,7 +180,7 @@ config SND_SOC_SC8280XP
+ depends on QCOM_APR && SOUNDWIRE
+ depends on COMMON_CLK
+ select SND_SOC_QDSP6
+- select SND_SOC_QCOM_COMMON
++ depends on SND_SOC_QCOM_COMMON
+ help
+ To add support for audio on Qualcomm Technologies Inc.
+ SC8280XP SoC-based systems.
+@@ -188,7 +190,7 @@ config SND_SOC_SC7180
+ tristate "SoC Machine driver for SC7180 boards"
+ depends on I2C && GPIOLIB
+ depends on SOUNDWIRE || SOUNDWIRE=n
+- select SND_SOC_QCOM_COMMON
++ depends on SND_SOC_QCOM_COMMON
+ select SND_SOC_LPASS_SC7180
+ select SND_SOC_MAX98357A
+ select SND_SOC_RT5682_I2C
+@@ -202,7 +204,7 @@ config SND_SOC_SC7180
+ config SND_SOC_SC7280
+ tristate "SoC Machine driver for SC7280 boards"
+ depends on I2C && SOUNDWIRE
+- select SND_SOC_QCOM_COMMON
++ depends on SND_SOC_QCOM_COMMON
+ select SND_SOC_LPASS_SC7280
+ select SND_SOC_MAX98357A
+ select SND_SOC_WCD938X_SDW
+diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
+index 69dd3b504e209..49c74c1662a3f 100644
+--- a/sound/soc/qcom/common.c
++++ b/sound/soc/qcom/common.c
+@@ -180,7 +180,6 @@ err_put_np:
+ }
+ EXPORT_SYMBOL_GPL(qcom_snd_parse_of);
+
+-#if IS_ENABLED(CONFIG_SOUNDWIRE)
+ int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+ struct sdw_stream_runtime *sruntime,
+ bool *stream_prepared)
+@@ -294,7 +293,6 @@ int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free);
+-#endif
+
+ int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_jack *jack, bool *jack_setup)
+diff --git a/sound/soc/qcom/common.h b/sound/soc/qcom/common.h
+index c5472a642de08..3ef5bb6d12df7 100644
+--- a/sound/soc/qcom/common.h
++++ b/sound/soc/qcom/common.h
+@@ -11,7 +11,6 @@ int qcom_snd_parse_of(struct snd_soc_card *card);
+ int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_jack *jack, bool *jack_setup);
+
+-#if IS_ENABLED(CONFIG_SOUNDWIRE)
+ int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+ struct sdw_stream_runtime *runtime,
+ bool *stream_prepared);
+@@ -21,26 +20,4 @@ int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+ int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+ struct sdw_stream_runtime *sruntime,
+ bool *stream_prepared);
+-#else
+-static inline int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+- struct sdw_stream_runtime *runtime,
+- bool *stream_prepared)
+-{
+- return -ENOTSUPP;
+-}
+-
+-static inline int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+- struct snd_pcm_hw_params *params,
+- struct sdw_stream_runtime **psruntime)
+-{
+- return -ENOTSUPP;
+-}
+-
+-static inline int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+- struct sdw_stream_runtime *sruntime,
+- bool *stream_prepared)
+-{
+- return -ENOTSUPP;
+-}
+-#endif
+ #endif
+diff --git a/sound/soc/qcom/lpass-sc7180.c b/sound/soc/qcom/lpass-sc7180.c
+index 77a556b27cf09..24a1c121cb2e9 100644
+--- a/sound/soc/qcom/lpass-sc7180.c
++++ b/sound/soc/qcom/lpass-sc7180.c
+@@ -131,6 +131,9 @@ static int sc7180_lpass_init(struct platform_device *pdev)
+
+ drvdata->clks = devm_kcalloc(dev, variant->num_clks,
+ sizeof(*drvdata->clks), GFP_KERNEL);
++ if (!drvdata->clks)
++ return -ENOMEM;
++
+ drvdata->num_clks = variant->num_clks;
+
+ for (i = 0; i < drvdata->num_clks; i++)
+diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
+index a7549f8272359..5b1e47bdc376b 100644
+--- a/sound/soc/rockchip/rockchip_pdm.c
++++ b/sound/soc/rockchip/rockchip_pdm.c
+@@ -431,6 +431,7 @@ static int rockchip_pdm_runtime_resume(struct device *dev)
+
+ ret = clk_prepare_enable(pdm->hclk);
+ if (ret) {
++ clk_disable_unprepare(pdm->clk);
+ dev_err(pdm->dev, "hclock enable failed %d\n", ret);
+ return ret;
+ }
+diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
+index 8bef572d3cbc1..5b4f004575879 100644
+--- a/sound/soc/rockchip/rockchip_spdif.c
++++ b/sound/soc/rockchip/rockchip_spdif.c
+@@ -88,6 +88,7 @@ static int __maybe_unused rk_spdif_runtime_resume(struct device *dev)
+
+ ret = clk_prepare_enable(spdif->hclk);
+ if (ret) {
++ clk_disable_unprepare(spdif->mclk);
+ dev_err(spdif->dev, "hclk clock enable failed %d\n", ret);
+ return ret;
+ }
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 310cd6fb0038a..4aaf0784940b5 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -1673,6 +1673,13 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep, bool keep_pending)
+ stop_urbs(ep, false, keep_pending);
+ if (ep->clock_ref)
+ atomic_dec(&ep->clock_ref->locked);
++
++ if (ep->chip->quirk_flags & QUIRK_FLAG_FORCE_IFACE_RESET &&
++ usb_pipeout(ep->pipe)) {
++ ep->need_prepare = true;
++ if (ep->iface_ref)
++ ep->iface_ref->need_setup = true;
++ }
+ }
+ }
+
+diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
+index 59faa5a9a7141..b67617b68e509 100644
+--- a/sound/usb/line6/driver.c
++++ b/sound/usb/line6/driver.c
+@@ -304,7 +304,8 @@ static void line6_data_received(struct urb *urb)
+ for (;;) {
+ done =
+ line6_midibuf_read(mb, line6->buffer_message,
+- LINE6_MIDI_MESSAGE_MAXLEN);
++ LINE6_MIDI_MESSAGE_MAXLEN,
++ LINE6_MIDIBUF_READ_RX);
+
+ if (done <= 0)
+ break;
+diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
+index ba0e2b7e8fe19..0838632c788e4 100644
+--- a/sound/usb/line6/midi.c
++++ b/sound/usb/line6/midi.c
+@@ -44,7 +44,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
+ int req, done;
+
+ for (;;) {
+- req = min(line6_midibuf_bytes_free(mb), line6->max_packet_size);
++ req = min3(line6_midibuf_bytes_free(mb), line6->max_packet_size,
++ LINE6_FALLBACK_MAXPACKETSIZE);
+ done = snd_rawmidi_transmit_peek(substream, chunk, req);
+
+ if (done == 0)
+@@ -56,7 +57,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
+
+ for (;;) {
+ done = line6_midibuf_read(mb, chunk,
+- LINE6_FALLBACK_MAXPACKETSIZE);
++ LINE6_FALLBACK_MAXPACKETSIZE,
++ LINE6_MIDIBUF_READ_TX);
+
+ if (done == 0)
+ break;
+diff --git a/sound/usb/line6/midibuf.c b/sound/usb/line6/midibuf.c
+index 6a70463f82c4e..e7f830f7526c9 100644
+--- a/sound/usb/line6/midibuf.c
++++ b/sound/usb/line6/midibuf.c
+@@ -9,6 +9,7 @@
+
+ #include "midibuf.h"
+
++
+ static int midibuf_message_length(unsigned char code)
+ {
+ int message_length;
+@@ -20,12 +21,7 @@ static int midibuf_message_length(unsigned char code)
+
+ message_length = length[(code >> 4) - 8];
+ } else {
+- /*
+- Note that according to the MIDI specification 0xf2 is
+- the "Song Position Pointer", but this is used by Line 6
+- to send sysex messages to the host.
+- */
+- static const int length[] = { -1, 2, -1, 2, -1, -1, 1, 1, 1, 1,
++ static const int length[] = { -1, 2, 2, 2, -1, -1, 1, 1, 1, -1,
+ 1, 1, 1, -1, 1, 1
+ };
+ message_length = length[code & 0x0f];
+@@ -125,7 +121,7 @@ int line6_midibuf_write(struct midi_buffer *this, unsigned char *data,
+ }
+
+ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
+- int length)
++ int length, int read_type)
+ {
+ int bytes_used;
+ int length1, length2;
+@@ -148,9 +144,22 @@ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
+
+ length1 = this->size - this->pos_read;
+
+- /* check MIDI command length */
+ command = this->buf[this->pos_read];
++ /*
++ PODxt always has status byte lower nibble set to 0010,
++	   when it means to send 0000, so we correct it here so
++ that control/program changes come on channel 1 and
++ sysex message status byte is correct
++ */
++ if (read_type == LINE6_MIDIBUF_READ_RX) {
++ if (command == 0xb2 || command == 0xc2 || command == 0xf2) {
++ unsigned char fixed = command & 0xf0;
++ this->buf[this->pos_read] = fixed;
++ command = fixed;
++ }
++ }
+
++ /* check MIDI command length */
+ if (command & 0x80) {
+ midi_length = midibuf_message_length(command);
+ this->command_prev = command;
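
For context, the length tables patched above encode standard MIDI message sizes keyed on the status byte, and the new read_type handling rewrites the bogus 0xB2/0xC2/0xF2 status bytes a PODxt emits back to their channel-0/system equivalents. The standalone sketch below only illustrates the standard per-status lengths (total bytes including the status byte); it is not the driver's table, and sysex (0xF0) is left variable.

#include <stdio.h>

static int midi_message_length(unsigned char status)
{
	if (status < 0x80)
		return -1;		/* data byte, not a status byte */
	if (status < 0xf0) {
		switch (status & 0xf0) {
		case 0xc0:		/* program change */
		case 0xd0:		/* channel pressure */
			return 2;
		default:		/* note on/off, aftertouch, CC, pitch bend */
			return 3;
		}
	}
	switch (status) {
	case 0xf1: case 0xf3:		/* MTC quarter frame, song select */
		return 2;
	case 0xf2:			/* song position pointer */
		return 3;
	case 0xf0:			/* sysex: variable length, ends with 0xf7 */
		return -1;
	default:			/* 0xf4..0xff real-time / undefined */
		return 1;
	}
}

int main(void)
{
	unsigned char examples[] = { 0x90, 0xc2, 0xf2, 0xf8 };

	for (unsigned i = 0; i < sizeof(examples); i++)
		printf("status 0x%02x -> %d bytes\n", examples[i],
		       midi_message_length(examples[i]));
	return 0;
}
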
+diff --git a/sound/usb/line6/midibuf.h b/sound/usb/line6/midibuf.h
+index 124a8f9f7e96c..542e8d836f87d 100644
+--- a/sound/usb/line6/midibuf.h
++++ b/sound/usb/line6/midibuf.h
+@@ -8,6 +8,9 @@
+ #ifndef MIDIBUF_H
+ #define MIDIBUF_H
+
++#define LINE6_MIDIBUF_READ_TX 0
++#define LINE6_MIDIBUF_READ_RX 1
++
+ struct midi_buffer {
+ unsigned char *buf;
+ int size;
+@@ -23,7 +26,7 @@ extern void line6_midibuf_destroy(struct midi_buffer *mb);
+ extern int line6_midibuf_ignore(struct midi_buffer *mb, int length);
+ extern int line6_midibuf_init(struct midi_buffer *mb, int size, int split);
+ extern int line6_midibuf_read(struct midi_buffer *mb, unsigned char *data,
+- int length);
++ int length, int read_type);
+ extern void line6_midibuf_reset(struct midi_buffer *mb);
+ extern int line6_midibuf_write(struct midi_buffer *mb, unsigned char *data,
+ int length);
+diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
+index cd41aa7f03851..d173971e5f029 100644
+--- a/sound/usb/line6/pod.c
++++ b/sound/usb/line6/pod.c
+@@ -159,8 +159,9 @@ static struct line6_pcm_properties pod_pcm_properties = {
+ .bytes_per_channel = 3 /* SNDRV_PCM_FMTBIT_S24_3LE */
+ };
+
++
+ static const char pod_version_header[] = {
+- 0xf2, 0x7e, 0x7f, 0x06, 0x02
++ 0xf0, 0x7e, 0x7f, 0x06, 0x02
+ };
+
+ static char *pod_alloc_sysex_buffer(struct usb_line6_pod *pod, int code,
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 8ed165f036a01..9557bd4d1bbca 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -604,6 +604,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_usb_substream *subs = runtime->private_data;
+ struct snd_usb_audio *chip = subs->stream->chip;
++ int retry = 0;
+ int ret;
+
+ ret = snd_usb_lock_shutdown(chip);
+@@ -614,6 +615,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
+ goto unlock;
+ }
+
++ again:
+ if (subs->sync_endpoint) {
+ ret = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
+ if (ret < 0)
+@@ -638,9 +640,16 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
+
+ subs->lowlatency_playback = lowlatency_playback_available(runtime, subs);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+- !subs->lowlatency_playback)
++ !subs->lowlatency_playback) {
+ ret = start_endpoints(subs);
+-
++ /* if XRUN happens at starting streams (possibly with implicit
++ * fb case), restart again, but only try once.
++ */
++ if (ret == -EPIPE && !retry++) {
++ sync_pending_stops(subs);
++ goto again;
++ }
++ }
+ unlock:
+ snd_usb_unlock_shutdown(chip);
+ return ret;
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 874fcf245747f..271884e350035 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -76,6 +76,8 @@
+ { USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f0a) },
+ /* E-Mu 0204 USB */
+ { USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f19) },
++/* Ktmicro Usb_audio device */
++{ USB_DEVICE_VENDOR_SPEC(0x31b2, 0x0011) },
+
+ /*
+ * Creative Technology, Ltd Live! Cam Sync HD [VF0770]
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 0f4dd3503a6a9..58b37bfc885cb 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2044,6 +2044,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ DEVICE_FLG(0x0644, 0x804a, /* TEAC UD-301 */
+ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY |
+ QUIRK_FLAG_IFACE_DELAY),
++ DEVICE_FLG(0x0644, 0x805f, /* TEAC Model 12 */
++ QUIRK_FLAG_FORCE_IFACE_RESET),
+ DEVICE_FLG(0x06f8, 0xb000, /* Hercules DJ Console (Windows Edition) */
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index e97141ef730ad..2aba508a48312 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -172,6 +172,9 @@ extern bool snd_usb_skip_validation;
+ * Don't apply implicit feedback sync mode
+ * QUIRK_FLAG_IFACE_SKIP_CLOSE
+ * Don't closed interface during setting sample rate
++ * QUIRK_FLAG_FORCE_IFACE_RESET
++ * Force an interface reset whenever stopping & restarting a stream
++ * (e.g. after xrun)
+ */
+
+ #define QUIRK_FLAG_GET_SAMPLE_RATE (1U << 0)
+@@ -194,5 +197,6 @@ extern bool snd_usb_skip_validation;
+ #define QUIRK_FLAG_GENERIC_IMPLICIT_FB (1U << 17)
+ #define QUIRK_FLAG_SKIP_IMPLICIT_FB (1U << 18)
+ #define QUIRK_FLAG_IFACE_SKIP_CLOSE (1U << 19)
++#define QUIRK_FLAG_FORCE_IFACE_RESET (1U << 20)
+
+ #endif /* __USBAUDIO_H */
+diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
+index 0cdb4f7115101..e7a11cff7245a 100644
+--- a/tools/bpf/bpftool/common.c
++++ b/tools/bpf/bpftool/common.c
+@@ -499,6 +499,7 @@ static int do_build_table_cb(const char *fpath, const struct stat *sb,
+ if (err) {
+ p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
+ pinned_info.id, path, strerror(errno));
++ free(path);
+ goto out_close;
+ }
+
+diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
+index 9c50beabdd145..fddc05c667b5d 100644
+--- a/tools/lib/bpf/bpf.h
++++ b/tools/lib/bpf/bpf.h
+@@ -393,8 +393,15 @@ LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
+ __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
+ __u64 *probe_offset, __u64 *probe_addr);
+
++#ifdef __cplusplus
++/* forward-declaring enums in C++ isn't compatible with pure C enums, so
++ * instead define bpf_enable_stats() as accepting int as an input
++ */
++LIBBPF_API int bpf_enable_stats(int type);
++#else
+ enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
+ LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);
++#endif
+
+ struct bpf_prog_bind_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
+index d88647da2c7fc..675a0df5c840f 100644
+--- a/tools/lib/bpf/btf.c
++++ b/tools/lib/bpf/btf.c
+@@ -3887,14 +3887,14 @@ static inline __u16 btf_fwd_kind(struct btf_type *t)
+ }
+
+ /* Check if given two types are identical ARRAY definitions */
+-static int btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
++static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
+ {
+ struct btf_type *t1, *t2;
+
+ t1 = btf_type_by_id(d->btf, id1);
+ t2 = btf_type_by_id(d->btf, id2);
+ if (!btf_is_array(t1) || !btf_is_array(t2))
+- return 0;
++ return false;
+
+ return btf_equal_array(t1, t2);
+ }
+@@ -3918,7 +3918,9 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id
+ m1 = btf_members(t1);
+ m2 = btf_members(t2);
+ for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
+- if (m1->type != m2->type)
++ if (m1->type != m2->type &&
++ !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
++ !btf_dedup_identical_structs(d, m1->type, m2->type))
+ return false;
+ }
+ return true;
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 4221f73a74d01..0b470169729e6 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -219,6 +219,17 @@ static int btf_dump_resize(struct btf_dump *d)
+ return 0;
+ }
+
++static void btf_dump_free_names(struct hashmap *map)
++{
++ size_t bkt;
++ struct hashmap_entry *cur;
++
++ hashmap__for_each_entry(map, cur, bkt)
++ free((void *)cur->key);
++
++ hashmap__free(map);
++}
++
+ void btf_dump__free(struct btf_dump *d)
+ {
+ int i;
+@@ -237,8 +248,8 @@ void btf_dump__free(struct btf_dump *d)
+ free(d->cached_names);
+ free(d->emit_queue);
+ free(d->decl_stack);
+- hashmap__free(d->type_names);
+- hashmap__free(d->ident_names);
++ btf_dump_free_names(d->type_names);
++ btf_dump_free_names(d->ident_names);
+
+ free(d);
+ }
+@@ -1520,11 +1531,23 @@ static void btf_dump_emit_type_cast(struct btf_dump *d, __u32 id,
+ static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
+ const char *orig_name)
+ {
++ char *old_name, *new_name;
+ size_t dup_cnt = 0;
++ int err;
++
++ new_name = strdup(orig_name);
++ if (!new_name)
++ return 1;
+
+ hashmap__find(name_map, orig_name, (void **)&dup_cnt);
+ dup_cnt++;
+- hashmap__set(name_map, orig_name, (void *)dup_cnt, NULL, NULL);
++
++ err = hashmap__set(name_map, new_name, (void *)dup_cnt,
++ (const void **)&old_name, NULL);
++ if (err)
++ free(new_name);
++
++ free(old_name);
+
+ return dup_cnt;
+ }
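
The fix above makes the name map own its keys: each key is strdup()ed on insert, a displaced key is freed, and btf_dump_free_names() frees whatever remains on teardown. The toy counter below (plain C, not libbpf's hashmap API; all names invented) shows the same ownership rule in isolation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { char *key; size_t count; };

static struct entry table[64];
static size_t nr;

static int bump(const char *name)
{
	for (size_t i = 0; i < nr; i++) {
		if (!strcmp(table[i].key, name)) {
			table[i].count++;
			return 0;
		}
	}
	if (nr == 64)
		return -1;
	table[nr].key = strdup(name);	/* the map owns its own copy */
	if (!table[nr].key)
		return -1;
	table[nr].count = 1;
	nr++;
	return 0;
}

static void teardown(void)
{
	for (size_t i = 0; i < nr; i++)
		free(table[i].key);	/* what btf_dump_free_names() does */
	nr = 0;
}

int main(void)
{
	bump("foo"); bump("bar"); bump("foo");
	for (size_t i = 0; i < nr; i++)
		printf("%s: %zu\n", table[i].key, table[i].count);
	teardown();
	return 0;
}
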
+@@ -1963,7 +1986,7 @@ static int btf_dump_struct_data(struct btf_dump *d,
+ {
+ const struct btf_member *m = btf_members(t);
+ __u16 n = btf_vlen(t);
+- int i, err;
++ int i, err = 0;
+
+ /* note that we increment depth before calling btf_dump_print() below;
+ * this is intentional. btf_dump_data_newline() will not print a
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 91b7106a4a735..b9a29d1053765 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -597,7 +597,7 @@ struct elf_state {
+ size_t shstrndx; /* section index for section name strings */
+ size_t strtabidx;
+ struct elf_sec_desc *secs;
+- int sec_cnt;
++ size_t sec_cnt;
+ int btf_maps_shndx;
+ __u32 btf_maps_sec_btf_id;
+ int text_shndx;
+@@ -1408,6 +1408,10 @@ static int bpf_object__check_endianness(struct bpf_object *obj)
+ static int
+ bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
+ {
++ if (!data) {
++ pr_warn("invalid license section in %s\n", obj->path);
++ return -LIBBPF_ERRNO__FORMAT;
++ }
+ /* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
+ * go over allowed ELF data section buffer
+ */
+@@ -1421,7 +1425,7 @@ bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
+ {
+ __u32 kver;
+
+- if (size != sizeof(kver)) {
++ if (!data || size != sizeof(kver)) {
+ pr_warn("invalid kver section in %s\n", obj->path);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+@@ -3312,10 +3316,15 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
+ Elf64_Shdr *sh;
+
+ /* ELF section indices are 0-based, but sec #0 is special "invalid"
+- * section. e_shnum does include sec #0, so e_shnum is the necessary
+- * size of an array to keep all the sections.
++ * section. Since section count retrieved by elf_getshdrnum() does
++ * include sec #0, it is already the necessary size of an array to keep
++ * all the sections.
+ */
+- obj->efile.sec_cnt = obj->efile.ehdr->e_shnum;
++ if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
++ pr_warn("elf: failed to get the number of sections for %s: %s\n",
++ obj->path, elf_errmsg(-1));
++ return -LIBBPF_ERRNO__FORMAT;
++ }
+ obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
+ if (!obj->efile.secs)
+ return -ENOMEM;
+@@ -4106,6 +4115,9 @@ static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
+ int l = 0, r = obj->nr_programs - 1, m;
+ struct bpf_program *prog;
+
++ if (!obj->nr_programs)
++ return NULL;
++
+ while (l < r) {
+ m = l + (r - l + 1) / 2;
+ prog = &obj->programs[m];
+diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
+index e83b497c22454..49f3c3b7f6095 100644
+--- a/tools/lib/bpf/usdt.c
++++ b/tools/lib/bpf/usdt.c
+@@ -1348,25 +1348,23 @@ static int calc_pt_regs_off(const char *reg_name)
+
+ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+ {
+- char *reg_name = NULL;
++ char reg_name[16];
+ int arg_sz, len, reg_off;
+ long off;
+
+- if (sscanf(arg_str, " %d @ \[ %m[a-z0-9], %ld ] %n", &arg_sz, &reg_name, &off, &len) == 3) {
++ if (sscanf(arg_str, " %d @ \[ %15[a-z0-9], %ld ] %n", &arg_sz, reg_name, &off, &len) == 3) {
+ /* Memory dereference case, e.g., -4@[sp, 96] */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ reg_off = calc_pt_regs_off(reg_name);
+- free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+- } else if (sscanf(arg_str, " %d @ \[ %m[a-z0-9] ] %n", &arg_sz, &reg_name, &len) == 2) {
++ } else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", &arg_sz, reg_name, &len) == 2) {
+ /* Memory dereference case, e.g., -4@[sp] */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+- free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+@@ -1375,12 +1373,11 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+- } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, &reg_name, &len) == 2) {
++ } else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", &arg_sz, reg_name, &len) == 2) {
+ /* Register read case, e.g., -8@x4 */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+- free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
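
The change above swaps sscanf()'s allocating "%m[...]" conversions for a bounded "%15[...]" read into a 16-byte stack buffer, which removes the need to free reg_name on every exit path. The small userspace demo below contrasts the two styles on a made-up argument spec; the format strings are simplified from the ones in usdt.c, and "%m" is the POSIX/glibc allocation modifier.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *spec = " -4 @ [ sp, 96 ]";
	char reg_fixed[16];
	char *reg_alloc = NULL;
	int sz;
	long off;

	/* bounded read into a stack buffer, as the fixed code does */
	if (sscanf(spec, " %d @ [ %15[a-z0-9], %ld ]", &sz, reg_fixed, &off) == 3)
		printf("fixed buffer: size=%d reg=%s off=%ld\n", sz, reg_fixed, off);

	/* allocating read: works too, but every exit path must free reg_alloc */
	if (sscanf(spec, " %d @ [ %m[a-z0-9], %ld ]", &sz, &reg_alloc, &off) == 3) {
		printf("allocated:    size=%d reg=%s off=%ld\n", sz, reg_alloc, off);
		free(reg_alloc);
	}
	return 0;
}
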
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 43ec14c29a60c..51494c3002d91 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -207,7 +207,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
+ return false;
+
+ insn = find_insn(file, func->sec, func->offset);
+- if (!insn->func)
++ if (!insn || !insn->func)
+ return false;
+
+ func_for_each_insn(file, func, insn) {
+@@ -999,6 +999,16 @@ static const char *uaccess_safe_builtin[] = {
+ "__tsan_read_write4",
+ "__tsan_read_write8",
+ "__tsan_read_write16",
++ "__tsan_volatile_read1",
++ "__tsan_volatile_read2",
++ "__tsan_volatile_read4",
++ "__tsan_volatile_read8",
++ "__tsan_volatile_read16",
++ "__tsan_volatile_write1",
++ "__tsan_volatile_write2",
++ "__tsan_volatile_write4",
++ "__tsan_volatile_write8",
++ "__tsan_volatile_write16",
+ "__tsan_atomic8_load",
+ "__tsan_atomic16_load",
+ "__tsan_atomic32_load",
+diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
+index 18fcc52809fbf..980fe2c292752 100644
+--- a/tools/perf/Documentation/perf-annotate.txt
++++ b/tools/perf/Documentation/perf-annotate.txt
+@@ -41,7 +41,7 @@ OPTIONS
+
+ -q::
+ --quiet::
+- Do not show any message. (Suppress -v)
++ Do not show any warnings or messages. (Suppress -v)
+
+ -n::
+ --show-nr-samples::
+diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
+index be65bd55ab2aa..f3067a4af2940 100644
+--- a/tools/perf/Documentation/perf-diff.txt
++++ b/tools/perf/Documentation/perf-diff.txt
+@@ -75,7 +75,7 @@ OPTIONS
+
+ -q::
+ --quiet::
+- Do not show any message. (Suppress -v)
++ Do not show any warnings or messages. (Suppress -v)
+
+ -f::
+ --force::
+diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
+index 3b1e16563b795..4958a1ffa1cca 100644
+--- a/tools/perf/Documentation/perf-lock.txt
++++ b/tools/perf/Documentation/perf-lock.txt
+@@ -42,7 +42,7 @@ COMMON OPTIONS
+
+ -q::
+ --quiet::
+- Do not show any message. (Suppress -v)
++ Do not show any warnings or messages. (Suppress -v)
+
+ -D::
+ --dump-raw-trace::
+diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
+index 080981d38d7ba..7f8e8ba3a7872 100644
+--- a/tools/perf/Documentation/perf-probe.txt
++++ b/tools/perf/Documentation/perf-probe.txt
+@@ -57,7 +57,7 @@ OPTIONS
+
+ -q::
+ --quiet::
+- Be quiet (do not show any messages including errors).
++ Do not show any warnings or messages.
+ Can not use with -v.
+
+ -a::
+diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
+index e41ae950fdc3b..9ea6d44aca58c 100644
+--- a/tools/perf/Documentation/perf-record.txt
++++ b/tools/perf/Documentation/perf-record.txt
+@@ -282,7 +282,7 @@ OPTIONS
+
+ -q::
+ --quiet::
+- Don't print any message, useful for scripting.
++ Don't print any warnings or messages, useful for scripting.
+
+ -v::
+ --verbose::
+diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
+index 4533db2ee56bb..4fa509b159489 100644
+--- a/tools/perf/Documentation/perf-report.txt
++++ b/tools/perf/Documentation/perf-report.txt
+@@ -27,7 +27,7 @@ OPTIONS
+
+ -q::
+ --quiet::
+- Do not show any message. (Suppress -v)
++ Do not show any warnings or messages. (Suppress -v)
+
+ -n::
+ --show-nr-samples::
+diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
+index d7ff1867feda6..18abdc1dce055 100644
+--- a/tools/perf/Documentation/perf-stat.txt
++++ b/tools/perf/Documentation/perf-stat.txt
+@@ -354,8 +354,8 @@ forbids the event merging logic from sharing events between groups and
+ may be used to increase accuracy in this case.
+
+ --quiet::
+-Don't print output. This is useful with perf stat record below to only
+-write data to the perf.data file.
++Don't print output, warnings or messages. This is useful with perf stat
++record below to only write data to the perf.data file.
+
+ STAT RECORD
+ -----------
+diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
+index e78dedf9e682c..9717c6c17433c 100644
+--- a/tools/perf/bench/numa.c
++++ b/tools/perf/bench/numa.c
+@@ -16,6 +16,7 @@
+ #include <sched.h>
+ #include <stdio.h>
+ #include <assert.h>
++#include <debug.h>
+ #include <malloc.h>
+ #include <signal.h>
+ #include <stdlib.h>
+@@ -116,7 +117,6 @@ struct params {
+ long bytes_thread;
+
+ int nr_tasks;
+- bool show_quiet;
+
+ bool show_convergence;
+ bool measure_convergence;
+@@ -197,7 +197,8 @@ static const struct option options[] = {
+ OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details, "
+ "convergence is reached when each process (all its threads) is running on a single NUMA node."),
+ OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"),
+- OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "quiet mode"),
++ OPT_BOOLEAN('q', "quiet" , &quiet,
++ "quiet mode (do not show any warnings or messages)"),
+ OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),
+
+ /* Special option string parsing callbacks: */
+@@ -1474,7 +1475,7 @@ static int init(void)
+ /* char array in count_process_nodes(): */
+ BUG_ON(g->p.nr_nodes < 0);
+
+- if (g->p.show_quiet && !g->p.show_details)
++ if (quiet && !g->p.show_details)
+ g->p.show_details = -1;
+
+ /* Some memory should be specified: */
+@@ -1553,7 +1554,7 @@ static void print_res(const char *name, double val,
+ if (!name)
+ name = "main,";
+
+- if (!g->p.show_quiet)
++ if (!quiet)
+ printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
+ else
+ printf(" %14.3f %s\n", val, txt_long);
+diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
+index f839e69492e80..517d928c00e3f 100644
+--- a/tools/perf/builtin-annotate.c
++++ b/tools/perf/builtin-annotate.c
+@@ -525,7 +525,7 @@ int cmd_annotate(int argc, const char **argv)
+ OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+- OPT_BOOLEAN('q', "quiet", &quiet, "do now show any message"),
++	OPT_BOOLEAN('q', "quiet", &quiet, "do not show any warnings or messages"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ #ifdef HAVE_GTK2_SUPPORT
+diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
+index d925096dd7f02..ed07cc6cca56c 100644
+--- a/tools/perf/builtin-diff.c
++++ b/tools/perf/builtin-diff.c
+@@ -1260,7 +1260,7 @@ static const char * const diff_usage[] = {
+ static const struct option options[] = {
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+- OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"),
++ OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
+ OPT_BOOLEAN('b', "baseline-only", &show_baseline_only,
+ "Show only items with match in baseline"),
+ OPT_CALLBACK('c', "compute", &compute,
+diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
+index 9722d4ab2e557..66520712a1675 100644
+--- a/tools/perf/builtin-lock.c
++++ b/tools/perf/builtin-lock.c
+@@ -1869,7 +1869,7 @@ int cmd_lock(int argc, const char **argv)
+ "file", "vmlinux pathname"),
+ OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
+ "file", "kallsyms pathname"),
+- OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"),
++ OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
+ OPT_END()
+ };
+
+diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
+index f62298f5db3b4..ed73d0b89ca2d 100644
+--- a/tools/perf/builtin-probe.c
++++ b/tools/perf/builtin-probe.c
+@@ -40,7 +40,6 @@ static struct {
+ int command; /* Command short_name */
+ bool list_events;
+ bool uprobes;
+- bool quiet;
+ bool target_used;
+ int nevents;
+ struct perf_probe_event events[MAX_PROBES];
+@@ -514,8 +513,8 @@ __cmd_probe(int argc, const char **argv)
+ struct option options[] = {
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show parsed arguments, etc)"),
+- OPT_BOOLEAN('q', "quiet", &params.quiet,
+- "be quiet (do not show any messages)"),
++ OPT_BOOLEAN('q', "quiet", &quiet,
++ "be quiet (do not show any warnings or messages)"),
+ OPT_CALLBACK_DEFAULT('l', "list", NULL, "[GROUP:]EVENT",
+ "list up probe events",
+ opt_set_filter_with_command, DEFAULT_LIST_FILTER),
+@@ -613,6 +612,15 @@ __cmd_probe(int argc, const char **argv)
+
+ argc = parse_options(argc, argv, options, probe_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
++
++ if (quiet) {
++ if (verbose != 0) {
++ pr_err(" Error: -v and -q are exclusive.\n");
++ return -EINVAL;
++ }
++ verbose = -1;
++ }
++
+ if (argc > 0) {
+ if (strcmp(argv[0], "-") == 0) {
+ usage_with_options_msg(probe_usage, options,
+@@ -634,14 +642,6 @@ __cmd_probe(int argc, const char **argv)
+ if (ret)
+ return ret;
+
+- if (params.quiet) {
+- if (verbose != 0) {
+- pr_err(" Error: -v and -q are exclusive.\n");
+- return -EINVAL;
+- }
+- verbose = -1;
+- }
+-
+ if (probe_conf.max_probes == 0)
+ probe_conf.max_probes = MAX_PROBES;
+
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index e128b855dddec..59f3d98a0196d 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -3388,7 +3388,7 @@ static struct option __record_options[] = {
+ &record_parse_callchain_opt),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show counter open errors, etc)"),
+- OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
++ OPT_BOOLEAN('q', "quiet", &quiet, "don't print any warnings or messages"),
+ OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
+ "per thread counts"),
+ OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
+diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
+index 8361890176c23..b6d77d3da64f6 100644
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -1222,7 +1222,7 @@ int cmd_report(int argc, const char **argv)
+ "input file name"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+- OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"),
++ OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"),
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 265b051579726..978fdc60b4e84 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -528,26 +528,14 @@ static int enable_counters(void)
+ return err;
+ }
+
+- if (stat_config.initial_delay < 0) {
+- pr_info(EVLIST_DISABLED_MSG);
+- return 0;
+- }
+-
+- if (stat_config.initial_delay > 0) {
+- pr_info(EVLIST_DISABLED_MSG);
+- usleep(stat_config.initial_delay * USEC_PER_MSEC);
+- }
+-
+ /*
+ * We need to enable counters only if:
+ * - we don't have tracee (attaching to task or cpu)
+ * - we have initial delay configured
+ */
+- if (!target__none(&target) || stat_config.initial_delay) {
++ if (!target__none(&target)) {
+ if (!all_counters_use_bpf)
+ evlist__enable(evsel_list);
+- if (stat_config.initial_delay > 0)
+- pr_info(EVLIST_ENABLED_MSG);
+ }
+ return 0;
+ }
+@@ -918,14 +906,27 @@ try_again_reset:
+ return err;
+ }
+
+- err = enable_counters();
+- if (err)
+- return -1;
++ if (stat_config.initial_delay) {
++ pr_info(EVLIST_DISABLED_MSG);
++ } else {
++ err = enable_counters();
++ if (err)
++ return -1;
++ }
+
+ /* Exec the command, if any */
+ if (forks)
+ evlist__start_workload(evsel_list);
+
++ if (stat_config.initial_delay > 0) {
++ usleep(stat_config.initial_delay * USEC_PER_MSEC);
++ err = enable_counters();
++ if (err)
++ return -1;
++
++ pr_info(EVLIST_ENABLED_MSG);
++ }
++
+ t0 = rdclock();
+ clock_gettime(CLOCK_MONOTONIC, &ref_time);
+
+@@ -1023,7 +1024,7 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
+ /* Do not print anything if we record to the pipe. */
+ if (STAT_RECORD && perf_stat.data.is_pipe)
+ return;
+- if (stat_config.quiet)
++ if (quiet)
+ return;
+
+ evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
+@@ -1273,8 +1274,8 @@ static struct option stat_options[] = {
+ "print summary for interval mode"),
+ OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
+ "don't print 'summary' for CSV summary output"),
+- OPT_BOOLEAN(0, "quiet", &stat_config.quiet,
+- "don't print output (useful with record)"),
++ OPT_BOOLEAN(0, "quiet", &quiet,
++ "don't print any output, messages or warnings (useful with record)"),
+ OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
+ "Only enable events on applying cpu with this type "
+ "for hybrid platform (e.g. core or atom)",
+@@ -2277,7 +2278,7 @@ int cmd_stat(int argc, const char **argv)
+ goto out;
+ }
+
+- if (!output && !stat_config.quiet) {
++ if (!output && !quiet) {
+ struct timespec tm;
+ mode = append_file ? "a" : "w";
+
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index d3c757769b965..3dcf6aed1ef71 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -88,6 +88,8 @@
+ # define F_LINUX_SPECIFIC_BASE 1024
+ #endif
+
++#define RAW_SYSCALL_ARGS_NUM 6
++
+ /*
+ * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
+ */
+@@ -108,7 +110,7 @@ struct syscall_fmt {
+ const char *sys_enter,
+ *sys_exit;
+ } bpf_prog_name;
+- struct syscall_arg_fmt arg[6];
++ struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
+ u8 nr_args;
+ bool errpid;
+ bool timeout;
+@@ -1226,7 +1228,7 @@ struct syscall {
+ */
+ struct bpf_map_syscall_entry {
+ bool enabled;
+- u16 string_args_len[6];
++ u16 string_args_len[RAW_SYSCALL_ARGS_NUM];
+ };
+
+ /*
+@@ -1658,7 +1660,7 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
+ {
+ int idx;
+
+- if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
++ if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
+ nr_args = sc->fmt->nr_args;
+
+ sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
+@@ -1791,11 +1793,11 @@ static int trace__read_syscall_info(struct trace *trace, int id)
+ #endif
+ sc = trace->syscalls.table + id;
+ if (sc->nonexistent)
+- return 0;
++ return -EEXIST;
+
+ if (name == NULL) {
+ sc->nonexistent = true;
+- return 0;
++ return -EEXIST;
+ }
+
+ sc->name = name;
+@@ -1809,11 +1811,18 @@ static int trace__read_syscall_info(struct trace *trace, int id)
+ sc->tp_format = trace_event__tp_format("syscalls", tp_name);
+ }
+
+- if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
+- return -ENOMEM;
+-
+- if (IS_ERR(sc->tp_format))
++ /*
++ * Fails to read trace point format via sysfs node, so the trace point
++ * doesn't exist. Set the 'nonexistent' flag as true.
++ */
++ if (IS_ERR(sc->tp_format)) {
++ sc->nonexistent = true;
+ return PTR_ERR(sc->tp_format);
++ }
++
++ if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
++ RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
++ return -ENOMEM;
+
+ sc->args = sc->tp_format->format.fields;
+ /*
+@@ -2131,11 +2140,8 @@ static struct syscall *trace__syscall_info(struct trace *trace,
+ (err = trace__read_syscall_info(trace, id)) != 0)
+ goto out_cant_read;
+
+- if (trace->syscalls.table[id].name == NULL) {
+- if (trace->syscalls.table[id].nonexistent)
+- return NULL;
++ if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
+ goto out_cant_read;
+- }
+
+ return &trace->syscalls.table[id];
+
+diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh
+index 9c9ef33e0b3c6..c779554191731 100755
+--- a/tools/perf/tests/shell/stat_all_pmu.sh
++++ b/tools/perf/tests/shell/stat_all_pmu.sh
+@@ -4,17 +4,8 @@
+
+ set -e
+
+-for p in $(perf list --raw-dump pmu); do
+- # In powerpc, skip the events for hv_24x7 and hv_gpci.
+- # These events needs input values to be filled in for
+- # core, chip, partition id based on system.
+- # Example: hv_24x7/CPM_ADJUNCT_INST,domain=?,core=?/
+- # hv_gpci/event,partition_id=?/
+- # Hence skip these events for ppc.
+- if echo "$p" |grep -Eq 'hv_24x7|hv_gpci' ; then
+- echo "Skipping: Event '$p' in powerpc"
+- continue
+- fi
++# Test all PMU events; however exclude parametrized ones (name contains '?')
++for p in $(perf list --raw-dump pmu | sed 's/[[:graph:]]\+?[[:graph:]]\+[[:space:]]//g'); do
+ echo "Testing $p"
+ result=$(perf stat -e "$p" true 2>&1)
+ if ! echo "$result" | grep -q "$p" && ! echo "$result" | grep -q "<not supported>" ; then
+diff --git a/tools/perf/ui/util.c b/tools/perf/ui/util.c
+index 689b27c34246c..1d38ddf01b604 100644
+--- a/tools/perf/ui/util.c
++++ b/tools/perf/ui/util.c
+@@ -15,6 +15,9 @@ static int perf_stdio__error(const char *format, va_list args)
+
+ static int perf_stdio__warning(const char *format, va_list args)
+ {
++ if (quiet)
++ return 0;
++
+ fprintf(stderr, "Warning:\n");
+ vfprintf(stderr, format, args);
+ return 0;
+@@ -45,6 +48,8 @@ int ui__warning(const char *format, ...)
+ {
+ int ret;
+ va_list args;
++ if (quiet)
++ return 0;
+
+ va_start(args, format);
+ ret = perf_eops->warning(format, args);
+diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
+index c257813e674ef..01f70b8e705a8 100644
+--- a/tools/perf/util/bpf_off_cpu.c
++++ b/tools/perf/util/bpf_off_cpu.c
+@@ -102,7 +102,7 @@ static void check_sched_switch_args(void)
+ const struct btf_type *t1, *t2, *t3;
+ u32 type_id;
+
+- type_id = btf__find_by_name_kind(btf, "bpf_trace_sched_switch",
++ type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
+ BTF_KIND_TYPEDEF);
+ if ((s32)type_id < 0)
+ return;
+diff --git a/tools/perf/util/branch.h b/tools/perf/util/branch.h
+index f838b23db1804..dca75cad96f68 100644
+--- a/tools/perf/util/branch.h
++++ b/tools/perf/util/branch.h
+@@ -24,9 +24,10 @@ struct branch_flags {
+ u64 abort:1;
+ u64 cycles:16;
+ u64 type:4;
++ u64 spec:2;
+ u64 new_type:4;
+ u64 priv:3;
+- u64 reserved:33;
++ u64 reserved:31;
+ };
+ };
+ };
+diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
+index 65e6c22f38e4f..190e818a07176 100644
+--- a/tools/perf/util/debug.c
++++ b/tools/perf/util/debug.c
+@@ -241,6 +241,10 @@ int perf_quiet_option(void)
+ opt++;
+ }
+
++ /* For debug variables that are used as bool types, set to 0. */
++ redirect_to_stderr = 0;
++ debug_peo_args = 0;
++
+ return 0;
+ }
+
+diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
+index ba66bb7fc1ca7..bc866d18973e4 100644
+--- a/tools/perf/util/stat-display.c
++++ b/tools/perf/util/stat-display.c
+@@ -704,7 +704,7 @@ static void uniquify_event_name(struct evsel *counter)
+ counter->name = new_name;
+ }
+ } else {
+- if (perf_pmu__has_hybrid()) {
++ if (evsel__is_hybrid(counter)) {
+ ret = asprintf(&new_name, "%s/%s/",
+ counter->pmu_name, counter->name);
+ } else {
+@@ -744,26 +744,14 @@ static void collect_all_aliases(struct perf_stat_config *config, struct evsel *c
+ }
+ }
+
+-static bool is_uncore(struct evsel *evsel)
+-{
+- struct perf_pmu *pmu = evsel__find_pmu(evsel);
+-
+- return pmu && pmu->is_uncore;
+-}
+-
+-static bool hybrid_uniquify(struct evsel *evsel)
+-{
+- return perf_pmu__has_hybrid() && !is_uncore(evsel);
+-}
+-
+ static bool hybrid_merge(struct evsel *counter, struct perf_stat_config *config,
+ bool check)
+ {
+- if (hybrid_uniquify(counter)) {
++ if (evsel__is_hybrid(counter)) {
+ if (check)
+- return config && config->hybrid_merge;
++ return config->hybrid_merge;
+ else
+- return config && !config->hybrid_merge;
++ return !config->hybrid_merge;
+ }
+
+ return false;
+@@ -1142,11 +1130,16 @@ static void print_metric_headers(struct perf_stat_config *config,
+ struct evlist *evlist,
+ const char *prefix, bool no_indent)
+ {
+- struct perf_stat_output_ctx out;
+ struct evsel *counter;
+ struct outstate os = {
+ .fh = config->output
+ };
++ struct perf_stat_output_ctx out = {
++ .ctx = &os,
++ .print_metric = print_metric_header,
++ .new_line = new_line_metric,
++ .force_header = true,
++ };
+ bool first = true;
+
+ if (config->json_output && !config->interval)
+@@ -1170,13 +1163,11 @@ static void print_metric_headers(struct perf_stat_config *config,
+ /* Print metrics headers only */
+ evlist__for_each_entry(evlist, counter) {
+ os.evsel = counter;
+- out.ctx = &os;
+- out.print_metric = print_metric_header;
++
+ if (!first && config->json_output)
+ fprintf(config->output, ", ");
+ first = false;
+- out.new_line = new_line_metric;
+- out.force_header = true;
++
+ perf_stat__print_shadow_stats(config, counter, 0,
+ 0,
+ &out,
+diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
+index b0899c6e002f5..35c940d7f29cd 100644
+--- a/tools/perf/util/stat.h
++++ b/tools/perf/util/stat.h
+@@ -139,7 +139,6 @@ struct perf_stat_config {
+ bool metric_no_group;
+ bool metric_no_merge;
+ bool stop_read_counter;
+- bool quiet;
+ bool iostat_run;
+ char *user_requested_cpu_list;
+ bool system_wide;
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index 647b7dff8ef36..80345695b1360 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -1303,7 +1303,7 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
+ (!used_opd && syms_ss->adjust_symbols)) {
+ GElf_Phdr phdr;
+
+- if (elf_read_program_header(syms_ss->elf,
++ if (elf_read_program_header(runtime_ss->elf,
+ (u64)sym.st_value, &phdr)) {
+ pr_debug4("%s: failed to find program header for "
+ "symbol: %s st_value: %#" PRIx64 "\n",
+diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
+index 9213565c03117..59cec4244b3a7 100644
+--- a/tools/testing/selftests/bpf/config
++++ b/tools/testing/selftests/bpf/config
+@@ -13,6 +13,7 @@ CONFIG_CRYPTO_USER_API_HASH=y
+ CONFIG_DYNAMIC_FTRACE=y
+ CONFIG_FPROBE=y
+ CONFIG_FTRACE_SYSCALLS=y
++CONFIG_FUNCTION_ERROR_INJECTION=y
+ CONFIG_FUNCTION_TRACER=y
+ CONFIG_GENEVE=y
+ CONFIG_IKCONFIG=y
+diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
+index bec15558fd938..1f37adff7632c 100644
+--- a/tools/testing/selftests/bpf/network_helpers.c
++++ b/tools/testing/selftests/bpf/network_helpers.c
+@@ -426,6 +426,10 @@ static int setns_by_fd(int nsfd)
+ if (!ASSERT_OK(err, "mount /sys/fs/bpf"))
+ return err;
+
++ err = mount("debugfs", "/sys/kernel/debug", "debugfs", 0, NULL);
++ if (!ASSERT_OK(err, "mount /sys/kernel/debug"))
++ return err;
++
+ return 0;
+ }
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+index 3369c5ec3a17c..ecde236047fe1 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+@@ -1498,7 +1498,6 @@ static noinline int trigger_func(int arg)
+ static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
+ {
+ struct bpf_iter_vma_offset *skel;
+- struct bpf_link *link;
+ char buf[16] = {};
+ int iter_fd, len;
+ int pgsz, shift;
+@@ -1513,11 +1512,11 @@ static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool
+ ;
+ skel->bss->page_shift = shift;
+
+- link = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
+- if (!ASSERT_OK_PTR(link, "attach_iter"))
+- return;
++ skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
++ if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter"))
++ goto exit;
+
+- iter_fd = bpf_iter_create(bpf_link__fd(link));
++ iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset));
+ if (!ASSERT_GT(iter_fd, 0, "create_iter"))
+ goto exit;
+
+@@ -1535,7 +1534,7 @@ static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool
+ close(iter_fd);
+
+ exit:
+- bpf_link__destroy(link);
++ bpf_iter_vma_offset__destroy(skel);
+ }
+
+ static void test_task_vma_offset(void)
+diff --git a/tools/testing/selftests/bpf/prog_tests/empty_skb.c b/tools/testing/selftests/bpf/prog_tests/empty_skb.c
+new file mode 100644
+index 0000000000000..0613f3bb8b5e4
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/empty_skb.c
+@@ -0,0 +1,146 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <test_progs.h>
++#include <network_helpers.h>
++#include <net/if.h>
++#include "empty_skb.skel.h"
++
++#define SYS(cmd) ({ \
++ if (!ASSERT_OK(system(cmd), (cmd))) \
++ goto out; \
++})
++
++void serial_test_empty_skb(void)
++{
++ LIBBPF_OPTS(bpf_test_run_opts, tattr);
++ struct empty_skb *bpf_obj = NULL;
++ struct nstoken *tok = NULL;
++ struct bpf_program *prog;
++ char eth_hlen_pp[15];
++ char eth_hlen[14];
++ int veth_ifindex;
++ int ipip_ifindex;
++ int err;
++ int i;
++
++ struct {
++ const char *msg;
++ const void *data_in;
++ __u32 data_size_in;
++ int *ifindex;
++ int err;
++ int ret;
++ bool success_on_tc;
++ } tests[] = {
++ /* Empty packets are always rejected. */
++
++ {
++ /* BPF_PROG_RUN ETH_HLEN size check */
++ .msg = "veth empty ingress packet",
++ .data_in = NULL,
++ .data_size_in = 0,
++ .ifindex = &veth_ifindex,
++ .err = -EINVAL,
++ },
++ {
++ /* BPF_PROG_RUN ETH_HLEN size check */
++ .msg = "ipip empty ingress packet",
++ .data_in = NULL,
++ .data_size_in = 0,
++ .ifindex = &ipip_ifindex,
++ .err = -EINVAL,
++ },
++
++ /* ETH_HLEN-sized packets:
++ * - can not be redirected at LWT_XMIT
++ * - can be redirected at TC to non-tunneling dest
++ */
++
++ {
++ /* __bpf_redirect_common */
++ .msg = "veth ETH_HLEN packet ingress",
++ .data_in = eth_hlen,
++ .data_size_in = sizeof(eth_hlen),
++ .ifindex = &veth_ifindex,
++ .ret = -ERANGE,
++ .success_on_tc = true,
++ },
++ {
++ /* __bpf_redirect_no_mac
++ *
++ * lwt: skb->len=0 <= skb_network_offset=0
++ * tc: skb->len=14 <= skb_network_offset=14
++ */
++ .msg = "ipip ETH_HLEN packet ingress",
++ .data_in = eth_hlen,
++ .data_size_in = sizeof(eth_hlen),
++ .ifindex = &ipip_ifindex,
++ .ret = -ERANGE,
++ },
++
++ /* ETH_HLEN+1-sized packet should be redirected. */
++
++ {
++ .msg = "veth ETH_HLEN+1 packet ingress",
++ .data_in = eth_hlen_pp,
++ .data_size_in = sizeof(eth_hlen_pp),
++ .ifindex = &veth_ifindex,
++ },
++ {
++ .msg = "ipip ETH_HLEN+1 packet ingress",
++ .data_in = eth_hlen_pp,
++ .data_size_in = sizeof(eth_hlen_pp),
++ .ifindex = &ipip_ifindex,
++ },
++ };
++
++ SYS("ip netns add empty_skb");
++ tok = open_netns("empty_skb");
++ SYS("ip link add veth0 type veth peer veth1");
++ SYS("ip link set dev veth0 up");
++ SYS("ip link set dev veth1 up");
++ SYS("ip addr add 10.0.0.1/8 dev veth0");
++ SYS("ip addr add 10.0.0.2/8 dev veth1");
++ veth_ifindex = if_nametoindex("veth0");
++
++ SYS("ip link add ipip0 type ipip local 10.0.0.1 remote 10.0.0.2");
++ SYS("ip link set ipip0 up");
++ SYS("ip addr add 192.168.1.1/16 dev ipip0");
++ ipip_ifindex = if_nametoindex("ipip0");
++
++ bpf_obj = empty_skb__open_and_load();
++ if (!ASSERT_OK_PTR(bpf_obj, "open skeleton"))
++ goto out;
++
++ for (i = 0; i < ARRAY_SIZE(tests); i++) {
++ bpf_object__for_each_program(prog, bpf_obj->obj) {
++ char buf[128];
++ bool at_tc = !strncmp(bpf_program__section_name(prog), "tc", 2);
++
++ tattr.data_in = tests[i].data_in;
++ tattr.data_size_in = tests[i].data_size_in;
++
++ tattr.data_size_out = 0;
++ bpf_obj->bss->ifindex = *tests[i].ifindex;
++ bpf_obj->bss->ret = 0;
++ err = bpf_prog_test_run_opts(bpf_program__fd(prog), &tattr);
++ sprintf(buf, "err: %s [%s]", tests[i].msg, bpf_program__name(prog));
++
++ if (at_tc && tests[i].success_on_tc)
++ ASSERT_GE(err, 0, buf);
++ else
++ ASSERT_EQ(err, tests[i].err, buf);
++ sprintf(buf, "ret: %s [%s]", tests[i].msg, bpf_program__name(prog));
++ if (at_tc && tests[i].success_on_tc)
++ ASSERT_GE(bpf_obj->bss->ret, 0, buf);
++ else
++ ASSERT_EQ(bpf_obj->bss->ret, tests[i].ret, buf);
++ }
++ }
++
++out:
++ if (bpf_obj)
++ empty_skb__destroy(bpf_obj);
++ if (tok)
++ close_netns(tok);
++ system("ip netns del empty_skb");
++}
+diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+index a4b4133d39e95..0d82e28aed1ac 100644
+--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
++++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+@@ -325,7 +325,7 @@ static bool symbol_equal(const void *key1, const void *key2, void *ctx __maybe_u
+ static int get_syms(char ***symsp, size_t *cntp)
+ {
+ size_t cap = 0, cnt = 0, i;
+- char *name, **syms = NULL;
++ char *name = NULL, **syms = NULL;
+ struct hashmap *map;
+ char buf[256];
+ FILE *f;
+@@ -352,6 +352,8 @@ static int get_syms(char ***symsp, size_t *cntp)
+ /* skip modules */
+ if (strchr(buf, '['))
+ continue;
++
++ free(name);
+ if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
+ continue;
+ /*
+@@ -371,32 +373,32 @@ static int get_syms(char ***symsp, size_t *cntp)
+ if (!strncmp(name, "__ftrace_invalid_address__",
+ sizeof("__ftrace_invalid_address__") - 1))
+ continue;
++
+ err = hashmap__add(map, name, NULL);
+- if (err) {
+- free(name);
+- if (err == -EEXIST)
+- continue;
++ if (err == -EEXIST)
++ continue;
++ if (err)
+ goto error;
+- }
++
+ err = libbpf_ensure_mem((void **) &syms, &cap,
+ sizeof(*syms), cnt + 1);
+- if (err) {
+- free(name);
++ if (err)
+ goto error;
+- }
+- syms[cnt] = name;
+- cnt++;
++
++ syms[cnt++] = name;
++ name = NULL;
+ }
+
+ *symsp = syms;
+ *cntp = cnt;
+
+ error:
++ free(name);
+ fclose(f);
+ hashmap__free(map);
+ if (err) {
+ for (i = 0; i < cnt; i++)
+- free(syms[cnt]);
++ free(syms[i]);
+ free(syms);
+ }
+ return err;
+diff --git a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
+index 1102e4f42d2d4..f117bfef68a14 100644
+--- a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
++++ b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
+@@ -173,10 +173,12 @@ static void test_lsm_cgroup_functional(void)
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd2, NULL), 1, "total prog count");
+
+- /* AF_UNIX is prohibited. */
+-
+ fd = socket(AF_UNIX, SOCK_STREAM, 0);
+- ASSERT_LT(fd, 0, "socket(AF_UNIX)");
++ if (!(skel->kconfig->CONFIG_SECURITY_APPARMOR
++ || skel->kconfig->CONFIG_SECURITY_SELINUX
++ || skel->kconfig->CONFIG_SECURITY_SMACK))
++ /* AF_UNIX is prohibited. */
++ ASSERT_LT(fd, 0, "socket(AF_UNIX)");
+ close(fd);
+
+ /* AF_INET6 gets default policy (sk_priority). */
+@@ -233,11 +235,18 @@ static void test_lsm_cgroup_functional(void)
+
+ /* AF_INET6+SOCK_STREAM
+ * AF_PACKET+SOCK_RAW
++ * AF_UNIX+SOCK_RAW if already have non-bpf lsms installed
+ * listen_fd
+ * client_fd
+ * accepted_fd
+ */
+- ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2");
++ if (skel->kconfig->CONFIG_SECURITY_APPARMOR
++ || skel->kconfig->CONFIG_SECURITY_SELINUX
++ || skel->kconfig->CONFIG_SECURITY_SMACK)
++ /* AF_UNIX+SOCK_RAW if already have non-bpf lsms installed */
++ ASSERT_EQ(skel->bss->called_socket_post_create2, 6, "called_create2");
++ else
++ ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2");
+
+ /* start_server
+ * bind(ETH_P_ALL)
+diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
+index fdcea7a61491e..0d66b15242089 100644
+--- a/tools/testing/selftests/bpf/prog_tests/map_kptr.c
++++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
+@@ -105,7 +105,7 @@ static void test_map_kptr_success(bool test_run)
+ ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval");
+
+ if (test_run)
+- return;
++ goto exit;
+
+ ret = bpf_map__update_elem(skel->maps.array_map,
+ &key, sizeof(key), buf, sizeof(buf), 0);
+@@ -132,6 +132,7 @@ static void test_map_kptr_success(bool test_run)
+ ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
+ ASSERT_OK(ret, "lru_hash_map delete");
+
++exit:
+ map_kptr__destroy(skel);
+ }
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
+index 617bbce6ef8f1..57191773572a0 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
++++ b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
+@@ -485,7 +485,7 @@ static void misc(void)
+ goto check_linum;
+
+ ret = read(sk_fds.passive_fd, recv_msg, sizeof(recv_msg));
+- if (ASSERT_EQ(ret, sizeof(send_msg), "read(msg)"))
++ if (!ASSERT_EQ(ret, sizeof(send_msg), "read(msg)"))
+ goto check_linum;
+ }
+
+@@ -539,7 +539,7 @@ void test_tcp_hdr_options(void)
+ goto skel_destroy;
+
+ cg_fd = test__join_cgroup(CG_NAME);
+- if (ASSERT_GE(cg_fd, 0, "join_cgroup"))
++ if (!ASSERT_GE(cg_fd, 0, "join_cgroup"))
+ goto skel_destroy;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
+index d5022b91d1e4c..48dc9472e160a 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
++++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
+@@ -15,7 +15,7 @@ static void test_fentry(void)
+
+ err = tracing_struct__attach(skel);
+ if (!ASSERT_OK(err, "tracing_struct__attach"))
+- return;
++ goto destroy_skel;
+
+ ASSERT_OK(trigger_module_test_read(256), "trigger_read");
+
+@@ -54,6 +54,7 @@ static void test_fentry(void)
+ ASSERT_EQ(skel->bss->t5_ret, 1, "t5 ret");
+
+ tracing_struct__detach(skel);
++destroy_skel:
+ tracing_struct__destroy(skel);
+ }
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+index 9b9cf8458adf8..39973ea1ce433 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+@@ -18,7 +18,7 @@ static void test_xdp_adjust_tail_shrink(void)
+ );
+
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+- if (ASSERT_OK(err, "test_xdp_adjust_tail_shrink"))
++ if (!ASSERT_OK(err, "test_xdp_adjust_tail_shrink"))
+ return;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+@@ -53,7 +53,7 @@ static void test_xdp_adjust_tail_grow(void)
+ );
+
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+- if (ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
++ if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
+ return;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+@@ -63,6 +63,7 @@ static void test_xdp_adjust_tail_grow(void)
+ expect_sz = sizeof(pkt_v6) + 40; /* Test grow with 40 bytes */
+ topts.data_in = &pkt_v6;
+ topts.data_size_in = sizeof(pkt_v6);
++ topts.data_size_out = sizeof(buf);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "ipv6");
+ ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval");
+@@ -89,7 +90,7 @@ static void test_xdp_adjust_tail_grow2(void)
+ );
+
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+- if (ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
++ if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
+ return;
+
+ /* Test case-64 */
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+index a50971c6cf4a5..9ac6f6a268db2 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+@@ -85,7 +85,7 @@ static void test_max_pkt_size(int fd)
+ }
+
+ #define NUM_PKTS 10000
+-void test_xdp_do_redirect(void)
++void serial_test_xdp_do_redirect(void)
+ {
+ int err, xdp_prog_fd, tc_prog_fd, ifindex_src, ifindex_dst;
+ char data[sizeof(pkt_udp) + sizeof(__u32)];
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
+index 75550a40e029d..879f5da2f21e6 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
+@@ -174,7 +174,7 @@ out:
+ system("ip netns del synproxy");
+ }
+
+-void test_xdp_synproxy(void)
++void serial_test_xdp_synproxy(void)
+ {
+ if (test__start_subtest("xdp"))
+ test_synproxy(true);
+diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
+index 285c008cbf9c2..9ba14c37bbcc9 100644
+--- a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
++++ b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
+@@ -7,14 +7,14 @@ char _license[] SEC("license") = "GPL";
+
+ unsigned long last_sym_value = 0;
+
+-static inline char tolower(char c)
++static inline char to_lower(char c)
+ {
+ if (c >= 'A' && c <= 'Z')
+ c += ('a' - 'A');
+ return c;
+ }
+
+-static inline char toupper(char c)
++static inline char to_upper(char c)
+ {
+ if (c >= 'a' && c <= 'z')
+ c -= ('a' - 'A');
+@@ -54,7 +54,7 @@ int dump_ksym(struct bpf_iter__ksym *ctx)
+ type = iter->type;
+
+ if (iter->module_name[0]) {
+- type = iter->exported ? toupper(type) : tolower(type);
++ type = iter->exported ? to_upper(type) : to_lower(type);
+ BPF_SEQ_PRINTF(seq, "0x%llx %c %s [ %s ] ",
+ value, type, iter->name, iter->module_name);
+ } else {
+diff --git a/tools/testing/selftests/bpf/progs/empty_skb.c b/tools/testing/selftests/bpf/progs/empty_skb.c
+new file mode 100644
+index 0000000000000..4b0cd67532511
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/empty_skb.c
+@@ -0,0 +1,37 @@
++// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
++#include <linux/bpf.h>
++#include <bpf/bpf_helpers.h>
++#include <bpf/bpf_endian.h>
++
++char _license[] SEC("license") = "GPL";
++
++int ifindex;
++int ret;
++
++SEC("lwt_xmit")
++int redirect_ingress(struct __sk_buff *skb)
++{
++ ret = bpf_clone_redirect(skb, ifindex, BPF_F_INGRESS);
++ return 0;
++}
++
++SEC("lwt_xmit")
++int redirect_egress(struct __sk_buff *skb)
++{
++ ret = bpf_clone_redirect(skb, ifindex, 0);
++ return 0;
++}
++
++SEC("tc")
++int tc_redirect_ingress(struct __sk_buff *skb)
++{
++ ret = bpf_clone_redirect(skb, ifindex, BPF_F_INGRESS);
++ return 0;
++}
++
++SEC("tc")
++int tc_redirect_egress(struct __sk_buff *skb)
++{
++ ret = bpf_clone_redirect(skb, ifindex, 0);
++ return 0;
++}
+diff --git a/tools/testing/selftests/bpf/progs/lsm_cgroup.c b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
+index 4f2d60b87b75d..02c11d16b692a 100644
+--- a/tools/testing/selftests/bpf/progs/lsm_cgroup.c
++++ b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
+@@ -7,6 +7,10 @@
+
+ char _license[] SEC("license") = "GPL";
+
++extern bool CONFIG_SECURITY_SELINUX __kconfig __weak;
++extern bool CONFIG_SECURITY_SMACK __kconfig __weak;
++extern bool CONFIG_SECURITY_APPARMOR __kconfig __weak;
++
+ #ifndef AF_PACKET
+ #define AF_PACKET 17
+ #endif
+@@ -140,6 +144,10 @@ SEC("lsm_cgroup/sk_alloc_security")
+ int BPF_PROG(socket_alloc, struct sock *sk, int family, gfp_t priority)
+ {
+ called_socket_alloc++;
++ /* if already have non-bpf lsms installed, EPERM will cause memory leak of non-bpf lsms */
++ if (CONFIG_SECURITY_SELINUX || CONFIG_SECURITY_SMACK || CONFIG_SECURITY_APPARMOR)
++ return 1;
++
+ if (family == AF_UNIX)
+ return 0; /* EPERM */
+
+diff --git a/tools/testing/selftests/bpf/test_bpftool_metadata.sh b/tools/testing/selftests/bpf/test_bpftool_metadata.sh
+index 1bf81b49457af..b5520692f41bd 100755
+--- a/tools/testing/selftests/bpf/test_bpftool_metadata.sh
++++ b/tools/testing/selftests/bpf/test_bpftool_metadata.sh
+@@ -4,6 +4,9 @@
+ # Kselftest framework requirement - SKIP code is 4.
+ ksft_skip=4
+
++BPF_FILE_USED="metadata_used.bpf.o"
++BPF_FILE_UNUSED="metadata_unused.bpf.o"
++
+ TESTNAME=bpftool_metadata
+ BPF_FS=$(awk '$3 == "bpf" {print $2; exit}' /proc/mounts)
+ BPF_DIR=$BPF_FS/test_$TESTNAME
+@@ -55,7 +58,7 @@ mkdir $BPF_DIR
+
+ trap cleanup EXIT
+
+-bpftool prog load metadata_unused.o $BPF_DIR/unused
++bpftool prog load $BPF_FILE_UNUSED $BPF_DIR/unused
+
+ METADATA_PLAIN="$(bpftool prog)"
+ echo "$METADATA_PLAIN" | grep 'a = "foo"' > /dev/null
+@@ -67,7 +70,7 @@ bpftool map | grep 'metadata.rodata' > /dev/null
+
+ rm $BPF_DIR/unused
+
+-bpftool prog load metadata_used.o $BPF_DIR/used
++bpftool prog load $BPF_FILE_USED $BPF_DIR/used
+
+ METADATA_PLAIN="$(bpftool prog)"
+ echo "$METADATA_PLAIN" | grep 'a = "bar"' > /dev/null
+diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh
+index 5303ce0c977bd..4b298863797a2 100755
+--- a/tools/testing/selftests/bpf/test_flow_dissector.sh
++++ b/tools/testing/selftests/bpf/test_flow_dissector.sh
+@@ -2,6 +2,8 @@
+ # SPDX-License-Identifier: GPL-2.0
+ #
+ # Load BPF flow dissector and verify it correctly dissects traffic
++
++BPF_FILE="bpf_flow.bpf.o"
+ export TESTNAME=test_flow_dissector
+ unmount=0
+
+@@ -22,7 +24,7 @@ if [[ -z $(ip netns identify $$) ]]; then
+ if bpftool="$(which bpftool)"; then
+ echo "Testing global flow dissector..."
+
+- $bpftool prog loadall ./bpf_flow.o /sys/fs/bpf/flow \
++ $bpftool prog loadall $BPF_FILE /sys/fs/bpf/flow \
+ type flow_dissector
+
+ if ! unshare --net $bpftool prog attach pinned \
+@@ -95,7 +97,7 @@ else
+ fi
+
+ # Attach BPF program
+-./flow_dissector_load -p bpf_flow.o -s _dissect
++./flow_dissector_load -p $BPF_FILE -s _dissect
+
+ # Setup
+ tc qdisc add dev lo ingress
+diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
+index 6c69c42b1d607..1e565f47aca94 100755
+--- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
++++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
+@@ -38,6 +38,7 @@
+ # ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
+ # ping replies go DST->SRC directly
+
++BPF_FILE="test_lwt_ip_encap.bpf.o"
+ if [[ $EUID -ne 0 ]]; then
+ echo "This script must be run as root"
+ echo "FAIL"
+@@ -373,14 +374,14 @@ test_egress()
+ # install replacement routes (LWT/eBPF), pings succeed
+ if [ "${ENCAP}" == "IPv4" ] ; then
+ ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
+- test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
++ ${BPF_FILE} sec encap_gre dev veth1 ${VRF}
+ ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
+- test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
++ ${BPF_FILE} sec encap_gre dev veth1 ${VRF}
+ elif [ "${ENCAP}" == "IPv6" ] ; then
+ ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
+- test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
++ ${BPF_FILE} sec encap_gre6 dev veth1 ${VRF}
+ ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
+- test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
++ ${BPF_FILE} sec encap_gre6 dev veth1 ${VRF}
+ else
+ echo " unknown encap ${ENCAP}"
+ TEST_STATUS=1
+@@ -431,14 +432,14 @@ test_ingress()
+ # install replacement routes (LWT/eBPF), pings succeed
+ if [ "${ENCAP}" == "IPv4" ] ; then
+ ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
+- test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
++ ${BPF_FILE} sec encap_gre dev veth2 ${VRF}
+ ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
+- test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
++ ${BPF_FILE} sec encap_gre dev veth2 ${VRF}
+ elif [ "${ENCAP}" == "IPv6" ] ; then
+ ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
+- test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
++ ${BPF_FILE} sec encap_gre6 dev veth2 ${VRF}
+ ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
+- test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
++ ${BPF_FILE} sec encap_gre6 dev veth2 ${VRF}
+ else
+ echo "FAIL: unknown encap ${ENCAP}"
+ TEST_STATUS=1
+diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.sh b/tools/testing/selftests/bpf/test_lwt_seg6local.sh
+index 826f4423ce029..0efea2292d6aa 100755
+--- a/tools/testing/selftests/bpf/test_lwt_seg6local.sh
++++ b/tools/testing/selftests/bpf/test_lwt_seg6local.sh
+@@ -23,6 +23,7 @@
+
+ # Kselftest framework requirement - SKIP code is 4.
+ ksft_skip=4
++BPF_FILE="test_lwt_seg6local.bpf.o"
+ readonly NS1="ns1-$(mktemp -u XXXXXX)"
+ readonly NS2="ns2-$(mktemp -u XXXXXX)"
+ readonly NS3="ns3-$(mktemp -u XXXXXX)"
+@@ -117,18 +118,18 @@ ip netns exec ${NS6} ip -6 addr add fb00::109/16 dev veth10 scope link
+ ip netns exec ${NS1} ip -6 addr add fb00::1/16 dev lo
+ ip netns exec ${NS1} ip -6 route add fb00::6 dev veth1 via fb00::21
+
+-ip netns exec ${NS2} ip -6 route add fb00::6 encap bpf in obj test_lwt_seg6local.o sec encap_srh dev veth2
++ip netns exec ${NS2} ip -6 route add fb00::6 encap bpf in obj ${BPF_FILE} sec encap_srh dev veth2
+ ip netns exec ${NS2} ip -6 route add fd00::1 dev veth3 via fb00::43 scope link
+
+ ip netns exec ${NS3} ip -6 route add fc42::1 dev veth5 via fb00::65
+-ip netns exec ${NS3} ip -6 route add fd00::1 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec add_egr_x dev veth4
++ip netns exec ${NS3} ip -6 route add fd00::1 encap seg6local action End.BPF endpoint obj ${BPF_FILE} sec add_egr_x dev veth4
+
+-ip netns exec ${NS4} ip -6 route add fd00::2 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec pop_egr dev veth6
++ip netns exec ${NS4} ip -6 route add fd00::2 encap seg6local action End.BPF endpoint obj ${BPF_FILE} sec pop_egr dev veth6
+ ip netns exec ${NS4} ip -6 addr add fc42::1 dev lo
+ ip netns exec ${NS4} ip -6 route add fd00::3 dev veth7 via fb00::87
+
+ ip netns exec ${NS5} ip -6 route add fd00::4 table 117 dev veth9 via fb00::109
+-ip netns exec ${NS5} ip -6 route add fd00::3 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec inspect_t dev veth8
++ip netns exec ${NS5} ip -6 route add fd00::3 encap seg6local action End.BPF endpoint obj ${BPF_FILE} sec inspect_t dev veth8
+
+ ip netns exec ${NS6} ip -6 addr add fb00::6/16 dev lo
+ ip netns exec ${NS6} ip -6 addr add fd00::4/16 dev lo
+diff --git a/tools/testing/selftests/bpf/test_tc_edt.sh b/tools/testing/selftests/bpf/test_tc_edt.sh
+index daa7d1b8d3092..76f0bd17061f9 100755
+--- a/tools/testing/selftests/bpf/test_tc_edt.sh
++++ b/tools/testing/selftests/bpf/test_tc_edt.sh
+@@ -5,6 +5,7 @@
+ # with dst port = 9000 down to 5MBps. Then it measures actual
+ # throughput of the flow.
+
++BPF_FILE="test_tc_edt.bpf.o"
+ if [[ $EUID -ne 0 ]]; then
+ echo "This script must be run as root"
+ echo "FAIL"
+@@ -54,7 +55,7 @@ ip -netns ${NS_DST} route add ${IP_SRC}/32 dev veth_dst
+ ip netns exec ${NS_SRC} tc qdisc add dev veth_src root fq
+ ip netns exec ${NS_SRC} tc qdisc add dev veth_src clsact
+ ip netns exec ${NS_SRC} tc filter add dev veth_src egress \
+- bpf da obj test_tc_edt.o sec cls_test
++ bpf da obj ${BPF_FILE} sec cls_test
+
+
+ # start the listener
+diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+index 088fcad138c98..334bdfeab9403 100755
+--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
++++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+@@ -3,6 +3,7 @@
+ #
+ # In-place tunneling
+
++BPF_FILE="test_tc_tunnel.bpf.o"
+ # must match the port that the bpf program filters on
+ readonly port=8000
+
+@@ -196,7 +197,7 @@ verify_data
+ # client can no longer connect
+ ip netns exec "${ns1}" tc qdisc add dev veth1 clsact
+ ip netns exec "${ns1}" tc filter add dev veth1 egress \
+- bpf direct-action object-file ./test_tc_tunnel.o \
++ bpf direct-action object-file ${BPF_FILE} \
+ section "encap_${tuntype}_${mac}"
+ echo "test bpf encap without decap (expect failure)"
+ server_listen
+@@ -296,7 +297,7 @@ fi
+ ip netns exec "${ns2}" ip link del dev testtun0
+ ip netns exec "${ns2}" tc qdisc add dev veth2 clsact
+ ip netns exec "${ns2}" tc filter add dev veth2 ingress \
+- bpf direct-action object-file ./test_tc_tunnel.o section decap
++ bpf direct-action object-file ${BPF_FILE} section decap
+ echo "test bpf encap with bpf decap"
+ client_connect
+ verify_data
+diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh
+index e9ebc67d73f70..2eaedc1d9ed30 100755
+--- a/tools/testing/selftests/bpf/test_tunnel.sh
++++ b/tools/testing/selftests/bpf/test_tunnel.sh
+@@ -45,6 +45,7 @@
+ # 5) Tunnel protocol handler, ex: vxlan_rcv, decap the packet
+ # 6) Forward the packet to the overlay tnl dev
+
++BPF_FILE="test_tunnel_kern.bpf.o"
+ BPF_PIN_TUNNEL_DIR="/sys/fs/bpf/tc/tunnel"
+ PING_ARG="-c 3 -w 10 -q"
+ ret=0
+@@ -545,7 +546,7 @@ test_xfrm_tunnel()
+ > /sys/kernel/debug/tracing/trace
+ setup_xfrm_tunnel
+ mkdir -p ${BPF_PIN_TUNNEL_DIR}
+- bpftool prog loadall ./test_tunnel_kern.o ${BPF_PIN_TUNNEL_DIR}
++ bpftool prog loadall ${BPF_FILE} ${BPF_PIN_TUNNEL_DIR}
+ tc qdisc add dev veth1 clsact
+ tc filter add dev veth1 proto ip ingress bpf da object-pinned \
+ ${BPF_PIN_TUNNEL_DIR}/xfrm_get_state
+@@ -572,7 +573,7 @@ attach_bpf()
+ SET=$2
+ GET=$3
+ mkdir -p ${BPF_PIN_TUNNEL_DIR}
+- bpftool prog loadall ./test_tunnel_kern.o ${BPF_PIN_TUNNEL_DIR}/
++ bpftool prog loadall ${BPF_FILE} ${BPF_PIN_TUNNEL_DIR}/
+ tc qdisc add dev $DEV clsact
+ tc filter add dev $DEV egress bpf da object-pinned ${BPF_PIN_TUNNEL_DIR}/$SET
+ tc filter add dev $DEV ingress bpf da object-pinned ${BPF_PIN_TUNNEL_DIR}/$GET
+diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh
+index ea69370caae30..2740322c1878b 100755
+--- a/tools/testing/selftests/bpf/test_xdp_meta.sh
++++ b/tools/testing/selftests/bpf/test_xdp_meta.sh
+@@ -1,5 +1,6 @@
+ #!/bin/sh
+
++BPF_FILE="test_xdp_meta.bpf.o"
+ # Kselftest framework requirement - SKIP code is 4.
+ readonly KSFT_SKIP=4
+ readonly NS1="ns1-$(mktemp -u XXXXXX)"
+@@ -42,11 +43,11 @@ ip netns exec ${NS2} ip addr add 10.1.1.22/24 dev veth2
+ ip netns exec ${NS1} tc qdisc add dev veth1 clsact
+ ip netns exec ${NS2} tc qdisc add dev veth2 clsact
+
+-ip netns exec ${NS1} tc filter add dev veth1 ingress bpf da obj test_xdp_meta.o sec t
+-ip netns exec ${NS2} tc filter add dev veth2 ingress bpf da obj test_xdp_meta.o sec t
++ip netns exec ${NS1} tc filter add dev veth1 ingress bpf da obj ${BPF_FILE} sec t
++ip netns exec ${NS2} tc filter add dev veth2 ingress bpf da obj ${BPF_FILE} sec t
+
+-ip netns exec ${NS1} ip link set dev veth1 xdp obj test_xdp_meta.o sec x
+-ip netns exec ${NS2} ip link set dev veth2 xdp obj test_xdp_meta.o sec x
++ip netns exec ${NS1} ip link set dev veth1 xdp obj ${BPF_FILE} sec x
++ip netns exec ${NS2} ip link set dev veth2 xdp obj ${BPF_FILE} sec x
+
+ ip netns exec ${NS1} ip link set dev veth1 up
+ ip netns exec ${NS2} ip link set dev veth2 up
+diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh
+index 810c407e0286e..fbcaa9f0120b2 100755
+--- a/tools/testing/selftests/bpf/test_xdp_vlan.sh
++++ b/tools/testing/selftests/bpf/test_xdp_vlan.sh
+@@ -200,11 +200,11 @@ ip netns exec ${NS2} sh -c 'ping -W 1 -c 1 100.64.41.1 || echo "Success: First p
+ # ----------------------------------------------------------------------
+ # In ns1: ingress use XDP to remove VLAN tags
+ export DEVNS1=veth1
+-export FILE=test_xdp_vlan.o
++export BPF_FILE=test_xdp_vlan.bpf.o
+
+ # First test: Remove VLAN by setting VLAN ID 0, using "xdp_vlan_change"
+ export XDP_PROG=xdp_vlan_change
+-ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PROG
++ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $BPF_FILE section $XDP_PROG
+
+ # In ns1: egress use TC to add back VLAN tag 4011
+ # (del cmd)
+@@ -212,7 +212,7 @@ ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PRO
+ #
+ ip netns exec ${NS1} tc qdisc add dev $DEVNS1 clsact
+ ip netns exec ${NS1} tc filter add dev $DEVNS1 egress \
+- prio 1 handle 1 bpf da obj $FILE sec tc_vlan_push
++ prio 1 handle 1 bpf da obj $BPF_FILE sec tc_vlan_push
+
+ # Now the namespaces can reach each-other, test with ping:
+ ip netns exec ${NS2} ping -i 0.2 -W 2 -c 2 $IPADDR1
+@@ -226,7 +226,7 @@ ip netns exec ${NS1} ping -i 0.2 -W 2 -c 2 $IPADDR2
+ #
+ export XDP_PROG=xdp_vlan_remove_outer2
+ ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE off
+-ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PROG
++ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $BPF_FILE section $XDP_PROG
+
+ # Now the namespaces should still be able reach each-other, test with ping:
+ ip netns exec ${NS2} ping -i 0.2 -W 2 -c 2 $IPADDR1
+diff --git a/tools/testing/selftests/bpf/xdp_synproxy.c b/tools/testing/selftests/bpf/xdp_synproxy.c
+index ff35320d2be97..410a1385a01dd 100644
+--- a/tools/testing/selftests/bpf/xdp_synproxy.c
++++ b/tools/testing/selftests/bpf/xdp_synproxy.c
+@@ -104,7 +104,8 @@ static void parse_options(int argc, char *argv[], unsigned int *ifindex, __u32 *
+ { "tc", no_argument, NULL, 'c' },
+ { NULL, 0, NULL, 0 },
+ };
+- unsigned long mss4, mss6, wscale, ttl;
++ unsigned long mss4, wscale, ttl;
++ unsigned long long mss6;
+ unsigned int tcpipopts_mask = 0;
+
+ if (argc < 2)
+@@ -286,7 +287,7 @@ static int syncookie_open_bpf_maps(__u32 prog_id, int *values_map_fd, int *ports
+
+ prog_info = (struct bpf_prog_info) {
+ .nr_map_ids = 8,
+- .map_ids = (__u64)map_ids,
++ .map_ids = (__u64)(unsigned long)map_ids,
+ };
+ info_len = sizeof(prog_info);
+
+diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
+index 4c52cc6f2f9cc..e8bbbdb77e0d5 100644
+--- a/tools/testing/selftests/cgroup/cgroup_util.c
++++ b/tools/testing/selftests/cgroup/cgroup_util.c
+@@ -555,6 +555,7 @@ int proc_mount_contains(const char *option)
+ ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t size)
+ {
+ char path[PATH_MAX];
++ ssize_t ret;
+
+ if (!pid)
+ snprintf(path, sizeof(path), "/proc/%s/%s",
+@@ -562,8 +563,8 @@ ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t
+ else
+ snprintf(path, sizeof(path), "/proc/%d/%s", pid, item);
+
+- size = read_text(path, buf, size);
+- return size < 0 ? -1 : size;
++ ret = read_text(path, buf, size);
++ return ret < 0 ? -1 : ret;
+ }
+
+ int proc_read_strstr(int pid, bool thread, const char *item, const char *needle)
+diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+index 9de1d123f4f5d..a08c02abde121 100755
+--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
++++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+@@ -496,8 +496,8 @@ dummy_reporter_test()
+
+ check_reporter_info dummy healthy 3 3 10 true
+
+- echo 8192> $DEBUGFS_DIR/health/binary_len
+- check_fail $? "Failed set dummy reporter binary len to 8192"
++ echo 8192 > $DEBUGFS_DIR/health/binary_len
++ check_err $? "Failed set dummy reporter binary len to 8192"
+
+ local dump=$(devlink health dump show $DL_HANDLE reporter dummy -j)
+ check_err $? "Failed show dump of dummy reporter"
+diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
+index a90f394f9aa90..d374878cc0ba9 100755
+--- a/tools/testing/selftests/efivarfs/efivarfs.sh
++++ b/tools/testing/selftests/efivarfs/efivarfs.sh
+@@ -87,6 +87,11 @@ test_create_read()
+ {
+ local file=$efivarfs_mount/$FUNCNAME-$test_guid
+ ./create-read $file
++ if [ $? -ne 0 ]; then
++ echo "create and read $file failed"
++ file_cleanup $file
++ exit 1
++ fi
+ file_cleanup $file
+ }
+
+diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+index 8d26d5505808b..3eea2abf68f9e 100644
+--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
++++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+@@ -38,11 +38,18 @@ cnt_trace() {
+
+ test_event_enabled() {
+ val=$1
++ check_times=10 # wait for 10 * SLEEP_TIME at most
+
+- e=`cat $EVENT_ENABLE`
+- if [ "$e" != $val ]; then
+- fail "Expected $val but found $e"
+- fi
++ while [ $check_times -ne 0 ]; do
++ e=`cat $EVENT_ENABLE`
++ if [ "$e" == $val ]; then
++ return 0
++ fi
++ sleep $SLEEP_TIME
++ check_times=$((check_times - 1))
++ done
++
++ fail "Expected $val but found $e"
+ }
+
+ run_enable_disable() {
+diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
+index b48e1833bc896..76645aaf2b58f 100755
+--- a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
++++ b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
+@@ -35,6 +35,8 @@ cleanup() {
+ for i in 1 2;do ip netns del nsrouter$i;done
+ }
+
++trap cleanup EXIT
++
+ ipv4() {
+ echo -n 192.168.$1.2
+ }
+@@ -146,11 +148,17 @@ ip netns exec nsclient1 nft -f - <<EOF
+ table inet filter {
+ counter unknown { }
+ counter related { }
++ counter redir4 { }
++ counter redir6 { }
+ chain input {
+ type filter hook input priority 0; policy accept;
+- meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
++ icmp type "redirect" ct state "related" counter name "redir4" accept
++ icmpv6 type "nd-redirect" ct state "related" counter name "redir6" accept
++
++ meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+ meta l4proto { icmp, icmpv6 } ct state "related" counter name "related" accept
++
+ counter name "unknown" drop
+ }
+ }
+@@ -279,5 +287,29 @@ else
+ echo "ERROR: icmp error RELATED state test has failed"
+ fi
+
+-cleanup
++# add 'bad' route, expect icmp REDIRECT to be generated
++ip netns exec nsclient1 ip route add 192.168.1.42 via 192.168.1.1
++ip netns exec nsclient1 ip route add dead:1::42 via dead:1::1
++
++ip netns exec "nsclient1" ping -q -c 2 192.168.1.42 > /dev/null
++
++expect="packets 1 bytes 112"
++check_counter nsclient1 "redir4" "$expect"
++if [ $? -ne 0 ];then
++ ret=1
++fi
++
++ip netns exec "nsclient1" ping -c 1 dead:1::42 > /dev/null
++expect="packets 1 bytes 192"
++check_counter nsclient1 "redir6" "$expect"
++if [ $? -ne 0 ];then
++ ret=1
++fi
++
++if [ $ret -eq 0 ];then
++ echo "PASS: icmp redirects had RELATED state"
++else
++ echo "ERROR: icmp redirect RELATED state test has failed"
++fi
++
+ exit $ret
+diff --git a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
+index fbbdffdb2e5d2..f20d1c166d1e4 100644
+--- a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
++++ b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
+@@ -24,6 +24,7 @@ static int check_cpu_dscr_default(char *file, unsigned long val)
+ rc = read(fd, buf, sizeof(buf));
+ if (rc == -1) {
+ perror("read() failed");
++ close(fd);
+ return 1;
+ }
+ close(fd);
+@@ -65,8 +66,10 @@ static int check_all_cpu_dscr_defaults(unsigned long val)
+ if (access(file, F_OK))
+ continue;
+
+- if (check_cpu_dscr_default(file, val))
++ if (check_cpu_dscr_default(file, val)) {
++ closedir(sysfs);
+ return 1;
++ }
+ }
+ closedir(sysfs);
+ return 0;
+diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c
+index e7ceabed7f51f..7d0aa22bdc12b 100644
+--- a/tools/testing/selftests/proc/proc-uptime-002.c
++++ b/tools/testing/selftests/proc/proc-uptime-002.c
+@@ -17,6 +17,7 @@
+ // while shifting across CPUs.
+ #undef NDEBUG
+ #include <assert.h>
++#include <errno.h>
+ #include <unistd.h>
+ #include <sys/syscall.h>
+ #include <stdlib.h>
+@@ -54,7 +55,7 @@ int main(void)
+ len += sizeof(unsigned long);
+ free(m);
+ m = malloc(len);
+- } while (sys_sched_getaffinity(0, len, m) == -EINVAL);
++ } while (sys_sched_getaffinity(0, len, m) == -1 && errno == EINVAL);
+
+ fd = open("/proc/uptime", O_RDONLY);
+ assert(fd >= 0);
diff --git a/system/easy-kernel/0120-XATTR_USER_PREFIX.patch b/system/easy-kernel/0120-XATTR_USER_PREFIX.patch
index 245dcc29f..fac3eed73 100644
--- a/system/easy-kernel/0120-XATTR_USER_PREFIX.patch
+++ b/system/easy-kernel/0120-XATTR_USER_PREFIX.patch
@@ -13,11 +13,10 @@ The namespace is added to any user with Extended Attribute support
enabled for tmpfs. Users who do not enable xattrs will not have
the XATTR_PAX flags preserved.
-diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
-index 1590c49..5eab462 100644
---- a/include/uapi/linux/xattr.h
-+++ b/include/uapi/linux/xattr.h
-@@ -73,5 +73,9 @@
+
+--- a/include/uapi/linux/xattr.h 2022-11-22 05:56:58.175733644 -0500
++++ b/include/uapi/linux/xattr.h 2022-11-22 06:04:26.394834989 -0500
+@@ -81,5 +81,9 @@
#define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
#define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
@@ -27,12 +26,12 @@ index 1590c49..5eab462 100644
+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
#endif /* _UAPI_LINUX_XATTR_H */
---- a/mm/shmem.c 2020-05-04 15:30:27.042035334 -0400
-+++ b/mm/shmem.c 2020-05-04 15:34:57.013881725 -0400
-@@ -3238,6 +3238,14 @@ static int shmem_xattr_handler_set(const
+--- a/mm/shmem.c 2022-11-22 05:57:29.011626215 -0500
++++ b/mm/shmem.c 2022-11-22 06:03:33.165939400 -0500
+@@ -3297,6 +3297,14 @@ static int shmem_xattr_handler_set(const
struct shmem_inode_info *info = SHMEM_I(inode);
+ int err;
- name = xattr_full_name(handler, name);
+
+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
@@ -41,10 +40,10 @@ index 1590c49..5eab462 100644
+ return -EINVAL;
+ }
+
- return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
- }
-
-@@ -3253,6 +3261,12 @@ static const struct xattr_handler shmem_
+ name = xattr_full_name(handler, name);
+ err = simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
+ if (!err) {
+@@ -3312,6 +3320,12 @@ static const struct xattr_handler shmem_
.set = shmem_xattr_handler_set,
};
@@ -54,10 +53,10 @@ index 1590c49..5eab462 100644
+ .set = shmem_xattr_handler_set,
+};
+
- static const struct xattr_handler *shmem_xattr_handlers[] = {
- #ifdef CONFIG_TMPFS_POSIX_ACL
- &posix_acl_access_xattr_handler,
-@@ -3260,6 +3274,7 @@ static const struct xattr_handler *shmem
+ static const struct xattr_handler shmem_trusted_xattr_handler = {
+ .prefix = XATTR_TRUSTED_PREFIX,
+ .get = shmem_xattr_handler_get,
+@@ -3325,6 +3339,7 @@ static const struct xattr_handler *shmem
#endif
&shmem_security_xattr_handler,
&shmem_trusted_xattr_handler,
diff --git a/system/easy-kernel/0122-link-security-restrictions.patch b/system/easy-kernel/0122-link-security-restrictions.patch
index f0ed144fb..e8c301579 100644
--- a/system/easy-kernel/0122-link-security-restrictions.patch
+++ b/system/easy-kernel/0122-link-security-restrictions.patch
@@ -1,20 +1,17 @@
-From: Ben Hutchings <ben@decadent.org.uk>
-Subject: fs: Enable link security restrictions by default
-Date: Fri, 02 Nov 2012 05:32:06 +0000
-Bug-Debian: https://bugs.debian.org/609455
-Forwarded: not-needed
-This reverts commit 561ec64ae67ef25cac8d72bb9c4bfc955edfd415
-('VFS: don't do protected {sym,hard}links by default').
---- a/fs/namei.c 2018-09-28 07:56:07.770005006 -0400
-+++ b/fs/namei.c 2018-09-28 07:56:43.370349204 -0400
-@@ -885,8 +885,8 @@ static inline void put_link(struct namei
+--- a/fs/namei.c 2022-01-23 13:02:27.876558299 -0500
++++ b/fs/namei.c 2022-03-06 12:47:39.375719693 -0500
+@@ -1020,10 +1020,10 @@ static inline void put_link(struct namei
path_put(&last->link);
}
--int sysctl_protected_symlinks __read_mostly = 0;
--int sysctl_protected_hardlinks __read_mostly = 0;
-+int sysctl_protected_symlinks __read_mostly = 1;
-+int sysctl_protected_hardlinks __read_mostly = 1;
- int sysctl_protected_fifos __read_mostly;
- int sysctl_protected_regular __read_mostly;
+-static int sysctl_protected_symlinks __read_mostly;
+-static int sysctl_protected_hardlinks __read_mostly;
+-static int sysctl_protected_fifos __read_mostly;
+-static int sysctl_protected_regular __read_mostly;
++static int sysctl_protected_symlinks __read_mostly = 1;
++static int sysctl_protected_hardlinks __read_mostly = 1;
++int sysctl_protected_fifos __read_mostly = 1;
++int sysctl_protected_regular __read_mostly = 1;
+ #ifdef CONFIG_SYSCTL
+ static struct ctl_table namei_sysctls[] = {
diff --git a/system/easy-kernel/0200-x86-compile.patch b/system/easy-kernel/0200-x86-compile.patch
index 1d2d7d4ad..866ed7dd7 100644
--- a/system/easy-kernel/0200-x86-compile.patch
+++ b/system/easy-kernel/0200-x86-compile.patch
@@ -4,8 +4,8 @@
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
-- select HAVE_STACK_VALIDATION if X86_64
-+# select HAVE_STACK_VALIDATION if X86_64
+- select HAVE_STACK_VALIDATION if HAVE_OBJTOOL
++# select HAVE_STACK_VALIDATION if HAVE_OBJTOOL
select HAVE_STATIC_CALL
select HAVE_STATIC_CALL_INLINE if HAVE_STACK_VALIDATION
select HAVE_PREEMPT_DYNAMIC
diff --git a/system/easy-kernel/0502-gcc9-kcflags.patch b/system/easy-kernel/0502-gcc9-kcflags.patch
index becfda363..0841340b0 100644
--- a/system/easy-kernel/0502-gcc9-kcflags.patch
+++ b/system/easy-kernel/0502-gcc9-kcflags.patch
@@ -1,10 +1,7 @@
-From d31d2b0747ab55e65c2366d51149a0ec9896155e Mon Sep 17 00:00:00 2001
-From: graysky <graysky@archlinux.us>
-Date: Tue, 14 Sep 2021 15:35:34 -0400
-Subject: [PATCH] more uarches for kernel 5.15+
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
+From a0825feea3f100656d58446885b5f190284fd219
+From: graysky <therealgraysky@proton.me>
+Date: Fri, 4 Nov 2022 15:34:36 -0400
+Subject: [PATCH] more uarches for kernel 5.17+
FEATURES
This patch adds additional CPU options to the Linux kernel accessible under:
@@ -36,6 +33,7 @@ CPU-specific microarchitectures include:
• AMD Family 17h (Zen)
• AMD Family 17h (Zen 2)
• AMD Family 19h (Zen 3)†
+• AMD Family 19h (Zen 4)§
• Intel Silvermont low-power processors
• Intel Goldmont low-power processors (Apollo Lake and Denverton)
• Intel Goldmont Plus low-power processors (Gemini Lake)
@@ -55,11 +53,14 @@ CPU-specific microarchitectures include:
• Intel 3rd Gen 10nm++ Xeon (Sapphire Rapids)‡
• Intel 11th Gen i3/i5/i7/i9-family (Rocket Lake)‡
• Intel 12th Gen i3/i5/i7/i9-family (Alder Lake)‡
+• Intel 13th Gen i3/i5/i7/i9-family (Raptor Lake)§
+• Intel 14th Gen i3/i5/i7/i9-family (Meteor Lake)§
Notes: If not otherwise noted, gcc >=9.1 is required for support.
*Requires gcc >=10.1 or clang >=10.0
†Required gcc >=10.3 or clang >=12.0
‡Required gcc >=11.1 or clang >=12.0
+ §Required gcc >=13.0 or clang >=15.0.5
It also offers to compile passing the 'native' option which, "selects the CPU
to generate code for at compilation time by determining the processor type of
@@ -86,7 +87,7 @@ See the following experimental evidence supporting this statement:
https://github.com/graysky2/kernel_gcc_patch
REQUIREMENTS
-linux version >=5.15
+linux version 5.17+
gcc version >=9.0 or clang version >=9.0
ACKNOWLEDGMENTS
@@ -99,21 +100,14 @@ REFERENCES
4. https://github.com/graysky2/kernel_gcc_patch/issues/15
5. http://www.linuxforge.net/docs/linux/linux-gcc.php
-Signed-off-by: graysky <graysky@archlinux.us>
---
-From 1bfa1ef4e3a93e540a64cd1020863019dff3046e Mon Sep 17 00:00:00 2001
-From: graysky <graysky@archlinux.us>
-Date: Sun, 14 Nov 2021 16:08:29 -0500
-Subject: [PATCH] iiii
-
----
- arch/x86/Kconfig.cpu | 332 ++++++++++++++++++++++++++++++--
- arch/x86/Makefile | 40 +++-
- arch/x86/include/asm/vermagic.h | 66 +++++++
- 3 files changed, 424 insertions(+), 14 deletions(-)
+ arch/x86/Kconfig.cpu | 416 ++++++++++++++++++++++++++++++--
+ arch/x86/Makefile | 43 +++-
+ arch/x86/include/asm/vermagic.h | 72 ++++++
+ 3 files changed, 514 insertions(+), 17 deletions(-)
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index eefc434351db..331f7631339a 100644
+index 542377cd419d..08d887d1220d 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -157,7 +157,7 @@ config MPENTIUM4
@@ -134,7 +128,7 @@ index eefc434351db..331f7631339a 100644
depends on X86_32
help
Select this for an AMD Athlon K7-family processor. Enables use of
-@@ -173,12 +173,98 @@ config MK7
+@@ -173,12 +173,106 @@ config MK7
flags to GCC.
config MK8
@@ -231,10 +225,18 @@ index eefc434351db..331f7631339a 100644
+
+ Enables -march=znver3
+
++config MZEN4
++ bool "AMD Zen 4"
++ depends on (CC_IS_GCC && GCC_VERSION >= 130000) || (CC_IS_CLANG && CLANG_VERSION >= 150500)
++ help
++ Select this for AMD Family 19h Zen 4 processors.
++
++ Enables -march=znver4
++
config MCRUSOE
bool "Crusoe"
depends on X86_32
-@@ -270,7 +356,7 @@ config MPSC
+@@ -270,7 +364,7 @@ config MPSC
in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
config MCORE2
@@ -243,7 +245,7 @@ index eefc434351db..331f7631339a 100644
help
Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -278,6 +364,8 @@ config MCORE2
+@@ -278,6 +372,8 @@ config MCORE2
family in /proc/cpuinfo. Newer ones have 6 and older ones 15
(not a typo)
@@ -252,7 +254,7 @@ index eefc434351db..331f7631339a 100644
config MATOM
bool "Intel Atom"
help
-@@ -287,6 +375,182 @@ config MATOM
+@@ -287,6 +383,202 @@ config MATOM
accordingly optimized code. Use a recent GCC with specific Atom
support in order to fully benefit from selecting this option.
@@ -432,10 +434,30 @@ index eefc434351db..331f7631339a 100644
+
+ Enables -march=alderlake
+
++config MRAPTORLAKE
++ bool "Intel Raptor Lake"
++ depends on (CC_IS_GCC && GCC_VERSION >= 130000) || (CC_IS_CLANG && CLANG_VERSION >= 150500)
++ select X86_P6_NOP
++ help
++
++ Select this for thirteenth-generation processors in the Raptor Lake family.
++
++ Enables -march=raptorlake
++
++config MMETEORLAKE
++ bool "Intel Meteor Lake"
++ depends on (CC_IS_GCC && GCC_VERSION >= 130000) || (CC_IS_CLANG && CLANG_VERSION >= 150500)
++ select X86_P6_NOP
++ help
++
++ Select this for fourteenth-generation processors in the Meteor Lake family.
++
++ Enables -march=meteorlake
++
config GENERIC_CPU
bool "Generic-x86-64"
depends on X86_64
-@@ -294,6 +558,50 @@ config GENERIC_CPU
+@@ -294,6 +586,50 @@ config GENERIC_CPU
Generic x86-64 CPU.
Run equally well on all x86-64 CPUs.
@@ -486,66 +508,131 @@ index eefc434351db..331f7631339a 100644
endchoice
config X86_GENERIC
-@@ -318,7 +626,7 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -318,9 +654,17 @@ config X86_INTERNODE_CACHE_SHIFT
config X86_L1_CACHE_SHIFT
int
default "7" if MPENTIUM4 || MPSC
- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+ default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 || GENERIC_CPU4
++ default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 \
++ || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER \
++ || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT \
++ || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL \
++ || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
++ || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE \
++ || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 \
++ || GENERIC_CPU4
default "4" if MELAN || M486SX || M486 || MGEODEGX1
- default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
+- default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
++ default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII \
++ || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-@@ -336,11 +644,11 @@ config X86_ALIGNMENT_16
+ config X86_F00F_BUG
+ def_bool y
+@@ -332,15 +676,27 @@ config X86_INVD_BUG
+
+ config X86_ALIGNMENT_16
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC \
++ || M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1
config X86_INTEL_USERCOPY
def_bool y
- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
-+ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL
++ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC \
++ || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \
++ || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \
++ || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \
++ || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL
config X86_USE_PPRO_CHECKSUM
def_bool y
- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
-
- config X86_USE_3DNOW
- def_bool y
-@@ -360,26 +668,26 @@ config X86_USE_3DNOW
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM \
++ || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX \
++ || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER \
++ || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM \
++ || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE \
++ || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE \
++ || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \
++ || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD
+
+ #
+ # P6_NOPs are a relatively minor optimization that require a family >=
+@@ -356,32 +712,62 @@ config X86_USE_PPRO_CHECKSUM
config X86_P6_NOP
def_bool y
depends on X86_64
- depends on (MCORE2 || MPENTIUM4 || MPSC)
-+ depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL)
++ depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \
++ || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE \
++ || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE \
++ || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL)
config X86_TSC
def_bool y
- depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
-+ depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) || X86_64
++ depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM \
++ || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 \
++ || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER \
++ || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM \
++ || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL \
++ || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
++ || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL \
++ || MNATIVE_AMD) || X86_64
config X86_CMPXCHG64
def_bool y
- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
-+ depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
++ depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 \
++ || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 \
++ || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN \
++ || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS \
++ || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE \
++ || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \
++ || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD
# this should be set for all -march=.. options where the compiler
# generates cmov.
config X86_CMOV
def_bool y
- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+ depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
++ depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 \
++ || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 \
++ || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR \
++ || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \
++ || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \
++ || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \
++ || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD)
config X86_MINIMUM_CPU_FAMILY
int
default "64" if X86_64
- default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
-+ default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
++ default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 \
++ || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 || MK8SSE3 \
++ || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER \
++ || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT \
++ || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL \
++ || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
++ || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE \
++ || MNATIVE_INTEL || MNATIVE_AMD)
default "5" if X86_32 && X86_CMPXCHG64
default "4"
+ config X86_DEBUGCTLMSR
+ def_bool y
+- depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486SX || M486) && !UML
++ depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 \
++ || M486SX || M486) && !UML
+
+ config IA32_FEAT_CTL
+ def_bool y
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 42243869216d..ab1ad6959b96 100644
+index bafbd905e6e7..7fae52788560 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
-@@ -119,8 +119,44 @@ else
+@@ -150,8 +150,47 @@ else
# FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
cflags-$(CONFIG_MK8) += -march=k8
cflags-$(CONFIG_MPSC) += -march=nocona
@@ -563,6 +650,7 @@ index 42243869216d..ab1ad6959b96 100644
+ cflags-$(CONFIG_MZEN) += -march=znver1
+ cflags-$(CONFIG_MZEN2) += -march=znver2
+ cflags-$(CONFIG_MZEN3) += -march=znver3
++ cflags-$(CONFIG_MZEN4) += -march=znver4
+ cflags-$(CONFIG_MNATIVE_INTEL) += -march=native
+ cflags-$(CONFIG_MNATIVE_AMD) += -march=native
+ cflags-$(CONFIG_MATOM) += -march=bonnell
@@ -586,6 +674,8 @@ index 42243869216d..ab1ad6959b96 100644
+ cflags-$(CONFIG_MSAPPHIRERAPIDS) += -march=sapphirerapids
+ cflags-$(CONFIG_MROCKETLAKE) += -march=rocketlake
+ cflags-$(CONFIG_MALDERLAKE) += -march=alderlake
++ cflags-$(CONFIG_MRAPTORLAKE) += -march=raptorlake
++ cflags-$(CONFIG_MMETEORLAKE) += -march=meteorlake
+ cflags-$(CONFIG_GENERIC_CPU2) += -march=x86-64-v2
+ cflags-$(CONFIG_GENERIC_CPU3) += -march=x86-64-v3
+ cflags-$(CONFIG_GENERIC_CPU4) += -march=x86-64-v4
@@ -593,10 +683,10 @@ index 42243869216d..ab1ad6959b96 100644
KBUILD_CFLAGS += $(cflags-y)
diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
-index 75884d2cdec3..4e6a08d4c7e5 100644
+index 75884d2cdec3..18021e8c0c28 100644
--- a/arch/x86/include/asm/vermagic.h
+++ b/arch/x86/include/asm/vermagic.h
-@@ -17,6 +17,48 @@
+@@ -17,6 +17,52 @@
#define MODULE_PROC_FAMILY "586MMX "
#elif defined CONFIG_MCORE2
#define MODULE_PROC_FAMILY "CORE2 "
@@ -642,10 +732,14 @@ index 75884d2cdec3..4e6a08d4c7e5 100644
+#define MODULE_PROC_FAMILY "ROCKETLAKE "
+#elif defined CONFIG_MALDERLAKE
+#define MODULE_PROC_FAMILY "ALDERLAKE "
++#elif defined CONFIG_MRAPTORLAKE
++#define MODULE_PROC_FAMILY "RAPTORLAKE "
++#elif defined CONFIG_MMETEORLAKE
++#define MODULE_PROC_FAMILY "METEORLAKE "
#elif defined CONFIG_MATOM
#define MODULE_PROC_FAMILY "ATOM "
#elif defined CONFIG_M686
-@@ -35,6 +77,30 @@
+@@ -35,6 +81,32 @@
#define MODULE_PROC_FAMILY "K7 "
#elif defined CONFIG_MK8
#define MODULE_PROC_FAMILY "K8 "
@@ -673,8 +767,10 @@ index 75884d2cdec3..4e6a08d4c7e5 100644
+#define MODULE_PROC_FAMILY "ZEN2 "
+#elif defined CONFIG_MZEN3
+#define MODULE_PROC_FAMILY "ZEN3 "
++#elif defined CONFIG_MZEN4
++#define MODULE_PROC_FAMILY "ZEN4 "
#elif defined CONFIG_MELAN
#define MODULE_PROC_FAMILY "ELAN "
#elif defined CONFIG_MCRUSOE
--
-2.33.1
+2.38.1
diff --git a/system/easy-kernel/1000-version.patch b/system/easy-kernel/1000-version.patch
index 0e4210b60..03f473733 100644
--- a/system/easy-kernel/1000-version.patch
+++ b/system/easy-kernel/1000-version.patch
@@ -2,12 +2,12 @@ diff -Naur linux-5.15/Makefile linux-5.15-branded/Makefile
--- linux-5.15/Makefile 2022-11-20 13:01:48.939295185 +1100
+++ linux-5.15-branded/Makefile 2022-11-20 13:02:53.729292859 +1100
@@ -2,8 +2,8 @@
- VERSION = 5
- PATCHLEVEL = 15
- SUBLEVEL = 76
+ VERSION = 6
+ PATCHLEVEL = 1
+ SUBLEVEL = 3
-EXTRAVERSION =
--NAME = Trick or Treat
-+EXTRAVERSION = -mc3
+-NAME = Hurr durr I'ma ninja sloth
++EXTRAVERSION = -mc0
+NAME = Ponder the Icosahedron
# *DOCUMENTATION*
diff --git a/system/easy-kernel/APKBUILD b/system/easy-kernel/APKBUILD
index a3fe0aad0..f184f9ab5 100644
--- a/system/easy-kernel/APKBUILD
+++ b/system/easy-kernel/APKBUILD
@@ -2,9 +2,9 @@
# Maintainer: Adelie Platform Group <adelie-devel@lists.adelielinux.org>
# KEEP THIS IN SYNC with the other easy-kernel packages.
_kflavour=""
-_patchver=3 # must match 1000-version.patch
+_patchver=0 # must match 1000-version.patch
_pkgname=easy-kernel$_kflavour
-pkgver=5.15.76
+pkgver=6.1.3
pkgrel=0
pkgname=$_pkgname-$pkgver-mc$_patchver
pkgdesc="The Linux kernel, packaged for your convenience"
@@ -32,13 +32,12 @@ source="https://cdn.kernel.org/pub/linux/kernel/v${_pkgmajver}.x/linux-${_pkgmin
config-x86_64
kernel.h
- 0100-linux-5.15.76.patch
+ 0100-linux-6.1.3.patch
0120-XATTR_USER_PREFIX.patch
0122-link-security-restrictions.patch
0124-bluetooth-keysize-check.patch
0126-sign-file-libressl.patch
0200-x86-compile.patch
- 0255-ultra-ksm.patch
0260-reduce-swappiness.patch
0300-tmp513-regression-fix.patch
0500-print-fw-info.patch
@@ -153,8 +152,8 @@ src() {
mv "$srcdir"/linux-src "$subpkgdir"/usr/src/linux-$pkgver-mc$_patchver$_kflavour
}
-sha512sums="d25ad40b5bcd6a4c6042fd0fd84e196e7a58024734c3e9a484fd0d5d54a0c1d87db8a3c784eff55e43b6f021709dc685eb0efa18d2aec327e4f88a79f405705a linux-5.15.tar.xz
-025d721c9ef36ca62b18ff37d9a4ae34aeb420cbe5fb1d9a32081e0cac2693e0cd9cf8b87175920166bfa07011e66a7db802acd563d2153ebb1c21e5e4e99e41 config-aarch64
+sha512sums="6ed2a73c2699d0810e54753715635736fc370288ad5ce95c594f2379959b0e418665cd71bc512a0273fe226fe90074d8b10d14c209080a6466498417a4fdda68 linux-6.1.tar.xz
+e1b496c86bab54401fbb98e26bdfcd7eb482e35a2d4c85fc9da964dbe7f3f54fb72d76b46f17af920f5f21f66cc5d9b32f794a32ba15fabcbfd5b74d3afad0d8 config-aarch64
9f5279d20fc6eaad78ab27b7fb86553e310369c8a68a2ff60c7cd9895febd3002cae748ad3a8b4fddbb62c6e829104138fc2bbca939e1c88c0bfcf7aa42809bf config-armv7
d87b8052b5180e5a2ebfe248fae9917afad3aec4c156836106238368e7f990f9ac5e5f6fa4251cd240c3726bfb8bdab91467902d1ddf65305049a2e74ce2ba02 config-m68k
4f585e36cc0f4a8ec47a131f15fc25c2e36e42a2ec00ddbb8b2460ba136c63d70bd1d435b418ac23b5df75a51cb3d05a5e3e174c85aad5261f0d0d73f6353b30 config-pmmx
@@ -163,18 +162,17 @@ c6b7f7cb0c6a155465b2db849187a95ff1a31dd157699019a3f3f44e522c62c6e6ec29532b1b1b0c
e0c859b385140a390ef696a6c26ff28843282d0a0d57cabb50b0d8da66bbb17c37ee5733010d57bd58c6eac5f4e1eb4accf33216d8638115d376f0a431803f78 config-sparc64
d02dad2531c13ccb8dc9b0bdb31943ba12b374f559b6c0ffaac7cf7d3b809e35b83df705758ce1ea223611ffe6f399295a06286e6ad9033ae8b861ad4933f049 config-x86_64
1ca1bc094a8106b33176ab6cfb55c9514e7d69f1ed30c580eee419a3c422a2e6625d71c4581702e911f36c4bbac175010f46b7123bb81fda3303d4decdc57287 kernel.h
-9e2e5c0f45c532f3782c575d2d1b1dd34c94ecb21ae19798284c118e56dedfd08b3956892639e54550da94e808ba4e3c73b8e665e44993d6c9d2ec2fdf193946 0100-linux-5.15.76.patch
-3ed100909f9aed72836a3c712e45e0116cd3c4331961a76a27b867a7098d0df9458387b656c9ea01385c3c37585436e48168ac35666b0e46dca7da05e5e38a61 0120-XATTR_USER_PREFIX.patch
-c97a3799a2d5e4da9c9dfe129756da629fba8183479b02ca82f9b6d9993f17a165a96bd35ac50eb25fb293785b9b529a95165b1a2eb79c05134bee8ccf22a5d3 0122-link-security-restrictions.patch
+09685671e002ccd3787d6b1690f7066641d666c10d28d55fdab0eb1a52bd4607074d7c7397f727a1a5dde4cd431906d854fc0ffbef3b8d02fa419abcd99c54ab 0100-linux-6.1.3.patch
+489dd3e2f2991526043b91614cc2ebf626f809e0981cbb6569d7b201586be019fbc3e8587925de7a22bac5511f4d472a6bc216878eb9f0c20baa9e7fca2f2073 0120-XATTR_USER_PREFIX.patch
+d333494e1a261175ab11d84ace49ad3dcb010614e61d0bfe1d39d7c330d1c0e0311699423fbec5841c9c6ff514f4f5b1e63072f0289450ac2883f1d3a80c2961 0122-link-security-restrictions.patch
dc47b18749d95a456f8bc47fd6a0618c286b646b38466c3d950dfbeb25adf3fc1a794e95552e4da1abb58e49f0bd841f7222e71c4d04cb0264ca23476ca9caef 0124-bluetooth-keysize-check.patch
79eaf814d76402a445efc961666a7c7c74207e552b0cb32d93d5cb828da580f7dbe93509dc9f53321c7844663205a8dce4e518ba047e4c57fc55f5c3498088ec 0126-sign-file-libressl.patch
-16b8b8333fe89a68bc7f9a49b1bae263ab830692c12981640ac3dd9a0fb687f90b53783be05d47e5b38759ace4d99e82b59edd537a85a7ee27e21e65bbfa40a6 0200-x86-compile.patch
-ed92b74c9ea406eb65a304b3c5b93de69167569c70d4d5027ae0a83c59159596ce88bd433573832e36bc1d7fb66b31d20921aa2bc583c7fbd9bf5563075c4c41 0255-ultra-ksm.patch
+8ee913a4187740457a2de64708edf757666c6a8a7f8ef30aaa8eee22290a30fa5d636d10de1fad334a30b4acdb733ffe556fb046d5d1769bde3b4e85906189d5 0200-x86-compile.patch
5f74e6a72876d3cf3b3188a43b999b981b6ea0ca401ad72b3c7d5cc65bf505f50e7ee17d435ec95b7a012dc92e6540aea1bdb501f48690c242705c47d2403513 0260-reduce-swappiness.patch
4e637935c2f37cc18f347293e3c94b18f90e2caccca726304a95c4891257a5b2bb3093aee7a97571038b29c0c987cc60a9a80aefd0d4c9a063b33d102f03579e 0300-tmp513-regression-fix.patch
8ddac562bd16fd96aea4b5165cf4a93eaee49011805c5d648913cea6865a1415c61aed5a34bfc319e4cd394dbaebad1360a6e07dab6e02b6b425a0e414107984 0500-print-fw-info.patch
-f0e532539e93d19fc65b417b4a0663e3757823340b968f63bd3a2665f99524feebb843ecf88ccf6909f93a8e7e9290721677c8f43bc3a2a37d99a51c1281a469 0502-gcc9-kcflags.patch
-8e5b1357056c31a865bf00ec5999e8df6836674ebe99a6a6f3cc460d91b45075a9436e891948f04f8b3732c52b6f9272ff4c4bb02a5b4b0bc92fc77ea53a3081 1000-version.patch
+acadf608e1546761388b41a632eff08b57ab4aba6daf62e3d7f48a07515e3cd7be7ea66032c9785b06c86b31e018c70df6b0d6dfc77cfd13bd7f64bd020f9218 0502-gcc9-kcflags.patch
+44eea23f3911764bead8ec1e038acf237fa84c75892391bf96197c866a0b2f1dd2d648fce5b206788937e47885353f3f33c7bdaae0b3ab02ab14fc44b4a0e604 1000-version.patch
03a73db9eda84a52315499cb511f730946939d2de1b3aa52c60f9bd3a364377a65ddf2b62f505689a84d3e2f0fc7da5ca90429629d93d9909360ee0c3c599bbe no-require-gnu-tar.patch
aadf8a3cc46a08e3a396ebd45656aee235103db7a2155cc6980df20b750151a9938b8b73c9319c6cd1b5f8aba6ce707f857a47dabf69df8d91dd93d440cffcb5 no-require-lilo.patch
7bb07eb22002cc48caf0cd55d17ce4097aa583e0ca4048c11c92e1519761b2ae982ffe98311543d4b0dfc991c8bc411b2e1c7be9488b6c6f19ffaa08e69e2f47 no-autoload-fb.conf
diff --git a/system/easy-kernel/config-aarch64 b/system/easy-kernel/config-aarch64
index b13f8d7b7..85b19e38a 100644
--- a/system/easy-kernel/config-aarch64
+++ b/system/easy-kernel/config-aarch64
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/arm64 5.15.76-mc3 Kernel Configuration
+# Linux/arm64 6.1.3-mc0 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="gcc (Adelie 8.5.0) 8.5.0"
CONFIG_CC_IS_GCC=y
@@ -13,9 +13,9 @@ CONFIG_LD_VERSION=23200
CONFIG_LLD_VERSION=0
CONFIG_CC_CAN_LINK=y
CONFIG_CC_CAN_LINK_STATIC=y
-CONFIG_CC_HAS_ASM_GOTO=y
CONFIG_CC_HAS_ASM_INLINE=y
CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
+CONFIG_PAHOLE_VERSION=0
CONFIG_IRQ_WORK=y
CONFIG_BUILDTIME_TABLE_SORT=y
CONFIG_THREAD_INFO_IN_TASK=y
@@ -31,9 +31,9 @@ CONFIG_LOCALVERSION="-easy"
CONFIG_BUILD_SALT=""
CONFIG_DEFAULT_INIT=""
CONFIG_DEFAULT_HOSTNAME="adelie"
-CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_SYSVIPC_COMPAT=y
CONFIG_POSIX_MQUEUE=y
CONFIG_POSIX_MQUEUE_SYSCTL=y
# CONFIG_WATCH_QUEUE is not set
@@ -59,7 +59,6 @@ CONFIG_GENERIC_IRQ_IPI=y
CONFIG_GENERIC_MSI_IRQ=y
CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
CONFIG_IRQ_MSI_IOMMU=y
-CONFIG_HANDLE_DOMAIN_IRQ=y
CONFIG_IRQ_FORCED_THREADING=y
CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_IRQ_DEBUGFS is not set
@@ -69,6 +68,10 @@ CONFIG_GENERIC_TIME_VSYSCALL=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_ARCH_HAS_TICK_BROADCAST=y
CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y
+CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y
+CONFIG_CONTEXT_TRACKING=y
+CONFIG_CONTEXT_TRACKING_IDLE=y
#
# Timers subsystem
@@ -97,9 +100,11 @@ CONFIG_BPF_UNPRIV_DEFAULT_OFF=y
# CONFIG_BPF_PRELOAD is not set
# end of BPF subsystem
+CONFIG_PREEMPT_VOLUNTARY_BUILD=y
# CONFIG_PREEMPT_NONE is not set
CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_PREEMPT is not set
+# CONFIG_PREEMPT_DYNAMIC is not set
CONFIG_SCHED_CORE=y
#
@@ -152,13 +157,15 @@ CONFIG_GENERIC_SCHED_CLOCK=y
CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
CONFIG_CC_HAS_INT128=y
+CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
+CONFIG_GCC12_NO_ARRAY_BOUNDS=y
CONFIG_ARCH_SUPPORTS_INT128=y
CONFIG_NUMA_BALANCING=y
CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
CONFIG_CGROUPS=y
CONFIG_PAGE_COUNTER=y
+# CONFIG_CGROUP_FAVOR_DYNMODS is not set
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_MEMCG_KMEM=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_WRITEBACK=y
@@ -189,7 +196,7 @@ CONFIG_NET_NS=y
# CONFIG_CHECKPOINT_RESTORE is not set
CONFIG_SCHED_AUTOGROUP=y
# CONFIG_SYSFS_DEPRECATED is not set
-# CONFIG_RELAY is not set
+CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
@@ -200,6 +207,8 @@ CONFIG_RD_LZO=y
CONFIG_RD_LZ4=y
CONFIG_RD_ZSTD=y
CONFIG_BOOT_CONFIG=y
+# CONFIG_BOOT_CONFIG_EMBED is not set
+CONFIG_INITRAMFS_PRESERVE_MTIME=y
CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_LD_ORPHAN_WARN=y
@@ -219,7 +228,6 @@ CONFIG_ELF_CORE=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_FUTEX_PI=y
-CONFIG_HAVE_FUTEX_CMPXCHG=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
@@ -228,18 +236,17 @@ CONFIG_SHMEM=y
CONFIG_AIO=y
CONFIG_IO_URING=y
CONFIG_ADVISE_SYSCALLS=y
-CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y
CONFIG_MEMBARRIER=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
CONFIG_KALLSYMS_BASE_RELATIVE=y
-CONFIG_USERFAULTFD=y
CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
CONFIG_KCMP=y
CONFIG_RSEQ=y
# CONFIG_DEBUG_RSEQ is not set
# CONFIG_EMBEDDED is not set
CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_GUEST_PERF_EVENTS=y
# CONFIG_PC104 is not set
#
@@ -249,22 +256,12 @@ CONFIG_PERF_EVENTS=y
# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
# end of Kernel Performance Events And Counters
-CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_SLUB_DEBUG is not set
-# CONFIG_COMPAT_BRK is not set
-# CONFIG_SLAB is not set
-CONFIG_SLUB=y
-# CONFIG_SLOB is not set
-CONFIG_SLAB_MERGE_DEFAULT=y
-CONFIG_SLAB_FREELIST_RANDOM=y
-CONFIG_SLAB_FREELIST_HARDENED=y
-# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set
-CONFIG_SLUB_CPU_PARTIAL=y
CONFIG_SYSTEM_DATA_VERIFICATION=y
CONFIG_PROFILING=y
# end of General setup
CONFIG_ARM64=y
+CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS=y
CONFIG_64BIT=y
CONFIG_MMU=y
CONFIG_ARM64_PAGE_SHIFT=12
@@ -297,32 +294,36 @@ CONFIG_ARCH_PROC_KCORE_TEXT=y
CONFIG_ARCH_SUNXI=y
CONFIG_ARCH_ALPINE=y
CONFIG_ARCH_APPLE=y
+CONFIG_ARCH_BCM=y
CONFIG_ARCH_BCM2835=y
-# CONFIG_ARCH_BCM4908 is not set
# CONFIG_ARCH_BCM_IPROC is not set
+# CONFIG_ARCH_BCMBCA is not set
+# CONFIG_ARCH_BRCMSTB is not set
# CONFIG_ARCH_BERLIN is not set
# CONFIG_ARCH_BITMAIN is not set
-# CONFIG_ARCH_BRCMSTB is not set
CONFIG_ARCH_EXYNOS=y
# CONFIG_ARCH_SPARX5 is not set
# CONFIG_ARCH_K3 is not set
-CONFIG_ARCH_LAYERSCAPE=y
# CONFIG_ARCH_LG1K is not set
CONFIG_ARCH_HISI=y
# CONFIG_ARCH_KEEMBAY is not set
CONFIG_ARCH_MEDIATEK=y
CONFIG_ARCH_MESON=y
CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_NXP=y
+CONFIG_ARCH_LAYERSCAPE=y
CONFIG_ARCH_MXC=y
-# CONFIG_ARCH_QCOM is not set
+# CONFIG_ARCH_S32 is not set
+# CONFIG_ARCH_NPCM is not set
+CONFIG_ARCH_QCOM=y
CONFIG_ARCH_REALTEK=y
# CONFIG_ARCH_RENESAS is not set
CONFIG_ARCH_ROCKCHIP=y
-# CONFIG_ARCH_S32 is not set
# CONFIG_ARCH_SEATTLE is not set
# CONFIG_ARCH_INTEL_SOCFPGA is not set
CONFIG_ARCH_SYNQUACER=y
CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_TESLA_FSD=y
# CONFIG_ARCH_SPRD is not set
CONFIG_ARCH_THUNDER=y
CONFIG_ARCH_THUNDER2=y
@@ -363,6 +364,12 @@ CONFIG_ARM64_ERRATUM_1286807=y
CONFIG_ARM64_ERRATUM_1463225=y
CONFIG_ARM64_ERRATUM_1542419=y
CONFIG_ARM64_ERRATUM_1508412=y
+CONFIG_ARM64_ERRATUM_2051678=y
+CONFIG_ARM64_ERRATUM_2077057=y
+CONFIG_ARM64_ERRATUM_2658417=y
+CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y
+CONFIG_ARM64_ERRATUM_2054223=y
+CONFIG_ARM64_ERRATUM_2067961=y
CONFIG_ARM64_ERRATUM_2441009=y
CONFIG_ARM64_ERRATUM_2457168=y
CONFIG_CAVIUM_ERRATUM_22375=y
@@ -392,14 +399,12 @@ CONFIG_ARM64_PA_BITS=48
# CONFIG_CPU_BIG_ENDIAN is not set
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_SCHED_MC=y
+# CONFIG_SCHED_CLUSTER is not set
CONFIG_SCHED_SMT=y
CONFIG_NR_CPUS=256
CONFIG_HOTPLUG_CPU=y
CONFIG_NUMA=y
CONFIG_NODES_SHIFT=2
-CONFIG_USE_PERCPU_NUMA_NODE_ID=y
-CONFIG_HAVE_SETUP_PER_CPU_AREA=y
-CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
CONFIG_HZ_100=y
# CONFIG_HZ_250 is not set
# CONFIG_HZ_300 is not set
@@ -417,7 +422,7 @@ CONFIG_KEXEC_FILE=y
CONFIG_TRANS_TABLE=y
CONFIG_XEN_DOM0=y
CONFIG_XEN=y
-CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_ARCH_FORCE_MAX_ORDER=11
CONFIG_UNMAP_KERNEL_AT_EL0=y
CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y
CONFIG_RODATA_FULL_DEFAULT_ENABLED=y
@@ -425,6 +430,7 @@ CONFIG_ARM64_SW_TTBR0_PAN=y
CONFIG_ARM64_TAGGED_ADDR_ABI=y
CONFIG_COMPAT=y
CONFIG_KUSER_HELPERS=y
+# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set
CONFIG_ARMV8_DEPRECATED=y
# CONFIG_SWP_EMULATION is not set
CONFIG_CP15_BARRIER_EMULATION=y
@@ -444,6 +450,8 @@ CONFIG_ARM64_USE_LSE_ATOMICS=y
#
# ARMv8.2 architectural features
#
+CONFIG_AS_HAS_ARMV8_2=y
+CONFIG_AS_HAS_SHA3=y
# CONFIG_ARM64_PMEM is not set
CONFIG_ARM64_RAS_EXTN=y
CONFIG_ARM64_CNP=y
@@ -471,7 +479,6 @@ CONFIG_ARM64_TLB_RANGE=y
CONFIG_AS_HAS_ARMV8_5=y
CONFIG_ARM64_BTI=y
CONFIG_ARM64_E0PD=y
-CONFIG_ARCH_RANDOM=y
# end of ARMv8.5 architectural features
#
@@ -486,6 +493,7 @@ CONFIG_ARM64_MODULE_PLTS=y
CONFIG_RELOCATABLE=y
CONFIG_RANDOMIZE_BASE=y
CONFIG_RANDOMIZE_MODULE_REGION_FULL=y
+CONFIG_ARCH_NR_GPIO=2048
# end of Kernel Features
#
@@ -498,8 +506,6 @@ CONFIG_EFI=y
CONFIG_DMI=y
# end of Boot options
-CONFIG_SYSVIPC_COMPAT=y
-
#
# Power management options
#
@@ -513,6 +519,7 @@ CONFIG_PM_STD_PARTITION=""
CONFIG_PM_SLEEP=y
CONFIG_PM_SLEEP_SMP=y
# CONFIG_PM_AUTOSLEEP is not set
+# CONFIG_PM_USERSPACE_AUTOSLEEP is not set
# CONFIG_PM_WAKELOCKS is not set
CONFIG_PM=y
CONFIG_PM_DEBUG=y
@@ -544,11 +551,11 @@ CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y
CONFIG_CPU_IDLE_GOV_MENU=y
CONFIG_CPU_IDLE_GOV_TEO=y
CONFIG_DT_IDLE_STATES=y
+CONFIG_DT_IDLE_GENPD=y
#
# ARM CPU Idle Drivers
#
-CONFIG_ARM_CPUIDLE=y
CONFIG_ARM_PSCI_CPUIDLE=y
CONFIG_ARM_PSCI_CPUIDLE_DOMAIN=y
# end of ARM CPU Idle Drivers
@@ -586,6 +593,7 @@ CONFIG_ARM_SCPI_CPUFREQ=y
CONFIG_ARM_IMX_CPUFREQ_DT=m
CONFIG_ARM_MEDIATEK_CPUFREQ=m
CONFIG_ARM_MEDIATEK_CPUFREQ_HW=m
+# CONFIG_ARM_QCOM_CPUFREQ_HW is not set
CONFIG_ARM_SCMI_CPUFREQ=y
CONFIG_ARM_TEGRA20_CPUFREQ=m
CONFIG_ARM_TEGRA124_CPUFREQ=y
@@ -604,6 +612,7 @@ CONFIG_ACPI_SPCR_TABLE=y
CONFIG_ACPI_AC=y
CONFIG_ACPI_BATTERY=y
CONFIG_ACPI_BUTTON=y
+CONFIG_ACPI_VIDEO=m
CONFIG_ACPI_FAN=y
# CONFIG_ACPI_TAD is not set
CONFIG_ACPI_DOCK=y
@@ -627,13 +636,14 @@ CONFIG_ACPI_NUMA=y
CONFIG_HAVE_ACPI_APEI=y
# CONFIG_ACPI_APEI is not set
# CONFIG_ACPI_CONFIGFS is not set
+# CONFIG_ACPI_PFRUT is not set
CONFIG_ACPI_IORT=y
CONFIG_ACPI_GTDT=y
CONFIG_ACPI_PPTT=y
# CONFIG_PMIC_OPREGION is not set
+CONFIG_ACPI_PRMT=y
CONFIG_IRQ_BYPASS_MANAGER=y
-CONFIG_VIRTUALIZATION=y
-CONFIG_KVM=y
+CONFIG_HAVE_KVM=y
CONFIG_HAVE_KVM_IRQCHIP=y
CONFIG_HAVE_KVM_IRQFD=y
CONFIG_HAVE_KVM_IRQ_ROUTING=y
@@ -647,27 +657,9 @@ CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
CONFIG_HAVE_KVM_IRQ_BYPASS=y
CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y
CONFIG_KVM_XFER_TO_GUEST_WORK=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
# CONFIG_NVHE_EL2_DEBUG is not set
-CONFIG_ARM64_CRYPTO=y
-CONFIG_CRYPTO_SHA256_ARM64=m
-CONFIG_CRYPTO_SHA512_ARM64=m
-CONFIG_CRYPTO_SHA1_ARM64_CE=m
-CONFIG_CRYPTO_SHA2_ARM64_CE=m
-CONFIG_CRYPTO_SHA512_ARM64_CE=m
-CONFIG_CRYPTO_SHA3_ARM64=m
-CONFIG_CRYPTO_SM3_ARM64_CE=m
-CONFIG_CRYPTO_SM4_ARM64_CE=m
-CONFIG_CRYPTO_GHASH_ARM64_CE=m
-CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
-CONFIG_CRYPTO_AES_ARM64=m
-CONFIG_CRYPTO_AES_ARM64_CE=m
-CONFIG_CRYPTO_AES_ARM64_CE_CCM=m
-CONFIG_CRYPTO_AES_ARM64_CE_BLK=m
-CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
-CONFIG_CRYPTO_CHACHA20_NEON=m
-CONFIG_CRYPTO_POLY1305_NEON=m
-CONFIG_CRYPTO_NHPOLY1305_NEON=m
-CONFIG_CRYPTO_AES_ARM64_BS=m
#
# General architecture-dependent options
@@ -679,8 +671,10 @@ CONFIG_JUMP_LABEL=y
# CONFIG_STATIC_KEYS_SELFTEST is not set
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
CONFIG_KRETPROBES=y
+CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
+CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y
CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
CONFIG_HAVE_NMI=y
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
@@ -724,21 +718,26 @@ CONFIG_ARCH_SUPPORTS_LTO_CLANG=y
CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y
CONFIG_LTO_NONE=y
CONFIG_ARCH_SUPPORTS_CFI_CLANG=y
-CONFIG_HAVE_CONTEXT_TRACKING=y
+CONFIG_HAVE_CONTEXT_TRACKING_USER=y
CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
CONFIG_HAVE_MOVE_PUD=y
CONFIG_HAVE_MOVE_PMD=y
CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
CONFIG_HAVE_ARCH_HUGE_VMAP=y
+CONFIG_HAVE_ARCH_HUGE_VMALLOC=y
CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
CONFIG_MODULES_USE_ELF_RELA=y
+CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y
+CONFIG_SOFTIRQ_ON_OWN_STACK=y
CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
CONFIG_ARCH_MMAP_RND_BITS=18
CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11
+CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
+CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y
CONFIG_CLONE_BACKWARDS=y
CONFIG_OLD_SIGSUSPEND3=y
@@ -747,6 +746,7 @@ CONFIG_COMPAT_32BIT_TIME=y
CONFIG_HAVE_ARCH_VMAP_STACK=y
CONFIG_VMAP_STACK=y
CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y
+CONFIG_RANDOMIZE_KSTACK_OFFSET=y
# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set
CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
CONFIG_STRICT_KERNEL_RWX=y
@@ -757,8 +757,12 @@ CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y
CONFIG_ARCH_USE_MEMREMAP_PROT=y
# CONFIG_LOCK_EVENT_COUNTS is not set
CONFIG_ARCH_HAS_RELR=y
+CONFIG_HAVE_PREEMPT_DYNAMIC=y
+CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y
CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y
+CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y
#
# GCOV-based kernel profiling
@@ -778,6 +782,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_ASM_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
@@ -794,14 +799,17 @@ CONFIG_MODULE_SIG_HASH="sha512"
# CONFIG_MODULE_COMPRESS_GZIP is not set
CONFIG_MODULE_COMPRESS_XZ=y
# CONFIG_MODULE_COMPRESS_ZSTD is not set
+# CONFIG_MODULE_DECOMPRESS is not set
# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set
CONFIG_MODPROBE_PATH="/sbin/modprobe"
# CONFIG_TRIM_UNUSED_KSYMS is not set
CONFIG_MODULES_TREE_LOOKUP=y
CONFIG_BLOCK=y
+CONFIG_BLOCK_LEGACY_AUTOLOAD=y
CONFIG_BLK_RQ_ALLOC_TIME=y
CONFIG_BLK_CGROUP_RWSTAT=y
CONFIG_BLK_DEV_BSG_COMMON=y
+CONFIG_BLK_ICQ=y
CONFIG_BLK_DEV_BSGLIB=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_INTEGRITY_T10=y
@@ -849,6 +857,7 @@ CONFIG_BLK_MQ_PCI=y
CONFIG_BLK_MQ_VIRTIO=y
CONFIG_BLK_PM=y
CONFIG_BLOCK_HOLDER_DEPRECATED=y
+CONFIG_BLK_MQ_STACKING=y
#
# IO Schedulers
@@ -932,6 +941,7 @@ CONFIG_FREEZER=y
CONFIG_BINFMT_ELF=y
CONFIG_COMPAT_BINFMT_ELF=y
CONFIG_ARCH_BINFMT_ELF_STATE=y
+CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y
CONFIG_ARCH_HAVE_ELF_PROT=y
CONFIG_ARCH_USE_GNU_PROPERTY=y
CONFIG_ELFCORE=y
@@ -944,6 +954,41 @@ CONFIG_COREDUMP=y
#
# Memory Management options
#
+CONFIG_ZPOOL=y
+CONFIG_SWAP=y
+CONFIG_ZSWAP=y
+# CONFIG_ZSWAP_DEFAULT_ON is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo"
+CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
+CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud"
+CONFIG_ZBUD=y
+CONFIG_Z3FOLD=m
+CONFIG_ZSMALLOC=m
+# CONFIG_ZSMALLOC_STAT is not set
+
+#
+# SLAB allocator options
+#
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_SLAB_MERGE_DEFAULT=y
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+# CONFIG_SLUB_STATS is not set
+CONFIG_SLUB_CPU_PARTIAL=y
+# end of SLAB allocator options
+
+# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set
+# CONFIG_COMPAT_BRK is not set
CONFIG_SPARSEMEM=y
CONFIG_SPARSEMEM_EXTREME=y
CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
@@ -952,11 +997,11 @@ CONFIG_HAVE_FAST_GUP=y
CONFIG_ARCH_KEEP_MEMBLOCK=y
CONFIG_NUMA_KEEP_MEMINFO=y
CONFIG_MEMORY_ISOLATION=y
+CONFIG_EXCLUSIVE_SYSTEM_RAM=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
CONFIG_MEMORY_HOTPLUG=y
-CONFIG_MEMORY_HOTPLUG_SPARSE=y
# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set
-CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
# CONFIG_MEMORY_HOTREMOVE is not set
CONFIG_MHP_MEMMAP_ON_MEMORY=y
CONFIG_SPLIT_PTLOCK_CPUS=4
@@ -964,6 +1009,7 @@ CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
CONFIG_MEMORY_BALLOON=y
CONFIG_BALLOON_COMPACTION=y
CONFIG_COMPACTION=y
+CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
CONFIG_PAGE_REPORTING=y
CONFIG_MIGRATION=y
CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
@@ -972,53 +1018,46 @@ CONFIG_CONTIG_ALLOC=y
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_MMU_NOTIFIER=y
CONFIG_KSM=y
-CONFIG_UKSM=y
-# CONFIG_KSM_LEGACY is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
CONFIG_MEMORY_FAILURE=y
# CONFIG_HWPOISON_INJECT is not set
+CONFIG_ARCH_WANTS_THP_SWAP=y
CONFIG_TRANSPARENT_HUGEPAGE=y
# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set
CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
-# CONFIG_CLEANCACHE is not set
+CONFIG_THP_SWAP=y
+# CONFIG_READ_ONLY_THP_FOR_FS is not set
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_USE_PERCPU_NUMA_NODE_ID=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
CONFIG_FRONTSWAP=y
CONFIG_CMA=y
# CONFIG_CMA_DEBUG is not set
CONFIG_CMA_DEBUGFS=y
# CONFIG_CMA_SYSFS is not set
CONFIG_CMA_AREAS=7
-CONFIG_ZSWAP=y
-# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set
-CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y
-# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set
-# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set
-# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set
-# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
-CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo"
-CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
-# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set
-# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
-CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud"
-# CONFIG_ZSWAP_DEFAULT_ON is not set
-CONFIG_ZPOOL=y
-CONFIG_ZBUD=y
-CONFIG_Z3FOLD=m
-# CONFIG_ZSMALLOC is not set
CONFIG_GENERIC_EARLY_IOREMAP=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
# CONFIG_IDLE_PAGE_TRACKING is not set
CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y
CONFIG_ARCH_HAS_PTE_DEVMAP=y
CONFIG_ARCH_HAS_ZONE_DMA_SET=y
CONFIG_ZONE_DMA=y
CONFIG_ZONE_DMA32=y
+CONFIG_VMAP_PFN=y
+CONFIG_VM_EVENT_COUNTERS=y
# CONFIG_PERCPU_STATS is not set
# CONFIG_GUP_TEST is not set
-# CONFIG_READ_ONLY_THP_FOR_FS is not set
CONFIG_ARCH_HAS_PTE_SPECIAL=y
CONFIG_MAPPING_DIRTY_HELPERS=y
CONFIG_SECRETMEM=y
+CONFIG_ANON_VMA_NAME=y
+CONFIG_USERFAULTFD=y
+CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y
+# CONFIG_LRU_GEN is not set
#
# Data Access Monitoring
@@ -1086,6 +1125,7 @@ CONFIG_INET_ESP=m
CONFIG_INET_ESP_OFFLOAD=m
# CONFIG_INET_ESPINTCP is not set
CONFIG_INET_IPCOMP=m
+CONFIG_INET_TABLE_PERTURB_ORDER=16
CONFIG_INET_XFRM_TUNNEL=m
CONFIG_INET_TUNNEL=m
CONFIG_INET_DIAG=m
@@ -1157,6 +1197,8 @@ CONFIG_BRIDGE_NETFILTER=m
# Core Netfilter Configuration
#
CONFIG_NETFILTER_INGRESS=y
+CONFIG_NETFILTER_EGRESS=y
+CONFIG_NETFILTER_SKIP_EGRESS=y
CONFIG_NETFILTER_NETLINK=m
CONFIG_NETFILTER_FAMILY_BRIDGE=y
CONFIG_NETFILTER_FAMILY_ARP=y
@@ -1209,7 +1251,6 @@ CONFIG_NF_TABLES_NETDEV=y
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_COUNTER=m
CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -1238,6 +1279,7 @@ CONFIG_NFT_FWD_NETDEV=m
# CONFIG_NFT_REJECT_NETDEV is not set
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
+# CONFIG_NF_FLOW_TABLE_PROCFS is not set
CONFIG_NETFILTER_XTABLES=m
CONFIG_NETFILTER_XTABLES_COMPAT=y
@@ -1357,7 +1399,6 @@ CONFIG_NFT_REJECT_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
-CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_DUP_IPV4=m
CONFIG_NF_LOG_ARP=m
CONFIG_NF_LOG_IPV4=m
@@ -1397,7 +1438,6 @@ CONFIG_NF_TABLES_IPV6=y
CONFIG_NFT_REJECT_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
-CONFIG_NF_FLOW_TABLE_IPV6=m
CONFIG_NF_DUP_IPV6=m
CONFIG_NF_REJECT_IPV6=m
CONFIG_NF_LOG_IPV6=m
@@ -1480,7 +1520,6 @@ CONFIG_BRIDGE_VLAN_FILTERING=y
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_VLAN_8021Q_MVRP=y
-# CONFIG_DECNET is not set
CONFIG_LLC=m
# CONFIG_LLC2 is not set
# CONFIG_ATALK is not set
@@ -1593,6 +1632,7 @@ CONFIG_NET_NSH=m
# CONFIG_NET_SWITCHDEV is not set
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_QRTR=m
+# CONFIG_QRTR_SMD is not set
# CONFIG_QRTR_TUN is not set
CONFIG_QRTR_MHI=m
# CONFIG_NET_NCSI is not set
@@ -1641,6 +1681,7 @@ CONFIG_BT_INTEL=m
CONFIG_BT_BCM=m
CONFIG_BT_RTL=m
CONFIG_BT_QCA=m
+CONFIG_BT_MTK=m
CONFIG_BT_HCIBTUSB=m
CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
CONFIG_BT_HCIBTUSB_BCM=y
@@ -1673,6 +1714,7 @@ CONFIG_BT_MRVL=m
CONFIG_BT_ATH3K=m
# CONFIG_BT_MTKSDIO is not set
CONFIG_BT_MTKUART=m
+# CONFIG_BT_QCOMSMD is not set
CONFIG_BT_HCIRSI=m
CONFIG_BT_VIRTIO=m
# end of Bluetooth device drivers
@@ -1719,6 +1761,7 @@ CONFIG_RFKILL_LEDS=y
CONFIG_RFKILL_INPUT=y
# CONFIG_RFKILL_GPIO is not set
CONFIG_NET_9P=m
+CONFIG_NET_9P_FD=m
CONFIG_NET_9P_VIRTIO=m
CONFIG_NET_9P_XEN=m
# CONFIG_NET_9P_DEBUG is not set
@@ -1735,6 +1778,7 @@ CONFIG_NET_SELFTESTS=y
CONFIG_NET_SOCK_MSG=y
CONFIG_NET_DEVLINK=y
CONFIG_PAGE_POOL=y
+CONFIG_PAGE_POOL_STATS=y
CONFIG_FAILOVER=m
CONFIG_ETHTOOL_NETLINK=y
@@ -1775,11 +1819,14 @@ CONFIG_PCI_IOV=y
# CONFIG_PCI_PRI is not set
# CONFIG_PCI_PASID is not set
CONFIG_PCI_LABEL=y
+CONFIG_PCI_HYPERV=m
# CONFIG_PCIE_BUS_TUNE_OFF is not set
CONFIG_PCIE_BUS_DEFAULT=y
# CONFIG_PCIE_BUS_SAFE is not set
# CONFIG_PCIE_BUS_PERFORMANCE is not set
# CONFIG_PCIE_BUS_PEER2PEER is not set
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
CONFIG_HOTPLUG_PCI=y
# CONFIG_HOTPLUG_PCI_ACPI is not set
# CONFIG_HOTPLUG_PCI_CPCI is not set
@@ -1804,7 +1851,10 @@ CONFIG_PCIE_ROCKCHIP_HOST=m
CONFIG_PCIE_MEDIATEK=m
# CONFIG_PCIE_MEDIATEK_GEN3 is not set
# CONFIG_PCIE_BRCMSTB is not set
+CONFIG_PCI_HYPERV_INTERFACE=m
# CONFIG_PCIE_MICROCHIP_HOST is not set
+CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR=0xfffff000
+CONFIG_PCIE_APPLE=m
#
# DesignWare PCI Core Support
@@ -1816,6 +1866,7 @@ CONFIG_PCIE_DW_HOST=y
CONFIG_PCI_IMX6=y
CONFIG_PCI_LAYERSCAPE=y
CONFIG_PCI_HISI=y
+# CONFIG_PCIE_QCOM is not set
CONFIG_PCIE_ARMADA_8K=y
# CONFIG_PCIE_ROCKCHIP_DW_HOST is not set
CONFIG_PCIE_KIRIN=y
@@ -1878,6 +1929,7 @@ CONFIG_AUXILIARY_BUS=y
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEVTMPFS_SAFE=y
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
@@ -1889,7 +1941,10 @@ CONFIG_FW_LOADER_PAGED_BUF=y
CONFIG_EXTRA_FIRMWARE=""
# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_FW_LOADER_COMPRESS=y
+CONFIG_FW_LOADER_COMPRESS_XZ=y
+# CONFIG_FW_LOADER_COMPRESS_ZSTD is not set
CONFIG_FW_CACHE=y
+# CONFIG_FW_UPLOAD is not set
# end of Firmware loader
CONFIG_WANT_DEV_COREDUMP=y
@@ -1905,8 +1960,10 @@ CONFIG_SOC_BUS=y
CONFIG_REGMAP=y
CONFIG_REGMAP_I2C=y
CONFIG_REGMAP_SPI=y
+CONFIG_REGMAP_SPMI=m
CONFIG_REGMAP_MMIO=y
CONFIG_REGMAP_IRQ=y
+CONFIG_REGMAP_SOUNDWIRE=m
CONFIG_DMA_SHARED_BUFFER=y
# CONFIG_DMA_FENCE_TRACE is not set
CONFIG_GENERIC_ARCH_TOPOLOGY=y
@@ -1920,6 +1977,8 @@ CONFIG_BRCMSTB_GISB_ARB=y
# CONFIG_MOXTET is not set
# CONFIG_HISILICON_LPC is not set
# CONFIG_IMX_WEIM is not set
+CONFIG_QCOM_EBI2=y
+# CONFIG_QCOM_SSC_BLOCK_BUS is not set
CONFIG_SUN50I_DE2_BUS=y
CONFIG_SUNXI_RSB=y
CONFIG_TEGRA_ACONNECT=m
@@ -1930,6 +1989,7 @@ CONFIG_VEXPRESS_CONFIG=y
CONFIG_MHI_BUS=m
# CONFIG_MHI_BUS_DEBUG is not set
# CONFIG_MHI_BUS_PCI_GENERIC is not set
+# CONFIG_MHI_BUS_EP is not set
# end of Bus devices
CONFIG_CONNECTOR=m
@@ -1944,10 +2004,14 @@ CONFIG_CONNECTOR=m
CONFIG_ARM_SCMI_PROTOCOL=y
CONFIG_ARM_SCMI_HAVE_TRANSPORT=y
CONFIG_ARM_SCMI_HAVE_SHMEM=y
+CONFIG_ARM_SCMI_HAVE_MSG=y
CONFIG_ARM_SCMI_TRANSPORT_MAILBOX=y
+CONFIG_ARM_SCMI_TRANSPORT_OPTEE=y
CONFIG_ARM_SCMI_TRANSPORT_SMC=y
+# CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE is not set
# CONFIG_ARM_SCMI_TRANSPORT_VIRTIO is not set
CONFIG_ARM_SCMI_POWER_DOMAIN=y
+# CONFIG_ARM_SCMI_POWER_CONTROL is not set
# end of ARM System Control and Management Interface Protocol
CONFIG_ARM_SCPI_PROTOCOL=y
@@ -1959,6 +2023,8 @@ CONFIG_DMI_SYSFS=y
CONFIG_RASPBERRYPI_FIRMWARE=m
CONFIG_FW_CFG_SYSFS=m
CONFIG_FW_CFG_SYSFS_CMDLINE=y
+CONFIG_QCOM_SCM=y
+# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set
CONFIG_SYSFB=y
# CONFIG_SYSFB_SIMPLEFB is not set
# CONFIG_TURRIS_MOX_RWTM is not set
@@ -1973,6 +2039,7 @@ CONFIG_EFI_ESRT=y
CONFIG_EFI_PARAMS_FROM_FDT=y
CONFIG_EFI_RUNTIME_WRAPPERS=y
CONFIG_EFI_GENERIC_STUB=y
+# CONFIG_EFI_ZBOOT is not set
CONFIG_EFI_ARMSTUB_DTB_LOADER=y
CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
CONFIG_EFI_BOOTLOADER_CONTROL=m
@@ -1980,10 +2047,12 @@ CONFIG_EFI_BOOTLOADER_CONTROL=m
# CONFIG_EFI_TEST is not set
# CONFIG_RESET_ATTACK_MITIGATION is not set
# CONFIG_EFI_DISABLE_PCI_DMA is not set
-# end of EFI (Extensible Firmware Interface) Support
-
CONFIG_EFI_EARLYCON=y
CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y
+# CONFIG_EFI_DISABLE_RUNTIME is not set
+# CONFIG_EFI_COCO_SECRET is not set
+# end of EFI (Extensible Firmware Interface) Support
+
CONFIG_MESON_SM=y
CONFIG_ARM_PSCI_FW=y
# CONFIG_ARM_PSCI_CHECKER is not set
@@ -2004,6 +2073,7 @@ CONFIG_GNSS_SERIAL=m
CONFIG_GNSS_MTK_SERIAL=m
CONFIG_GNSS_SIRF_SERIAL=m
CONFIG_GNSS_UBX_SERIAL=m
+# CONFIG_GNSS_USB is not set
CONFIG_MTD=m
# CONFIG_MTD_TESTS is not set
@@ -2016,6 +2086,7 @@ CONFIG_MTD_OF_PARTS=m
# CONFIG_MTD_AFS_PARTS is not set
# CONFIG_MTD_PARSER_TRX is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_QCOMSMEM_PARTS is not set
# end of Partition parsers
#
@@ -2101,6 +2172,8 @@ CONFIG_MTD_BLOCK2MTD=m
#
# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set
# CONFIG_MTD_NAND_ECC_SW_BCH is not set
+# CONFIG_MTD_NAND_ECC_MXIC is not set
+# CONFIG_MTD_NAND_ECC_MEDIATEK is not set
# end of ECC engine support
# end of NAND
@@ -2148,12 +2221,16 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_NULL_BLK is not set
CONFIG_CDROM=y
# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+CONFIG_ZRAM=m
+CONFIG_ZRAM_DEF_COMP_LZORLE=y
+# CONFIG_ZRAM_DEF_COMP_LZO is not set
+CONFIG_ZRAM_DEF_COMP="lzo-rle"
+# CONFIG_ZRAM_WRITEBACK is not set
+# CONFIG_ZRAM_MEMORY_TRACKING is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
# CONFIG_BLK_DEV_DRBD is not set
CONFIG_BLK_DEV_NBD=m
-# CONFIG_BLK_DEV_SX8 is not set
CONFIG_BLK_DEV_RAM=m
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
@@ -2163,7 +2240,7 @@ CONFIG_XEN_BLKDEV_FRONTEND=y
CONFIG_XEN_BLKDEV_BACKEND=m
CONFIG_VIRTIO_BLK=y
# CONFIG_BLK_DEV_RBD is not set
-# CONFIG_BLK_DEV_RSXX is not set
+# CONFIG_BLK_DEV_UBLK is not set
#
# NVME Support
@@ -2171,16 +2248,20 @@ CONFIG_VIRTIO_BLK=y
CONFIG_NVME_CORE=y
CONFIG_BLK_DEV_NVME=y
CONFIG_NVME_MULTIPATH=y
+# CONFIG_NVME_VERBOSE_ERRORS is not set
# CONFIG_NVME_HWMON is not set
CONFIG_NVME_FABRICS=m
CONFIG_NVME_FC=m
CONFIG_NVME_TCP=m
+# CONFIG_NVME_AUTH is not set
+CONFIG_NVME_APPLE=m
CONFIG_NVME_TARGET=m
# CONFIG_NVME_TARGET_PASSTHRU is not set
CONFIG_NVME_TARGET_LOOP=m
CONFIG_NVME_TARGET_FC=m
# CONFIG_NVME_TARGET_FCLOOP is not set
CONFIG_NVME_TARGET_TCP=m
+# CONFIG_NVME_TARGET_AUTH is not set
# end of NVME Support
#
@@ -2195,6 +2276,7 @@ CONFIG_TIFM_7XX1=m
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HI6421V600_IRQ is not set
# CONFIG_HP_ILO is not set
+# CONFIG_QCOM_FASTRPC is not set
# CONFIG_APDS9802ALS is not set
# CONFIG_ISL29003 is not set
# CONFIG_ISL29020 is not set
@@ -2209,6 +2291,8 @@ CONFIG_SRAM=y
# CONFIG_PCI_ENDPOINT_TEST is not set
# CONFIG_XILINX_SDFEC is not set
# CONFIG_HISI_HIKEY_USB is not set
+# CONFIG_OPEN_DICE is not set
+# CONFIG_VCPU_STALL_DETECTOR is not set
# CONFIG_C2PORT is not set
#
@@ -2234,6 +2318,7 @@ CONFIG_EEPROM_EE1004=m
# CONFIG_SENSORS_LIS3_I2C is not set
# CONFIG_ALTERA_STAPL is not set
+CONFIG_VMWARE_VMCI=m
# CONFIG_GENWQE is not set
# CONFIG_ECHO is not set
# CONFIG_BCM_VK is not set
@@ -2243,6 +2328,7 @@ CONFIG_EEPROM_EE1004=m
# CONFIG_HABANA_AI is not set
# CONFIG_UACCE is not set
# CONFIG_PVPANIC is not set
+# CONFIG_GP_PCI1XXXX is not set
# end of Misc devices
#
@@ -2329,18 +2415,9 @@ CONFIG_SCSI_MPT3SAS_MAX_SGE=128
# CONFIG_SCSI_MPT2SAS is not set
CONFIG_SCSI_MPI3MR=m
CONFIG_SCSI_SMARTPQI=m
-CONFIG_SCSI_UFSHCD=m
-CONFIG_SCSI_UFSHCD_PCI=m
-# CONFIG_SCSI_UFS_DWC_TC_PCI is not set
-CONFIG_SCSI_UFSHCD_PLATFORM=m
-# CONFIG_SCSI_UFS_CDNS_PLATFORM is not set
-# CONFIG_SCSI_UFS_DWC_TC_PLATFORM is not set
-# CONFIG_SCSI_UFS_MEDIATEK is not set
-# CONFIG_SCSI_UFS_HISI is not set
-# CONFIG_SCSI_UFS_BSG is not set
-# CONFIG_SCSI_UFS_EXYNOS is not set
-# CONFIG_SCSI_UFS_HPB is not set
CONFIG_SCSI_HPTIOP=m
+CONFIG_SCSI_BUSLOGIC=m
+# CONFIG_SCSI_FLASHPOINT is not set
CONFIG_SCSI_MYRB=m
CONFIG_SCSI_MYRS=m
CONFIG_XEN_SCSI_FRONTEND=y
@@ -2373,7 +2450,6 @@ CONFIG_SCSI_VIRTIO=y
# CONFIG_SCSI_DH is not set
# end of SCSI device support
-CONFIG_HAVE_PATA_PLATFORM=y
CONFIG_ATA=y
CONFIG_SATA_HOST=y
CONFIG_PATA_TIMINGS=y
@@ -2389,6 +2465,7 @@ CONFIG_SATA_PMP=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_MOBILE_LPM_POLICY=0
CONFIG_SATA_AHCI_PLATFORM=y
+# CONFIG_AHCI_DWC is not set
CONFIG_AHCI_IMX=m
CONFIG_AHCI_CEVA=y
CONFIG_AHCI_MTK=m
@@ -2484,6 +2561,7 @@ CONFIG_VXLAN=m
# CONFIG_GENEVE is not set
# CONFIG_BAREUDP is not set
# CONFIG_GTP is not set
+# CONFIG_AMT is not set
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -2539,6 +2617,8 @@ CONFIG_AQTION=m
CONFIG_NET_VENDOR_ARC=y
CONFIG_ARC_EMAC_CORE=m
CONFIG_EMAC_ROCKCHIP=m
+CONFIG_NET_VENDOR_ASIX=y
+# CONFIG_SPI_AX88796C is not set
CONFIG_NET_VENDOR_ATHEROS=y
CONFIG_ATL2=m
CONFIG_ATL1=m
@@ -2586,6 +2666,8 @@ CONFIG_NET_VENDOR_CISCO=y
CONFIG_ENIC=m
CONFIG_NET_VENDOR_CORTINA=y
CONFIG_GEMINI_ETHERNET=m
+CONFIG_NET_VENDOR_DAVICOM=y
+# CONFIG_DM9051 is not set
CONFIG_DNET=m
CONFIG_NET_VENDOR_DEC=y
CONFIG_NET_TULIP=y
@@ -2610,6 +2692,8 @@ CONFIG_BE2NET_BE2=y
CONFIG_BE2NET_BE3=y
CONFIG_BE2NET_LANCER=y
CONFIG_BE2NET_SKYHAWK=y
+CONFIG_NET_VENDOR_ENGLEDER=y
+# CONFIG_TSNEP is not set
CONFIG_NET_VENDOR_EZCHIP=y
# CONFIG_EZCHIP_NPS_MANAGEMENT_ENET is not set
CONFIG_NET_VENDOR_FREESCALE=y
@@ -2626,6 +2710,8 @@ CONFIG_FSL_ENETC_PTP_CLOCK=m
# CONFIG_FSL_ENETC_QOS is not set
CONFIG_NET_VENDOR_FUJITSU=y
# CONFIG_PCMCIA_FMVJ18X is not set
+CONFIG_NET_VENDOR_FUNGIBLE=y
+# CONFIG_FUN_ETH is not set
CONFIG_NET_VENDOR_GOOGLE=y
# CONFIG_GVE is not set
CONFIG_NET_VENDOR_HISILICON=y
@@ -2659,7 +2745,11 @@ CONFIG_I40EVF=m
CONFIG_ICE=m
CONFIG_FM10K=m
CONFIG_IGC=m
+CONFIG_NET_VENDOR_WANGXUN=y
+# CONFIG_NGBE is not set
+# CONFIG_TXGBE is not set
CONFIG_JME=m
+CONFIG_NET_VENDOR_ADI=y
CONFIG_NET_VENDOR_LITEX=y
CONFIG_LITEX_LITEETH=m
CONFIG_NET_VENDOR_MARVELL=y
@@ -2675,6 +2765,7 @@ CONFIG_OCTEONTX2_AF=m
# CONFIG_NDC_DIS_DYNAMIC_CACHING is not set
CONFIG_OCTEONTX2_PF=m
CONFIG_OCTEONTX2_VF=m
+# CONFIG_OCTEON_EP is not set
# CONFIG_NET_VENDOR_MEDIATEK is not set
CONFIG_NET_VENDOR_MELLANOX=y
CONFIG_MLX4_EN=m
@@ -2713,7 +2804,6 @@ CONFIG_NET_VENDOR_NATSEMI=y
# CONFIG_NS83820 is not set
CONFIG_NET_VENDOR_NETERION=y
# CONFIG_S2IO is not set
-# CONFIG_VXGE is not set
CONFIG_NET_VENDOR_NETRONOME=y
# CONFIG_NFP is not set
CONFIG_NET_VENDOR_8390=y
@@ -2764,6 +2854,7 @@ CONFIG_NET_VENDOR_SIS=y
CONFIG_NET_VENDOR_SOLARFLARE=y
# CONFIG_SFC is not set
# CONFIG_SFC_FALCON is not set
+# CONFIG_SFC_SIENA is not set
CONFIG_NET_VENDOR_SMSC=y
CONFIG_SMC91X=y
# CONFIG_PCMCIA_SMC91C92 is not set
@@ -2779,8 +2870,10 @@ CONFIG_STMMAC_ETH=m
CONFIG_STMMAC_PLATFORM=m
# CONFIG_DWMAC_DWC_QOS_ETH is not set
CONFIG_DWMAC_GENERIC=m
+CONFIG_DWMAC_IPQ806X=m
# CONFIG_DWMAC_MEDIATEK is not set
CONFIG_DWMAC_MESON=m
+CONFIG_DWMAC_QCOM_ETHQOS=m
CONFIG_DWMAC_ROCKCHIP=m
CONFIG_DWMAC_SUNXI=m
CONFIG_DWMAC_SUN8I=m
@@ -2800,6 +2893,8 @@ CONFIG_NET_VENDOR_TEHUTI=y
CONFIG_NET_VENDOR_TI=y
# CONFIG_TI_CPSW_PHY_SEL is not set
# CONFIG_TLAN is not set
+CONFIG_NET_VENDOR_VERTEXCOM=y
+# CONFIG_MSE102X is not set
CONFIG_NET_VENDOR_VIA=y
# CONFIG_VIA_RHINE is not set
# CONFIG_VIA_VELOCITY is not set
@@ -2830,6 +2925,7 @@ CONFIG_FIXED_PHY=y
# CONFIG_AMD_PHY is not set
CONFIG_MESON_GXL_PHY=m
# CONFIG_ADIN_PHY is not set
+# CONFIG_ADIN1100_PHY is not set
# CONFIG_AQUANTIA_PHY is not set
CONFIG_AX88796B_PHY=m
CONFIG_BROADCOM_PHY=m
@@ -2871,9 +2967,11 @@ CONFIG_SMSC_PHY=m
# CONFIG_DP83848_PHY is not set
# CONFIG_DP83867_PHY is not set
# CONFIG_DP83869_PHY is not set
+# CONFIG_DP83TD510_PHY is not set
# CONFIG_VITESSE_PHY is not set
# CONFIG_XILINX_GMII2RGMII is not set
# CONFIG_MICREL_KS8995MA is not set
+# CONFIG_PSE_CONTROLLER is not set
CONFIG_MDIO_DEVICE=y
CONFIG_MDIO_BUS=y
CONFIG_FWNODE_MDIO=y
@@ -2908,6 +3006,7 @@ CONFIG_MDIO_BUS_MUX_MMIOREG=y
#
CONFIG_PCS_XPCS=m
CONFIG_PCS_LYNX=m
+CONFIG_PCS_ALTERA_TSE=m
# end of PCS device drivers
CONFIG_PPP=m
@@ -3015,6 +3114,7 @@ CONFIG_ATH10K_PCI=m
CONFIG_ATH10K_AHB=y
CONFIG_ATH10K_SDIO=m
CONFIG_ATH10K_USB=m
+# CONFIG_ATH10K_SNOC is not set
# CONFIG_ATH10K_DEBUG is not set
# CONFIG_ATH10K_DEBUGFS is not set
CONFIG_WCN36XX=m
@@ -3098,7 +3198,6 @@ CONFIG_IWLWIFI_LEDS=y
CONFIG_IWLDVM=m
CONFIG_IWLMVM=m
CONFIG_IWLWIFI_OPMODE_MODULAR=y
-# CONFIG_IWLWIFI_BCAST_FILTERING is not set
#
# Debugging Options
@@ -3167,12 +3266,18 @@ CONFIG_MT7663_USB_SDIO_COMMON=m
CONFIG_MT7663U=m
CONFIG_MT7663S=m
CONFIG_MT7915E=m
+# CONFIG_MT7986_WMAC is not set
+CONFIG_MT7921_COMMON=m
CONFIG_MT7921E=m
+# CONFIG_MT7921S is not set
+# CONFIG_MT7921U is not set
CONFIG_WLAN_VENDOR_MICROCHIP=y
CONFIG_WILC1000=m
CONFIG_WILC1000_SDIO=m
CONFIG_WILC1000_SPI=m
# CONFIG_WILC1000_HW_OOB_INTR is not set
+CONFIG_WLAN_VENDOR_PURELIFI=y
+# CONFIG_PLFXLC is not set
CONFIG_WLAN_VENDOR_RALINK=y
CONFIG_RT2X00=m
CONFIG_RT2400PCI=m
@@ -3239,12 +3344,23 @@ CONFIG_RTW88_8723DE=m
CONFIG_RTW88_8821CE=m
# CONFIG_RTW88_DEBUG is not set
# CONFIG_RTW88_DEBUGFS is not set
+CONFIG_RTW89=m
+CONFIG_RTW89_CORE=m
+CONFIG_RTW89_PCI=m
+CONFIG_RTW89_8852A=m
+CONFIG_RTW89_8852C=m
+CONFIG_RTW89_8852AE=m
+CONFIG_RTW89_8852CE=m
+# CONFIG_RTW89_DEBUGMSG is not set
+# CONFIG_RTW89_DEBUGFS is not set
CONFIG_WLAN_VENDOR_RSI=y
CONFIG_RSI_91X=m
CONFIG_RSI_DEBUGFS=y
CONFIG_RSI_SDIO=m
CONFIG_RSI_USB=m
CONFIG_RSI_COEX=y
+CONFIG_WLAN_VENDOR_SILABS=y
+# CONFIG_WFX is not set
CONFIG_WLAN_VENDOR_ST=y
CONFIG_CW1200=m
CONFIG_CW1200_WLAN_SDIO=m
@@ -3296,6 +3412,7 @@ CONFIG_INPUT_LEDS=y
CONFIG_INPUT_FF_MEMLESS=y
# CONFIG_INPUT_SPARSEKMAP is not set
CONFIG_INPUT_MATRIXKMAP=y
+CONFIG_INPUT_VIVALDIFMAP=y
#
# Userland interfaces
@@ -3333,6 +3450,7 @@ CONFIG_KEYBOARD_GPIO=y
# CONFIG_KEYBOARD_NEWTON is not set
# CONFIG_KEYBOARD_TEGRA is not set
# CONFIG_KEYBOARD_OPENCORES is not set
+CONFIG_KEYBOARD_PINEPHONE=m
# CONFIG_KEYBOARD_SAMSUNG is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
# CONFIG_KEYBOARD_SUNKBD is not set
@@ -3340,9 +3458,10 @@ CONFIG_KEYBOARD_GPIO=y
# CONFIG_KEYBOARD_OMAP4 is not set
# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set
# CONFIG_KEYBOARD_XTKBD is not set
-CONFIG_KEYBOARD_CROS_EC=y
# CONFIG_KEYBOARD_CAP11XX is not set
# CONFIG_KEYBOARD_BCM is not set
+# CONFIG_KEYBOARD_MT6779 is not set
+# CONFIG_KEYBOARD_CYPRESS_SF is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
CONFIG_MOUSE_PS2_ALPS=y
@@ -3394,9 +3513,11 @@ CONFIG_INPUT_UINPUT=y
# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
# CONFIG_INPUT_DA7280_HAPTICS is not set
# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IBM_PANEL is not set
# CONFIG_INPUT_IMS_PCU is not set
# CONFIG_INPUT_IQS269A is not set
# CONFIG_INPUT_IQS626A is not set
+# CONFIG_INPUT_IQS7222 is not set
# CONFIG_INPUT_CMA3000 is not set
CONFIG_INPUT_XEN_KBDDEV_FRONTEND=y
# CONFIG_INPUT_SOC_BUTTON_ARRAY is not set
@@ -3460,7 +3581,6 @@ CONFIG_SERIAL_8250_NR_UARTS=4
CONFIG_SERIAL_8250_RUNTIME_UARTS=4
CONFIG_SERIAL_8250_EXTENDED=y
# CONFIG_SERIAL_8250_MANY_PORTS is not set
-# CONFIG_SERIAL_8250_ASPEED_VUART is not set
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_SERIAL_8250_DETECT_IRQ is not set
# CONFIG_SERIAL_8250_RSA is not set
@@ -3471,6 +3591,7 @@ CONFIG_SERIAL_8250_DW=y
# CONFIG_SERIAL_8250_RT288X is not set
CONFIG_SERIAL_8250_MT6577=y
CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_8250_PERICOM=y
CONFIG_SERIAL_8250_TEGRA=y
CONFIG_SERIAL_OF_PLATFORM=y
@@ -3497,10 +3618,11 @@ CONFIG_SERIAL_TEGRA=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_MSM is not set
+# CONFIG_SERIAL_QCOM_GENI is not set
# CONFIG_SERIAL_SIFIVE is not set
# CONFIG_SERIAL_SCCNXP is not set
# CONFIG_SERIAL_SC16IS7XX is not set
-# CONFIG_SERIAL_BCM63XX is not set
# CONFIG_SERIAL_ALTERA_JTAGUART is not set
# CONFIG_SERIAL_ALTERA_UART is not set
CONFIG_SERIAL_XILINX_PS_UART=y
@@ -3525,6 +3647,7 @@ CONFIG_HVC_IRQ=y
CONFIG_HVC_XEN=y
CONFIG_HVC_XEN_FRONTEND=y
# CONFIG_HVC_DCC is not set
+# CONFIG_RPMSG_TTY is not set
CONFIG_SERIAL_DEV_BUS=y
CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
# CONFIG_TTY_PRINTK is not set
@@ -3548,6 +3671,7 @@ CONFIG_HW_RANDOM_OPTEE=m
# CONFIG_HW_RANDOM_CCTRNG is not set
# CONFIG_HW_RANDOM_XIPHERA is not set
CONFIG_HW_RANDOM_ARM_SMCCC_TRNG=m
+CONFIG_HW_RANDOM_CN10K=m
# CONFIG_APPLICOM is not set
#
@@ -3649,10 +3773,15 @@ CONFIG_I2C_MESON=y
CONFIG_I2C_MV64XXX=y
# CONFIG_I2C_NOMADIK is not set
# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_APPLE=y
# CONFIG_I2C_PCA_PLATFORM is not set
CONFIG_I2C_PXA=y
# CONFIG_I2C_PXA_SLAVE is not set
+# CONFIG_I2C_QCOM_CCI is not set
+# CONFIG_I2C_QCOM_GENI is not set
+# CONFIG_I2C_QUP is not set
CONFIG_I2C_RK3X=y
+CONFIG_I2C_S3C2410=m
# CONFIG_I2C_SIMTEC is not set
# CONFIG_I2C_SYNQUACER is not set
CONFIG_I2C_TEGRA=y
@@ -3669,6 +3798,7 @@ CONFIG_I2C_UNIPHIER_F=y
#
# CONFIG_I2C_DIOLAN_U2C is not set
# CONFIG_I2C_CP2615 is not set
+# CONFIG_I2C_PCI1XXXX is not set
# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
# CONFIG_I2C_TAOS_EVM is not set
# CONFIG_I2C_TINY_USB is not set
@@ -3676,7 +3806,6 @@ CONFIG_I2C_UNIPHIER_F=y
#
# Other I2C/SMBus bus drivers
#
-CONFIG_I2C_CROS_EC_TUNNEL=y
# CONFIG_I2C_XGENE_SLIMPRO is not set
# CONFIG_I2C_VIRTIO is not set
# end of I2C Hardware Bus support
@@ -3704,9 +3833,11 @@ CONFIG_SPI_MEM=y
# CONFIG_SPI_AXI_SPI_ENGINE is not set
CONFIG_SPI_BCM2835=y
CONFIG_SPI_BCM2835AUX=y
+# CONFIG_SPI_BCM_QSPI is not set
# CONFIG_SPI_BITBANG is not set
# CONFIG_SPI_CADENCE is not set
# CONFIG_SPI_CADENCE_QUADSPI is not set
+# CONFIG_SPI_CADENCE_XSPI is not set
# CONFIG_SPI_DESIGNWARE is not set
# CONFIG_SPI_FSL_LPSPI is not set
# CONFIG_SPI_FSL_QUADSPI is not set
@@ -3719,6 +3850,8 @@ CONFIG_SPI_BCM2835AUX=y
# CONFIG_SPI_FSL_DSPI is not set
CONFIG_SPI_MESON_SPICC=m
CONFIG_SPI_MESON_SPIFC=m
+# CONFIG_SPI_MICROCHIP_CORE is not set
+# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set
# CONFIG_SPI_MT65XX is not set
# CONFIG_SPI_MTK_NOR is not set
# CONFIG_SPI_OC_TINY is not set
@@ -3727,6 +3860,9 @@ CONFIG_SPI_PL022=y
# CONFIG_SPI_PXA2XX is not set
CONFIG_SPI_ROCKCHIP=y
# CONFIG_SPI_ROCKCHIP_SFC is not set
+# CONFIG_SPI_QCOM_QSPI is not set
+# CONFIG_SPI_QUP is not set
+# CONFIG_SPI_QCOM_GENI is not set
CONFIG_SPI_S3C64XX=y
# CONFIG_SPI_SC18IS602 is not set
# CONFIG_SPI_SIFIVE is not set
@@ -3761,6 +3897,8 @@ CONFIG_SPI_SPIDEV=m
CONFIG_SPI_DYNAMIC=y
CONFIG_SPMI=y
# CONFIG_SPMI_HISI3670 is not set
+CONFIG_SPMI_MSM_PMIC_ARB=y
+# CONFIG_SPMI_MTK_PMIF is not set
# CONFIG_HSI is not set
CONFIG_PPS=y
# CONFIG_PPS_DEBUG is not set
@@ -3799,27 +3937,99 @@ CONFIG_GENERIC_PINMUX_FUNCTIONS=y
CONFIG_PINCONF=y
CONFIG_GENERIC_PINCONF=y
# CONFIG_DEBUG_PINCTRL is not set
-# CONFIG_PINCTRL_AXP209 is not set
# CONFIG_PINCTRL_AMD is not set
+CONFIG_PINCTRL_APPLE_GPIO=m
+# CONFIG_PINCTRL_AXP209 is not set
+# CONFIG_PINCTRL_CY8C95X0 is not set
+CONFIG_PINCTRL_MAX77620=y
# CONFIG_PINCTRL_MCP23S08 is not set
+# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set
+# CONFIG_PINCTRL_OCELOT is not set
+# CONFIG_PINCTRL_RK805 is not set
CONFIG_PINCTRL_ROCKCHIP=y
CONFIG_PINCTRL_SINGLE=y
-# CONFIG_PINCTRL_SX150X is not set
# CONFIG_PINCTRL_STMFX is not set
-CONFIG_PINCTRL_MAX77620=y
-# CONFIG_PINCTRL_RK805 is not set
-# CONFIG_PINCTRL_OCELOT is not set
-# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set
+# CONFIG_PINCTRL_SX150X is not set
CONFIG_PINCTRL_BCM2835=y
# CONFIG_PINCTRL_IMX8MM is not set
# CONFIG_PINCTRL_IMX8MN is not set
# CONFIG_PINCTRL_IMX8MP is not set
# CONFIG_PINCTRL_IMX8MQ is not set
# CONFIG_PINCTRL_IMX8ULP is not set
+# CONFIG_PINCTRL_IMXRT1050 is not set
+# CONFIG_PINCTRL_IMX93 is not set
+# CONFIG_PINCTRL_IMXRT1170 is not set
+
+#
+# MediaTek pinctrl drivers
+#
+CONFIG_EINT_MTK=y
+CONFIG_PINCTRL_MTK=y
+CONFIG_PINCTRL_MTK_V2=y
+CONFIG_PINCTRL_MTK_MOORE=y
+CONFIG_PINCTRL_MTK_PARIS=y
+CONFIG_PINCTRL_MT2712=y
+CONFIG_PINCTRL_MT6765=y
+CONFIG_PINCTRL_MT6779=y
+CONFIG_PINCTRL_MT6795=y
+CONFIG_PINCTRL_MT6797=y
+CONFIG_PINCTRL_MT7622=y
+CONFIG_PINCTRL_MT7986=y
+CONFIG_PINCTRL_MT8167=y
+CONFIG_PINCTRL_MT8173=y
+CONFIG_PINCTRL_MT8183=y
+CONFIG_PINCTRL_MT8186=y
+CONFIG_PINCTRL_MT8188=y
+CONFIG_PINCTRL_MT8192=y
+# CONFIG_PINCTRL_MT8195 is not set
+CONFIG_PINCTRL_MT8365=y
+CONFIG_PINCTRL_MT8516=y
+# end of MediaTek pinctrl drivers
+
+CONFIG_PINCTRL_MESON=y
+CONFIG_PINCTRL_MESON_GXBB=y
+CONFIG_PINCTRL_MESON_GXL=y
+CONFIG_PINCTRL_MESON8_PMX=y
+CONFIG_PINCTRL_MESON_AXG=y
+CONFIG_PINCTRL_MESON_AXG_PMX=y
+CONFIG_PINCTRL_MESON_G12A=y
+CONFIG_PINCTRL_MESON_A1=y
+CONFIG_PINCTRL_MESON_S4=y
CONFIG_PINCTRL_MVEBU=y
CONFIG_PINCTRL_ARMADA_AP806=y
CONFIG_PINCTRL_ARMADA_CP110=y
+CONFIG_PINCTRL_AC5=y
CONFIG_PINCTRL_ARMADA_37XX=y
+CONFIG_PINCTRL_MSM=m
+CONFIG_PINCTRL_IPQ8074=m
+CONFIG_PINCTRL_IPQ6018=m
+CONFIG_PINCTRL_MDM9607=m
+CONFIG_PINCTRL_MSM8916=m
+CONFIG_PINCTRL_MSM8953=m
+CONFIG_PINCTRL_MSM8976=m
+CONFIG_PINCTRL_MSM8994=m
+CONFIG_PINCTRL_MSM8996=m
+CONFIG_PINCTRL_MSM8998=m
+CONFIG_PINCTRL_QCM2290=m
+CONFIG_PINCTRL_QCS404=m
+CONFIG_PINCTRL_QDF2XXX=m
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=m
+CONFIG_PINCTRL_QCOM_SSBI_PMIC=m
+CONFIG_PINCTRL_SC7180=m
+CONFIG_PINCTRL_SC7280=m
+CONFIG_PINCTRL_SC8180X=m
+CONFIG_PINCTRL_SC8280XP=m
+CONFIG_PINCTRL_SDM660=m
+CONFIG_PINCTRL_SDM845=m
+CONFIG_PINCTRL_SM6115=m
+CONFIG_PINCTRL_SM6125=m
+CONFIG_PINCTRL_SM6350=m
+CONFIG_PINCTRL_SM6375=m
+CONFIG_PINCTRL_SM8150=m
+CONFIG_PINCTRL_SM8250=m
+CONFIG_PINCTRL_SM8350=m
+CONFIG_PINCTRL_SM8450=m
+# CONFIG_PINCTRL_LPASS_LPI is not set
#
# Renesas pinctrl drivers
@@ -3844,6 +4054,7 @@ CONFIG_PINCTRL_SUN8I_H3_R=y
# CONFIG_PINCTRL_SUN8I_V3S is not set
# CONFIG_PINCTRL_SUN9I_A80 is not set
# CONFIG_PINCTRL_SUN9I_A80_R is not set
+# CONFIG_PINCTRL_SUN20I_D1 is not set
CONFIG_PINCTRL_SUN50I_A64=y
CONFIG_PINCTRL_SUN50I_A64_R=y
CONFIG_PINCTRL_SUN50I_A100=y
@@ -3867,37 +4078,7 @@ CONFIG_PINCTRL_UNIPHIER=y
CONFIG_PINCTRL_UNIPHIER_LD11=y
CONFIG_PINCTRL_UNIPHIER_LD20=y
CONFIG_PINCTRL_UNIPHIER_PXS3=y
-
-#
-# MediaTek pinctrl drivers
-#
-CONFIG_EINT_MTK=y
-CONFIG_PINCTRL_MTK=y
-CONFIG_PINCTRL_MTK_V2=y
-CONFIG_PINCTRL_MTK_MOORE=y
-CONFIG_PINCTRL_MTK_PARIS=y
-CONFIG_PINCTRL_MT2712=y
-CONFIG_PINCTRL_MT6765=y
-CONFIG_PINCTRL_MT6779=y
-CONFIG_PINCTRL_MT6797=y
-CONFIG_PINCTRL_MT7622=y
-CONFIG_PINCTRL_MT8167=y
-CONFIG_PINCTRL_MT8173=y
-CONFIG_PINCTRL_MT8183=y
-CONFIG_PINCTRL_MT8192=y
-# CONFIG_PINCTRL_MT8195 is not set
-CONFIG_PINCTRL_MT8365=y
-CONFIG_PINCTRL_MT8516=y
-# end of MediaTek pinctrl drivers
-
-CONFIG_PINCTRL_MESON=y
-CONFIG_PINCTRL_MESON_GXBB=y
-CONFIG_PINCTRL_MESON_GXL=y
-CONFIG_PINCTRL_MESON8_PMX=y
-CONFIG_PINCTRL_MESON_AXG=y
-CONFIG_PINCTRL_MESON_AXG_PMX=y
-CONFIG_PINCTRL_MESON_G12A=y
-CONFIG_PINCTRL_MESON_A1=y
+CONFIG_PINCTRL_UNIPHIER_NX1=y
CONFIG_GPIOLIB=y
CONFIG_GPIOLIB_FASTPATH_LIMIT=512
CONFIG_OF_GPIO=y
@@ -3931,13 +4112,13 @@ CONFIG_GPIO_MVEBU=y
CONFIG_GPIO_MXC=y
CONFIG_GPIO_PL061=y
CONFIG_GPIO_ROCKCHIP=y
-# CONFIG_GPIO_SAMA5D2_PIOBU is not set
# CONFIG_GPIO_SIFIVE is not set
# CONFIG_GPIO_SYSCON is not set
CONFIG_GPIO_TEGRA=y
CONFIG_GPIO_TEGRA186=y
# CONFIG_GPIO_THUNDERX is not set
# CONFIG_GPIO_UNIPHIER is not set
+CONFIG_GPIO_VF610=y
CONFIG_GPIO_XGENE=y
CONFIG_GPIO_XGENE_SB=y
# CONFIG_GPIO_XILINX is not set
@@ -3948,7 +4129,6 @@ CONFIG_GPIO_XGENE_SB=y
#
# I2C GPIO expanders
#
-# CONFIG_GPIO_ADP5588 is not set
# CONFIG_GPIO_ADNP is not set
# CONFIG_GPIO_GW_PLD is not set
# CONFIG_GPIO_MAX7300 is not set
@@ -3998,6 +4178,7 @@ CONFIG_GPIO_MAX77620=y
# CONFIG_GPIO_AGGREGATOR is not set
# CONFIG_GPIO_MOCKUP is not set
# CONFIG_GPIO_VIRTIO is not set
+# CONFIG_GPIO_SIM is not set
# end of Virtual GPIO drivers
# CONFIG_W1 is not set
@@ -4007,6 +4188,7 @@ CONFIG_POWER_RESET_BRCMSTB=y
# CONFIG_POWER_RESET_GPIO_RESTART is not set
# CONFIG_POWER_RESET_HISI is not set
# CONFIG_POWER_RESET_LINKSTATION is not set
+# CONFIG_POWER_RESET_MSM is not set
# CONFIG_POWER_RESET_LTC2952 is not set
# CONFIG_POWER_RESET_REGULATOR is not set
# CONFIG_POWER_RESET_RESTART is not set
@@ -4022,12 +4204,14 @@ CONFIG_POWER_SUPPLY=y
CONFIG_POWER_SUPPLY_HWMON=y
# CONFIG_PDA_POWER is not set
# CONFIG_GENERIC_ADC_BATTERY is not set
+# CONFIG_IP5XXX_POWER is not set
# CONFIG_TEST_POWER is not set
# CONFIG_CHARGER_ADP5061 is not set
# CONFIG_BATTERY_CW2015 is not set
# CONFIG_BATTERY_DS2780 is not set
# CONFIG_BATTERY_DS2781 is not set
# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SAMSUNG_SDI is not set
# CONFIG_BATTERY_SBS is not set
# CONFIG_CHARGER_SBS is not set
# CONFIG_MANAGER_SBS is not set
@@ -4045,6 +4229,7 @@ CONFIG_BATTERY_BQ27XXX_I2C=y
# CONFIG_CHARGER_LT3651 is not set
# CONFIG_CHARGER_LTC4162L is not set
# CONFIG_CHARGER_DETECTOR_MAX14656 is not set
+# CONFIG_CHARGER_MAX77976 is not set
# CONFIG_CHARGER_BQ2415X is not set
# CONFIG_CHARGER_BQ24190 is not set
# CONFIG_CHARGER_BQ24257 is not set
@@ -4053,15 +4238,15 @@ CONFIG_BATTERY_BQ27XXX_I2C=y
# CONFIG_CHARGER_BQ25890 is not set
# CONFIG_CHARGER_BQ25980 is not set
# CONFIG_CHARGER_BQ256XX is not set
+# CONFIG_CHARGER_RK817 is not set
# CONFIG_CHARGER_SMB347 is not set
# CONFIG_BATTERY_GAUGE_LTC2941 is not set
# CONFIG_BATTERY_GOLDFISH is not set
# CONFIG_BATTERY_RT5033 is not set
# CONFIG_CHARGER_RT9455 is not set
-# CONFIG_CHARGER_CROS_USBPD is not set
-CONFIG_CHARGER_CROS_PCHG=y
# CONFIG_CHARGER_UCS1002 is not set
# CONFIG_CHARGER_BD99954 is not set
+# CONFIG_BATTERY_UG3105 is not set
CONFIG_HWMON=y
# CONFIG_HWMON_DEBUG_CHIP is not set
@@ -4071,7 +4256,6 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_AD7314 is not set
# CONFIG_SENSORS_AD7414 is not set
# CONFIG_SENSORS_AD7418 is not set
-# CONFIG_SENSORS_ADM1021 is not set
# CONFIG_SENSORS_ADM1025 is not set
# CONFIG_SENSORS_ADM1026 is not set
# CONFIG_SENSORS_ADM1029 is not set
@@ -4091,7 +4275,6 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_AXI_FAN_CONTROL is not set
# CONFIG_SENSORS_ARM_SCMI is not set
CONFIG_SENSORS_ARM_SCPI=y
-# CONFIG_SENSORS_ASPEED is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_CORSAIR_CPRO is not set
# CONFIG_SENSORS_CORSAIR_PSU is not set
@@ -4133,9 +4316,10 @@ CONFIG_SENSORS_ARM_SCPI=y
# CONFIG_SENSORS_MAX197 is not set
# CONFIG_SENSORS_MAX31722 is not set
# CONFIG_SENSORS_MAX31730 is not set
+# CONFIG_SENSORS_MAX31760 is not set
+# CONFIG_SENSORS_MAX6620 is not set
# CONFIG_SENSORS_MAX6621 is not set
# CONFIG_SENSORS_MAX6639 is not set
-# CONFIG_SENSORS_MAX6642 is not set
# CONFIG_SENSORS_MAX6650 is not set
# CONFIG_SENSORS_MAX6697 is not set
# CONFIG_SENSORS_MAX31790 is not set
@@ -4165,10 +4349,12 @@ CONFIG_SENSORS_LM90=m
# CONFIG_SENSORS_NTC_THERMISTOR is not set
# CONFIG_SENSORS_NCT6683 is not set
# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_NCT6775_I2C is not set
# CONFIG_SENSORS_NCT7802 is not set
# CONFIG_SENSORS_NCT7904 is not set
# CONFIG_SENSORS_NPCM7XX is not set
# CONFIG_SENSORS_NZXT_KRAKEN2 is not set
+# CONFIG_SENSORS_NZXT_SMART2 is not set
# CONFIG_SENSORS_OCC_P8_I2C is not set
# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_PMBUS is not set
@@ -4185,6 +4371,7 @@ CONFIG_SENSORS_LM90=m
# CONFIG_SENSORS_DME1737 is not set
# CONFIG_SENSORS_EMC1403 is not set
# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC2305 is not set
# CONFIG_SENSORS_EMC6W201 is not set
# CONFIG_SENSORS_SMSC47M1 is not set
# CONFIG_SENSORS_SMSC47M192 is not set
@@ -4199,6 +4386,7 @@ CONFIG_SENSORS_LM90=m
# CONFIG_SENSORS_AMC6821 is not set
# CONFIG_SENSORS_INA209 is not set
CONFIG_SENSORS_INA2XX=m
+# CONFIG_SENSORS_INA238 is not set
# CONFIG_SENSORS_INA3221 is not set
# CONFIG_SENSORS_TC74 is not set
# CONFIG_SENSORS_THMC50 is not set
@@ -4207,6 +4395,7 @@ CONFIG_SENSORS_INA2XX=m
# CONFIG_SENSORS_TMP108 is not set
# CONFIG_SENSORS_TMP401 is not set
# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_TMP464 is not set
# CONFIG_SENSORS_TMP513 is not set
# CONFIG_SENSORS_VEXPRESS is not set
# CONFIG_SENSORS_VIA686A is not set
@@ -4282,6 +4471,15 @@ CONFIG_EXYNOS_THERMAL=y
# end of NVIDIA Tegra thermal drivers
# CONFIG_GENERIC_ADC_THERMAL is not set
+
+#
+# Qualcomm thermal drivers
+#
+# CONFIG_QCOM_SPMI_ADC_TM5 is not set
+# CONFIG_QCOM_SPMI_TEMP_ALARM is not set
+# CONFIG_QCOM_LMH is not set
+# end of Qualcomm thermal drivers
+
# CONFIG_UNIPHIER_THERMAL is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_CORE=y
@@ -4316,14 +4514,17 @@ CONFIG_S3C2410_WATCHDOG=y
# CONFIG_IMX2_WDT is not set
# CONFIG_IMX7ULP_WDT is not set
# CONFIG_TEGRA_WATCHDOG is not set
+# CONFIG_QCOM_WDT is not set
CONFIG_MESON_GXBB_WATCHDOG=m
CONFIG_MESON_WATCHDOG=m
# CONFIG_MEDIATEK_WATCHDOG is not set
# CONFIG_ARM_SMC_WATCHDOG is not set
CONFIG_UNIPHIER_WATCHDOG=y
CONFIG_RTD119X_WATCHDOG=y
+CONFIG_APPLE_WATCHDOG=m
# CONFIG_ALIM7101_WDT is not set
# CONFIG_I6300ESB_WDT is not set
+# CONFIG_HP_WATCHDOG is not set
CONFIG_BCM2835_WDT=y
# CONFIG_MEN_A21_WDT is not set
# CONFIG_XEN_WDT is not set
@@ -4381,7 +4582,6 @@ CONFIG_MFD_CORE=y
CONFIG_MFD_AXP20X=y
# CONFIG_MFD_AXP20X_I2C is not set
CONFIG_MFD_AXP20X_RSB=y
-CONFIG_MFD_CROS_EC_DEV=y
# CONFIG_MFD_MADERA is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_DA9052_SPI is not set
@@ -4403,7 +4603,6 @@ CONFIG_MFD_HI655X_PMIC=y
# CONFIG_HTC_I2CPLD is not set
# CONFIG_LPC_ICH is not set
# CONFIG_LPC_SCH is not set
-# CONFIG_MFD_INTEL_PMT is not set
# CONFIG_MFD_IQS62X is not set
# CONFIG_MFD_JANZ_CMODIO is not set
# CONFIG_MFD_KEMPLD is not set
@@ -4415,23 +4614,30 @@ CONFIG_MFD_MAX77620=y
# CONFIG_MFD_MAX77650 is not set
# CONFIG_MFD_MAX77686 is not set
# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX77714 is not set
# CONFIG_MFD_MAX77843 is not set
# CONFIG_MFD_MAX8907 is not set
# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_MAX8997 is not set
# CONFIG_MFD_MAX8998 is not set
# CONFIG_MFD_MT6360 is not set
+# CONFIG_MFD_MT6370 is not set
# CONFIG_MFD_MT6397 is not set
# CONFIG_MFD_MENF21BMC is not set
+# CONFIG_MFD_OCELOT is not set
# CONFIG_EZX_PCAP is not set
# CONFIG_MFD_CPCAP is not set
# CONFIG_MFD_VIPERBOARD is not set
# CONFIG_MFD_NTXEC is not set
# CONFIG_MFD_RETU is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_QCOM_RPM is not set
+# CONFIG_MFD_SPMI_PMIC is not set
+# CONFIG_MFD_SY7636A is not set
# CONFIG_MFD_RDC321X is not set
# CONFIG_MFD_RT4831 is not set
# CONFIG_MFD_RT5033 is not set
+# CONFIG_MFD_RT5120 is not set
# CONFIG_MFD_RC5T583 is not set
CONFIG_MFD_RK808=y
# CONFIG_MFD_RN5T618 is not set
@@ -4461,7 +4667,6 @@ CONFIG_MFD_SYSCON=y
# CONFIG_MFD_TPS65910 is not set
# CONFIG_MFD_TPS65912_I2C is not set
# CONFIG_MFD_TPS65912_SPI is not set
-# CONFIG_MFD_TPS80031 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_TWL6040_CORE is not set
# CONFIG_MFD_WL1273_CORE is not set
@@ -4478,7 +4683,6 @@ CONFIG_MFD_SYSCON=y
# CONFIG_MFD_WM8350_I2C is not set
CONFIG_MFD_WM8994=m
# CONFIG_MFD_ROHM_BD718XX is not set
-# CONFIG_MFD_ROHM_BD70528 is not set
# CONFIG_MFD_ROHM_BD71828 is not set
# CONFIG_MFD_ROHM_BD957XMUF is not set
# CONFIG_MFD_STPMIC1 is not set
@@ -4504,7 +4708,6 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y
# CONFIG_REGULATOR_ANATOP is not set
# CONFIG_REGULATOR_ARM_SCMI is not set
CONFIG_REGULATOR_AXP20X=y
-# CONFIG_REGULATOR_CROS_EC is not set
# CONFIG_REGULATOR_DA9121 is not set
# CONFIG_REGULATOR_DA9210 is not set
# CONFIG_REGULATOR_DA9211 is not set
@@ -4529,6 +4732,7 @@ CONFIG_REGULATOR_MAX77620=y
# CONFIG_REGULATOR_MAX8893 is not set
# CONFIG_REGULATOR_MAX8952 is not set
# CONFIG_REGULATOR_MAX8973 is not set
+# CONFIG_REGULATOR_MAX20086 is not set
# CONFIG_REGULATOR_MAX77826 is not set
# CONFIG_REGULATOR_MCP16502 is not set
# CONFIG_REGULATOR_MP5416 is not set
@@ -4544,11 +4748,15 @@ CONFIG_REGULATOR_MAX77620=y
# CONFIG_REGULATOR_PV88080 is not set
# CONFIG_REGULATOR_PV88090 is not set
CONFIG_REGULATOR_PWM=y
+# CONFIG_REGULATOR_QCOM_RPMH is not set
+# CONFIG_REGULATOR_QCOM_SMD_RPM is not set
CONFIG_REGULATOR_QCOM_SPMI=y
# CONFIG_REGULATOR_QCOM_USB_VBUS is not set
# CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set
CONFIG_REGULATOR_RK808=y
# CONFIG_REGULATOR_RT4801 is not set
+# CONFIG_REGULATOR_RT5190A is not set
+# CONFIG_REGULATOR_RT5759 is not set
# CONFIG_REGULATOR_RT6160 is not set
# CONFIG_REGULATOR_RT6245 is not set
# CONFIG_REGULATOR_RTQ2134 is not set
@@ -4563,6 +4771,7 @@ CONFIG_REGULATOR_S2MPS11=y
# CONFIG_REGULATOR_SY8827N is not set
# CONFIG_REGULATOR_TPS51632 is not set
# CONFIG_REGULATOR_TPS62360 is not set
+# CONFIG_REGULATOR_TPS6286X is not set
# CONFIG_REGULATOR_TPS65023 is not set
# CONFIG_REGULATOR_TPS6507X is not set
# CONFIG_REGULATOR_TPS65132 is not set
@@ -4570,53 +4779,57 @@ CONFIG_REGULATOR_S2MPS11=y
CONFIG_REGULATOR_UNIPHIER=y
# CONFIG_REGULATOR_VCTRL is not set
# CONFIG_REGULATOR_VEXPRESS is not set
+# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set
# CONFIG_REGULATOR_WM8994 is not set
# CONFIG_REGULATOR_QCOM_LABIBB is not set
CONFIG_RC_CORE=m
-CONFIG_RC_MAP=m
# CONFIG_LIRC is not set
+CONFIG_RC_MAP=m
CONFIG_RC_DECODERS=y
+# CONFIG_IR_IMON_DECODER is not set
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_MCE_KBD_DECODER=m
CONFIG_IR_NEC_DECODER=m
CONFIG_IR_RC5_DECODER=m
CONFIG_IR_RC6_DECODER=m
-CONFIG_IR_JVC_DECODER=m
-CONFIG_IR_SONY_DECODER=m
+# CONFIG_IR_RCMM_DECODER is not set
CONFIG_IR_SANYO_DECODER=m
CONFIG_IR_SHARP_DECODER=m
-CONFIG_IR_MCE_KBD_DECODER=m
+CONFIG_IR_SONY_DECODER=m
CONFIG_IR_XMP_DECODER=m
-# CONFIG_IR_IMON_DECODER is not set
-# CONFIG_IR_RCMM_DECODER is not set
CONFIG_RC_DEVICES=y
-# CONFIG_RC_ATI_REMOTE is not set
# CONFIG_IR_ENE is not set
+# CONFIG_IR_FINTEK is not set
+# CONFIG_IR_GPIO_CIR is not set
# CONFIG_IR_HIX5HD2 is not set
+# CONFIG_IR_IGORPLUGUSB is not set
+# CONFIG_IR_IGUANA is not set
# CONFIG_IR_IMON is not set
# CONFIG_IR_IMON_RAW is not set
-# CONFIG_IR_MCEUSB is not set
# CONFIG_IR_ITE_CIR is not set
-# CONFIG_IR_FINTEK is not set
+# CONFIG_IR_MCEUSB is not set
CONFIG_IR_MESON=m
# CONFIG_IR_MESON_TX is not set
# CONFIG_IR_MTK is not set
# CONFIG_IR_NUVOTON is not set
# CONFIG_IR_REDRAT3 is not set
+# CONFIG_IR_SERIAL is not set
# CONFIG_IR_STREAMZAP is not set
-# CONFIG_IR_IGORPLUGUSB is not set
-# CONFIG_IR_IGUANA is not set
+# CONFIG_IR_SUNXI is not set
+# CONFIG_IR_TOY is not set
# CONFIG_IR_TTUSBIR is not set
+# CONFIG_RC_ATI_REMOTE is not set
# CONFIG_RC_LOOPBACK is not set
-# CONFIG_IR_GPIO_CIR is not set
-# CONFIG_IR_SUNXI is not set
-# CONFIG_IR_SERIAL is not set
-# CONFIG_IR_SIR is not set
# CONFIG_RC_XBOX_DVD is not set
-# CONFIG_IR_TOY is not set
CONFIG_CEC_CORE=m
-CONFIG_CEC_PIN=y
+
+#
+# CEC support
+#
# CONFIG_MEDIA_CEC_RC is not set
-# CONFIG_CEC_PIN_ERROR_INJ is not set
# CONFIG_MEDIA_CEC_SUPPORT is not set
+# end of CEC support
+
CONFIG_MEDIA_SUPPORT=m
# CONFIG_MEDIA_SUPPORT_FILTER is not set
CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
@@ -4644,11 +4857,10 @@ CONFIG_DVB_CORE=m
#
# Video4Linux options
#
-CONFIG_VIDEO_V4L2=m
CONFIG_VIDEO_V4L2_I2C=y
-CONFIG_VIDEO_V4L2_SUBDEV_API=y
# CONFIG_VIDEO_ADV_DEBUG is not set
# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_V4L2_H264=m
CONFIG_V4L2_MEM2MEM_DEV=m
# end of Video4Linux options
@@ -4656,6 +4868,7 @@ CONFIG_V4L2_MEM2MEM_DEV=m
# Media controller options
#
# CONFIG_MEDIA_CONTROLLER_DVB is not set
+CONFIG_MEDIA_CONTROLLER_REQUEST_API=y
# end of Media controller options
#
@@ -4672,44 +4885,141 @@ CONFIG_DVB_MAX_ADAPTERS=16
#
# Media drivers
#
+
+#
+# Media drivers
+#
# CONFIG_MEDIA_USB_SUPPORT is not set
# CONFIG_MEDIA_PCI_SUPPORT is not set
-CONFIG_RADIO_ADAPTERS=y
-# CONFIG_RADIO_SI470X is not set
-# CONFIG_RADIO_SI4713 is not set
-# CONFIG_USB_MR800 is not set
-# CONFIG_USB_DSBR is not set
+CONFIG_RADIO_ADAPTERS=m
# CONFIG_RADIO_MAXIRADIO is not set
+# CONFIG_RADIO_SAA7706H is not set
# CONFIG_RADIO_SHARK is not set
# CONFIG_RADIO_SHARK2 is not set
-# CONFIG_USB_KEENE is not set
-# CONFIG_USB_RAREMONO is not set
-# CONFIG_USB_MA901 is not set
+# CONFIG_RADIO_SI4713 is not set
# CONFIG_RADIO_TEA5764 is not set
-# CONFIG_RADIO_SAA7706H is not set
# CONFIG_RADIO_TEF6862 is not set
# CONFIG_RADIO_WL1273 is not set
-CONFIG_VIDEOBUF2_CORE=m
-CONFIG_VIDEOBUF2_V4L2=m
-CONFIG_VIDEOBUF2_MEMOPS=m
-CONFIG_VIDEOBUF2_DMA_CONTIG=m
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_KEENE is not set
+# CONFIG_USB_MA901 is not set
+# CONFIG_USB_MR800 is not set
+# CONFIG_USB_RAREMONO is not set
+# CONFIG_RADIO_SI470X is not set
+CONFIG_MEDIA_PLATFORM_DRIVERS=y
# CONFIG_V4L_PLATFORM_DRIVERS is not set
+# CONFIG_SDR_PLATFORM_DRIVERS is not set
+# CONFIG_DVB_PLATFORM_DRIVERS is not set
CONFIG_V4L_MEM2MEM_DRIVERS=y
+# CONFIG_VIDEO_MEM2MEM_DEINTERLACE is not set
+
+#
+# Allegro DVT media platform drivers
+#
+
+#
+# Amlogic media platform drivers
+#
+# CONFIG_VIDEO_MESON_GE2D is not set
+
+#
+# Amphion drivers
+#
+# CONFIG_VIDEO_AMPHION_VPU is not set
+
+#
+# Aspeed media platform drivers
+#
+
+#
+# Atmel media platform drivers
+#
+
+#
+# Cadence media platform drivers
+#
+# CONFIG_VIDEO_CADENCE_CSI2RX is not set
+# CONFIG_VIDEO_CADENCE_CSI2TX is not set
+
+#
+# Chips&Media media platform drivers
+#
# CONFIG_VIDEO_CODA is not set
+
+#
+# Intel media platform drivers
+#
+
+#
+# Marvell media platform drivers
+#
+
+#
+# Mediatek media platform drivers
+#
+# CONFIG_VIDEO_MEDIATEK_VPU is not set
+
+#
+# NVidia media platform drivers
+#
+CONFIG_VIDEO_TEGRA_VDE=m
+
+#
+# NXP media platform drivers
+#
+# CONFIG_VIDEO_IMX_MIPI_CSIS is not set
# CONFIG_VIDEO_IMX_PXP is not set
+# CONFIG_VIDEO_DW100 is not set
# CONFIG_VIDEO_IMX8_JPEG is not set
-# CONFIG_VIDEO_MEDIATEK_VPU is not set
-# CONFIG_VIDEO_MEM2MEM_DEINTERLACE is not set
-# CONFIG_VIDEO_MESON_GE2D is not set
+
+#
+# Qualcomm media platform drivers
+#
+# CONFIG_VIDEO_QCOM_VENUS is not set
+
+#
+# Renesas media platform drivers
+#
+
+#
+# Rockchip media platform drivers
+#
+# CONFIG_VIDEO_ROCKCHIP_RGA is not set
+
+#
+# Samsung media platform drivers
+#
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
# CONFIG_VIDEO_SAMSUNG_S5P_G2D is not set
CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
-CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
-# CONFIG_VIDEO_ROCKCHIP_RGA is not set
+
+#
+# STMicroelectronics media platform drivers
+#
+
+#
+# Sunxi media platform drivers
+#
# CONFIG_VIDEO_SUN8I_DEINTERLACE is not set
# CONFIG_VIDEO_SUN8I_ROTATE is not set
-# CONFIG_DVB_PLATFORM_DRIVERS is not set
-# CONFIG_SDR_PLATFORM_DRIVERS is not set
+
+#
+# Texas Instruments drivers
+#
+
+#
+# Verisilicon media platform drivers
+#
+# CONFIG_VIDEO_HANTRO is not set
+
+#
+# VIA media platform drivers
+#
+
+#
+# Xilinx media platform drivers
+#
#
# MMC/SDIO DVB adapters
@@ -4722,6 +5032,11 @@ CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
# FireWire (IEEE 1394) Adapters
#
# CONFIG_DVB_FIREDTV is not set
+CONFIG_VIDEOBUF2_CORE=m
+CONFIG_VIDEOBUF2_V4L2=m
+CONFIG_VIDEOBUF2_MEMOPS=m
+CONFIG_VIDEOBUF2_DMA_CONTIG=m
+CONFIG_VIDEOBUF2_DMA_SG=m
# end of Media drivers
#
@@ -4735,24 +5050,115 @@ CONFIG_MEDIA_ATTACH=y
CONFIG_VIDEO_IR_I2C=m
#
+# Camera sensor devices
+#
+# CONFIG_VIDEO_AR0521 is not set
+# CONFIG_VIDEO_HI556 is not set
+# CONFIG_VIDEO_HI846 is not set
+# CONFIG_VIDEO_HI847 is not set
+# CONFIG_VIDEO_IMX208 is not set
+# CONFIG_VIDEO_IMX214 is not set
+# CONFIG_VIDEO_IMX219 is not set
+# CONFIG_VIDEO_IMX258 is not set
+# CONFIG_VIDEO_IMX274 is not set
+# CONFIG_VIDEO_IMX290 is not set
+# CONFIG_VIDEO_IMX319 is not set
+# CONFIG_VIDEO_IMX334 is not set
+# CONFIG_VIDEO_IMX335 is not set
+# CONFIG_VIDEO_IMX355 is not set
+# CONFIG_VIDEO_IMX412 is not set
+# CONFIG_VIDEO_MT9M001 is not set
+# CONFIG_VIDEO_MT9M032 is not set
+# CONFIG_VIDEO_MT9M111 is not set
+# CONFIG_VIDEO_MT9P031 is not set
+# CONFIG_VIDEO_MT9T001 is not set
+# CONFIG_VIDEO_MT9T112 is not set
+# CONFIG_VIDEO_MT9V011 is not set
+# CONFIG_VIDEO_MT9V032 is not set
+# CONFIG_VIDEO_MT9V111 is not set
+# CONFIG_VIDEO_NOON010PC30 is not set
+# CONFIG_VIDEO_OG01A1B is not set
+# CONFIG_VIDEO_OV02A10 is not set
+# CONFIG_VIDEO_OV08D10 is not set
+# CONFIG_VIDEO_OV13858 is not set
+# CONFIG_VIDEO_OV13B10 is not set
+# CONFIG_VIDEO_OV2640 is not set
+# CONFIG_VIDEO_OV2659 is not set
+# CONFIG_VIDEO_OV2680 is not set
+# CONFIG_VIDEO_OV2685 is not set
+# CONFIG_VIDEO_OV2740 is not set
+# CONFIG_VIDEO_OV5640 is not set
+# CONFIG_VIDEO_OV5645 is not set
+# CONFIG_VIDEO_OV5647 is not set
+# CONFIG_VIDEO_OV5648 is not set
+# CONFIG_VIDEO_OV5670 is not set
+# CONFIG_VIDEO_OV5675 is not set
+# CONFIG_VIDEO_OV5693 is not set
+# CONFIG_VIDEO_OV5695 is not set
+# CONFIG_VIDEO_OV6650 is not set
+# CONFIG_VIDEO_OV7251 is not set
+# CONFIG_VIDEO_OV7640 is not set
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_OV772X is not set
+# CONFIG_VIDEO_OV7740 is not set
+# CONFIG_VIDEO_OV8856 is not set
+# CONFIG_VIDEO_OV8865 is not set
+# CONFIG_VIDEO_OV9282 is not set
+# CONFIG_VIDEO_OV9640 is not set
+# CONFIG_VIDEO_OV9650 is not set
+# CONFIG_VIDEO_OV9734 is not set
+# CONFIG_VIDEO_RDACM20 is not set
+# CONFIG_VIDEO_RDACM21 is not set
+# CONFIG_VIDEO_RJ54N1 is not set
+# CONFIG_VIDEO_S5C73M3 is not set
+# CONFIG_VIDEO_S5K4ECGX is not set
+# CONFIG_VIDEO_S5K5BAF is not set
+# CONFIG_VIDEO_S5K6A3 is not set
+# CONFIG_VIDEO_S5K6AA is not set
+# CONFIG_VIDEO_SR030PC30 is not set
+# CONFIG_VIDEO_VS6624 is not set
+# CONFIG_VIDEO_CCS is not set
+# CONFIG_VIDEO_ET8EK8 is not set
+# CONFIG_VIDEO_M5MOLS is not set
+# end of Camera sensor devices
+
+#
+# Lens drivers
+#
+# CONFIG_VIDEO_AD5820 is not set
+# CONFIG_VIDEO_AK7375 is not set
+# CONFIG_VIDEO_DW9714 is not set
+# CONFIG_VIDEO_DW9768 is not set
+# CONFIG_VIDEO_DW9807_VCM is not set
+# end of Lens drivers
+
+#
+# Flash devices
+#
+# CONFIG_VIDEO_ADP1653 is not set
+# CONFIG_VIDEO_LM3560 is not set
+# CONFIG_VIDEO_LM3646 is not set
+# end of Flash devices
+
+#
# Audio decoders, processors and mixers
#
-# CONFIG_VIDEO_TVAUDIO is not set
+# CONFIG_VIDEO_CS3308 is not set
+# CONFIG_VIDEO_CS5345 is not set
+# CONFIG_VIDEO_CS53L32A is not set
+# CONFIG_VIDEO_MSP3400 is not set
+# CONFIG_VIDEO_SONY_BTF_MPX is not set
+# CONFIG_VIDEO_TDA1997X is not set
# CONFIG_VIDEO_TDA7432 is not set
# CONFIG_VIDEO_TDA9840 is not set
-# CONFIG_VIDEO_TDA1997X is not set
# CONFIG_VIDEO_TEA6415C is not set
# CONFIG_VIDEO_TEA6420 is not set
-# CONFIG_VIDEO_MSP3400 is not set
-# CONFIG_VIDEO_CS3308 is not set
-# CONFIG_VIDEO_CS5345 is not set
-# CONFIG_VIDEO_CS53L32A is not set
# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_TVAUDIO is not set
# CONFIG_VIDEO_UDA1342 is not set
-# CONFIG_VIDEO_WM8775 is not set
-# CONFIG_VIDEO_WM8739 is not set
# CONFIG_VIDEO_VP27SMPX is not set
-# CONFIG_VIDEO_SONY_BTF_MPX is not set
+# CONFIG_VIDEO_WM8739 is not set
+# CONFIG_VIDEO_WM8775 is not set
# end of Audio decoders, processors and mixers
#
@@ -4772,7 +5178,9 @@ CONFIG_VIDEO_IR_I2C=m
# CONFIG_VIDEO_BT819 is not set
# CONFIG_VIDEO_BT856 is not set
# CONFIG_VIDEO_BT866 is not set
+# CONFIG_VIDEO_ISL7998X is not set
# CONFIG_VIDEO_KS0127 is not set
+# CONFIG_VIDEO_MAX9286 is not set
# CONFIG_VIDEO_ML86V7667 is not set
# CONFIG_VIDEO_SAA7110 is not set
# CONFIG_VIDEO_SAA711X is not set
@@ -4785,7 +5193,6 @@ CONFIG_VIDEO_IR_I2C=m
# CONFIG_VIDEO_TW9906 is not set
# CONFIG_VIDEO_TW9910 is not set
# CONFIG_VIDEO_VPX3220 is not set
-# CONFIG_VIDEO_MAX9286 is not set
#
# Video and audio decoders
@@ -4797,14 +5204,14 @@ CONFIG_VIDEO_IR_I2C=m
#
# Video encoders
#
-# CONFIG_VIDEO_SAA7127 is not set
-# CONFIG_VIDEO_SAA7185 is not set
+# CONFIG_VIDEO_AD9389B is not set
# CONFIG_VIDEO_ADV7170 is not set
# CONFIG_VIDEO_ADV7175 is not set
# CONFIG_VIDEO_ADV7343 is not set
# CONFIG_VIDEO_ADV7393 is not set
-# CONFIG_VIDEO_AD9389B is not set
# CONFIG_VIDEO_AK881X is not set
+# CONFIG_VIDEO_SAA7127 is not set
+# CONFIG_VIDEO_SAA7185 is not set
# CONFIG_VIDEO_THS8200 is not set
# end of Video encoders
@@ -4830,106 +5237,17 @@ CONFIG_VIDEO_IR_I2C=m
#
# Miscellaneous helper chips
#
-# CONFIG_VIDEO_THS7303 is not set
-# CONFIG_VIDEO_M52790 is not set
# CONFIG_VIDEO_I2C is not set
+# CONFIG_VIDEO_M52790 is not set
# CONFIG_VIDEO_ST_MIPID02 is not set
+# CONFIG_VIDEO_THS7303 is not set
# end of Miscellaneous helper chips
#
-# Camera sensor devices
-#
-# CONFIG_VIDEO_HI556 is not set
-# CONFIG_VIDEO_IMX208 is not set
-# CONFIG_VIDEO_IMX214 is not set
-# CONFIG_VIDEO_IMX219 is not set
-# CONFIG_VIDEO_IMX258 is not set
-# CONFIG_VIDEO_IMX274 is not set
-# CONFIG_VIDEO_IMX290 is not set
-# CONFIG_VIDEO_IMX319 is not set
-# CONFIG_VIDEO_IMX334 is not set
-# CONFIG_VIDEO_IMX335 is not set
-# CONFIG_VIDEO_IMX355 is not set
-# CONFIG_VIDEO_IMX412 is not set
-# CONFIG_VIDEO_OV02A10 is not set
-# CONFIG_VIDEO_OV2640 is not set
-# CONFIG_VIDEO_OV2659 is not set
-# CONFIG_VIDEO_OV2680 is not set
-# CONFIG_VIDEO_OV2685 is not set
-# CONFIG_VIDEO_OV2740 is not set
-# CONFIG_VIDEO_OV5640 is not set
-# CONFIG_VIDEO_OV5645 is not set
-# CONFIG_VIDEO_OV5647 is not set
-# CONFIG_VIDEO_OV5648 is not set
-# CONFIG_VIDEO_OV6650 is not set
-# CONFIG_VIDEO_OV5670 is not set
-# CONFIG_VIDEO_OV5675 is not set
-# CONFIG_VIDEO_OV5695 is not set
-# CONFIG_VIDEO_OV7251 is not set
-# CONFIG_VIDEO_OV772X is not set
-# CONFIG_VIDEO_OV7640 is not set
-# CONFIG_VIDEO_OV7670 is not set
-# CONFIG_VIDEO_OV7740 is not set
-# CONFIG_VIDEO_OV8856 is not set
-# CONFIG_VIDEO_OV8865 is not set
-# CONFIG_VIDEO_OV9282 is not set
-# CONFIG_VIDEO_OV9640 is not set
-# CONFIG_VIDEO_OV9650 is not set
-# CONFIG_VIDEO_OV9734 is not set
-# CONFIG_VIDEO_OV13858 is not set
-# CONFIG_VIDEO_VS6624 is not set
-# CONFIG_VIDEO_MT9M001 is not set
-# CONFIG_VIDEO_MT9M032 is not set
-# CONFIG_VIDEO_MT9M111 is not set
-# CONFIG_VIDEO_MT9P031 is not set
-# CONFIG_VIDEO_MT9T001 is not set
-# CONFIG_VIDEO_MT9T112 is not set
-# CONFIG_VIDEO_MT9V011 is not set
-# CONFIG_VIDEO_MT9V032 is not set
-# CONFIG_VIDEO_MT9V111 is not set
-# CONFIG_VIDEO_SR030PC30 is not set
-# CONFIG_VIDEO_NOON010PC30 is not set
-# CONFIG_VIDEO_M5MOLS is not set
-# CONFIG_VIDEO_RDACM20 is not set
-# CONFIG_VIDEO_RDACM21 is not set
-# CONFIG_VIDEO_RJ54N1 is not set
-# CONFIG_VIDEO_S5K6AA is not set
-# CONFIG_VIDEO_S5K6A3 is not set
-# CONFIG_VIDEO_S5K4ECGX is not set
-# CONFIG_VIDEO_S5K5BAF is not set
-# CONFIG_VIDEO_CCS is not set
-# CONFIG_VIDEO_ET8EK8 is not set
-# CONFIG_VIDEO_S5C73M3 is not set
-# end of Camera sensor devices
-
-#
-# Lens drivers
-#
-# CONFIG_VIDEO_AD5820 is not set
-# CONFIG_VIDEO_AK7375 is not set
-# CONFIG_VIDEO_DW9714 is not set
-# CONFIG_VIDEO_DW9768 is not set
-# CONFIG_VIDEO_DW9807_VCM is not set
-# end of Lens drivers
-
-#
-# Flash devices
-#
-# CONFIG_VIDEO_ADP1653 is not set
-# CONFIG_VIDEO_LM3560 is not set
-# CONFIG_VIDEO_LM3646 is not set
-# end of Flash devices
-
-#
-# SPI helper chips
-#
-# CONFIG_VIDEO_GS1662 is not set
-# end of SPI helper chips
-
-#
# Media SPI Adapters
#
# CONFIG_CXD2880_SPI_DRV is not set
+# CONFIG_VIDEO_GS1662 is not set
# end of Media SPI Adapters
CONFIG_MEDIA_TUNER=m
@@ -4937,43 +5255,43 @@ CONFIG_MEDIA_TUNER=m
#
# Customize TV tuners
#
-CONFIG_MEDIA_TUNER_SIMPLE=m
-# CONFIG_MEDIA_TUNER_TDA18250 is not set
-CONFIG_MEDIA_TUNER_TDA8290=m
-CONFIG_MEDIA_TUNER_TDA827X=m
-CONFIG_MEDIA_TUNER_TDA18271=m
-CONFIG_MEDIA_TUNER_TDA9887=m
-CONFIG_MEDIA_TUNER_TEA5761=m
-CONFIG_MEDIA_TUNER_TEA5767=m
+# CONFIG_MEDIA_TUNER_E4000 is not set
+# CONFIG_MEDIA_TUNER_FC0011 is not set
+# CONFIG_MEDIA_TUNER_FC0012 is not set
+# CONFIG_MEDIA_TUNER_FC0013 is not set
+# CONFIG_MEDIA_TUNER_FC2580 is not set
+# CONFIG_MEDIA_TUNER_IT913X is not set
+# CONFIG_MEDIA_TUNER_M88RS6000T is not set
+# CONFIG_MEDIA_TUNER_MAX2165 is not set
+CONFIG_MEDIA_TUNER_MC44S803=m
# CONFIG_MEDIA_TUNER_MSI001 is not set
-CONFIG_MEDIA_TUNER_MT20XX=m
# CONFIG_MEDIA_TUNER_MT2060 is not set
# CONFIG_MEDIA_TUNER_MT2063 is not set
-# CONFIG_MEDIA_TUNER_MT2266 is not set
+CONFIG_MEDIA_TUNER_MT20XX=m
# CONFIG_MEDIA_TUNER_MT2131 is not set
-# CONFIG_MEDIA_TUNER_QT1010 is not set
-CONFIG_MEDIA_TUNER_XC2028=m
-CONFIG_MEDIA_TUNER_XC5000=m
-CONFIG_MEDIA_TUNER_XC4000=m
+# CONFIG_MEDIA_TUNER_MT2266 is not set
+# CONFIG_MEDIA_TUNER_MXL301RF is not set
# CONFIG_MEDIA_TUNER_MXL5005S is not set
# CONFIG_MEDIA_TUNER_MXL5007T is not set
-CONFIG_MEDIA_TUNER_MC44S803=m
-# CONFIG_MEDIA_TUNER_MAX2165 is not set
-# CONFIG_MEDIA_TUNER_TDA18218 is not set
-# CONFIG_MEDIA_TUNER_FC0011 is not set
-# CONFIG_MEDIA_TUNER_FC0012 is not set
-# CONFIG_MEDIA_TUNER_FC0013 is not set
+# CONFIG_MEDIA_TUNER_QM1D1B0004 is not set
+# CONFIG_MEDIA_TUNER_QM1D1C0042 is not set
+# CONFIG_MEDIA_TUNER_QT1010 is not set
+# CONFIG_MEDIA_TUNER_R820T is not set
+# CONFIG_MEDIA_TUNER_SI2157 is not set
+CONFIG_MEDIA_TUNER_SIMPLE=m
# CONFIG_MEDIA_TUNER_TDA18212 is not set
-# CONFIG_MEDIA_TUNER_E4000 is not set
-# CONFIG_MEDIA_TUNER_FC2580 is not set
-# CONFIG_MEDIA_TUNER_M88RS6000T is not set
+# CONFIG_MEDIA_TUNER_TDA18218 is not set
+# CONFIG_MEDIA_TUNER_TDA18250 is not set
+CONFIG_MEDIA_TUNER_TDA18271=m
+CONFIG_MEDIA_TUNER_TDA827X=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
# CONFIG_MEDIA_TUNER_TUA9001 is not set
-# CONFIG_MEDIA_TUNER_SI2157 is not set
-# CONFIG_MEDIA_TUNER_IT913X is not set
-# CONFIG_MEDIA_TUNER_R820T is not set
-# CONFIG_MEDIA_TUNER_MXL301RF is not set
-# CONFIG_MEDIA_TUNER_QM1D1C0042 is not set
-# CONFIG_MEDIA_TUNER_QM1D1B0004 is not set
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC4000=m
+CONFIG_MEDIA_TUNER_XC5000=m
# end of Customize TV tuners
#
@@ -4983,123 +5301,123 @@ CONFIG_MEDIA_TUNER_MC44S803=m
#
# Multistandard (satellite) frontends
#
+# CONFIG_DVB_M88DS3103 is not set
+# CONFIG_DVB_MXL5XX is not set
# CONFIG_DVB_STB0899 is not set
# CONFIG_DVB_STB6100 is not set
# CONFIG_DVB_STV090x is not set
# CONFIG_DVB_STV0910 is not set
# CONFIG_DVB_STV6110x is not set
# CONFIG_DVB_STV6111 is not set
-# CONFIG_DVB_MXL5XX is not set
-# CONFIG_DVB_M88DS3103 is not set
#
# Multistandard (cable + terrestrial) frontends
#
# CONFIG_DVB_DRXK is not set
-# CONFIG_DVB_TDA18271C2DD is not set
-# CONFIG_DVB_SI2165 is not set
# CONFIG_DVB_MN88472 is not set
# CONFIG_DVB_MN88473 is not set
+# CONFIG_DVB_SI2165 is not set
+# CONFIG_DVB_TDA18271C2DD is not set
#
# DVB-S (satellite) frontends
#
# CONFIG_DVB_CX24110 is not set
+# CONFIG_DVB_CX24116 is not set
+# CONFIG_DVB_CX24117 is not set
+# CONFIG_DVB_CX24120 is not set
# CONFIG_DVB_CX24123 is not set
+# CONFIG_DVB_DS3000 is not set
+# CONFIG_DVB_MB86A16 is not set
# CONFIG_DVB_MT312 is not set
-# CONFIG_DVB_ZL10036 is not set
-# CONFIG_DVB_ZL10039 is not set
# CONFIG_DVB_S5H1420 is not set
-# CONFIG_DVB_STV0288 is not set
+# CONFIG_DVB_SI21XX is not set
# CONFIG_DVB_STB6000 is not set
+# CONFIG_DVB_STV0288 is not set
# CONFIG_DVB_STV0299 is not set
-# CONFIG_DVB_STV6110 is not set
# CONFIG_DVB_STV0900 is not set
-# CONFIG_DVB_TDA8083 is not set
+# CONFIG_DVB_STV6110 is not set
+# CONFIG_DVB_TDA10071 is not set
# CONFIG_DVB_TDA10086 is not set
+# CONFIG_DVB_TDA8083 is not set
# CONFIG_DVB_TDA8261 is not set
-# CONFIG_DVB_VES1X93 is not set
-# CONFIG_DVB_TUNER_ITD1000 is not set
-# CONFIG_DVB_TUNER_CX24113 is not set
# CONFIG_DVB_TDA826X is not set
-# CONFIG_DVB_TUA6100 is not set
-# CONFIG_DVB_CX24116 is not set
-# CONFIG_DVB_CX24117 is not set
-# CONFIG_DVB_CX24120 is not set
-# CONFIG_DVB_SI21XX is not set
# CONFIG_DVB_TS2020 is not set
-# CONFIG_DVB_DS3000 is not set
-# CONFIG_DVB_MB86A16 is not set
-# CONFIG_DVB_TDA10071 is not set
+# CONFIG_DVB_TUA6100 is not set
+# CONFIG_DVB_TUNER_CX24113 is not set
+# CONFIG_DVB_TUNER_ITD1000 is not set
+# CONFIG_DVB_VES1X93 is not set
+# CONFIG_DVB_ZL10036 is not set
+# CONFIG_DVB_ZL10039 is not set
#
# DVB-T (terrestrial) frontends
#
-# CONFIG_DVB_SP887X is not set
+# CONFIG_DVB_AF9013 is not set
# CONFIG_DVB_CX22700 is not set
# CONFIG_DVB_CX22702 is not set
-# CONFIG_DVB_S5H1432 is not set
-# CONFIG_DVB_DRXD is not set
-# CONFIG_DVB_L64781 is not set
-# CONFIG_DVB_TDA1004X is not set
-# CONFIG_DVB_NXT6000 is not set
-# CONFIG_DVB_MT352 is not set
-# CONFIG_DVB_ZL10353 is not set
+# CONFIG_DVB_CXD2820R is not set
+# CONFIG_DVB_CXD2841ER is not set
# CONFIG_DVB_DIB3000MB is not set
# CONFIG_DVB_DIB3000MC is not set
# CONFIG_DVB_DIB7000M is not set
# CONFIG_DVB_DIB7000P is not set
# CONFIG_DVB_DIB9000 is not set
-# CONFIG_DVB_TDA10048 is not set
-# CONFIG_DVB_AF9013 is not set
+# CONFIG_DVB_DRXD is not set
# CONFIG_DVB_EC100 is not set
-# CONFIG_DVB_STV0367 is not set
-# CONFIG_DVB_CXD2820R is not set
-# CONFIG_DVB_CXD2841ER is not set
+# CONFIG_DVB_L64781 is not set
+# CONFIG_DVB_MT352 is not set
+# CONFIG_DVB_NXT6000 is not set
# CONFIG_DVB_RTL2830 is not set
# CONFIG_DVB_RTL2832 is not set
# CONFIG_DVB_RTL2832_SDR is not set
+# CONFIG_DVB_S5H1432 is not set
# CONFIG_DVB_SI2168 is not set
+# CONFIG_DVB_SP887X is not set
+# CONFIG_DVB_STV0367 is not set
+# CONFIG_DVB_TDA10048 is not set
+# CONFIG_DVB_TDA1004X is not set
# CONFIG_DVB_ZD1301_DEMOD is not set
+# CONFIG_DVB_ZL10353 is not set
# CONFIG_DVB_CXD2880 is not set
#
# DVB-C (cable) frontends
#
-# CONFIG_DVB_VES1820 is not set
+# CONFIG_DVB_STV0297 is not set
# CONFIG_DVB_TDA10021 is not set
# CONFIG_DVB_TDA10023 is not set
-# CONFIG_DVB_STV0297 is not set
+# CONFIG_DVB_VES1820 is not set
#
# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
#
-# CONFIG_DVB_NXT200X is not set
-# CONFIG_DVB_OR51211 is not set
-# CONFIG_DVB_OR51132 is not set
+# CONFIG_DVB_AU8522_DTV is not set
+# CONFIG_DVB_AU8522_V4L is not set
# CONFIG_DVB_BCM3510 is not set
-# CONFIG_DVB_LGDT330X is not set
+# CONFIG_DVB_LG2160 is not set
# CONFIG_DVB_LGDT3305 is not set
# CONFIG_DVB_LGDT3306A is not set
-# CONFIG_DVB_LG2160 is not set
+# CONFIG_DVB_LGDT330X is not set
+# CONFIG_DVB_MXL692 is not set
+# CONFIG_DVB_NXT200X is not set
+# CONFIG_DVB_OR51132 is not set
+# CONFIG_DVB_OR51211 is not set
# CONFIG_DVB_S5H1409 is not set
-# CONFIG_DVB_AU8522_DTV is not set
-# CONFIG_DVB_AU8522_V4L is not set
# CONFIG_DVB_S5H1411 is not set
-# CONFIG_DVB_MXL692 is not set
#
# ISDB-T (terrestrial) frontends
#
-# CONFIG_DVB_S921 is not set
# CONFIG_DVB_DIB8000 is not set
# CONFIG_DVB_MB86A20S is not set
+# CONFIG_DVB_S921 is not set
#
# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
#
-# CONFIG_DVB_TC90522 is not set
# CONFIG_DVB_MN88443X is not set
+# CONFIG_DVB_TC90522 is not set
#
# Digital terrestrial only tuners/PLL
@@ -5111,25 +5429,25 @@ CONFIG_MEDIA_TUNER_MC44S803=m
#
# SEC control devices for DVB-S
#
-# CONFIG_DVB_DRX39XYJ is not set
-# CONFIG_DVB_LNBH25 is not set
-# CONFIG_DVB_LNBH29 is not set
-# CONFIG_DVB_LNBP21 is not set
-# CONFIG_DVB_LNBP22 is not set
+# CONFIG_DVB_A8293 is not set
+# CONFIG_DVB_AF9033 is not set
+# CONFIG_DVB_ASCOT2E is not set
+# CONFIG_DVB_ATBM8830 is not set
+# CONFIG_DVB_HELENE is not set
+# CONFIG_DVB_HORUS3A is not set
# CONFIG_DVB_ISL6405 is not set
# CONFIG_DVB_ISL6421 is not set
# CONFIG_DVB_ISL6423 is not set
-# CONFIG_DVB_A8293 is not set
+# CONFIG_DVB_IX2505V is not set
# CONFIG_DVB_LGS8GL5 is not set
# CONFIG_DVB_LGS8GXX is not set
-# CONFIG_DVB_ATBM8830 is not set
-# CONFIG_DVB_TDA665x is not set
-# CONFIG_DVB_IX2505V is not set
+# CONFIG_DVB_LNBH25 is not set
+# CONFIG_DVB_LNBH29 is not set
+# CONFIG_DVB_LNBP21 is not set
+# CONFIG_DVB_LNBP22 is not set
# CONFIG_DVB_M88RS2000 is not set
-# CONFIG_DVB_AF9033 is not set
-# CONFIG_DVB_HORUS3A is not set
-# CONFIG_DVB_ASCOT2E is not set
-# CONFIG_DVB_HELENE is not set
+# CONFIG_DVB_TDA665x is not set
+# CONFIG_DVB_DRX39XYJ is not set
#
# Common Interface (EN50221) controller drivers
@@ -5147,28 +5465,32 @@ CONFIG_MEDIA_TUNER_MC44S803=m
#
# Graphics support
#
-CONFIG_VGA_ARB=y
-CONFIG_VGA_ARB_MAX_GPUS=16
+CONFIG_APERTURE_HELPERS=y
+CONFIG_TEGRA_HOST1X_CONTEXT_BUS=y
CONFIG_TEGRA_HOST1X=m
CONFIG_TEGRA_HOST1X_FIREWALL=y
CONFIG_DRM=m
CONFIG_DRM_MIPI_DSI=y
-CONFIG_DRM_DP_AUX_BUS=m
-# CONFIG_DRM_DP_AUX_CHARDEV is not set
-# CONFIG_DRM_DEBUG_SELFTEST is not set
CONFIG_DRM_KMS_HELPER=m
# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set
+# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
CONFIG_DRM_FBDEV_EMULATION=y
CONFIG_DRM_FBDEV_OVERALLOC=100
# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set
# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set
+CONFIG_DRM_DP_AUX_BUS=m
+CONFIG_DRM_DISPLAY_HELPER=m
+CONFIG_DRM_DISPLAY_DP_HELPER=y
+CONFIG_DRM_DISPLAY_HDCP_HELPER=y
+CONFIG_DRM_DISPLAY_HDMI_HELPER=y
+# CONFIG_DRM_DP_AUX_CHARDEV is not set
# CONFIG_DRM_DP_CEC is not set
CONFIG_DRM_TTM=m
+CONFIG_DRM_BUDDY=m
CONFIG_DRM_VRAM_HELPER=m
CONFIG_DRM_TTM_HELPER=m
-CONFIG_DRM_GEM_CMA_HELPER=y
-CONFIG_DRM_KMS_CMA_HELPER=y
-CONFIG_DRM_GEM_SHMEM_HELPER=y
+CONFIG_DRM_GEM_DMA_HELPER=m
+CONFIG_DRM_GEM_SHMEM_HELPER=m
CONFIG_DRM_SCHED=m
#
@@ -5248,6 +5570,8 @@ CONFIG_DRM_EXYNOS_MIC=y
# CONFIG_DRM_EXYNOS_ROTATOR is not set
# CONFIG_DRM_EXYNOS_SCALER is not set
CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_VOP=y
+CONFIG_ROCKCHIP_VOP2=y
CONFIG_ROCKCHIP_ANALOGIX_DP=y
CONFIG_ROCKCHIP_CDN_DP=y
CONFIG_ROCKCHIP_DW_HDMI=y
@@ -5262,17 +5586,16 @@ CONFIG_DRM_UDL=m
CONFIG_DRM_AST=m
# CONFIG_DRM_MGAG200 is not set
# CONFIG_DRM_RCAR_DW_HDMI is not set
-CONFIG_DRM_RCAR_LVDS=m
+# CONFIG_DRM_RCAR_USE_LVDS is not set
+# CONFIG_DRM_RCAR_USE_MIPI_DSI is not set
CONFIG_DRM_SUN4I=m
-CONFIG_DRM_SUN4I_HDMI=m
-CONFIG_DRM_SUN4I_HDMI_CEC=y
-CONFIG_DRM_SUN4I_BACKEND=m
CONFIG_DRM_SUN6I_DSI=m
CONFIG_DRM_SUN8I_DW_HDMI=m
CONFIG_DRM_SUN8I_MIXER=m
CONFIG_DRM_SUN8I_TCON_TOP=m
CONFIG_DRM_QXL=m
CONFIG_DRM_VIRTIO_GPU=m
+# CONFIG_DRM_MSM is not set
CONFIG_DRM_TEGRA=m
# CONFIG_DRM_TEGRA_DEBUG is not set
# CONFIG_DRM_TEGRA_STAGING is not set
@@ -5284,11 +5607,14 @@ CONFIG_DRM_PANEL=y
# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set
# CONFIG_DRM_PANEL_ARM_VERSATILE is not set
# CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596 is not set
+# CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0 is not set
# CONFIG_DRM_PANEL_BOE_HIMAX8279D is not set
# CONFIG_DRM_PANEL_BOE_TV101WUM_NL6 is not set
# CONFIG_DRM_PANEL_DSI_CM is not set
# CONFIG_DRM_PANEL_LVDS is not set
CONFIG_DRM_PANEL_SIMPLE=m
+# CONFIG_DRM_PANEL_EDP is not set
+# CONFIG_DRM_PANEL_EBBG_FT8719 is not set
# CONFIG_DRM_PANEL_ELIDA_KD35T133 is not set
# CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02 is not set
# CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D is not set
@@ -5298,6 +5624,7 @@ CONFIG_DRM_PANEL_SIMPLE=m
# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set
# CONFIG_DRM_PANEL_INNOLUX_P079ZCA is not set
# CONFIG_DRM_PANEL_JDI_LT070ME05000 is not set
+# CONFIG_DRM_PANEL_JDI_R63452 is not set
# CONFIG_DRM_PANEL_KHADAS_TS050 is not set
# CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04 is not set
# CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W is not set
@@ -5306,7 +5633,10 @@ CONFIG_DRM_PANEL_SIMPLE=m
# CONFIG_DRM_PANEL_LG_LB035Q02 is not set
# CONFIG_DRM_PANEL_LG_LG4573 is not set
# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set
+# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set
# CONFIG_DRM_PANEL_NOVATEK_NT35510 is not set
+# CONFIG_DRM_PANEL_NOVATEK_NT35560 is not set
+# CONFIG_DRM_PANEL_NOVATEK_NT35950 is not set
# CONFIG_DRM_PANEL_NOVATEK_NT36672A is not set
# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set
# CONFIG_DRM_PANEL_MANTIX_MLAF057WE51 is not set
@@ -5321,6 +5651,7 @@ CONFIG_DRM_PANEL_SIMPLE=m
# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set
# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set
# CONFIG_DRM_PANEL_SAMSUNG_S6D16D0 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set
# CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2 is not set
# CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03 is not set
# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set
@@ -5331,11 +5662,12 @@ CONFIG_DRM_PANEL_SIMPLE=m
# CONFIG_DRM_PANEL_SHARP_LQ101R1SX01 is not set
# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set
# CONFIG_DRM_PANEL_SHARP_LS043T1LE01 is not set
+# CONFIG_DRM_PANEL_SHARP_LS060T1SX01 is not set
# CONFIG_DRM_PANEL_SITRONIX_ST7701 is not set
# CONFIG_DRM_PANEL_SITRONIX_ST7703 is not set
# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set
-# CONFIG_DRM_PANEL_SONY_ACX424AKP is not set
# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set
+# CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521 is not set
# CONFIG_DRM_PANEL_TDO_TL070WSH30 is not set
# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set
# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set
@@ -5355,9 +5687,11 @@ CONFIG_DRM_PANEL_BRIDGE=y
# CONFIG_DRM_CDNS_DSI is not set
# CONFIG_DRM_CHIPONE_ICN6211 is not set
# CONFIG_DRM_CHRONTEL_CH7033 is not set
-# CONFIG_DRM_CROS_EC_ANX7688 is not set
CONFIG_DRM_DISPLAY_CONNECTOR=m
+# CONFIG_DRM_FSL_LDB is not set
+# CONFIG_DRM_ITE_IT6505 is not set
# CONFIG_DRM_LONTIUM_LT8912B is not set
+# CONFIG_DRM_LONTIUM_LT9211 is not set
# CONFIG_DRM_LONTIUM_LT9611 is not set
# CONFIG_DRM_LONTIUM_LT9611UXC is not set
# CONFIG_DRM_ITE_IT66121 is not set
@@ -5377,6 +5711,7 @@ CONFIG_DRM_DISPLAY_CONNECTOR=m
# CONFIG_DRM_TOSHIBA_TC358767 is not set
# CONFIG_DRM_TOSHIBA_TC358768 is not set
# CONFIG_DRM_TOSHIBA_TC358775 is not set
+# CONFIG_DRM_TI_DLPC3433 is not set
# CONFIG_DRM_TI_TFP410 is not set
# CONFIG_DRM_TI_SN65DSI83 is not set
# CONFIG_DRM_TI_SN65DSI86 is not set
@@ -5389,32 +5724,43 @@ CONFIG_DRM_I2C_ADV7511=m
# CONFIG_DRM_I2C_ADV7511_AUDIO is not set
CONFIG_DRM_I2C_ADV7511_CEC=y
# CONFIG_DRM_CDNS_MHDP8546 is not set
+# CONFIG_DRM_IMX8QM_LDB is not set
+# CONFIG_DRM_IMX8QXP_LDB is not set
+# CONFIG_DRM_IMX8QXP_PIXEL_COMBINER is not set
+# CONFIG_DRM_IMX8QXP_PIXEL_LINK_TO_DPI is not set
CONFIG_DRM_DW_HDMI=m
# CONFIG_DRM_DW_HDMI_AHB_AUDIO is not set
CONFIG_DRM_DW_HDMI_I2S_AUDIO=m
+# CONFIG_DRM_DW_HDMI_GP_AUDIO is not set
# CONFIG_DRM_DW_HDMI_CEC is not set
CONFIG_DRM_DW_MIPI_DSI=m
# end of Display Interface Bridges
CONFIG_DRM_IMX_DCSS=m
+CONFIG_DRM_V3D=m
CONFIG_DRM_VC4=m
CONFIG_DRM_VC4_HDMI_CEC=y
CONFIG_DRM_ETNAVIV=m
CONFIG_DRM_ETNAVIV_THERMAL=y
CONFIG_DRM_HISI_HIBMC=m
CONFIG_DRM_HISI_KIRIN=m
+# CONFIG_DRM_LOGICVC is not set
CONFIG_DRM_MEDIATEK=m
+# CONFIG_DRM_MEDIATEK_DP is not set
CONFIG_DRM_MEDIATEK_HDMI=m
CONFIG_DRM_MXS=y
CONFIG_DRM_MXSFB=m
+# CONFIG_DRM_IMX_LCDIF is not set
CONFIG_DRM_MESON=m
CONFIG_DRM_MESON_DW_HDMI=m
# CONFIG_DRM_ARCPGU is not set
CONFIG_DRM_BOCHS=m
# CONFIG_DRM_CIRRUS_QEMU is not set
# CONFIG_DRM_GM12U320 is not set
+# CONFIG_DRM_PANEL_MIPI_DBI is not set
CONFIG_DRM_SIMPLEDRM=m
# CONFIG_TINYDRM_HX8357D is not set
+# CONFIG_TINYDRM_ILI9163 is not set
# CONFIG_TINYDRM_ILI9225 is not set
# CONFIG_TINYDRM_ILI9341 is not set
# CONFIG_TINYDRM_ILI9486 is not set
@@ -5429,9 +5775,11 @@ CONFIG_DRM_LIMA=m
CONFIG_DRM_PANFROST=m
CONFIG_DRM_TIDSS=m
# CONFIG_DRM_GUD is not set
+# CONFIG_DRM_SSD130X is not set
# CONFIG_DRM_HYPERV is not set
# CONFIG_DRM_LEGACY is not set
CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
+CONFIG_DRM_NOMODESET=y
#
# Frame buffer Devices
@@ -5566,6 +5914,7 @@ CONFIG_SND_PCM_IEC958=y
CONFIG_SND_DMAENGINE_PCM=y
CONFIG_SND_HWDEP=m
CONFIG_SND_RAWMIDI=m
+CONFIG_SND_COMPRESS_OFFLOAD=y
CONFIG_SND_JACK=y
CONFIG_SND_JACK_INPUT_DEV=y
# CONFIG_SND_OSSEMUL is not set
@@ -5577,7 +5926,9 @@ CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_PROC_FS=y
CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
+CONFIG_SND_CTL_FAST_LOOKUP=y
# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_CTL_INPUT_VALIDATION is not set
CONFIG_SND_VMASTER=y
CONFIG_SND_CTL_LED=m
# CONFIG_SND_SEQUENCER is not set
@@ -5586,6 +5937,7 @@ CONFIG_SND_DRIVERS=y
# CONFIG_SND_ALOOP is not set
# CONFIG_SND_MTPAV is not set
# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_SERIAL_GENERIC is not set
# CONFIG_SND_MPU401 is not set
CONFIG_SND_PCI=y
# CONFIG_SND_AD1889 is not set
@@ -5663,6 +6015,8 @@ CONFIG_SND_HDA_TEGRA=m
# CONFIG_SND_HDA_RECONFIG is not set
# CONFIG_SND_HDA_INPUT_BEEP is not set
# CONFIG_SND_HDA_PATCH_LOADER is not set
+# CONFIG_SND_HDA_SCODEC_CS35L41_I2C is not set
+# CONFIG_SND_HDA_SCODEC_CS35L41_SPI is not set
CONFIG_SND_HDA_CODEC_REALTEK=m
CONFIG_SND_HDA_CODEC_ANALOG=m
CONFIG_SND_HDA_CODEC_SIGMATEL=m
@@ -5720,9 +6074,12 @@ CONFIG_SND_PCMCIA=y
# CONFIG_SND_PDAUDIOCF is not set
CONFIG_SND_SOC=y
CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y
+CONFIG_SND_SOC_COMPRESS=y
CONFIG_SND_SOC_TOPOLOGY=y
# CONFIG_SND_SOC_ADI is not set
# CONFIG_SND_SOC_AMD_ACP is not set
+# CONFIG_SND_AMD_ACP_CONFIG is not set
+CONFIG_SND_SOC_APPLE_MCA=m
# CONFIG_SND_ATMEL_SOC is not set
CONFIG_SND_BCM2835_SOC_I2S=m
# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set
@@ -5744,6 +6101,7 @@ CONFIG_SND_BCM2835_SOC_I2S=m
# CONFIG_SND_SOC_FSL_MICFIL is not set
# CONFIG_SND_SOC_FSL_XCVR is not set
# CONFIG_SND_SOC_FSL_AUD2HTX is not set
+# CONFIG_SND_SOC_FSL_RPMSG is not set
# CONFIG_SND_SOC_IMX_AUDMUX is not set
# CONFIG_SND_IMX_SOC is not set
# end of SoC Audio for Freescale CPUs
@@ -5764,6 +6122,7 @@ CONFIG_SND_SOC_MT8173_RT5650_RT5676=m
CONFIG_SND_SOC_MT8183=m
CONFIG_SND_SOC_MT8183_MT6358_TS3A227E_MAX98357A=m
CONFIG_SND_SOC_MT8183_DA7219_MAX98357A=m
+# CONFIG_SND_SOC_MT8186 is not set
CONFIG_SND_SOC_MTK_BTCVSD=m
CONFIG_SND_SOC_MT8192=m
CONFIG_SND_SOC_MT8195=m
@@ -5786,8 +6145,42 @@ CONFIG_SND_SOC_MT8195=m
# CONFIG_SND_SOC_MESON_T9015 is not set
# end of ASoC support for Amlogic platforms
+CONFIG_SND_SOC_QCOM=m
+CONFIG_SND_SOC_LPASS_CPU=m
+CONFIG_SND_SOC_LPASS_HDMI=m
+CONFIG_SND_SOC_LPASS_PLATFORM=m
+CONFIG_SND_SOC_LPASS_CDC_DMA=m
+CONFIG_SND_SOC_LPASS_IPQ806X=m
+CONFIG_SND_SOC_LPASS_APQ8016=m
+CONFIG_SND_SOC_LPASS_SC7180=m
+CONFIG_SND_SOC_LPASS_SC7280=m
+CONFIG_SND_SOC_STORM=m
+CONFIG_SND_SOC_APQ8016_SBC=m
+CONFIG_SND_SOC_QCOM_COMMON=m
+CONFIG_SND_SOC_QDSP6_COMMON=m
+CONFIG_SND_SOC_QDSP6_CORE=m
+CONFIG_SND_SOC_QDSP6_AFE=m
+CONFIG_SND_SOC_QDSP6_AFE_DAI=m
+CONFIG_SND_SOC_QDSP6_AFE_CLOCKS=m
+CONFIG_SND_SOC_QDSP6_ADM=m
+CONFIG_SND_SOC_QDSP6_ROUTING=m
+CONFIG_SND_SOC_QDSP6_ASM=m
+CONFIG_SND_SOC_QDSP6_ASM_DAI=m
+CONFIG_SND_SOC_QDSP6_APM_DAI=m
+CONFIG_SND_SOC_QDSP6_APM_LPASS_DAI=m
+CONFIG_SND_SOC_QDSP6_APM=m
+CONFIG_SND_SOC_QDSP6_PRM_LPASS_CLOCKS=m
+CONFIG_SND_SOC_QDSP6_PRM=m
+CONFIG_SND_SOC_QDSP6=m
+CONFIG_SND_SOC_MSM8996=m
+CONFIG_SND_SOC_SDM845=m
+CONFIG_SND_SOC_SM8250=m
+CONFIG_SND_SOC_SC8280XP=m
+CONFIG_SND_SOC_SC7180=m
+CONFIG_SND_SOC_SC7280=m
CONFIG_SND_SOC_ROCKCHIP=m
CONFIG_SND_SOC_ROCKCHIP_I2S=m
+# CONFIG_SND_SOC_ROCKCHIP_I2S_TDM is not set
CONFIG_SND_SOC_ROCKCHIP_PDM=m
CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
CONFIG_SND_SOC_ROCKCHIP_MAX98090=m
@@ -5810,10 +6203,8 @@ CONFIG_SND_SOC_SOF_TOPLEVEL=y
CONFIG_SND_SOC_SOF_PCI=m
CONFIG_SND_SOC_SOF_ACPI=m
CONFIG_SND_SOC_SOF_OF=m
-# CONFIG_SND_SOC_SOF_DEBUG_PROBES is not set
-# CONFIG_SND_SOC_SOF_DEVELOPER_SUPPORT is not set
-CONFIG_SND_SOC_SOF=m
# CONFIG_SND_SOC_SOF_IMX_TOPLEVEL is not set
+# CONFIG_SND_SOC_SOF_MTK_TOPLEVEL is not set
#
# STMicroelectronics STM32 SOC audio support
@@ -5829,6 +6220,7 @@ CONFIG_SND_SUN8I_CODEC_ANALOG=m
# CONFIG_SND_SUN50I_CODEC_ANALOG is not set
CONFIG_SND_SUN4I_I2S=m
CONFIG_SND_SUN4I_SPDIF=m
+# CONFIG_SND_SUN50I_DMIC is not set
CONFIG_SND_SUN8I_ADDA_PR_REGMAP=m
# end of Allwinner SoC Audio support
@@ -5850,11 +6242,12 @@ CONFIG_SND_SOC_WM_HUBS=m
# CONFIG_SND_SOC_ADAU1701 is not set
# CONFIG_SND_SOC_ADAU1761_I2C is not set
# CONFIG_SND_SOC_ADAU1761_SPI is not set
-# CONFIG_SND_SOC_ADAU7002 is not set
+CONFIG_SND_SOC_ADAU7002=m
# CONFIG_SND_SOC_ADAU7118_HW is not set
# CONFIG_SND_SOC_ADAU7118_I2C is not set
# CONFIG_SND_SOC_AK4104 is not set
# CONFIG_SND_SOC_AK4118 is not set
+# CONFIG_SND_SOC_AK4375 is not set
# CONFIG_SND_SOC_AK4458 is not set
# CONFIG_SND_SOC_AK4554 is not set
CONFIG_SND_SOC_AK4613=m
@@ -5862,19 +6255,25 @@ CONFIG_SND_SOC_AK4613=m
# CONFIG_SND_SOC_AK5386 is not set
# CONFIG_SND_SOC_AK5558 is not set
# CONFIG_SND_SOC_ALC5623 is not set
+# CONFIG_SND_SOC_AW8738 is not set
# CONFIG_SND_SOC_BD28623 is not set
CONFIG_SND_SOC_BT_SCO=m
-CONFIG_SND_SOC_CROS_EC_CODEC=m
+# CONFIG_SND_SOC_CROS_EC_CODEC is not set
# CONFIG_SND_SOC_CS35L32 is not set
# CONFIG_SND_SOC_CS35L33 is not set
# CONFIG_SND_SOC_CS35L34 is not set
# CONFIG_SND_SOC_CS35L35 is not set
# CONFIG_SND_SOC_CS35L36 is not set
+# CONFIG_SND_SOC_CS35L41_SPI is not set
+# CONFIG_SND_SOC_CS35L41_I2C is not set
+# CONFIG_SND_SOC_CS35L45_SPI is not set
+# CONFIG_SND_SOC_CS35L45_I2C is not set
# CONFIG_SND_SOC_CS42L42 is not set
# CONFIG_SND_SOC_CS42L51_I2C is not set
# CONFIG_SND_SOC_CS42L52 is not set
# CONFIG_SND_SOC_CS42L56 is not set
# CONFIG_SND_SOC_CS42L73 is not set
+# CONFIG_SND_SOC_CS42L83 is not set
# CONFIG_SND_SOC_CS4234 is not set
# CONFIG_SND_SOC_CS4265 is not set
# CONFIG_SND_SOC_CS4270 is not set
@@ -5894,10 +6293,12 @@ CONFIG_SND_SOC_HDMI_CODEC=m
# CONFIG_SND_SOC_ES7134 is not set
# CONFIG_SND_SOC_ES7241 is not set
# CONFIG_SND_SOC_ES8316 is not set
+# CONFIG_SND_SOC_ES8326 is not set
CONFIG_SND_SOC_ES8328=m
CONFIG_SND_SOC_ES8328_I2C=m
CONFIG_SND_SOC_ES8328_SPI=m
# CONFIG_SND_SOC_GTM601 is not set
+# CONFIG_SND_SOC_HDA is not set
# CONFIG_SND_SOC_ICS43432 is not set
# CONFIG_SND_SOC_INNO_RK3036 is not set
# CONFIG_SND_SOC_MAX98088 is not set
@@ -5906,9 +6307,12 @@ CONFIG_SND_SOC_MAX98095=m
CONFIG_SND_SOC_MAX98357A=m
# CONFIG_SND_SOC_MAX98504 is not set
# CONFIG_SND_SOC_MAX9867 is not set
-# CONFIG_SND_SOC_MAX98927 is not set
+CONFIG_SND_SOC_MAX98927=m
+# CONFIG_SND_SOC_MAX98520 is not set
# CONFIG_SND_SOC_MAX98373_I2C is not set
+# CONFIG_SND_SOC_MAX98373_SDW is not set
# CONFIG_SND_SOC_MAX98390 is not set
+# CONFIG_SND_SOC_MAX98396 is not set
# CONFIG_SND_SOC_MAX9860 is not set
# CONFIG_SND_SOC_MSM8916_WCD_ANALOG is not set
# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set
@@ -5930,6 +6334,8 @@ CONFIG_SND_SOC_MAX98357A=m
CONFIG_SND_SOC_RL6231=m
CONFIG_SND_SOC_RT1015=m
CONFIG_SND_SOC_RT1015P=m
+# CONFIG_SND_SOC_RT1308_SDW is not set
+# CONFIG_SND_SOC_RT1316_SDW is not set
CONFIG_SND_SOC_RT5514=m
CONFIG_SND_SOC_RT5514_SPI=m
# CONFIG_SND_SOC_RT5616 is not set
@@ -5937,12 +6343,25 @@ CONFIG_SND_SOC_RT5631=m
# CONFIG_SND_SOC_RT5640 is not set
CONFIG_SND_SOC_RT5645=m
# CONFIG_SND_SOC_RT5659 is not set
+CONFIG_SND_SOC_RT5663=m
CONFIG_SND_SOC_RT5677=m
CONFIG_SND_SOC_RT5677_SPI=m
+CONFIG_SND_SOC_RT5682=m
+CONFIG_SND_SOC_RT5682_I2C=m
+# CONFIG_SND_SOC_RT5682_SDW is not set
+CONFIG_SND_SOC_RT5682S=m
+# CONFIG_SND_SOC_RT700_SDW is not set
+# CONFIG_SND_SOC_RT711_SDW is not set
+# CONFIG_SND_SOC_RT711_SDCA_SDW is not set
+# CONFIG_SND_SOC_RT715_SDW is not set
+# CONFIG_SND_SOC_RT715_SDCA_SDW is not set
+# CONFIG_SND_SOC_RT9120 is not set
+# CONFIG_SND_SOC_SDW_MOCKUP is not set
# CONFIG_SND_SOC_SGTL5000 is not set
# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set
# CONFIG_SND_SOC_SIMPLE_MUX is not set
CONFIG_SND_SOC_SPDIF=m
+# CONFIG_SND_SOC_SRC4XXX_I2C is not set
# CONFIG_SND_SOC_SSM2305 is not set
# CONFIG_SND_SOC_SSM2518 is not set
# CONFIG_SND_SOC_SSM2602_SPI is not set
@@ -5955,13 +6374,16 @@ CONFIG_SND_SOC_SPDIF=m
# CONFIG_SND_SOC_TAS2562 is not set
# CONFIG_SND_SOC_TAS2764 is not set
# CONFIG_SND_SOC_TAS2770 is not set
+# CONFIG_SND_SOC_TAS2780 is not set
# CONFIG_SND_SOC_TAS5086 is not set
# CONFIG_SND_SOC_TAS571X is not set
# CONFIG_SND_SOC_TAS5720 is not set
+# CONFIG_SND_SOC_TAS5805M is not set
# CONFIG_SND_SOC_TAS6424 is not set
# CONFIG_SND_SOC_TDA7419 is not set
# CONFIG_SND_SOC_TFA9879 is not set
# CONFIG_SND_SOC_TFA989X is not set
+# CONFIG_SND_SOC_TLV320ADC3XXX is not set
# CONFIG_SND_SOC_TLV320AIC23_I2C is not set
# CONFIG_SND_SOC_TLV320AIC23_SPI is not set
# CONFIG_SND_SOC_TLV320AIC31XX is not set
@@ -5974,13 +6396,17 @@ CONFIG_SND_SOC_TS3A227E=m
# CONFIG_SND_SOC_TSCS42XX is not set
# CONFIG_SND_SOC_TSCS454 is not set
# CONFIG_SND_SOC_UDA1334 is not set
+CONFIG_SND_SOC_WCD_MBHC=m
+CONFIG_SND_SOC_WCD938X=m
+CONFIG_SND_SOC_WCD938X_SDW=m
# CONFIG_SND_SOC_WM8510 is not set
# CONFIG_SND_SOC_WM8523 is not set
# CONFIG_SND_SOC_WM8524 is not set
# CONFIG_SND_SOC_WM8580 is not set
# CONFIG_SND_SOC_WM8711 is not set
# CONFIG_SND_SOC_WM8728 is not set
-# CONFIG_SND_SOC_WM8731 is not set
+# CONFIG_SND_SOC_WM8731_I2C is not set
+# CONFIG_SND_SOC_WM8731_SPI is not set
# CONFIG_SND_SOC_WM8737 is not set
# CONFIG_SND_SOC_WM8741 is not set
# CONFIG_SND_SOC_WM8750 is not set
@@ -5992,12 +6418,15 @@ CONFIG_SND_SOC_TS3A227E=m
# CONFIG_SND_SOC_WM8804_SPI is not set
# CONFIG_SND_SOC_WM8903 is not set
# CONFIG_SND_SOC_WM8904 is not set
+# CONFIG_SND_SOC_WM8940 is not set
CONFIG_SND_SOC_WM8960=m
# CONFIG_SND_SOC_WM8962 is not set
# CONFIG_SND_SOC_WM8974 is not set
# CONFIG_SND_SOC_WM8978 is not set
# CONFIG_SND_SOC_WM8985 is not set
CONFIG_SND_SOC_WM8994=m
+# CONFIG_SND_SOC_WSA881X is not set
+# CONFIG_SND_SOC_WSA883X is not set
# CONFIG_SND_SOC_ZL38060 is not set
# CONFIG_SND_SOC_MAX9759 is not set
# CONFIG_SND_SOC_MT6351 is not set
@@ -6006,9 +6435,11 @@ CONFIG_SND_SOC_MT6358=m
# CONFIG_SND_SOC_NAU8315 is not set
# CONFIG_SND_SOC_NAU8540 is not set
# CONFIG_SND_SOC_NAU8810 is not set
+# CONFIG_SND_SOC_NAU8821 is not set
# CONFIG_SND_SOC_NAU8822 is not set
# CONFIG_SND_SOC_NAU8824 is not set
# CONFIG_SND_SOC_TPA6130A2 is not set
+CONFIG_SND_SOC_LPASS_MACRO_COMMON=m
# CONFIG_SND_SOC_LPASS_WSA_MACRO is not set
# CONFIG_SND_SOC_LPASS_VA_MACRO is not set
# CONFIG_SND_SOC_LPASS_RX_MACRO is not set
@@ -6018,6 +6449,8 @@ CONFIG_SND_SOC_MT6358=m
CONFIG_SND_SIMPLE_CARD_UTILS=y
CONFIG_SND_SIMPLE_CARD=y
# CONFIG_SND_AUDIO_GRAPH_CARD is not set
+# CONFIG_SND_AUDIO_GRAPH_CARD2 is not set
+# CONFIG_SND_TEST_COMPONENT is not set
CONFIG_SND_XEN_FRONTEND=m
CONFIG_SND_VIRTIO=m
@@ -6062,7 +6495,6 @@ CONFIG_HID_EZKEY=y
# CONFIG_HID_GFRM is not set
# CONFIG_HID_GLORIOUS is not set
# CONFIG_HID_HOLTEK is not set
-# CONFIG_HID_GOOGLE_HAMMER is not set
# CONFIG_HID_VIVALDI is not set
# CONFIG_HID_GT683R is not set
# CONFIG_HID_KEYTOUCH is not set
@@ -6070,6 +6502,8 @@ CONFIG_HID_EZKEY=y
# CONFIG_HID_UCLOGIC is not set
# CONFIG_HID_WALTOP is not set
# CONFIG_HID_VIEWSONIC is not set
+# CONFIG_HID_VRC2 is not set
+# CONFIG_HID_XIAOMI is not set
# CONFIG_HID_GYRATION is not set
# CONFIG_HID_ICADE is not set
CONFIG_HID_ITE=y
@@ -6079,6 +6513,7 @@ CONFIG_HID_KENSINGTON=y
# CONFIG_HID_LCPOWER is not set
# CONFIG_HID_LED is not set
# CONFIG_HID_LENOVO is not set
+# CONFIG_HID_LETSKETCH is not set
CONFIG_HID_LOGITECH=y
# CONFIG_HID_LOGITECH_HIDPP is not set
# CONFIG_LOGITECH_FF is not set
@@ -6088,10 +6523,13 @@ CONFIG_HID_LOGITECH=y
# CONFIG_HID_MAGICMOUSE is not set
# CONFIG_HID_MALTRON is not set
# CONFIG_HID_MAYFLASH is not set
+# CONFIG_HID_MEGAWORLD_FF is not set
# CONFIG_HID_REDRAGON is not set
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MONTEREY=y
# CONFIG_HID_MULTITOUCH is not set
+CONFIG_HID_NINTENDO=m
+# CONFIG_NINTENDO_FF is not set
# CONFIG_HID_NTI is not set
# CONFIG_HID_NTRIG is not set
# CONFIG_HID_ORTEK is not set
@@ -6100,14 +6538,15 @@ CONFIG_HID_MONTEREY=y
# CONFIG_HID_PETALYNX is not set
# CONFIG_HID_PICOLCD is not set
# CONFIG_HID_PLANTRONICS is not set
-CONFIG_HID_PLAYSTATION=m
-# CONFIG_PLAYSTATION_FF is not set
+# CONFIG_HID_PXRC is not set
+# CONFIG_HID_RAZER is not set
# CONFIG_HID_PRIMAX is not set
# CONFIG_HID_RETRODE is not set
# CONFIG_HID_ROCCAT is not set
# CONFIG_HID_SAITEK is not set
# CONFIG_HID_SAMSUNG is not set
# CONFIG_HID_SEMITEK is not set
+# CONFIG_HID_SIGMAMICRO is not set
# CONFIG_HID_SONY is not set
# CONFIG_HID_SPEEDLINK is not set
# CONFIG_HID_STEAM is not set
@@ -6119,6 +6558,7 @@ CONFIG_HID_PLAYSTATION=m
# CONFIG_HID_SMARTJOYPLUS is not set
# CONFIG_HID_TIVO is not set
# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_TOPRE is not set
# CONFIG_HID_THINGM is not set
# CONFIG_HID_THRUSTMASTER is not set
# CONFIG_HID_UDRAW_PS3 is not set
@@ -6146,6 +6586,7 @@ CONFIG_USB_HID=y
#
# CONFIG_I2C_HID_ACPI is not set
# CONFIG_I2C_HID_OF is not set
+# CONFIG_I2C_HID_OF_ELAN is not set
# CONFIG_I2C_HID_OF_GOODIX is not set
# end of I2C HID support
# end of HID support
@@ -6211,6 +6652,7 @@ CONFIG_USB_OHCI_HCD_PLATFORM=y
# CONFIG_USB_HCD_BCMA is not set
# CONFIG_USB_HCD_SSB is not set
# CONFIG_USB_HCD_TEST_MODE is not set
+CONFIG_USB_XEN_HCD=m
#
# USB Device Class drivers
@@ -6267,6 +6709,7 @@ CONFIG_USB_DWC3_PCI=y
CONFIG_USB_DWC3_HAPS=y
CONFIG_USB_DWC3_MESON_G12A=y
CONFIG_USB_DWC3_OF_SIMPLE=y
+CONFIG_USB_DWC3_QCOM=y
CONFIG_USB_DWC3_IMX8MP=y
CONFIG_USB_DWC2=y
# CONFIG_USB_DWC2_HOST is not set
@@ -6367,6 +6810,7 @@ CONFIG_USB_SERIAL_DEBUG=m
# CONFIG_USB_IDMOUSE is not set
# CONFIG_USB_FTDI_ELAN is not set
# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_QCOM_EUD is not set
CONFIG_APPLE_MFI_FASTCHARGE=m
# CONFIG_USB_SISUSBVGA is not set
# CONFIG_USB_LD is not set
@@ -6382,6 +6826,7 @@ CONFIG_USB_HSIC_USB3503=y
# CONFIG_USB_HSIC_USB4604 is not set
# CONFIG_USB_LINK_LAYER_TEST is not set
# CONFIG_USB_CHAOSKEY is not set
+# CONFIG_USB_ONBOARD_HUB is not set
#
# USB Physical Layer drivers
@@ -6475,7 +6920,6 @@ CONFIG_MMC_RICOH_MMC=y
CONFIG_MMC_SDHCI_ACPI=m
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_OF_ARASAN=y
-CONFIG_MMC_SDHCI_OF_ASPEED=m
CONFIG_MMC_SDHCI_OF_AT91=m
CONFIG_MMC_SDHCI_OF_ESDHC=y
CONFIG_MMC_SDHCI_OF_DWCMSHC=m
@@ -6490,6 +6934,7 @@ CONFIG_MMC_SDHCI_MILBEAUT=m
CONFIG_MMC_SDHCI_IPROC=y
CONFIG_MMC_MESON_GX=y
CONFIG_MMC_MESON_MX_SDIO=m
+# CONFIG_MMC_SDHCI_MSM is not set
CONFIG_MMC_MXC=m
CONFIG_MMC_TIFM_SD=m
CONFIG_MMC_SPI=y
@@ -6518,6 +6963,19 @@ CONFIG_MMC_SDHCI_XENON=y
CONFIG_MMC_SDHCI_OMAP=m
CONFIG_MMC_SDHCI_AM654=m
CONFIG_MMC_SDHCI_EXTERNAL_DMA=y
+CONFIG_SCSI_UFSHCD=m
+# CONFIG_SCSI_UFS_BSG is not set
+# CONFIG_SCSI_UFS_HPB is not set
+# CONFIG_SCSI_UFS_HWMON is not set
+CONFIG_SCSI_UFSHCD_PCI=m
+# CONFIG_SCSI_UFS_DWC_TC_PCI is not set
+CONFIG_SCSI_UFSHCD_PLATFORM=m
+# CONFIG_SCSI_UFS_CDNS_PLATFORM is not set
+# CONFIG_SCSI_UFS_DWC_TC_PLATFORM is not set
+# CONFIG_SCSI_UFS_QCOM is not set
+# CONFIG_SCSI_UFS_MEDIATEK is not set
+# CONFIG_SCSI_UFS_HISI is not set
+# CONFIG_SCSI_UFS_EXYNOS is not set
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@@ -6573,6 +7031,10 @@ CONFIG_LEDS_SYSCON=y
#
#
+# RGB LED drivers
+#
+
+#
# LED Triggers
#
CONFIG_LEDS_TRIGGERS=y
@@ -6597,6 +7059,10 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
# CONFIG_LEDS_TRIGGER_PATTERN is not set
CONFIG_LEDS_TRIGGER_AUDIO=m
# CONFIG_LEDS_TRIGGER_TTY is not set
+
+#
+# Simple LED drivers
+#
# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
CONFIG_EDAC_SUPPORT=y
@@ -6606,7 +7072,9 @@ CONFIG_EDAC_LEGACY_SYSFS=y
# CONFIG_EDAC_AL_MC is not set
CONFIG_EDAC_LAYERSCAPE=m
CONFIG_EDAC_THUNDERX=m
+# CONFIG_EDAC_SYNOPSYS is not set
CONFIG_EDAC_XGENE=m
+# CONFIG_EDAC_QCOM is not set
# CONFIG_EDAC_DMC520 is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
@@ -6638,6 +7106,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_HYM8563 is not set
# CONFIG_RTC_DRV_MAX6900 is not set
CONFIG_RTC_DRV_MAX77686=y
+# CONFIG_RTC_DRV_NCT3018Y is not set
CONFIG_RTC_DRV_RK808=m
# CONFIG_RTC_DRV_RS5C372 is not set
# CONFIG_RTC_DRV_ISL1208 is not set
@@ -6709,8 +7178,8 @@ CONFIG_RTC_DRV_DS3232_HWMON=y
# CONFIG_RTC_DRV_BQ4802 is not set
# CONFIG_RTC_DRV_RP5C01 is not set
# CONFIG_RTC_DRV_V3020 is not set
+# CONFIG_RTC_DRV_OPTEE is not set
# CONFIG_RTC_DRV_ZYNQMP is not set
-# CONFIG_RTC_DRV_CROS_EC is not set
#
# on-CPU RTC drivers
@@ -6718,7 +7187,6 @@ CONFIG_RTC_DRV_DS3232_HWMON=y
# CONFIG_RTC_DRV_IMXDI is not set
CONFIG_RTC_DRV_FSL_FTM_ALARM=y
CONFIG_RTC_DRV_MESON_VRTC=m
-CONFIG_HAVE_S3C_RTC=y
CONFIG_RTC_DRV_S3C=y
# CONFIG_RTC_DRV_PL030 is not set
CONFIG_RTC_DRV_PL031=y
@@ -6754,6 +7222,7 @@ CONFIG_DMA_ACPI=y
CONFIG_DMA_OF=y
# CONFIG_ALTERA_MSGDMA is not set
# CONFIG_AMBA_PL08X is not set
+CONFIG_APPLE_ADMAC=y
CONFIG_BCM_SBA_RAID=m
CONFIG_DMA_BCM2835=y
# CONFIG_DMA_SUN6I is not set
@@ -6772,6 +7241,7 @@ CONFIG_MX3_IPU=y
CONFIG_MX3_IPU_IRQS=4
CONFIG_PL330_DMA=y
# CONFIG_PLX_DMA is not set
+CONFIG_TEGRA186_GPC_DMA=m
CONFIG_TEGRA20_APB_DMA=y
# CONFIG_TEGRA210_ADMA is not set
# CONFIG_UNIPHIER_MDMAC is not set
@@ -6783,6 +7253,8 @@ CONFIG_TEGRA20_APB_DMA=y
# CONFIG_MTK_HSDMA is not set
# CONFIG_MTK_CQDMA is not set
# CONFIG_MTK_UART_APDMA is not set
+# CONFIG_QCOM_BAM_DMA is not set
+# CONFIG_QCOM_GPI_DMA is not set
CONFIG_QCOM_HIDMA_MGMT=y
CONFIG_QCOM_HIDMA=y
# CONFIG_DW_DMAC is not set
@@ -6831,11 +7303,14 @@ CONFIG_VFIO_PCI_CORE=y
CONFIG_VFIO_PCI_MMAP=y
CONFIG_VFIO_PCI_INTX=y
CONFIG_VFIO_PCI=y
+# CONFIG_MLX5_VFIO_PCI is not set
# CONFIG_VFIO_PLATFORM is not set
# CONFIG_VFIO_MDEV is not set
# CONFIG_VIRT_DRIVERS is not set
+CONFIG_VIRTIO_ANCHOR=y
CONFIG_VIRTIO=y
CONFIG_VIRTIO_PCI_LIB=y
+CONFIG_VIRTIO_PCI_LIB_LEGACY=y
CONFIG_VIRTIO_MENU=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_PCI_LEGACY=y
@@ -6875,12 +7350,15 @@ CONFIG_XEN_GNTDEV=y
CONFIG_XEN_GRANT_DEV_ALLOC=y
# CONFIG_XEN_GRANT_DMA_ALLOC is not set
CONFIG_SWIOTLB_XEN=y
+CONFIG_XEN_PCI_STUB=y
+CONFIG_XEN_PCIDEV_STUB=m
# CONFIG_XEN_PVCALLS_FRONTEND is not set
# CONFIG_XEN_PVCALLS_BACKEND is not set
CONFIG_XEN_PRIVCMD=y
CONFIG_XEN_EFI=y
CONFIG_XEN_AUTO_XLATE=y
CONFIG_XEN_FRONT_PGDIR_SHBUF=m
+# CONFIG_XEN_VIRTIO is not set
# end of Xen driver support
# CONFIG_GREYBUS is not set
@@ -6896,7 +7374,6 @@ CONFIG_RTLLIB_CRYPTO_WEP=m
CONFIG_RTL8723BS=m
CONFIG_R8712U=m
CONFIG_R8188EU=m
-CONFIG_88EU_AP_MODE=y
CONFIG_RTS5208=m
# CONFIG_VT6655 is not set
# CONFIG_VT6656 is not set
@@ -6916,7 +7393,6 @@ CONFIG_RTS5208=m
# Analog to digital converters
#
# CONFIG_AD7816 is not set
-# CONFIG_AD7280 is not set
# end of Analog to digital converters
#
@@ -6926,12 +7402,6 @@ CONFIG_RTS5208=m
# end of Analog digital bi-direction converters
#
-# Capacitance to digital converters
-#
-# CONFIG_AD7746 is not set
-# end of Capacitance to digital converters
-
-#
# Direct Digital Synthesis
#
# CONFIG_AD9832 is not set
@@ -6960,18 +7430,8 @@ CONFIG_RTS5208=m
# CONFIG_FB_SM750 is not set
# CONFIG_MFD_NVEC is not set
# CONFIG_STAGING_MEDIA is not set
-
-#
-# Android
-#
-# end of Android
-
# CONFIG_STAGING_BOARD is not set
# CONFIG_LTE_GDM724X is not set
-# CONFIG_FIREWIRE_SERIAL is not set
-# CONFIG_GS_FPGABOOT is not set
-# CONFIG_UNISYSSPAR is not set
-# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set
# CONFIG_FB_TFT is not set
# CONFIG_KS7010 is not set
CONFIG_BCM_VIDEOCORE=m
@@ -6982,25 +7442,11 @@ CONFIG_PI433=m
# CONFIG_XIL_AXIS_FIFO is not set
# CONFIG_FIELDBUS_DEV is not set
# CONFIG_QLGE is not set
-# CONFIG_WFX is not set
+# CONFIG_VME_BUS is not set
# CONFIG_GOLDFISH is not set
-CONFIG_CHROME_PLATFORMS=y
-# CONFIG_CHROMEOS_TBMC is not set
-CONFIG_CROS_EC=y
-# CONFIG_CROS_EC_I2C is not set
-# CONFIG_CROS_EC_SPI is not set
-CONFIG_CROS_EC_PROTO=y
-# CONFIG_CROS_KBD_LED_BACKLIGHT is not set
-# CONFIG_CROS_EC_CHARDEV is not set
-CONFIG_CROS_EC_LIGHTBAR=y
-CONFIG_CROS_EC_VBC=y
-CONFIG_CROS_EC_DEBUGFS=y
-CONFIG_CROS_EC_SENSORHUB=y
-CONFIG_CROS_EC_SYSFS=y
-CONFIG_CROS_USBPD_NOTIFY=y
+# CONFIG_CHROME_PLATFORMS is not set
# CONFIG_MELLANOX_PLATFORM is not set
CONFIG_SURFACE_PLATFORMS=y
-# CONFIG_SURFACE_3_BUTTON is not set
# CONFIG_SURFACE_3_POWER_OPREGION is not set
# CONFIG_SURFACE_GPE is not set
# CONFIG_SURFACE_HOTPLUG is not set
@@ -7013,12 +7459,13 @@ CONFIG_COMMON_CLK=y
#
# Clock driver for ARM Reference designs
#
-# CONFIG_ICST is not set
+# CONFIG_CLK_ICST is not set
CONFIG_CLK_SP810=y
CONFIG_CLK_VEXPRESS_OSC=y
# end of Clock driver for ARM Reference designs
# CONFIG_LMK04832 is not set
+CONFIG_COMMON_CLK_APPLE_NCO=y
# CONFIG_COMMON_CLK_MAX77686 is not set
# CONFIG_COMMON_CLK_MAX9485 is not set
CONFIG_COMMON_CLK_RK808=y
@@ -7041,7 +7488,9 @@ CONFIG_CLK_QORIQ=y
CONFIG_CLK_LS1028A_PLLDIG=y
CONFIG_COMMON_CLK_XGENE=y
CONFIG_COMMON_CLK_PWM=y
+# CONFIG_COMMON_CLK_RS9_PCIE is not set
# CONFIG_COMMON_CLK_VC5 is not set
+# CONFIG_COMMON_CLK_VC7 is not set
# CONFIG_COMMON_CLK_FIXED_MMIO is not set
CONFIG_CLK_BCM2711_DVP=y
CONFIG_CLK_BCM2835=y
@@ -7061,6 +7510,8 @@ CONFIG_MXC_CLK=y
# CONFIG_CLK_IMX8MN is not set
# CONFIG_CLK_IMX8MP is not set
# CONFIG_CLK_IMX8MQ is not set
+# CONFIG_CLK_IMX8ULP is not set
+# CONFIG_CLK_IMX93 is not set
#
# Clock driver for MediaTek SoC
@@ -7097,6 +7548,11 @@ CONFIG_COMMON_CLK_MT6779=y
# CONFIG_COMMON_CLK_MT6779_VENCSYS is not set
# CONFIG_COMMON_CLK_MT6779_MFGCFG is not set
# CONFIG_COMMON_CLK_MT6779_AUDSYS is not set
+CONFIG_COMMON_CLK_MT6795=y
+CONFIG_COMMON_CLK_MT6795_MFGCFG=y
+CONFIG_COMMON_CLK_MT6795_MMSYS=y
+CONFIG_COMMON_CLK_MT6795_VDECSYS=y
+CONFIG_COMMON_CLK_MT6795_VENCSYS=y
CONFIG_COMMON_CLK_MT6797=y
# CONFIG_COMMON_CLK_MT6797_MMSYS is not set
# CONFIG_COMMON_CLK_MT6797_IMGSYS is not set
@@ -7106,6 +7562,8 @@ CONFIG_COMMON_CLK_MT7622=y
# CONFIG_COMMON_CLK_MT7622_ETHSYS is not set
# CONFIG_COMMON_CLK_MT7622_HIFSYS is not set
# CONFIG_COMMON_CLK_MT7622_AUDSYS is not set
+CONFIG_COMMON_CLK_MT7986=y
+CONFIG_COMMON_CLK_MT7986_ETHSYS=y
CONFIG_COMMON_CLK_MT8167=y
CONFIG_COMMON_CLK_MT8167_AUDSYS=y
CONFIG_COMMON_CLK_MT8167_IMGSYS=y
@@ -7126,6 +7584,7 @@ CONFIG_COMMON_CLK_MT8183=y
# CONFIG_COMMON_CLK_MT8183_MMSYS is not set
# CONFIG_COMMON_CLK_MT8183_VDECSYS is not set
# CONFIG_COMMON_CLK_MT8183_VENCSYS is not set
+CONFIG_COMMON_CLK_MT8186=y
CONFIG_COMMON_CLK_MT8192=y
# CONFIG_COMMON_CLK_MT8192_AUDSYS is not set
# CONFIG_COMMON_CLK_MT8192_CAMSYS is not set
@@ -7139,6 +7598,14 @@ CONFIG_COMMON_CLK_MT8192=y
# CONFIG_COMMON_CLK_MT8192_SCP_ADSP is not set
# CONFIG_COMMON_CLK_MT8192_VDECSYS is not set
# CONFIG_COMMON_CLK_MT8192_VENCSYS is not set
+CONFIG_COMMON_CLK_MT8195=y
+CONFIG_COMMON_CLK_MT8365=y
+CONFIG_COMMON_CLK_MT8365_APU=y
+CONFIG_COMMON_CLK_MT8365_CAM=y
+CONFIG_COMMON_CLK_MT8365_MFG=y
+CONFIG_COMMON_CLK_MT8365_MMSYS=y
+CONFIG_COMMON_CLK_MT8365_VDEC=y
+CONFIG_COMMON_CLK_MT8365_VENC=y
CONFIG_COMMON_CLK_MT8516=y
# CONFIG_COMMON_CLK_MT8516_AUDSYS is not set
# end of Clock driver for MediaTek SoC
@@ -7165,6 +7632,7 @@ CONFIG_ARMADA_37XX_CLK=y
CONFIG_ARMADA_AP806_SYSCON=y
CONFIG_ARMADA_AP_CPU_CLK=y
CONFIG_ARMADA_CP110_SYSCON=y
+# CONFIG_COMMON_CLK_QCOM is not set
CONFIG_COMMON_CLK_ROCKCHIP=y
CONFIG_CLK_PX30=y
CONFIG_CLK_RK3308=y
@@ -7176,11 +7644,7 @@ CONFIG_COMMON_CLK_SAMSUNG=y
CONFIG_EXYNOS_ARM64_COMMON_CLK=y
CONFIG_EXYNOS_AUDSS_CLK_CON=y
CONFIG_EXYNOS_CLKOUT=y
-CONFIG_CLK_SUNXI=y
-CONFIG_CLK_SUNXI_CLOCKS=y
-CONFIG_CLK_SUNXI_PRCM_SUN6I=y
-CONFIG_CLK_SUNXI_PRCM_SUN8I=y
-CONFIG_CLK_SUNXI_PRCM_SUN9I=y
+CONFIG_TESLA_FSD_COMMON_CLK=y
CONFIG_SUNXI_CCU=y
CONFIG_SUN50I_A64_CCU=y
CONFIG_SUN50I_A100_CCU=y
@@ -7188,7 +7652,7 @@ CONFIG_SUN50I_A100_R_CCU=y
CONFIG_SUN50I_H6_CCU=y
CONFIG_SUN50I_H616_CCU=y
CONFIG_SUN50I_H6_R_CCU=y
-# CONFIG_SUN8I_A83T_CCU is not set
+CONFIG_SUN6I_RTC_CCU=y
CONFIG_SUN8I_H3_CCU=y
# CONFIG_SUN8I_DE2_CCU is not set
CONFIG_SUN8I_R_CCU=y
@@ -7196,7 +7660,9 @@ CONFIG_CLK_TEGRA_BPMP=y
CONFIG_TEGRA_CLK_DFLL=y
CONFIG_CLK_UNIPHIER=y
# CONFIG_XILINX_VCU is not set
+# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set
CONFIG_HWSPINLOCK=y
+# CONFIG_HWSPINLOCK_QCOM is not set
# CONFIG_HWSPINLOCK_SUN6I is not set
#
@@ -7209,6 +7675,7 @@ CONFIG_CLKSRC_MMIO=y
CONFIG_ROCKCHIP_TIMER=y
CONFIG_SUN4I_TIMER=y
CONFIG_TEGRA_TIMER=y
+# CONFIG_TEGRA186_TIMER is not set
CONFIG_ARM_ARCH_TIMER=y
CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y
@@ -7217,12 +7684,14 @@ CONFIG_HISILICON_ERRATUM_161010101=y
CONFIG_ARM64_ERRATUM_858921=y
CONFIG_SUN50I_ERRATUM_UNKNOWN1=y
CONFIG_ARM_TIMER_SP804=y
+CONFIG_CLKSRC_EXYNOS_MCT=y
CONFIG_MTK_TIMER=y
CONFIG_TIMER_IMX_SYS_CTR=y
# CONFIG_MICROCHIP_PIT64B is not set
# end of Clock Source drivers
CONFIG_MAILBOX=y
+CONFIG_APPLE_MAILBOX=y
CONFIG_ARM_MHU=y
# CONFIG_ARM_MHU_V2 is not set
# CONFIG_IMX_MBOX is not set
@@ -7236,10 +7705,13 @@ CONFIG_BCM2835_MBOX=y
CONFIG_HI3660_MBOX=y
CONFIG_HI6220_MBOX=y
# CONFIG_MAILBOX_TEST is not set
+# CONFIG_QCOM_APCS_IPC is not set
CONFIG_TEGRA_HSP_MBOX=y
# CONFIG_XGENE_SLIMPRO_MBOX is not set
+# CONFIG_MTK_ADSP_MBOX is not set
# CONFIG_MTK_CMDQ_MBOX is not set
CONFIG_SUN6I_MSGBOX=y
+# CONFIG_QCOM_IPCC is not set
CONFIG_IOMMU_IOVA=y
CONFIG_IOMMU_API=y
CONFIG_IOMMU_SUPPORT=y
@@ -7251,6 +7723,7 @@ CONFIG_IOMMU_IO_PGTABLE=y
CONFIG_IOMMU_IO_PGTABLE_LPAE=y
# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set
# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set
+CONFIG_IOMMU_IO_PGTABLE_DART=y
# end of Generic IOMMU Pagetable Support
# CONFIG_IOMMU_DEBUGFS is not set
@@ -7267,9 +7740,12 @@ CONFIG_APPLE_DART=y
CONFIG_ARM_SMMU=y
# CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set
CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y
+CONFIG_ARM_SMMU_QCOM=y
+# CONFIG_ARM_SMMU_QCOM_DEBUG is not set
CONFIG_ARM_SMMU_V3=y
# CONFIG_ARM_SMMU_V3_SVA is not set
# CONFIG_MTK_IOMMU is not set
+# CONFIG_QCOM_IOMMU is not set
# CONFIG_VIRTIO_IOMMU is not set
#
@@ -7281,11 +7757,24 @@ CONFIG_ARM_SMMU_V3=y
#
# Rpmsg drivers
#
-# CONFIG_RPMSG_QCOM_GLINK_RPM is not set
+CONFIG_RPMSG=m
+# CONFIG_RPMSG_CHAR is not set
+# CONFIG_RPMSG_CTRL is not set
+# CONFIG_RPMSG_NS is not set
+CONFIG_RPMSG_QCOM_GLINK=m
+CONFIG_RPMSG_QCOM_GLINK_RPM=m
+CONFIG_RPMSG_QCOM_GLINK_SMEM=m
+CONFIG_RPMSG_QCOM_SMD=m
# CONFIG_RPMSG_VIRTIO is not set
# end of Rpmsg drivers
-# CONFIG_SOUNDWIRE is not set
+CONFIG_SOUNDWIRE=m
+
+#
+# SoundWire Devices
+#
+# CONFIG_SOUNDWIRE_INTEL is not set
+CONFIG_SOUNDWIRE_QCOM=m
#
# SOC (System On Chip) specific Drivers
@@ -7303,6 +7792,14 @@ CONFIG_MESON_SECURE_PM_DOMAINS=y
# end of Amlogic SoC drivers
#
+# Apple SoC drivers
+#
+CONFIG_APPLE_PMGR_PWRSTATE=y
+CONFIG_APPLE_RTKIT=m
+CONFIG_APPLE_SART=m
+# end of Apple SoC drivers
+
+#
# Broadcom SoC drivers
#
CONFIG_BCM2835_POWER=y
@@ -7316,14 +7813,21 @@ CONFIG_SOC_BRCMSTB=y
# CONFIG_QUICC_ENGINE is not set
CONFIG_FSL_GUTS=y
CONFIG_DPAA2_CONSOLE=y
-# CONFIG_FSL_RCPM is not set
+CONFIG_FSL_RCPM=y
# end of NXP/Freescale QorIQ SoC drivers
#
+# fujitsu SoC drivers
+#
+# CONFIG_A64FX_DIAG is not set
+# end of fujitsu SoC drivers
+
+#
# i.MX SoC drivers
#
CONFIG_IMX_GPCV2_PM_DOMAINS=y
CONFIG_SOC_IMX8M=y
+CONFIG_SOC_IMX9=y
# end of i.MX SoC drivers
#
@@ -7347,7 +7851,30 @@ CONFIG_MTK_MMSYS=y
#
# Qualcomm SoC drivers
#
+CONFIG_QCOM_AOSS_QMP=m
+CONFIG_QCOM_COMMAND_DB=m
+CONFIG_QCOM_CPR=m
+CONFIG_QCOM_GENI_SE=m
+CONFIG_QCOM_GSBI=m
+CONFIG_QCOM_LLCC=m
+CONFIG_QCOM_OCMEM=m
+CONFIG_QCOM_PDR_HELPERS=m
CONFIG_QCOM_QMI_HELPERS=m
+CONFIG_QCOM_RMTFS_MEM=m
+CONFIG_QCOM_RPMH=m
+# CONFIG_QCOM_RPMHPD is not set
+CONFIG_QCOM_RPMPD=m
+CONFIG_QCOM_SMEM=m
+CONFIG_QCOM_SMD_RPM=m
+CONFIG_QCOM_SMEM_STATE=y
+CONFIG_QCOM_SMP2P=m
+CONFIG_QCOM_SMSM=m
+CONFIG_QCOM_SOCINFO=m
+CONFIG_QCOM_SPM=m
+# CONFIG_QCOM_STATS is not set
+CONFIG_QCOM_WCNSS_CTRL=m
+CONFIG_QCOM_APR=m
+CONFIG_QCOM_ICC_BWMON=m
# end of Qualcomm SoC drivers
CONFIG_ROCKCHIP_GRF=y
@@ -7355,6 +7882,7 @@ CONFIG_ROCKCHIP_GRF=y
CONFIG_ROCKCHIP_PM_DOMAINS=y
CONFIG_SOC_SAMSUNG=y
CONFIG_EXYNOS_CHIPID=y
+CONFIG_EXYNOS_USI=y
CONFIG_EXYNOS_PMU=y
CONFIG_EXYNOS_PM_DOMAINS=y
CONFIG_SUNXI_MBUS=y
@@ -7394,7 +7922,9 @@ CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=m
# CONFIG_ARM_IMX_BUS_DEVFREQ is not set
# CONFIG_ARM_IMX8M_DDRC_DEVFREQ is not set
# CONFIG_ARM_TEGRA_DEVFREQ is not set
+# CONFIG_ARM_MEDIATEK_CCI_DEVFREQ is not set
# CONFIG_ARM_RK3399_DMC_DEVFREQ is not set
+# CONFIG_ARM_SUN8I_A33_MBUS_DEVFREQ is not set
# CONFIG_PM_DEVFREQ_EVENT is not set
CONFIG_EXTCON=y
@@ -7406,11 +7936,10 @@ CONFIG_EXTCON=y
# CONFIG_EXTCON_GPIO is not set
# CONFIG_EXTCON_MAX3355 is not set
# CONFIG_EXTCON_PTN5150 is not set
+# CONFIG_EXTCON_QCOM_SPMI_MISC is not set
# CONFIG_EXTCON_RT8973A is not set
# CONFIG_EXTCON_SM5502 is not set
CONFIG_EXTCON_USB_GPIO=y
-# CONFIG_EXTCON_USBC_CROS_EC is not set
-# CONFIG_EXTCON_USBC_TUSB320 is not set
CONFIG_MEMORY=y
# CONFIG_ARM_PL172_MPMC is not set
CONFIG_MTK_SMI=m
@@ -7436,8 +7965,14 @@ CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
#
# CONFIG_ADIS16201 is not set
# CONFIG_ADIS16209 is not set
+# CONFIG_ADXL313_I2C is not set
+# CONFIG_ADXL313_SPI is not set
# CONFIG_ADXL345_I2C is not set
# CONFIG_ADXL345_SPI is not set
+# CONFIG_ADXL355_I2C is not set
+# CONFIG_ADXL355_SPI is not set
+# CONFIG_ADXL367_SPI is not set
+# CONFIG_ADXL367_I2C is not set
# CONFIG_ADXL372_SPI is not set
# CONFIG_ADXL372_I2C is not set
# CONFIG_BMA180 is not set
@@ -7462,6 +7997,7 @@ CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
# CONFIG_MMA8452 is not set
# CONFIG_MMA9551 is not set
# CONFIG_MMA9553 is not set
+# CONFIG_MSA311 is not set
# CONFIG_MXC4005 is not set
# CONFIG_MXC6255 is not set
# CONFIG_SCA3000 is not set
@@ -7477,6 +8013,7 @@ CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
# CONFIG_AD7124 is not set
# CONFIG_AD7192 is not set
# CONFIG_AD7266 is not set
+# CONFIG_AD7280 is not set
# CONFIG_AD7291 is not set
# CONFIG_AD7292 is not set
# CONFIG_AD7298 is not set
@@ -7502,6 +8039,7 @@ CONFIG_EXYNOS_ADC=y
# CONFIG_HX711 is not set
# CONFIG_INA2XX_ADC is not set
# CONFIG_IMX7D_ADC is not set
+# CONFIG_IMX8QXP_ADC is not set
# CONFIG_LTC2471 is not set
# CONFIG_LTC2485 is not set
# CONFIG_LTC2496 is not set
@@ -7509,6 +8047,7 @@ CONFIG_EXYNOS_ADC=y
# CONFIG_MAX1027 is not set
# CONFIG_MAX11100 is not set
# CONFIG_MAX1118 is not set
+# CONFIG_MAX11205 is not set
# CONFIG_MAX1241 is not set
# CONFIG_MAX1363 is not set
# CONFIG_MAX9611 is not set
@@ -7522,6 +8061,7 @@ CONFIG_MESON_SARADC=y
# CONFIG_QCOM_SPMI_VADC is not set
# CONFIG_QCOM_SPMI_ADC5 is not set
CONFIG_ROCKCHIP_SARADC=m
+# CONFIG_RICHTEK_RTQ6056 is not set
# CONFIG_SD_ADC_MODULATOR is not set
# CONFIG_TI_ADC081C is not set
# CONFIG_TI_ADC0832 is not set
@@ -7543,6 +8083,12 @@ CONFIG_ROCKCHIP_SARADC=m
# end of Analog to digital converters
#
+# Analog to digital and digital to analog converters
+#
+# CONFIG_AD74413R is not set
+# end of Analog to digital and digital to analog converters
+
+#
# Analog Front Ends
#
# CONFIG_IIO_RESCALE is not set
@@ -7552,6 +8098,7 @@ CONFIG_ROCKCHIP_SARADC=m
# Amplifiers
#
# CONFIG_AD8366 is not set
+# CONFIG_ADA4250 is not set
# CONFIG_HMC425 is not set
# end of Amplifiers
@@ -7559,6 +8106,7 @@ CONFIG_ROCKCHIP_SARADC=m
# Capacitance to digital converters
#
# CONFIG_AD7150 is not set
+# CONFIG_AD7746 is not set
# end of Capacitance to digital converters
#
@@ -7571,15 +8119,15 @@ CONFIG_ROCKCHIP_SARADC=m
# CONFIG_IAQCORE is not set
# CONFIG_PMS7003 is not set
# CONFIG_SCD30_CORE is not set
+# CONFIG_SCD4X is not set
# CONFIG_SENSIRION_SGP30 is not set
# CONFIG_SENSIRION_SGP40 is not set
# CONFIG_SPS30_I2C is not set
# CONFIG_SPS30_SERIAL is not set
+# CONFIG_SENSEAIR_SUNRISE_CO2 is not set
# CONFIG_VZ89X is not set
# end of Chemical Sensors
-# CONFIG_IIO_CROS_EC_SENSORS_CORE is not set
-
#
# Hid Sensor IIO Common
#
@@ -7600,6 +8148,7 @@ CONFIG_ROCKCHIP_SARADC=m
#
# Digital to analog converters
#
+# CONFIG_AD3552R is not set
# CONFIG_AD5064 is not set
# CONFIG_AD5360 is not set
# CONFIG_AD5380 is not set
@@ -7610,6 +8159,7 @@ CONFIG_ROCKCHIP_SARADC=m
# CONFIG_AD5593R is not set
# CONFIG_AD5504 is not set
# CONFIG_AD5624R_SPI is not set
+# CONFIG_LTC2688 is not set
# CONFIG_AD5686_SPI is not set
# CONFIG_AD5696_I2C is not set
# CONFIG_AD5755 is not set
@@ -7619,6 +8169,7 @@ CONFIG_ROCKCHIP_SARADC=m
# CONFIG_AD5766 is not set
# CONFIG_AD5770R is not set
# CONFIG_AD5791 is not set
+# CONFIG_AD7293 is not set
# CONFIG_AD7303 is not set
# CONFIG_AD8801 is not set
# CONFIG_DPOT_DAC is not set
@@ -7643,6 +8194,12 @@ CONFIG_ROCKCHIP_SARADC=m
# end of IIO dummy driver
#
+# Filters
+#
+# CONFIG_ADMV8818 is not set
+# end of Filters
+
+#
# Frequency Synthesizers DDS/PLL
#
@@ -7657,6 +8214,10 @@ CONFIG_ROCKCHIP_SARADC=m
#
# CONFIG_ADF4350 is not set
# CONFIG_ADF4371 is not set
+# CONFIG_ADMV1013 is not set
+# CONFIG_ADMV1014 is not set
+# CONFIG_ADMV4420 is not set
+# CONFIG_ADRF6780 is not set
# end of Phase-Locked Loop (PLL) frequency synthesizers
# end of Frequency Synthesizers DDS/PLL
@@ -7712,6 +8273,8 @@ CONFIG_ROCKCHIP_SARADC=m
# CONFIG_ADIS16480 is not set
# CONFIG_BMI160_I2C is not set
# CONFIG_BMI160_SPI is not set
+# CONFIG_BOSCH_BNO055_SERIAL is not set
+# CONFIG_BOSCH_BNO055_I2C is not set
# CONFIG_FXOS8700_I2C is not set
# CONFIG_FXOS8700_SPI is not set
# CONFIG_KMX61 is not set
@@ -7749,6 +8312,7 @@ CONFIG_ROCKCHIP_SARADC=m
# CONFIG_JSA1212 is not set
# CONFIG_RPR0521 is not set
# CONFIG_LTR501 is not set
+# CONFIG_LTRF216A is not set
# CONFIG_LV0104CS is not set
# CONFIG_MAX44000 is not set
# CONFIG_MAX44009 is not set
@@ -7867,7 +8431,6 @@ CONFIG_ROCKCHIP_SARADC=m
#
# Proximity and distance sensors
#
-# CONFIG_CROS_EC_MKBP_PROXIMITY is not set
# CONFIG_ISL29501 is not set
# CONFIG_LIDAR_LITE_V2 is not set
# CONFIG_MB1232 is not set
@@ -7875,6 +8438,8 @@ CONFIG_ROCKCHIP_SARADC=m
# CONFIG_RFD77402 is not set
# CONFIG_SRF04 is not set
# CONFIG_SX9310 is not set
+# CONFIG_SX9324 is not set
+# CONFIG_SX9360 is not set
# CONFIG_SX9500 is not set
# CONFIG_SRF08 is not set
# CONFIG_VCNL3020 is not set
@@ -7901,16 +8466,16 @@ CONFIG_ROCKCHIP_SARADC=m
# CONFIG_TSYS01 is not set
# CONFIG_TSYS02D is not set
# CONFIG_MAX31856 is not set
+# CONFIG_MAX31865 is not set
# end of Temperature sensors
# CONFIG_NTB is not set
-# CONFIG_VME_BUS is not set
CONFIG_PWM=y
CONFIG_PWM_SYSFS=y
# CONFIG_PWM_DEBUG is not set
# CONFIG_PWM_ATMEL_TCB is not set
CONFIG_PWM_BCM2835=m
-CONFIG_PWM_CROS_EC=m
+# CONFIG_PWM_CLK is not set
# CONFIG_PWM_DWC is not set
# CONFIG_PWM_FSL_FTM is not set
# CONFIG_PWM_HIBVT is not set
@@ -7926,6 +8491,7 @@ CONFIG_PWM_ROCKCHIP=y
CONFIG_PWM_SAMSUNG=y
# CONFIG_PWM_SUN4I is not set
CONFIG_PWM_TEGRA=m
+# CONFIG_PWM_XILINX is not set
#
# IRQ chip support
@@ -7942,6 +8508,9 @@ CONFIG_ALPINE_MSI=y
# CONFIG_AL_FIC is not set
CONFIG_BRCMSTB_L2_IRQ=y
CONFIG_HISILICON_IRQ_MBIGEN=y
+CONFIG_SUN6I_R_INTC=y
+CONFIG_SUNXI_NMI_INTC=y
+# CONFIG_XILINX_INTC is not set
CONFIG_IMX_GPCV2=y
CONFIG_MVEBU_GICP=y
CONFIG_MVEBU_ICU=y
@@ -7951,10 +8520,14 @@ CONFIG_MVEBU_SEI=y
CONFIG_LS_EXTIRQ=y
CONFIG_LS_SCFG_MSI=y
CONFIG_PARTITION_PERCPU=y
+# CONFIG_QCOM_IRQ_COMBINER is not set
CONFIG_IRQ_UNIPHIER_AIDET=y
CONFIG_MESON_IRQ_GPIO=y
+# CONFIG_QCOM_PDC is not set
+# CONFIG_QCOM_MPM is not set
CONFIG_IMX_IRQSTEER=y
CONFIG_IMX_INTMUX=y
+CONFIG_IMX_MU_MSI=m
CONFIG_MST_IRQ=y
CONFIG_APPLE_AIC=y
# end of IRQ chip support
@@ -7965,11 +8538,14 @@ CONFIG_RESET_CONTROLLER=y
CONFIG_RESET_IMX7=y
CONFIG_RESET_MESON=y
# CONFIG_RESET_MESON_AUDIO_ARB is not set
+# CONFIG_RESET_QCOM_AOSS is not set
+# CONFIG_RESET_QCOM_PDC is not set
CONFIG_RESET_RASPBERRYPI=m
CONFIG_RESET_SCMI=y
CONFIG_RESET_SIMPLE=y
CONFIG_RESET_SUNXI=y
# CONFIG_RESET_TI_SYSCON is not set
+# CONFIG_RESET_TI_TPS380X is not set
CONFIG_RESET_UNIPHIER=y
CONFIG_RESET_UNIPHIER_GLUE=y
CONFIG_COMMON_RESET_HI3660=y
@@ -7989,21 +8565,33 @@ CONFIG_PHY_SUN6I_MIPI_DPHY=m
# CONFIG_PHY_SUN50I_USB3 is not set
CONFIG_PHY_MESON8B_USB2=y
CONFIG_PHY_MESON_GXL_USB2=y
+CONFIG_PHY_MESON_G12A_MIPI_DPHY_ANALOG=y
CONFIG_PHY_MESON_G12A_USB2=y
CONFIG_PHY_MESON_G12A_USB3_PCIE=y
CONFIG_PHY_MESON_AXG_PCIE=y
CONFIG_PHY_MESON_AXG_MIPI_PCIE_ANALOG=y
CONFIG_PHY_MESON_AXG_MIPI_DPHY=y
+
+#
+# PHY drivers for Broadcom platforms
+#
# CONFIG_BCM_KONA_USB2_PHY is not set
+# end of PHY drivers for Broadcom platforms
+
# CONFIG_PHY_CADENCE_TORRENT is not set
# CONFIG_PHY_CADENCE_DPHY is not set
+# CONFIG_PHY_CADENCE_DPHY_RX is not set
# CONFIG_PHY_CADENCE_SIERRA is not set
# CONFIG_PHY_CADENCE_SALVO is not set
# CONFIG_PHY_FSL_IMX8MQ_USB is not set
+# CONFIG_PHY_MIXEL_LVDS_PHY is not set
# CONFIG_PHY_MIXEL_MIPI_DPHY is not set
+# CONFIG_PHY_FSL_IMX8M_PCIE is not set
+# CONFIG_PHY_FSL_LYNX_28G is not set
CONFIG_PHY_HI6220_USB=y
# CONFIG_PHY_HI3660_USB is not set
# CONFIG_PHY_HI3670_USB is not set
+# CONFIG_PHY_HI3670_PCIE is not set
# CONFIG_PHY_HISTB_COMBPHY is not set
# CONFIG_PHY_HISI_INNO_USB2 is not set
CONFIG_PHY_MVEBU_A3700_COMPHY=y
@@ -8013,16 +8601,30 @@ CONFIG_PHY_MVEBU_A3700_UTMI=y
# CONFIG_PHY_MVEBU_CP110_UTMI is not set
# CONFIG_PHY_PXA_28NM_HSIC is not set
# CONFIG_PHY_PXA_28NM_USB2 is not set
+# CONFIG_PHY_MTK_PCIE is not set
# CONFIG_PHY_MTK_TPHY is not set
# CONFIG_PHY_MTK_UFS is not set
# CONFIG_PHY_MTK_XSPHY is not set
CONFIG_PHY_MTK_HDMI=m
CONFIG_PHY_MTK_MIPI_DSI=m
+# CONFIG_PHY_MTK_DP is not set
+# CONFIG_PHY_LAN966X_SERDES is not set
# CONFIG_PHY_CPCAP_USB is not set
# CONFIG_PHY_MAPPHONE_MDM6600 is not set
# CONFIG_PHY_OCELOT_SERDES is not set
+# CONFIG_PHY_QCOM_APQ8064_SATA is not set
+# CONFIG_PHY_QCOM_EDP is not set
+# CONFIG_PHY_QCOM_IPQ4019_USB is not set
+# CONFIG_PHY_QCOM_IPQ806X_SATA is not set
+# CONFIG_PHY_QCOM_PCIE2 is not set
+# CONFIG_PHY_QCOM_QMP is not set
+# CONFIG_PHY_QCOM_QUSB2 is not set
# CONFIG_PHY_QCOM_USB_HS is not set
+# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set
# CONFIG_PHY_QCOM_USB_HSIC is not set
+# CONFIG_PHY_QCOM_USB_HS_28NM is not set
+# CONFIG_PHY_QCOM_USB_SS is not set
+# CONFIG_PHY_QCOM_IPQ806X_USB is not set
# CONFIG_PHY_ROCKCHIP_DP is not set
# CONFIG_PHY_ROCKCHIP_DPHY_RX0 is not set
CONFIG_PHY_ROCKCHIP_EMMC=y
@@ -8030,7 +8632,9 @@ CONFIG_PHY_ROCKCHIP_EMMC=y
CONFIG_PHY_ROCKCHIP_INNO_USB2=y
# CONFIG_PHY_ROCKCHIP_INNO_CSIDPHY is not set
# CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY is not set
+# CONFIG_PHY_ROCKCHIP_NANENG_COMBO_PHY is not set
CONFIG_PHY_ROCKCHIP_PCIE=m
+# CONFIG_PHY_ROCKCHIP_SNPS_PCIE3 is not set
# CONFIG_PHY_ROCKCHIP_TYPEC is not set
# CONFIG_PHY_ROCKCHIP_USB is not set
CONFIG_PHY_EXYNOS_DP_VIDEO=y
@@ -8061,11 +8665,19 @@ CONFIG_ARM_PMU_ACPI=y
# CONFIG_ARM_SMMU_V3_PMU is not set
# CONFIG_ARM_DSU_PMU is not set
# CONFIG_FSL_IMX8_DDR_PMU is not set
+# CONFIG_QCOM_L2_PMU is not set
+# CONFIG_QCOM_L3_PMU is not set
CONFIG_THUNDERX2_PMU=m
# CONFIG_XGENE_PMU is not set
# CONFIG_ARM_SPE_PMU is not set
# CONFIG_ARM_DMC620_PMU is not set
+# CONFIG_MARVELL_CN10K_TAD_PMU is not set
+CONFIG_APPLE_M1_CPU_PMU=y
+# CONFIG_ALIBABA_UNCORE_DRW_PMU is not set
# CONFIG_HISI_PMU is not set
+# CONFIG_HISI_PCIE_PMU is not set
+# CONFIG_HNS3_PMU is not set
+# CONFIG_MARVELL_CN10K_DDR_PMU is not set
# end of Performance monitor support
CONFIG_RAS=y
@@ -8074,7 +8686,7 @@ CONFIG_RAS=y
#
# Android
#
-# CONFIG_ANDROID is not set
+# CONFIG_ANDROID_BINDER_IPC is not set
# end of Android
# CONFIG_LIBNVDIMM is not set
@@ -8082,45 +8694,46 @@ CONFIG_DAX=y
# CONFIG_DEV_DAX is not set
CONFIG_NVMEM=y
CONFIG_NVMEM_SYSFS=y
+CONFIG_NVMEM_APPLE_EFUSES=y
# CONFIG_NVMEM_IMX_IIM is not set
# CONFIG_NVMEM_IMX_OCOTP is not set
-# CONFIG_MTK_EFUSE is not set
+# CONFIG_NVMEM_LAYERSCAPE_SFP is not set
+# CONFIG_NVMEM_MESON_EFUSE is not set
+# CONFIG_NVMEM_MESON_MX_EFUSE is not set
+# CONFIG_NVMEM_MTK_EFUSE is not set
+# CONFIG_NVMEM_QCOM_QFPROM is not set
+# CONFIG_NVMEM_RMEM is not set
+# CONFIG_NVMEM_ROCKCHIP_EFUSE is not set
+# CONFIG_NVMEM_ROCKCHIP_OTP is not set
+# CONFIG_NVMEM_SNVS_LPGPR is not set
# CONFIG_NVMEM_SPMI_SDAM is not set
-# CONFIG_ROCKCHIP_EFUSE is not set
-# CONFIG_ROCKCHIP_OTP is not set
# CONFIG_NVMEM_SUNXI_SID is not set
-# CONFIG_UNIPHIER_EFUSE is not set
-# CONFIG_MESON_EFUSE is not set
-# CONFIG_MESON_MX_EFUSE is not set
-# CONFIG_NVMEM_SNVS_LPGPR is not set
-# CONFIG_NVMEM_RMEM is not set
+# CONFIG_NVMEM_U_BOOT_ENV is not set
+# CONFIG_NVMEM_UNIPHIER_EFUSE is not set
#
# HW tracing support
#
# CONFIG_STM is not set
# CONFIG_INTEL_TH is not set
+# CONFIG_HISI_PTT is not set
# end of HW tracing support
# CONFIG_FPGA is not set
# CONFIG_FSI is not set
CONFIG_TEE=y
-
-#
-# TEE drivers
-#
CONFIG_OPTEE=y
-CONFIG_OPTEE_SHM_NUM_PRIV_PAGES=1
-# end of TEE drivers
-
CONFIG_PM_OPP=y
# CONFIG_SIOX is not set
# CONFIG_SLIMBUS is not set
CONFIG_INTERCONNECT=y
# CONFIG_INTERCONNECT_IMX is not set
+# CONFIG_INTERCONNECT_QCOM is not set
# CONFIG_INTERCONNECT_SAMSUNG is not set
# CONFIG_COUNTER is not set
# CONFIG_MOST is not set
+# CONFIG_PECI is not set
+# CONFIG_HTE is not set
# end of Device Drivers
#
@@ -8181,7 +8794,7 @@ CONFIG_F2FS_FS_SECURITY=y
# CONFIG_F2FS_FAULT_INJECTION is not set
# CONFIG_F2FS_FS_COMPRESSION is not set
CONFIG_F2FS_IOSTAT=y
-# CONFIG_FS_DAX is not set
+# CONFIG_F2FS_UNFAIR_RWSEM is not set
CONFIG_FS_POSIX_ACL=y
CONFIG_EXPORTFS=y
# CONFIG_EXPORTFS_BLOCK_OPS is not set
@@ -8216,6 +8829,8 @@ CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y
#
# Caches
#
+CONFIG_NETFS_SUPPORT=m
+# CONFIG_NETFS_STATS is not set
# CONFIG_FSCACHE is not set
# end of Caches
@@ -8264,6 +8879,9 @@ CONFIG_TMPFS_INODE64=y
CONFIG_ARCH_SUPPORTS_HUGETLBFS=y
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
+CONFIG_ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y
+CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y
+# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set
CONFIG_MEMFD_CREATE=y
CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
CONFIG_CONFIGFS_FS=m
@@ -8331,7 +8949,6 @@ CONFIG_NFS_USE_KERNEL_DNS=y
CONFIG_NFS_DISABLE_UDP_SUPPORT=y
CONFIG_NFSD=m
CONFIG_NFSD_V2_ACL=y
-CONFIG_NFSD_V3=y
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
# CONFIG_NFSD_BLOCKLAYOUT is not set
@@ -8428,6 +9045,7 @@ CONFIG_IO_WQ=y
CONFIG_KEYS=y
# CONFIG_KEYS_REQUEST_CACHE is not set
# CONFIG_PERSISTENT_KEYRINGS is not set
+# CONFIG_TRUSTED_KEYS is not set
# CONFIG_ENCRYPTED_KEYS is not set
# CONFIG_KEY_DH_OPERATIONS is not set
# CONFIG_SECURITY_DMESG_RESTRICT is not set
@@ -8437,8 +9055,6 @@ CONFIG_SECURITY=y
# CONFIG_SECURITY_PATH is not set
CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
CONFIG_HARDENED_USERCOPY=y
-CONFIG_HARDENED_USERCOPY_FALLBACK=y
-# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set
CONFIG_FORTIFY_SOURCE=y
# CONFIG_STATIC_USERMODEHELPER is not set
# CONFIG_SECURITY_SMACK is not set
@@ -8465,6 +9081,8 @@ CONFIG_INIT_STACK_NONE=y
# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
# end of Memory initialization
+
+CONFIG_RANDSTRUCT_NONE=y
# end of Kernel hardening options
# end of Security options
@@ -8508,7 +9126,7 @@ CONFIG_CRYPTO_NULL2=y
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
# CONFIG_CRYPTO_TEST is not set
-CONFIG_CRYPTO_SIMD=m
+# end of Crypto core or helper
#
# Public-key cryptography
@@ -8521,84 +9139,95 @@ CONFIG_CRYPTO_ECDH=m
# CONFIG_CRYPTO_ECRDSA is not set
# CONFIG_CRYPTO_SM2 is not set
# CONFIG_CRYPTO_CURVE25519 is not set
+# end of Public-key cryptography
#
-# Authenticated Encryption with Associated Data
+# Block ciphers
#
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
-# CONFIG_CRYPTO_AEGIS128 is not set
-CONFIG_CRYPTO_SEQIV=m
-CONFIG_CRYPTO_ECHAINIV=m
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_AES_TI is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARIA is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+CONFIG_CRYPTO_SM4=m
+# CONFIG_CRYPTO_SM4_GENERIC is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# end of Block ciphers
#
-# Block modes
+# Length-preserving ciphers and modes
#
+# CONFIG_CRYPTO_ADIANTUM is not set
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_CHACHA20=m
CONFIG_CRYPTO_CBC=m
# CONFIG_CRYPTO_CFB is not set
CONFIG_CRYPTO_CTR=m
# CONFIG_CRYPTO_CTS is not set
CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_HCTR2 is not set
+# CONFIG_CRYPTO_KEYWRAP is not set
# CONFIG_CRYPTO_LRW is not set
# CONFIG_CRYPTO_OFB is not set
# CONFIG_CRYPTO_PCBC is not set
CONFIG_CRYPTO_XTS=m
-# CONFIG_CRYPTO_KEYWRAP is not set
CONFIG_CRYPTO_NHPOLY1305=m
-# CONFIG_CRYPTO_ADIANTUM is not set
-CONFIG_CRYPTO_ESSIV=m
+# end of Length-preserving ciphers and modes
#
-# Hash modes
+# AEAD (authenticated encryption with associated data) ciphers
#
-CONFIG_CRYPTO_CMAC=m
-CONFIG_CRYPTO_HMAC=y
-# CONFIG_CRYPTO_XCBC is not set
-# CONFIG_CRYPTO_VMAC is not set
+# CONFIG_CRYPTO_AEGIS128 is not set
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_SEQIV=m
+CONFIG_CRYPTO_ECHAINIV=m
+CONFIG_CRYPTO_ESSIV=m
+# end of AEAD (authenticated encryption with associated data) ciphers
#
-# Digest
+# Hashes, digests, and MACs
#
-CONFIG_CRYPTO_CRC32C=y
-CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_XXHASH=m
CONFIG_CRYPTO_BLAKE2B=m
-CONFIG_CRYPTO_CRCT10DIF=y
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_GHASH=m
-CONFIG_CRYPTO_POLY1305=m
+CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_POLY1305=m
# CONFIG_CRYPTO_RMD160 is not set
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
+# CONFIG_CRYPTO_SM3_GENERIC is not set
# CONFIG_CRYPTO_STREEBOG is not set
+# CONFIG_CRYPTO_VMAC is not set
# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_XCBC is not set
+CONFIG_CRYPTO_XXHASH=m
+# end of Hashes, digests, and MACs
#
-# Ciphers
+# CRCs (cyclic redundancy checks)
#
-CONFIG_CRYPTO_AES=y
-# CONFIG_CRYPTO_AES_TI is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-CONFIG_CRYPTO_ARC4=m
-# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_CAMELLIA is not set
-# CONFIG_CRYPTO_CAST5 is not set
-# CONFIG_CRYPTO_CAST6 is not set
-CONFIG_CRYPTO_DES=m
-# CONFIG_CRYPTO_FCRYPT is not set
-# CONFIG_CRYPTO_KHAZAD is not set
-CONFIG_CRYPTO_CHACHA20=m
-# CONFIG_CRYPTO_SEED is not set
-# CONFIG_CRYPTO_SERPENT is not set
-CONFIG_CRYPTO_SM4=m
-# CONFIG_CRYPTO_TEA is not set
-# CONFIG_CRYPTO_TWOFISH is not set
+CONFIG_CRYPTO_CRC32C=y
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=y
+CONFIG_CRYPTO_CRC64_ROCKSOFT=y
+# end of CRCs (cyclic redundancy checks)
#
# Compression
@@ -8609,9 +9238,10 @@ CONFIG_CRYPTO_LZO=y
# CONFIG_CRYPTO_LZ4 is not set
# CONFIG_CRYPTO_LZ4HC is not set
# CONFIG_CRYPTO_ZSTD is not set
+# end of Compression
#
-# Random Number Generation
+# Random number generation
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DRBG_MENU=m
@@ -8620,6 +9250,11 @@ CONFIG_CRYPTO_DRBG_HMAC=y
# CONFIG_CRYPTO_DRBG_CTR is not set
CONFIG_CRYPTO_DRBG=m
CONFIG_CRYPTO_JITTERENTROPY=m
+# end of Random number generation
+
+#
+# Userspace interface
+#
CONFIG_CRYPTO_USER_API=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
@@ -8627,7 +9262,38 @@ CONFIG_CRYPTO_USER_API_RNG=m
# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y
+# end of Userspace interface
+
CONFIG_CRYPTO_HASH_INFO=y
+CONFIG_CRYPTO_NHPOLY1305_NEON=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+
+#
+# Accelerated Cryptographic Algorithms for CPU (arm64)
+#
+CONFIG_CRYPTO_GHASH_ARM64_CE=m
+CONFIG_CRYPTO_POLY1305_NEON=m
+CONFIG_CRYPTO_SHA1_ARM64_CE=m
+CONFIG_CRYPTO_SHA256_ARM64=m
+CONFIG_CRYPTO_SHA2_ARM64_CE=m
+CONFIG_CRYPTO_SHA512_ARM64=m
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+# CONFIG_CRYPTO_SM3_NEON is not set
+CONFIG_CRYPTO_SM3_ARM64_CE=m
+# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set
+CONFIG_CRYPTO_AES_ARM64=m
+CONFIG_CRYPTO_AES_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=m
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
+CONFIG_CRYPTO_AES_ARM64_BS=m
+CONFIG_CRYPTO_SM4_ARM64_CE=m
+# CONFIG_CRYPTO_SM4_ARM64_CE_BLK is not set
+# CONFIG_CRYPTO_SM4_ARM64_NEON_BLK is not set
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=m
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
+# end of Accelerated Cryptographic Algorithms for CPU (arm64)
+
# CONFIG_CRYPTO_HW is not set
CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
@@ -8636,6 +9302,7 @@ CONFIG_X509_CERTIFICATE_PARSER=y
CONFIG_PKCS7_MESSAGE_PARSER=y
# CONFIG_PKCS7_TEST_KEY is not set
# CONFIG_SIGNED_PE_FILE_VERIFICATION is not set
+# CONFIG_FIPS_SIGNATURE_SELFTEST is not set
#
# Certificates for signature checking
@@ -8664,7 +9331,6 @@ CONFIG_HAVE_ARCH_BITREVERSE=y
CONFIG_GENERIC_STRNCPY_FROM_USER=y
CONFIG_GENERIC_STRNLEN_USER=y
CONFIG_GENERIC_NET_UTILS=y
-CONFIG_GENERIC_FIND_FIRST_BIT=y
CONFIG_CORDIC=m
# CONFIG_PRIME_NUMBERS is not set
CONFIG_RATIONAL=y
@@ -8677,6 +9343,7 @@ CONFIG_ARCH_USE_SYM_ANNOTATIONS=y
#
# Crypto library routines
#
+CONFIG_CRYPTO_LIB_UTILS=y
CONFIG_CRYPTO_LIB_AES=y
CONFIG_CRYPTO_LIB_ARC4=m
CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
@@ -8691,14 +9358,14 @@ CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m
CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m
CONFIG_CRYPTO_LIB_POLY1305=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
+CONFIG_CRYPTO_LIB_SHA1=y
CONFIG_CRYPTO_LIB_SHA256=y
-CONFIG_CRYPTO_LIB_SM4=m
# end of Crypto library routines
-CONFIG_LIB_MEMNEQ=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=y
CONFIG_CRC_T10DIF=y
+CONFIG_CRC64_ROCKSOFT=y
CONFIG_CRC_ITU_T=y
CONFIG_CRC32=y
# CONFIG_CRC32_SELFTEST is not set
@@ -8706,11 +9373,11 @@ CONFIG_CRC32_SLICEBY8=y
# CONFIG_CRC32_SLICEBY4 is not set
# CONFIG_CRC32_SARWATE is not set
# CONFIG_CRC32_BIT is not set
-CONFIG_CRC64=m
+CONFIG_CRC64=y
# CONFIG_CRC4 is not set
CONFIG_CRC7=y
CONFIG_LIBCRC32C=y
-# CONFIG_CRC8 is not set
+CONFIG_CRC8=m
CONFIG_XXHASH=y
CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
# CONFIG_RANDOM32_SELFTEST is not set
@@ -8719,6 +9386,7 @@ CONFIG_ZLIB_DEFLATE=m
CONFIG_LZO_COMPRESS=y
CONFIG_LZO_DECOMPRESS=y
CONFIG_LZ4_DECOMPRESS=y
+CONFIG_ZSTD_COMMON=y
CONFIG_ZSTD_COMPRESS=m
CONFIG_ZSTD_DECOMPRESS=y
CONFIG_XZ_DEC=y
@@ -8728,6 +9396,7 @@ CONFIG_XZ_DEC_IA64=y
CONFIG_XZ_DEC_ARM=y
CONFIG_XZ_DEC_ARMTHUMB=y
CONFIG_XZ_DEC_SPARC=y
+# CONFIG_XZ_DEC_MICROLZMA is not set
CONFIG_XZ_DEC_BCJ=y
# CONFIG_XZ_DEC_TEST is not set
CONFIG_DECOMPRESS_GZIP=y
@@ -8762,7 +9431,6 @@ CONFIG_SWIOTLB=y
# CONFIG_DMA_RESTRICTED_POOL is not set
CONFIG_DMA_NONCOHERENT_MMAP=y
CONFIG_DMA_COHERENT_POOL=y
-CONFIG_DMA_REMAP=y
CONFIG_DMA_DIRECT_REMAP=y
CONFIG_DMA_CMA=y
CONFIG_DMA_PERNUMA_CMA=y
@@ -8780,6 +9448,7 @@ CONFIG_CMA_ALIGNMENT=8
# CONFIG_DMA_MAP_BENCHMARK is not set
CONFIG_SGL_ALLOC=y
CONFIG_CHECK_SIGNATURE=y
+# CONFIG_FORCE_NR_CPUS is not set
CONFIG_CPU_RMAP=y
CONFIG_DQL=y
CONFIG_GLOB=y
@@ -8804,6 +9473,7 @@ CONFIG_ARCH_STACKWALK=y
CONFIG_SBITMAP=y
# end of Library routines
+CONFIG_GENERIC_IOREMAP=y
CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y
CONFIG_PLDMFW=y
@@ -8827,12 +9497,17 @@ CONFIG_SYMBOLIC_ERRNAME=y
CONFIG_DEBUG_BUGVERBOSE=y
# end of printk and dmesg options
-CONFIG_AS_HAS_NON_CONST_LEB128=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_MISC=y
#
# Compile-time checks and compiler options
#
-# CONFIG_DEBUG_INFO is not set
+CONFIG_AS_HAS_NON_CONST_LEB128=y
+CONFIG_DEBUG_INFO_NONE=y
+# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set
+# CONFIG_DEBUG_INFO_DWARF4 is not set
+# CONFIG_DEBUG_INFO_DWARF5 is not set
CONFIG_FRAME_WARN=2048
# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_READABLE_ASM is not set
@@ -8860,17 +9535,25 @@ CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
# CONFIG_UBSAN is not set
+CONFIG_HAVE_ARCH_KCSAN=y
# end of Generic Kernel Debugging Instruments
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MISC=y
+#
+# Networking Debugging
+#
+# CONFIG_NET_DEV_REFCNT_TRACKER is not set
+# CONFIG_NET_NS_REFCNT_TRACKER is not set
+# CONFIG_DEBUG_NET is not set
+# end of Networking Debugging
#
# Memory Debugging
#
# CONFIG_PAGE_EXTENSION is not set
# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_SLUB_DEBUG is not set
# CONFIG_PAGE_OWNER is not set
+# CONFIG_PAGE_TABLE_CHECK is not set
# CONFIG_PAGE_POISONING is not set
# CONFIG_DEBUG_RODATA_TEST is not set
CONFIG_ARCH_HAS_DEBUG_WX=y
@@ -8879,7 +9562,7 @@ CONFIG_GENERIC_PTDUMP=y
CONFIG_PTDUMP_CORE=y
# CONFIG_PTDUMP_DEBUGFS is not set
# CONFIG_DEBUG_OBJECTS is not set
-# CONFIG_SLUB_STATS is not set
+# CONFIG_SHRINKER_DEBUG is not set
CONFIG_HAVE_DEBUG_KMEMLEAK=y
# CONFIG_DEBUG_KMEMLEAK is not set
# CONFIG_DEBUG_STACK_USAGE is not set
@@ -8958,6 +9641,7 @@ CONFIG_STACKTRACE=y
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
# CONFIG_BUG_ON_DATA_CORRUPTION is not set
+# CONFIG_DEBUG_MAPLE_TREE is not set
# end of Debug kernel data structures
# CONFIG_DEBUG_CREDENTIALS is not set
@@ -8969,6 +9653,7 @@ CONFIG_STACKTRACE=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_REF_SCALE_TEST is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=21
+CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0
# CONFIG_RCU_TRACE is not set
# CONFIG_RCU_EQS_DEBUG is not set
# end of RCU Debugging
@@ -9006,13 +9691,12 @@ CONFIG_FUNCTION_ERROR_INJECTION=y
# CONFIG_FAULT_INJECTION is not set
CONFIG_ARCH_HAS_KCOV=y
CONFIG_CC_HAS_SANCOV_TRACE_PC=y
-# CONFIG_KCOV is not set
CONFIG_RUNTIME_TESTING_MENU=y
# CONFIG_LKDTM is not set
# CONFIG_TEST_MIN_HEAP is not set
# CONFIG_TEST_DIV64 is not set
-# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_TEST_REF_TRACKER is not set
# CONFIG_RBTREE_TEST is not set
# CONFIG_REED_SOLOMON_TEST is not set
# CONFIG_INTERVAL_TREE_TEST is not set
@@ -9029,9 +9713,9 @@ CONFIG_RUNTIME_TESTING_MENU=y
# CONFIG_TEST_BITMAP is not set
# CONFIG_TEST_UUID is not set
# CONFIG_TEST_XARRAY is not set
-# CONFIG_TEST_OVERFLOW is not set
+# CONFIG_TEST_MAPLE_TREE is not set
# CONFIG_TEST_RHASHTABLE is not set
-# CONFIG_TEST_HASH is not set
+# CONFIG_TEST_SIPHASH is not set
# CONFIG_TEST_IDA is not set
# CONFIG_TEST_LKM is not set
# CONFIG_TEST_BITOPS is not set
@@ -9046,11 +9730,15 @@ CONFIG_RUNTIME_TESTING_MENU=y
# CONFIG_TEST_STATIC_KEYS is not set
# CONFIG_TEST_KMOD is not set
# CONFIG_TEST_MEMCAT_P is not set
-# CONFIG_TEST_STACKINIT is not set
# CONFIG_TEST_MEMINIT is not set
# CONFIG_TEST_FREE_PAGES is not set
CONFIG_ARCH_USE_MEMTEST=y
# CONFIG_MEMTEST is not set
# CONFIG_HYPERV_TESTING is not set
# end of Kernel Testing and Coverage
+
+#
+# Rust hacking
+#
+# end of Rust hacking
# end of Kernel hacking