author     Alice Ferrazzi <alicef@gentoo.org>  2024-04-18 14:35:23 +0900
committer  Alice Ferrazzi <alicef@gentoo.org>  2024-04-18 14:35:23 +0900
commit     b3f5d49128d1354118e104aacca7c92658e5de94 (patch)
tree       c9be98892d3798875f9e03f081c5e2beee406a41
parent     Linux patch 6.8.1: fix patch filename (diff)
download   linux-patches-b3f5d49128d1354118e104aacca7c92658e5de94.tar.gz
           linux-patches-b3f5d49128d1354118e104aacca7c92658e5de94.tar.bz2
           linux-patches-b3f5d49128d1354118e104aacca7c92658e5de94.zip
Linux patch 6.8.7 (tag: 6.8-10)
Signed-off-by: Alice Ferrazzi <alicef@gentoo.org>
-rw-r--r--  0000_README             |    4
-rw-r--r--  1006_linux-6.8.7.patch  | 6318
2 files changed, 6322 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 98a6896a..1380bd5c 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch: 1005_linux-6.8.6.patch
From: https://www.kernel.org
Desc: Linux 6.8.6
+Patch: 1006_linux-6.8.7.patch
+From: https://www.kernel.org
+Desc: Linux 6.8.7
+
Patch: 1510_fs-enable-link-security-restrictions-by-default.patch
From: http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
Desc: Enable link security restrictions by default.
diff --git a/1006_linux-6.8.7.patch b/1006_linux-6.8.7.patch
new file mode 100644
index 00000000..09ffaa63
--- /dev/null
+++ b/1006_linux-6.8.7.patch
@@ -0,0 +1,6318 @@
+diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
+index 9edb2860a3e19..e0a1be97fa759 100644
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -439,12 +439,12 @@ The possible values in this file are:
+ - System is protected by retpoline
+ * - BHI: BHI_DIS_S
+ - System is protected by BHI_DIS_S
+- * - BHI: SW loop; KVM SW loop
++ * - BHI: SW loop, KVM SW loop
+ - System is protected by software clearing sequence
+- * - BHI: Syscall hardening
+- - Syscalls are hardened against BHI
+- * - BHI: Syscall hardening; KVM: SW loop
+- - System is protected from userspace attacks by syscall hardening; KVM is protected by software clearing sequence
++ * - BHI: Vulnerable
++ - System is vulnerable to BHI
++ * - BHI: Vulnerable, KVM: SW loop
++ - System is vulnerable; KVM is protected by software clearing sequence
+
+ Full mitigation might require a microcode update from the CPU
+ vendor. When the necessary microcode is not available, the kernel will
+@@ -661,18 +661,14 @@ kernel command line.
+ spectre_bhi=
+
+ [X86] Control mitigation of Branch History Injection
+- (BHI) vulnerability. Syscalls are hardened against BHI
+- regardless of this setting. This setting affects the deployment
++ (BHI) vulnerability. This setting affects the deployment
+ of the HW BHI control and the SW BHB clearing sequence.
+
+ on
+- unconditionally enable.
++ (default) Enable the HW or SW mitigation as
++ needed.
+ off
+- unconditionally disable.
+- auto
+- enable if hardware mitigation
+- control(BHI_DIS_S) is available, otherwise
+- enable alternate mitigation in KVM.
++ Disable the mitigation.
+
+ For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt
+
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 95efaccca4539..31fdaf4fe9dd8 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3419,6 +3419,7 @@
+ reg_file_data_sampling=off [X86]
+ retbleed=off [X86]
+ spec_store_bypass_disable=off [X86,PPC]
++ spectre_bhi=off [X86]
+ spectre_v2_user=off [X86]
+ srbds=off [X86,INTEL]
+ ssbd=force-off [ARM64]
+@@ -6032,16 +6033,13 @@
+ See Documentation/admin-guide/laptops/sonypi.rst
+
+ spectre_bhi= [X86] Control mitigation of Branch History Injection
+- (BHI) vulnerability. Syscalls are hardened against BHI
+- reglardless of this setting. This setting affects the
++ (BHI) vulnerability. This setting affects the
+ deployment of the HW BHI control and the SW BHB
+ clearing sequence.
+
+- on - unconditionally enable.
+- off - unconditionally disable.
+- auto - (default) enable hardware mitigation
+- (BHI_DIS_S) if available, otherwise enable
+- alternate mitigation in KVM.
++ on - (default) Enable the HW or SW mitigation
++ as needed.
++ off - Disable the mitigation.
+
+ spectre_v2= [X86] Control mitigation of Spectre variant 2
+ (indirect branch speculation) vulnerability.
+diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8150-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8150-mdss.yaml
+index c0d6a4fdff97e..e6dc5494baee2 100644
+--- a/Documentation/devicetree/bindings/display/msm/qcom,sm8150-mdss.yaml
++++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8150-mdss.yaml
+@@ -53,6 +53,15 @@ patternProperties:
+ compatible:
+ const: qcom,sm8150-dpu
+
++ "^displayport-controller@[0-9a-f]+$":
++ type: object
++ additionalProperties: true
++
++ properties:
++ compatible:
++ contains:
++ const: qcom,sm8150-dp
++
+ "^dsi@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+diff --git a/Makefile b/Makefile
+index c426d47f4b7bf..e6c0a00722eae 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 8
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+
+diff --git a/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts b/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
+index ba7231b364bb8..7bab113ca6da7 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
+@@ -210,6 +210,7 @@ ov2680_to_mipi: endpoint {
+ remote-endpoint = <&mipi_from_sensor>;
+ clock-lanes = <0>;
+ data-lanes = <1>;
++ link-frequencies = /bits/ 64 <330000000>;
+ };
+ };
+ };
+diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
+index 31755a378c736..ff2a4a4d82204 100644
+--- a/arch/arm/mach-omap2/board-n8x0.c
++++ b/arch/arm/mach-omap2/board-n8x0.c
+@@ -79,10 +79,8 @@ static struct musb_hdrc_platform_data tusb_data = {
+ static struct gpiod_lookup_table tusb_gpio_table = {
+ .dev_id = "musb-tusb",
+ .table = {
+- GPIO_LOOKUP("gpio-0-15", 0, "enable",
+- GPIO_ACTIVE_HIGH),
+- GPIO_LOOKUP("gpio-48-63", 10, "int",
+- GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("gpio-0-31", 0, "enable", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("gpio-32-63", 26, "int", GPIO_ACTIVE_HIGH),
+ { }
+ },
+ };
+@@ -140,12 +138,11 @@ static int slot1_cover_open;
+ static int slot2_cover_open;
+ static struct device *mmc_device;
+
+-static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = {
++static struct gpiod_lookup_table nokia800_mmc_gpio_table = {
+ .dev_id = "mmci-omap.0",
+ .table = {
+ /* Slot switch, GPIO 96 */
+- GPIO_LOOKUP("gpio-80-111", 16,
+- "switch", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("gpio-96-127", 0, "switch", GPIO_ACTIVE_HIGH),
+ { }
+ },
+ };
+@@ -153,12 +150,12 @@ static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = {
+ static struct gpiod_lookup_table nokia810_mmc_gpio_table = {
+ .dev_id = "mmci-omap.0",
+ .table = {
++ /* Slot switch, GPIO 96 */
++ GPIO_LOOKUP("gpio-96-127", 0, "switch", GPIO_ACTIVE_HIGH),
+ /* Slot index 1, VSD power, GPIO 23 */
+- GPIO_LOOKUP_IDX("gpio-16-31", 7,
+- "vsd", 1, GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP_IDX("gpio-0-31", 23, "vsd", 1, GPIO_ACTIVE_HIGH),
+ /* Slot index 1, VIO power, GPIO 9 */
+- GPIO_LOOKUP_IDX("gpio-0-15", 9,
+- "vio", 1, GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP_IDX("gpio-0-31", 9, "vio", 1, GPIO_ACTIVE_HIGH),
+ { }
+ },
+ };
+@@ -415,8 +412,6 @@ static struct omap_mmc_platform_data *mmc_data[OMAP24XX_NR_MMC];
+
+ static void __init n8x0_mmc_init(void)
+ {
+- gpiod_add_lookup_table(&nokia8xx_mmc_gpio_table);
+-
+ if (board_is_n810()) {
+ mmc1_data.slots[0].name = "external";
+
+@@ -429,6 +424,8 @@ static void __init n8x0_mmc_init(void)
+ mmc1_data.slots[1].name = "internal";
+ mmc1_data.slots[1].ban_openended = 1;
+ gpiod_add_lookup_table(&nokia810_mmc_gpio_table);
++ } else {
++ gpiod_add_lookup_table(&nokia800_mmc_gpio_table);
+ }
+
+ mmc1_data.nr_slots = 2;
+diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
+index 3c42240e78e24..4aaf5a0c1ed8a 100644
+--- a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
+@@ -41,7 +41,7 @@ usbotg1: usb@5b0d0000 {
+ interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,usbphy = <&usbphy1>;
+ fsl,usbmisc = <&usbmisc1 0>;
+- clocks = <&usb2_lpcg 0>;
++ clocks = <&usb2_lpcg IMX_LPCG_CLK_6>;
+ ahb-burst-config = <0x0>;
+ tx-burst-size-dword = <0x10>;
+ rx-burst-size-dword = <0x10>;
+@@ -58,7 +58,7 @@ usbmisc1: usbmisc@5b0d0200 {
+ usbphy1: usbphy@5b100000 {
+ compatible = "fsl,imx7ulp-usbphy";
+ reg = <0x5b100000 0x1000>;
+- clocks = <&usb2_lpcg 1>;
++ clocks = <&usb2_lpcg IMX_LPCG_CLK_7>;
+ power-domains = <&pd IMX_SC_R_USB_0_PHY>;
+ status = "disabled";
+ };
+@@ -67,8 +67,8 @@ usdhc1: mmc@5b010000 {
+ interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b010000 0x10000>;
+ clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>,
+- <&sdhc0_lpcg IMX_LPCG_CLK_0>,
+- <&sdhc0_lpcg IMX_LPCG_CLK_5>;
++ <&sdhc0_lpcg IMX_LPCG_CLK_5>,
++ <&sdhc0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "ahb", "per";
+ power-domains = <&pd IMX_SC_R_SDHC_0>;
+ status = "disabled";
+@@ -78,8 +78,8 @@ usdhc2: mmc@5b020000 {
+ interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b020000 0x10000>;
+ clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>,
+- <&sdhc1_lpcg IMX_LPCG_CLK_0>,
+- <&sdhc1_lpcg IMX_LPCG_CLK_5>;
++ <&sdhc1_lpcg IMX_LPCG_CLK_5>,
++ <&sdhc1_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "ahb", "per";
+ power-domains = <&pd IMX_SC_R_SDHC_1>;
+ fsl,tuning-start-tap = <20>;
+@@ -91,8 +91,8 @@ usdhc3: mmc@5b030000 {
+ interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b030000 0x10000>;
+ clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>,
+- <&sdhc2_lpcg IMX_LPCG_CLK_0>,
+- <&sdhc2_lpcg IMX_LPCG_CLK_5>;
++ <&sdhc2_lpcg IMX_LPCG_CLK_5>,
++ <&sdhc2_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "ahb", "per";
+ power-domains = <&pd IMX_SC_R_SDHC_2>;
+ status = "disabled";
+diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
+index b0bb77150adcc..67b3c7573233a 100644
+--- a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
+@@ -27,8 +27,8 @@ lpspi0: spi@5a000000 {
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+- clocks = <&spi0_lpcg 0>,
+- <&spi0_lpcg 1>;
++ clocks = <&spi0_lpcg IMX_LPCG_CLK_0>,
++ <&spi0_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_SPI_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <60000000>;
+@@ -43,8 +43,8 @@ lpspi1: spi@5a010000 {
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+- clocks = <&spi1_lpcg 0>,
+- <&spi1_lpcg 1>;
++ clocks = <&spi1_lpcg IMX_LPCG_CLK_0>,
++ <&spi1_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_SPI_1 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <60000000>;
+@@ -59,8 +59,8 @@ lpspi2: spi@5a020000 {
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+- clocks = <&spi2_lpcg 0>,
+- <&spi2_lpcg 1>;
++ clocks = <&spi2_lpcg IMX_LPCG_CLK_0>,
++ <&spi2_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_SPI_2 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <60000000>;
+@@ -75,8 +75,8 @@ lpspi3: spi@5a030000 {
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+- clocks = <&spi3_lpcg 0>,
+- <&spi3_lpcg 1>;
++ clocks = <&spi3_lpcg IMX_LPCG_CLK_0>,
++ <&spi3_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_SPI_3 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <60000000>;
+@@ -144,8 +144,8 @@ adma_pwm: pwm@5a190000 {
+ compatible = "fsl,imx8qxp-pwm", "fsl,imx27-pwm";
+ reg = <0x5a190000 0x1000>;
+ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&adma_pwm_lpcg 1>,
+- <&adma_pwm_lpcg 0>;
++ clocks = <&adma_pwm_lpcg IMX_LPCG_CLK_4>,
++ <&adma_pwm_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&clk IMX_SC_R_LCD_0_PWM_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+@@ -377,8 +377,8 @@ adc0: adc@5a880000 {
+ reg = <0x5a880000 0x10000>;
+ interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+- clocks = <&adc0_lpcg 0>,
+- <&adc0_lpcg 1>;
++ clocks = <&adc0_lpcg IMX_LPCG_CLK_0>,
++ <&adc0_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_ADC_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+@@ -392,8 +392,8 @@ adc1: adc@5a890000 {
+ reg = <0x5a890000 0x10000>;
+ interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+- clocks = <&adc1_lpcg 0>,
+- <&adc1_lpcg 1>;
++ clocks = <&adc1_lpcg IMX_LPCG_CLK_0>,
++ <&adc1_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_ADC_1 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+@@ -406,8 +406,8 @@ flexcan1: can@5a8d0000 {
+ reg = <0x5a8d0000 0x10000>;
+ interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+- clocks = <&can0_lpcg 1>,
+- <&can0_lpcg 0>;
++ clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
++ <&can0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <40000000>;
+@@ -427,8 +427,8 @@ flexcan2: can@5a8e0000 {
+ * CAN1 shares CAN0's clock and to enable CAN0's clock it
+ * has to be powered on.
+ */
+- clocks = <&can0_lpcg 1>,
+- <&can0_lpcg 0>;
++ clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
++ <&can0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <40000000>;
+@@ -448,8 +448,8 @@ flexcan3: can@5a8f0000 {
+ * CAN2 shares CAN0's clock and to enable CAN0's clock it
+ * has to be powered on.
+ */
+- clocks = <&can0_lpcg 1>,
+- <&can0_lpcg 0>;
++ clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
++ <&can0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <40000000>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
+index 7e510b21bbac5..764c1a08e3b11 100644
+--- a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
+@@ -25,8 +25,8 @@ lsio_pwm0: pwm@5d000000 {
+ compatible = "fsl,imx27-pwm";
+ reg = <0x5d000000 0x10000>;
+ clock-names = "ipg", "per";
+- clocks = <&pwm0_lpcg 4>,
+- <&pwm0_lpcg 1>;
++ clocks = <&pwm0_lpcg IMX_LPCG_CLK_6>,
++ <&pwm0_lpcg IMX_LPCG_CLK_1>;
+ assigned-clocks = <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ #pwm-cells = <3>;
+@@ -38,8 +38,8 @@ lsio_pwm1: pwm@5d010000 {
+ compatible = "fsl,imx27-pwm";
+ reg = <0x5d010000 0x10000>;
+ clock-names = "ipg", "per";
+- clocks = <&pwm1_lpcg 4>,
+- <&pwm1_lpcg 1>;
++ clocks = <&pwm1_lpcg IMX_LPCG_CLK_6>,
++ <&pwm1_lpcg IMX_LPCG_CLK_1>;
+ assigned-clocks = <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ #pwm-cells = <3>;
+@@ -51,8 +51,8 @@ lsio_pwm2: pwm@5d020000 {
+ compatible = "fsl,imx27-pwm";
+ reg = <0x5d020000 0x10000>;
+ clock-names = "ipg", "per";
+- clocks = <&pwm2_lpcg 4>,
+- <&pwm2_lpcg 1>;
++ clocks = <&pwm2_lpcg IMX_LPCG_CLK_6>,
++ <&pwm2_lpcg IMX_LPCG_CLK_1>;
+ assigned-clocks = <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ #pwm-cells = <3>;
+@@ -64,8 +64,8 @@ lsio_pwm3: pwm@5d030000 {
+ compatible = "fsl,imx27-pwm";
+ reg = <0x5d030000 0x10000>;
+ clock-names = "ipg", "per";
+- clocks = <&pwm3_lpcg 4>,
+- <&pwm3_lpcg 1>;
++ clocks = <&pwm3_lpcg IMX_LPCG_CLK_6>,
++ <&pwm3_lpcg IMX_LPCG_CLK_1>;
+ assigned-clocks = <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ #pwm-cells = <3>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
+index 41c79d2ebdd62..f24b14744799e 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
+@@ -14,6 +14,7 @@ connector {
+ pinctrl-0 = <&pinctrl_usbcon1>;
+ type = "micro";
+ label = "otg";
++ vbus-supply = <&reg_usb1_vbus>;
+ id-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+
+ port {
+@@ -183,7 +184,6 @@ &usb3_0 {
+ };
+
+ &usb3_phy0 {
+- vbus-supply = <&reg_usb1_vbus>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+index d5c400b355af5..f5491a608b2f3 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+@@ -14,6 +14,7 @@ connector {
+ pinctrl-0 = <&pinctrl_usbcon1>;
+ type = "micro";
+ label = "otg";
++ vbus-supply = <&reg_usb1_vbus>;
+ id-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+
+ port {
+@@ -202,7 +203,6 @@ &usb3_0 {
+ };
+
+ &usb3_phy0 {
+- vbus-supply = <&reg_usb1_vbus>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
+index cafc1383115ab..cd96f9f75da16 100644
+--- a/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
+@@ -127,15 +127,15 @@ &flexcan1 {
+ };
+
+ &flexcan2 {
+- clocks = <&can1_lpcg 1>,
+- <&can1_lpcg 0>;
++ clocks = <&can1_lpcg IMX_LPCG_CLK_4>,
++ <&can1_lpcg IMX_LPCG_CLK_0>;
+ assigned-clocks = <&clk IMX_SC_R_CAN_1 IMX_SC_PM_CLK_PER>;
+ fsl,clk-source = /bits/ 8 <1>;
+ };
+
+ &flexcan3 {
+- clocks = <&can2_lpcg 1>,
+- <&can2_lpcg 0>;
++ clocks = <&can2_lpcg IMX_LPCG_CLK_4>,
++ <&can2_lpcg IMX_LPCG_CLK_0>;
+ assigned-clocks = <&clk IMX_SC_R_CAN_2 IMX_SC_PM_CLK_PER>;
+ fsl,clk-source = /bits/ 8 <1>;
+ };
+diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
+index 1deb5d789c2e2..bfeb54f3a971f 100644
+--- a/arch/arm64/include/asm/tlbflush.h
++++ b/arch/arm64/include/asm/tlbflush.h
+@@ -161,12 +161,18 @@ static inline unsigned long get_trans_granule(void)
+ #define MAX_TLBI_RANGE_PAGES __TLBI_RANGE_PAGES(31, 3)
+
+ /*
+- * Generate 'num' values from -1 to 30 with -1 rejected by the
+- * __flush_tlb_range() loop below.
++ * Generate 'num' values from -1 to 31 with -1 rejected by the
++ * __flush_tlb_range() loop below. Its return value is only
++ * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
++ * 'pages' is more than that, you must iterate over the overall
++ * range.
+ */
+-#define TLBI_RANGE_MASK GENMASK_ULL(4, 0)
+-#define __TLBI_RANGE_NUM(pages, scale) \
+- ((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
++#define __TLBI_RANGE_NUM(pages, scale) \
++ ({ \
++ int __pages = min((pages), \
++ __TLBI_RANGE_PAGES(31, (scale))); \
++ (__pages >> (5 * (scale) + 1)) - 1; \
++ })
+
+ /*
+ * TLB Invalidation
+@@ -379,10 +385,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
+ * 3. If there is 1 page remaining, flush it through non-range operations. Range
+ * operations can only span an even number of pages. We save this for last to
+ * ensure 64KB start alignment is maintained for the LPA2 case.
+- *
+- * Note that certain ranges can be represented by either num = 31 and
+- * scale or num = 0 and scale + 1. The loop below favours the latter
+- * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
+ */
+ #define __flush_tlb_range_op(op, start, pages, stride, \
+ asid, tlb_level, tlbi_user, lpa2) \
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 890ba8e48bc39..b07f8b007ed9b 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2612,31 +2612,16 @@ config MITIGATION_RFDS
+ stored in floating point, vector and integer registers.
+ See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>
+
+-choice
+- prompt "Clear branch history"
++config MITIGATION_SPECTRE_BHI
++ bool "Mitigate Spectre-BHB (Branch History Injection)"
+ depends on CPU_SUP_INTEL
+- default SPECTRE_BHI_ON
++ default y
+ help
+ Enable BHI mitigations. BHI attacks are a form of Spectre V2 attacks
+ where the branch history buffer is poisoned to speculatively steer
+ indirect branches.
+ See <file:Documentation/admin-guide/hw-vuln/spectre.rst>
+
+-config SPECTRE_BHI_ON
+- bool "on"
+- help
+- Equivalent to setting spectre_bhi=on command line parameter.
+-config SPECTRE_BHI_OFF
+- bool "off"
+- help
+- Equivalent to setting spectre_bhi=off command line parameter.
+-config SPECTRE_BHI_AUTO
+- bool "auto"
+- help
+- Equivalent to setting spectre_bhi=auto command line parameter.
+-
+-endchoice
+-
+ endif
+
+ config ARCH_HAS_ADD_PAGES
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 09050641ce5d3..5b0dd07b1ef19 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1644,6 +1644,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
+ while (++i < cpuc->n_events) {
+ cpuc->event_list[i-1] = cpuc->event_list[i];
+ cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
++ cpuc->assign[i-1] = cpuc->assign[i];
+ }
+ cpuc->event_constraint[i-1] = NULL;
+ --cpuc->n_events;
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 9d159b771dc81..dddd3fc195efa 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -13,6 +13,7 @@
+ #include <asm/mpspec.h>
+ #include <asm/msr.h>
+ #include <asm/hardirq.h>
++#include <asm/io.h>
+
+ #define ARCH_APICTIMER_STOPS_ON_C3 1
+
+@@ -96,7 +97,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
+
+ static inline u32 native_apic_mem_read(u32 reg)
+ {
+- return *((volatile u32 *)(APIC_BASE + reg));
++ return readl((void __iomem *)(APIC_BASE + reg));
+ }
+
+ static inline void native_apic_mem_eoi(void)
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 4667bc4b00ab8..75bd5ac7ac6a9 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1724,11 +1724,11 @@ static int x2apic_state;
+
+ static bool x2apic_hw_locked(void)
+ {
+- u64 ia32_cap;
++ u64 x86_arch_cap_msr;
+ u64 msr;
+
+- ia32_cap = x86_read_arch_cap_msr();
+- if (ia32_cap & ARCH_CAP_XAPIC_DISABLE) {
++ x86_arch_cap_msr = x86_read_arch_cap_msr();
++ if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
+ rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
+ return (msr & LEGACY_XAPIC_DISABLED);
+ }
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 0d7238d88b38a..cbc8c88144e47 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -61,6 +61,8 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
+ u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+
++static u64 __ro_after_init x86_arch_cap_msr;
++
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+
+ void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
+@@ -144,6 +146,8 @@ void __init cpu_select_mitigations(void)
+ x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
+ }
+
++ x86_arch_cap_msr = x86_read_arch_cap_msr();
++
+ /* Select the proper CPU mitigations before patching alternatives: */
+ spectre_v1_select_mitigation();
+ spectre_v2_select_mitigation();
+@@ -301,8 +305,6 @@ static const char * const taa_strings[] = {
+
+ static void __init taa_select_mitigation(void)
+ {
+- u64 ia32_cap;
+-
+ if (!boot_cpu_has_bug(X86_BUG_TAA)) {
+ taa_mitigation = TAA_MITIGATION_OFF;
+ return;
+@@ -341,9 +343,8 @@ static void __init taa_select_mitigation(void)
+ * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+ * update is required.
+ */
+- ia32_cap = x86_read_arch_cap_msr();
+- if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+- !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
++ if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
++ !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+ /*
+@@ -401,8 +402,6 @@ static const char * const mmio_strings[] = {
+
+ static void __init mmio_select_mitigation(void)
+ {
+- u64 ia32_cap;
+-
+ if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
+ boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
+ cpu_mitigations_off()) {
+@@ -413,8 +412,6 @@ static void __init mmio_select_mitigation(void)
+ if (mmio_mitigation == MMIO_MITIGATION_OFF)
+ return;
+
+- ia32_cap = x86_read_arch_cap_msr();
+-
+ /*
+ * Enable CPU buffer clear mitigation for host and VMM, if also affected
+ * by MDS or TAA. Otherwise, enable mitigation for VMM only.
+@@ -437,7 +434,7 @@ static void __init mmio_select_mitigation(void)
+ * be propagated to uncore buffers, clearing the Fill buffers on idle
+ * is required irrespective of SMT state.
+ */
+- if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
++ if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
+ static_branch_enable(&mds_idle_clear);
+
+ /*
+@@ -447,10 +444,10 @@ static void __init mmio_select_mitigation(void)
+ * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
+ * affected systems.
+ */
+- if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
++ if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
+ (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
+ boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
+- !(ia32_cap & ARCH_CAP_MDS_NO)))
++ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
+ mmio_mitigation = MMIO_MITIGATION_VERW;
+ else
+ mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
+@@ -508,7 +505,7 @@ static void __init rfds_select_mitigation(void)
+ if (rfds_mitigation == RFDS_MITIGATION_OFF)
+ return;
+
+- if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
++ if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ else
+ rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
+@@ -659,8 +656,6 @@ void update_srbds_msr(void)
+
+ static void __init srbds_select_mitigation(void)
+ {
+- u64 ia32_cap;
+-
+ if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ return;
+
+@@ -669,8 +664,7 @@ static void __init srbds_select_mitigation(void)
+ * are only exposed to SRBDS when TSX is enabled or when CPU is affected
+ * by Processor MMIO Stale Data vulnerability.
+ */
+- ia32_cap = x86_read_arch_cap_msr();
+- if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
++ if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+ !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+ srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
+ else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+@@ -813,7 +807,7 @@ static void __init gds_select_mitigation(void)
+ /* Will verify below that mitigation _can_ be disabled */
+
+ /* No microcode */
+- if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
++ if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
+ if (gds_mitigation == GDS_MITIGATION_FORCE) {
+ /*
+ * This only needs to be done on the boot CPU so do it
+@@ -1543,20 +1537,25 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
+ return SPECTRE_V2_RETPOLINE;
+ }
+
++static bool __ro_after_init rrsba_disabled;
++
+ /* Disable in-kernel use of non-RSB RET predictors */
+ static void __init spec_ctrl_disable_kernel_rrsba(void)
+ {
+- u64 ia32_cap;
++ if (rrsba_disabled)
++ return;
+
+- if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
++ if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
++ rrsba_disabled = true;
+ return;
++ }
+
+- ia32_cap = x86_read_arch_cap_msr();
++ if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
++ return;
+
+- if (ia32_cap & ARCH_CAP_RRSBA) {
+- x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+- update_spec_ctrl(x86_spec_ctrl_base);
+- }
++ x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
++ update_spec_ctrl(x86_spec_ctrl_base);
++ rrsba_disabled = true;
+ }
+
+ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
+@@ -1625,13 +1624,10 @@ static bool __init spec_ctrl_bhi_dis(void)
+ enum bhi_mitigations {
+ BHI_MITIGATION_OFF,
+ BHI_MITIGATION_ON,
+- BHI_MITIGATION_AUTO,
+ };
+
+ static enum bhi_mitigations bhi_mitigation __ro_after_init =
+- IS_ENABLED(CONFIG_SPECTRE_BHI_ON) ? BHI_MITIGATION_ON :
+- IS_ENABLED(CONFIG_SPECTRE_BHI_OFF) ? BHI_MITIGATION_OFF :
+- BHI_MITIGATION_AUTO;
++ IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF;
+
+ static int __init spectre_bhi_parse_cmdline(char *str)
+ {
+@@ -1642,8 +1638,6 @@ static int __init spectre_bhi_parse_cmdline(char *str)
+ bhi_mitigation = BHI_MITIGATION_OFF;
+ else if (!strcmp(str, "on"))
+ bhi_mitigation = BHI_MITIGATION_ON;
+- else if (!strcmp(str, "auto"))
+- bhi_mitigation = BHI_MITIGATION_AUTO;
+ else
+ pr_err("Ignoring unknown spectre_bhi option (%s)", str);
+
+@@ -1657,9 +1651,11 @@ static void __init bhi_select_mitigation(void)
+ return;
+
+ /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
+- if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
+- !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
+- return;
++ if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
++ spec_ctrl_disable_kernel_rrsba();
++ if (rrsba_disabled)
++ return;
++ }
+
+ if (spec_ctrl_bhi_dis())
+ return;
+@@ -1671,9 +1667,6 @@ static void __init bhi_select_mitigation(void)
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
+ pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");
+
+- if (bhi_mitigation == BHI_MITIGATION_AUTO)
+- return;
+-
+ /* Mitigate syscalls when the mitigation is forced =on */
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
+ pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");
+@@ -1907,8 +1900,6 @@ static void update_indir_branch_cond(void)
+ /* Update the static key controlling the MDS CPU buffer clear in idle */
+ static void update_mds_branch_idle(void)
+ {
+- u64 ia32_cap = x86_read_arch_cap_msr();
+-
+ /*
+ * Enable the idle clearing if SMT is active on CPUs which are
+ * affected only by MSBDS and not any other MDS variant.
+@@ -1923,7 +1914,7 @@ static void update_mds_branch_idle(void)
+ if (sched_smt_active()) {
+ static_branch_enable(&mds_idle_clear);
+ } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
+- (ia32_cap & ARCH_CAP_FBSDP_NO)) {
++ (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
+ static_branch_disable(&mds_idle_clear);
+ }
+ }
+@@ -2808,7 +2799,7 @@ static char *pbrsb_eibrs_state(void)
+ }
+ }
+
+-static const char * const spectre_bhi_state(void)
++static const char *spectre_bhi_state(void)
+ {
+ if (!boot_cpu_has_bug(X86_BUG_BHI))
+ return "; BHI: Not affected";
+@@ -2816,13 +2807,12 @@ static const char * const spectre_bhi_state(void)
+ return "; BHI: BHI_DIS_S";
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
+ return "; BHI: SW loop, KVM: SW loop";
+- else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+- !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
++ else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled)
+ return "; BHI: Retpoline";
+- else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
+- return "; BHI: Syscall hardening, KVM: SW loop";
++ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
++ return "; BHI: Vulnerable, KVM: SW loop";
+
+- return "; BHI: Vulnerable (Syscall hardening enabled)";
++ return "; BHI: Vulnerable";
+ }
+
+ static ssize_t spectre_v2_show_state(char *buf)
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 785fedddb5f09..46603c6e400aa 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1327,25 +1327,25 @@ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long whi
+
+ u64 x86_read_arch_cap_msr(void)
+ {
+- u64 ia32_cap = 0;
++ u64 x86_arch_cap_msr = 0;
+
+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
++ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
+
+- return ia32_cap;
++ return x86_arch_cap_msr;
+ }
+
+-static bool arch_cap_mmio_immune(u64 ia32_cap)
++static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
+ {
+- return (ia32_cap & ARCH_CAP_FBSDP_NO &&
+- ia32_cap & ARCH_CAP_PSDP_NO &&
+- ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
++ return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
++ x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
++ x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
+ }
+
+-static bool __init vulnerable_to_rfds(u64 ia32_cap)
++static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
+ {
+ /* The "immunity" bit trumps everything else: */
+- if (ia32_cap & ARCH_CAP_RFDS_NO)
++ if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
+ return false;
+
+ /*
+@@ -1353,7 +1353,7 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
+ * indicate that mitigation is needed because guest is running on a
+ * vulnerable hardware or may migrate to such hardware:
+ */
+- if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
++ if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
+ return true;
+
+ /* Only consult the blacklist when there is no enumeration: */
+@@ -1362,11 +1362,11 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
+
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+- u64 ia32_cap = x86_read_arch_cap_msr();
++ u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
+
+ /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
+ if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
+- !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
++ !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
+ setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
+
+ if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
+@@ -1378,7 +1378,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
+ if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
+- !(ia32_cap & ARCH_CAP_SSB_NO) &&
++ !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
+ !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+@@ -1386,15 +1386,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
+ * flag and protect from vendor-specific bugs via the whitelist.
+ */
+- if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
++ if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
+ setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+ if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+- !(ia32_cap & ARCH_CAP_PBRSB_NO))
++ !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
+ setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+ }
+
+ if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
+- !(ia32_cap & ARCH_CAP_MDS_NO)) {
++ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
+ setup_force_cpu_bug(X86_BUG_MDS);
+ if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
+ setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+@@ -1413,9 +1413,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ * TSX_CTRL check alone is not sufficient for cases when the microcode
+ * update is not present or running as guest that don't get TSX_CTRL.
+ */
+- if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
++ if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
+ (cpu_has(c, X86_FEATURE_RTM) ||
+- (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
++ (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
+ setup_force_cpu_bug(X86_BUG_TAA);
+
+ /*
+@@ -1441,7 +1441,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
+ * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
+ */
+- if (!arch_cap_mmio_immune(ia32_cap)) {
++ if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
+ if (cpu_matches(cpu_vuln_blacklist, MMIO))
+ setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+ else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
+@@ -1449,7 +1449,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ }
+
+ if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
+- if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
++ if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
+ setup_force_cpu_bug(X86_BUG_RETBLEED);
+ }
+
+@@ -1467,15 +1467,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
+ * which means that AVX will be disabled.
+ */
+- if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
++ if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
+ boot_cpu_has(X86_FEATURE_AVX))
+ setup_force_cpu_bug(X86_BUG_GDS);
+
+- if (vulnerable_to_rfds(ia32_cap))
++ if (vulnerable_to_rfds(x86_arch_cap_msr))
+ setup_force_cpu_bug(X86_BUG_RFDS);
+
+ /* When virtualized, eIBRS could be hidden, assume vulnerable */
+- if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
++ if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
+ !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
+ (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
+ boot_cpu_has(X86_FEATURE_HYPERVISOR)))
+@@ -1485,7 +1485,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ return;
+
+ /* Rogue Data Cache Load? No! */
+- if (ia32_cap & ARCH_CAP_RDCL_NO)
++ if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
+ return;
+
+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index ff93c385ba5af..4529122e0cbdb 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1409,6 +1409,12 @@ static int blkcg_css_online(struct cgroup_subsys_state *css)
+ return 0;
+ }
+
++void blkg_init_queue(struct request_queue *q)
++{
++ INIT_LIST_HEAD(&q->blkg_list);
++ mutex_init(&q->blkcg_mutex);
++}
++
+ int blkcg_init_disk(struct gendisk *disk)
+ {
+ struct request_queue *q = disk->queue;
+@@ -1416,9 +1422,6 @@ int blkcg_init_disk(struct gendisk *disk)
+ bool preloaded;
+ int ret;
+
+- INIT_LIST_HEAD(&q->blkg_list);
+- mutex_init(&q->blkcg_mutex);
+-
+ new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
+ if (!new_blkg)
+ return -ENOMEM;
+diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
+index b927a4a0ad030..5b0bdc268ade9 100644
+--- a/block/blk-cgroup.h
++++ b/block/blk-cgroup.h
+@@ -188,6 +188,7 @@ struct blkcg_policy {
+ extern struct blkcg blkcg_root;
+ extern bool blkcg_debug_stats;
+
++void blkg_init_queue(struct request_queue *q);
+ int blkcg_init_disk(struct gendisk *disk);
+ void blkcg_exit_disk(struct gendisk *disk);
+
+@@ -481,6 +482,7 @@ struct blkcg {
+ };
+
+ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
++static inline void blkg_init_queue(struct request_queue *q) { }
+ static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
+ static inline void blkcg_exit_disk(struct gendisk *disk) { }
+ static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+diff --git a/block/blk-core.c b/block/blk-core.c
+index de771093b5268..99d684085719d 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -431,6 +431,8 @@ struct request_queue *blk_alloc_queue(int node_id)
+ init_waitqueue_head(&q->mq_freeze_wq);
+ mutex_init(&q->mq_freeze_lock);
+
++ blkg_init_queue(q);
++
+ /*
+ * Init percpu_ref in atomic mode so that it's faster to shutdown.
+ * See blk_register_queue() for details.
+diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
+index 4b06402269869..b35c7aedca03e 100644
+--- a/drivers/accel/ivpu/ivpu_drv.c
++++ b/drivers/accel/ivpu/ivpu_drv.c
+@@ -131,22 +131,6 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
+ return 0;
+ }
+
+-static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
+-{
+- int ret;
+-
+- ret = ivpu_rpm_get_if_active(vdev);
+- if (ret < 0)
+- return ret;
+-
+- *clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;
+-
+- if (ret)
+- ivpu_rpm_put(vdev);
+-
+- return 0;
+-}
+-
+ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+ {
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+@@ -170,7 +154,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
+ args->value = vdev->platform;
+ break;
+ case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
+- ret = ivpu_get_core_clock_rate(vdev, &args->value);
++ args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
+ break;
+ case DRM_IVPU_PARAM_NUM_CONTEXTS:
+ args->value = ivpu_get_context_count(vdev);
+@@ -530,7 +514,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
+ vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
+ vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
+ atomic64_set(&vdev->unique_id_counter, 0);
+- xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
++ xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
+ lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
+ INIT_LIST_HEAD(&vdev->bo_list);
+diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
+index b2909168a0a69..094c659d2800b 100644
+--- a/drivers/accel/ivpu/ivpu_hw.h
++++ b/drivers/accel/ivpu/ivpu_hw.h
+@@ -21,6 +21,7 @@ struct ivpu_hw_ops {
+ u32 (*profiling_freq_get)(struct ivpu_device *vdev);
+ void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
+ u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
++ u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
+ u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
+ u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
+ u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
+@@ -130,6 +131,11 @@ static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
+ return vdev->hw->ops->reg_pll_freq_get(vdev);
+ };
+
++static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
++{
++ return vdev->hw->ops->ratio_to_freq(vdev, ratio);
++}
++
+ static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
+ {
+ return vdev->hw->ops->reg_telemetry_offset_get(vdev);
+diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
+index 89af1006df558..32bb772e03cf9 100644
+--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
++++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
+@@ -805,12 +805,12 @@ static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool ena
+ /* Profiling freq - is a debug feature. Unavailable on VPU 37XX. */
+ }
+
+-static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
++static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+ {
+ u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
+ u32 cpu_clock;
+
+- if ((config & 0xff) == PLL_RATIO_4_3)
++ if ((vdev->hw->config & 0xff) == PLL_RATIO_4_3)
+ cpu_clock = pll_clock * 2 / 4;
+ else
+ cpu_clock = pll_clock * 2 / 5;
+@@ -829,7 +829,7 @@ static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
+ if (!ivpu_is_silicon(vdev))
+ return PLL_SIMULATION_FREQ;
+
+- return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
++ return ivpu_hw_37xx_ratio_to_freq(vdev, pll_curr_ratio);
+ }
+
+ static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
+@@ -1052,6 +1052,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
+ .profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
+ .profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
+ .reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
++ .ratio_to_freq = ivpu_hw_37xx_ratio_to_freq,
+ .reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
+ .reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
+ .reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
+diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
+index a1523d0b1ef36..0be353ad872d0 100644
+--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
++++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
+@@ -980,6 +980,11 @@ static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
+ return PLL_RATIO_TO_FREQ(pll_curr_ratio);
+ }
+
++static u32 ivpu_hw_40xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
++{
++ return PLL_RATIO_TO_FREQ(ratio);
++}
++
+ static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
+ {
+ return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
+@@ -1230,6 +1235,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
+ .profiling_freq_get = ivpu_hw_40xx_profiling_freq_get,
+ .profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive,
+ .reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
++ .ratio_to_freq = ivpu_hw_40xx_ratio_to_freq,
+ .reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
+ .reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
+ .reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
+diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
+index fa66c39b57eca..f4c5392483656 100644
+--- a/drivers/accel/ivpu/ivpu_ipc.c
++++ b/drivers/accel/ivpu/ivpu_ipc.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (C) 2020-2023 Intel Corporation
++ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+ #include <linux/genalloc.h>
+@@ -501,7 +501,11 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
+ spin_lock_init(&ipc->cons_lock);
+ INIT_LIST_HEAD(&ipc->cons_list);
+ INIT_LIST_HEAD(&ipc->cb_msg_list);
+- drmm_mutex_init(&vdev->drm, &ipc->lock);
++ ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
++ if (ret) {
++ ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
++ goto err_free_rx;
++ }
+ ivpu_ipc_reset(vdev);
+ return 0;
+
+diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
+index 5f73854234ba9..a15d30d0943af 100644
+--- a/drivers/accel/ivpu/ivpu_pm.c
++++ b/drivers/accel/ivpu/ivpu_pm.c
+@@ -74,10 +74,10 @@ static int ivpu_resume(struct ivpu_device *vdev)
+ {
+ int ret;
+
+- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
++retry:
+ pci_restore_state(to_pci_dev(vdev->drm.dev));
++ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+
+-retry:
+ ret = ivpu_hw_power_up(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
+@@ -100,6 +100,7 @@ static int ivpu_resume(struct ivpu_device *vdev)
+ ivpu_mmu_disable(vdev);
+ err_power_down:
+ ivpu_hw_power_down(vdev);
++ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+
+ if (!ivpu_fw_is_cold_boot(vdev)) {
+ ivpu_pm_prepare_cold_boot(vdev);
+diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
+index a26e7793ec4ef..75e9aac43228a 100644
+--- a/drivers/acpi/numa/hmat.c
++++ b/drivers/acpi/numa/hmat.c
+@@ -59,9 +59,8 @@ struct target_cache {
+ };
+
+ enum {
+- NODE_ACCESS_CLASS_0 = 0,
+- NODE_ACCESS_CLASS_1,
+- NODE_ACCESS_CLASS_GENPORT_SINK,
++ NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL = ACCESS_COORDINATE_MAX,
++ NODE_ACCESS_CLASS_GENPORT_SINK_CPU,
+ NODE_ACCESS_CLASS_MAX,
+ };
+
+@@ -127,7 +126,8 @@ static struct memory_target *acpi_find_genport_target(u32 uid)
+ /**
+ * acpi_get_genport_coordinates - Retrieve the access coordinates for a generic port
+ * @uid: ACPI unique id
+- * @coord: The access coordinates written back out for the generic port
++ * @coord: The access coordinates written back out for the generic port.
++ * Expect 2 levels array.
+ *
+ * Return: 0 on success. Errno on failure.
+ *
+@@ -143,7 +143,10 @@ int acpi_get_genport_coordinates(u32 uid,
+ if (!target)
+ return -ENOENT;
+
+- *coord = target->coord[NODE_ACCESS_CLASS_GENPORT_SINK];
++ coord[ACCESS_COORDINATE_LOCAL] =
++ target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL];
++ coord[ACCESS_COORDINATE_CPU] =
++ target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_CPU];
+
+ return 0;
+ }
+@@ -374,11 +377,11 @@ static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_px
+
+ if (target && target->processor_pxm == init_pxm) {
+ hmat_update_target_access(target, type, value,
+- NODE_ACCESS_CLASS_0);
++ ACCESS_COORDINATE_LOCAL);
+ /* If the node has a CPU, update access 1 */
+ if (node_state(pxm_to_node(init_pxm), N_CPU))
+ hmat_update_target_access(target, type, value,
+- NODE_ACCESS_CLASS_1);
++ ACCESS_COORDINATE_CPU);
+ }
+ }
+
+@@ -697,7 +700,8 @@ static void hmat_update_target_attrs(struct memory_target *target,
+ int i;
+
+ /* Don't update for generic port if there's no device handle */
+- if (access == NODE_ACCESS_CLASS_GENPORT_SINK &&
++ if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
++ access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
+ !(*(u16 *)target->gen_port_device_handle))
+ return;
+
+@@ -709,7 +713,8 @@ static void hmat_update_target_attrs(struct memory_target *target,
+ */
+ if (target->processor_pxm != PXM_INVAL) {
+ cpu_nid = pxm_to_node(target->processor_pxm);
+- if (access == 0 || node_state(cpu_nid, N_CPU)) {
++ if (access == ACCESS_COORDINATE_LOCAL ||
++ node_state(cpu_nid, N_CPU)) {
+ set_bit(target->processor_pxm, p_nodes);
+ return;
+ }
+@@ -737,7 +742,9 @@ static void hmat_update_target_attrs(struct memory_target *target,
+ list_for_each_entry(initiator, &initiators, node) {
+ u32 value;
+
+- if (access == 1 && !initiator->has_cpu) {
++ if ((access == ACCESS_COORDINATE_CPU ||
++ access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
++ !initiator->has_cpu) {
+ clear_bit(initiator->processor_pxm, p_nodes);
+ continue;
+ }
+@@ -775,15 +782,19 @@ static void hmat_update_generic_target(struct memory_target *target)
+ static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
+
+ hmat_update_target_attrs(target, p_nodes,
+- NODE_ACCESS_CLASS_GENPORT_SINK);
++ NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL);
++ hmat_update_target_attrs(target, p_nodes,
++ NODE_ACCESS_CLASS_GENPORT_SINK_CPU);
+ }
+
+ static void hmat_register_target_initiators(struct memory_target *target)
+ {
+ static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
+
+- __hmat_register_target_initiators(target, p_nodes, 0);
+- __hmat_register_target_initiators(target, p_nodes, 1);
++ __hmat_register_target_initiators(target, p_nodes,
++ ACCESS_COORDINATE_LOCAL);
++ __hmat_register_target_initiators(target, p_nodes,
++ ACCESS_COORDINATE_CPU);
+ }
+
+ static void hmat_register_target_cache(struct memory_target *target)
+@@ -854,8 +865,8 @@ static void hmat_register_target(struct memory_target *target)
+ if (!target->registered) {
+ hmat_register_target_initiators(target);
+ hmat_register_target_cache(target);
+- hmat_register_target_perf(target, NODE_ACCESS_CLASS_0);
+- hmat_register_target_perf(target, NODE_ACCESS_CLASS_1);
++ hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
++ hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
+ target->registered = true;
+ }
+ mutex_unlock(&target_lock);
+@@ -927,7 +938,7 @@ static int hmat_calculate_adistance(struct notifier_block *self,
+ return NOTIFY_OK;
+
+ mutex_lock(&target_lock);
+- hmat_update_target_attrs(target, p_nodes, 1);
++ hmat_update_target_attrs(target, p_nodes, ACCESS_COORDINATE_CPU);
+ mutex_unlock(&target_lock);
+
+ perf = &target->coord[1];
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 617f3e0e963d5..eb4ca85d16ffb 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -1802,7 +1802,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
+ if (dep->honor_dep)
+ adev->flags.honor_deps = 1;
+
+- adev->dep_unmet++;
++ if (!dep->met)
++ adev->dep_unmet++;
+ }
+ }
+ }
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index be3412cdb22e7..c449d60d9bb96 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2539,7 +2539,7 @@ static void ata_dev_config_cdl(struct ata_device *dev)
+ bool cdl_enabled;
+ u64 val;
+
+- if (ata_id_major_version(dev->id) < 12)
++ if (ata_id_major_version(dev->id) < 11)
+ goto not_supported;
+
+ if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 2f4c588376410..e954976891a9f 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -4745,7 +4745,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ * bail out.
+ */
+ if (ap->pflags & ATA_PFLAG_SUSPENDED)
+- goto unlock;
++ goto unlock_ap;
+
+ if (!sdev)
+ continue;
+@@ -4758,7 +4758,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ if (do_resume) {
+ ret = scsi_resume_device(sdev);
+ if (ret == -EWOULDBLOCK)
+- goto unlock;
++ goto unlock_scan;
+ dev->flags &= ~ATA_DFLAG_RESUMING;
+ }
+ ret = scsi_rescan_device(sdev);
+@@ -4766,12 +4766,13 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ spin_lock_irqsave(ap->lock, flags);
+
+ if (ret)
+- goto unlock;
++ goto unlock_ap;
+ }
+ }
+
+-unlock:
++unlock_ap:
+ spin_unlock_irqrestore(ap->lock, flags);
++unlock_scan:
+ mutex_unlock(&ap->scsi_scan_mutex);
+
+ /* Reschedule with a delay if scsi_rescan_device() returned an error */
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index 1c05640461dd1..a73b0c9a401ad 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -126,7 +126,7 @@ static void node_access_release(struct device *dev)
+ }
+
+ static struct node_access_nodes *node_init_node_access(struct node *node,
+- unsigned int access)
++ enum access_coordinate_class access)
+ {
+ struct node_access_nodes *access_node;
+ struct device *dev;
+@@ -191,7 +191,7 @@ static struct attribute *access_attrs[] = {
+ * @access: The access class the for the given attributes
+ */
+ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
+- unsigned int access)
++ enum access_coordinate_class access)
+ {
+ struct node_access_nodes *c;
+ struct node *node;
+@@ -689,7 +689,7 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid)
+ */
+ int register_memory_node_under_compute_node(unsigned int mem_nid,
+ unsigned int cpu_nid,
+- unsigned int access)
++ enum access_coordinate_class access)
+ {
+ struct node *init_node, *targ_node;
+ struct node_access_nodes *initiator, *target;
+diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
+index 1a3e6aafbdcc3..af5cb818f84d6 100644
+--- a/drivers/cxl/acpi.c
++++ b/drivers/cxl/acpi.c
+@@ -530,13 +530,15 @@ static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
+ if (kstrtou32(acpi_device_uid(hb), 0, &uid))
+ return -EINVAL;
+
+- rc = acpi_get_genport_coordinates(uid, &dport->hb_coord);
++ rc = acpi_get_genport_coordinates(uid, dport->hb_coord);
+ if (rc < 0)
+ return rc;
+
+ /* Adjust back to picoseconds from nanoseconds */
+- dport->hb_coord.read_latency *= 1000;
+- dport->hb_coord.write_latency *= 1000;
++ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
++ dport->hb_coord[i].read_latency *= 1000;
++ dport->hb_coord[i].write_latency *= 1000;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
+index 0363ca434ef45..fbf167f9d59d4 100644
+--- a/drivers/cxl/core/cdat.c
++++ b/drivers/cxl/core/cdat.c
+@@ -162,15 +162,22 @@ static int cxl_cdat_endpoint_process(struct cxl_port *port,
+ static int cxl_port_perf_data_calculate(struct cxl_port *port,
+ struct xarray *dsmas_xa)
+ {
+- struct access_coordinate c;
++ struct access_coordinate ep_c;
++ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+ struct dsmas_entry *dent;
+ int valid_entries = 0;
+ unsigned long index;
+ int rc;
+
+- rc = cxl_endpoint_get_perf_coordinates(port, &c);
++ rc = cxl_endpoint_get_perf_coordinates(port, &ep_c);
+ if (rc) {
+- dev_dbg(&port->dev, "Failed to retrieve perf coordinates.\n");
++ dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
++ return rc;
++ }
++
++ rc = cxl_hb_get_perf_coordinates(port, coord);
++ if (rc) {
++ dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
+ return rc;
+ }
+
+@@ -185,18 +192,19 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
+ xa_for_each(dsmas_xa, index, dent) {
+ int qos_class;
+
+- dent->coord.read_latency = dent->coord.read_latency +
+- c.read_latency;
+- dent->coord.write_latency = dent->coord.write_latency +
+- c.write_latency;
+- dent->coord.read_bandwidth = min_t(int, c.read_bandwidth,
+- dent->coord.read_bandwidth);
+- dent->coord.write_bandwidth = min_t(int, c.write_bandwidth,
+- dent->coord.write_bandwidth);
+-
++ cxl_coordinates_combine(&dent->coord, &dent->coord, &ep_c);
++ /*
++ * Keeping the host bridge coordinates separate from the dsmas
++ * coordinates in order to allow calculation of access class
++ * 0 and 1 for region later.
++ */
++ cxl_coordinates_combine(&coord[ACCESS_COORDINATE_LOCAL],
++ &coord[ACCESS_COORDINATE_LOCAL],
++ &dent->coord);
+ dent->entries = 1;
+- rc = cxl_root->ops->qos_class(cxl_root, &dent->coord, 1,
+- &qos_class);
++ rc = cxl_root->ops->qos_class(cxl_root,
++ &coord[ACCESS_COORDINATE_LOCAL],
++ 1, &qos_class);
+ if (rc != 1)
+ continue;
+
+@@ -484,4 +492,26 @@ void cxl_switch_parse_cdat(struct cxl_port *port)
+ }
+ EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
+
++/**
++ * cxl_coordinates_combine - Combine the two input coordinates
++ *
++ * @out: Output coordinate of c1 and c2 combined
++ * @c1: input coordinates
++ * @c2: input coordinates
++ */
++void cxl_coordinates_combine(struct access_coordinate *out,
++ struct access_coordinate *c1,
++ struct access_coordinate *c2)
++{
++ if (c1->write_bandwidth && c2->write_bandwidth)
++ out->write_bandwidth = min(c1->write_bandwidth,
++ c2->write_bandwidth);
++ out->write_latency = c1->write_latency + c2->write_latency;
++
++ if (c1->read_bandwidth && c2->read_bandwidth)
++ out->read_bandwidth = min(c1->read_bandwidth,
++ c2->read_bandwidth);
++ out->read_latency = c1->read_latency + c2->read_latency;
++}
++
+ MODULE_IMPORT_NS(CXL);
+diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
+index 9adda4795eb78..f0f54aeccc872 100644
+--- a/drivers/cxl/core/mbox.c
++++ b/drivers/cxl/core/mbox.c
+@@ -915,7 +915,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
+
+ payload->handles[i++] = gen->hdr.handle;
+ dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
+- le16_to_cpu(payload->handles[i]));
++ le16_to_cpu(payload->handles[i - 1]));
+
+ if (i == max_handles) {
+ payload->nr_recs = i;
+@@ -958,13 +958,14 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
+ .payload_in = &log_type,
+ .size_in = sizeof(log_type),
+ .payload_out = payload,
+- .size_out = mds->payload_size,
+ .min_out = struct_size(payload, records, 0),
+ };
+
+ do {
+ int rc, i;
+
++ mbox_cmd.size_out = mds->payload_size;
++
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ if (rc) {
+ dev_err_ratelimited(dev,
+diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
+index e59d9d37aa650..4ae441ef32174 100644
+--- a/drivers/cxl/core/port.c
++++ b/drivers/cxl/core/port.c
+@@ -2096,18 +2096,41 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
+ }
+ EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
+
+-static void combine_coordinates(struct access_coordinate *c1,
+- struct access_coordinate *c2)
++/**
++ * cxl_hb_get_perf_coordinates - Retrieve performance numbers between initiator
++ * and host bridge
++ *
++ * @port: endpoint cxl_port
++ * @coord: output access coordinates
++ *
++ * Return: errno on failure, 0 on success.
++ */
++int cxl_hb_get_perf_coordinates(struct cxl_port *port,
++ struct access_coordinate *coord)
+ {
+- if (c2->write_bandwidth)
+- c1->write_bandwidth = min(c1->write_bandwidth,
+- c2->write_bandwidth);
+- c1->write_latency += c2->write_latency;
++ struct cxl_port *iter = port;
++ struct cxl_dport *dport;
++
++ if (!is_cxl_endpoint(port))
++ return -EINVAL;
++
++ dport = iter->parent_dport;
++ while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
++ iter = to_cxl_port(iter->dev.parent);
++ dport = iter->parent_dport;
++ }
++
++ coord[ACCESS_COORDINATE_LOCAL] =
++ dport->hb_coord[ACCESS_COORDINATE_LOCAL];
++ coord[ACCESS_COORDINATE_CPU] =
++ dport->hb_coord[ACCESS_COORDINATE_CPU];
+
+- if (c2->read_bandwidth)
+- c1->read_bandwidth = min(c1->read_bandwidth,
+- c2->read_bandwidth);
+- c1->read_latency += c2->read_latency;
++ return 0;
++}
++
++static bool parent_port_is_cxl_root(struct cxl_port *port)
++{
++ return is_cxl_root(to_cxl_port(port->dev.parent));
+ }
+
+ /**
+@@ -2129,30 +2152,31 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
+ struct cxl_dport *dport;
+ struct pci_dev *pdev;
+ unsigned int bw;
++ bool is_cxl_root;
+
+ if (!is_cxl_endpoint(port))
+ return -EINVAL;
+
+- dport = iter->parent_dport;
+-
+ /*
+- * Exit the loop when the parent port of the current port is cxl root.
+- * The iterative loop starts at the endpoint and gathers the
+- * latency of the CXL link from the current iter to the next downstream
+- * port each iteration. If the parent is cxl root then there is
+- * nothing to gather.
++ * Exit the loop when the parent port of the current iter port is cxl
++ * root. The iterative loop starts at the endpoint and gathers the
++ * latency of the CXL link from the current device/port to the connected
++ * downstream port each iteration.
+ */
+- while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
+- combine_coordinates(&c, &dport->sw_coord);
+- c.write_latency += dport->link_latency;
+- c.read_latency += dport->link_latency;
+-
+- iter = to_cxl_port(iter->dev.parent);
++ do {
+ dport = iter->parent_dport;
+- }
++ iter = to_cxl_port(iter->dev.parent);
++ is_cxl_root = parent_port_is_cxl_root(iter);
+
+- /* Augment with the generic port (host bridge) perf data */
+- combine_coordinates(&c, &dport->hb_coord);
++ /*
++ * There's no valid access_coordinate for a root port since RPs do not
++	 * have CDAT and therefore need to be skipped.
++ */
++ if (!is_cxl_root)
++ cxl_coordinates_combine(&c, &c, &dport->sw_coord);
++ c.write_latency += dport->link_latency;
++ c.read_latency += dport->link_latency;
++ } while (!is_cxl_root);
+
+ /* Get the calculated PCI paths bandwidth */
+ pdev = to_pci_dev(port->uport_dev->parent);
+diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
+index 372786f809555..3c42f984eeafa 100644
+--- a/drivers/cxl/core/regs.c
++++ b/drivers/cxl/core/regs.c
+@@ -271,6 +271,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
+ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
+ struct cxl_register_map *map)
+ {
++ u8 reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
+ int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
+ u64 offset = ((u64)reg_hi << 32) |
+ (reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
+@@ -278,11 +279,11 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
+ if (offset > pci_resource_len(pdev, bar)) {
+ dev_warn(&pdev->dev,
+ "BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar,
+- &pdev->resource[bar], &offset, map->reg_type);
++ &pdev->resource[bar], &offset, reg_type);
+ return false;
+ }
+
+- map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
++ map->reg_type = reg_type;
+ map->resource = pci_resource_start(pdev, bar) + offset;
+ map->max_size = pci_resource_len(pdev, bar) - offset;
+ return true;
+diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
+index 003feebab79b5..de477eb7f5d54 100644
+--- a/drivers/cxl/cxl.h
++++ b/drivers/cxl/cxl.h
+@@ -671,7 +671,7 @@ struct cxl_dport {
+ struct cxl_port *port;
+ struct cxl_regs regs;
+ struct access_coordinate sw_coord;
+- struct access_coordinate hb_coord;
++ struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
+ long link_latency;
+ };
+
+@@ -879,9 +879,15 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
+
+ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
+ struct access_coordinate *coord);
++int cxl_hb_get_perf_coordinates(struct cxl_port *port,
++ struct access_coordinate *coord);
+
+ void cxl_memdev_update_perf(struct cxl_memdev *cxlmd);
+
++void cxl_coordinates_combine(struct access_coordinate *out,
++ struct access_coordinate *c1,
++ struct access_coordinate *c2);
++
+ /*
+ * Unit test builds overrides this to __weak, find the 'strong' version
+ * of these symbols in tools/testing/cxl/.
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index f2556a8e94015..9bc2e10381afd 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -790,7 +790,7 @@ static void ffa_notification_info_get(void)
+
+ part_id = packed_id_list[ids_processed++];
+
+- if (!ids_count[list]) { /* Global Notification */
++ if (ids_count[list] == 1) { /* Global Notification */
+ __do_sched_recv_cb(part_id, 0, false);
+ continue;
+ }
+diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
+index 3505735185033..130d13e9cd6be 100644
+--- a/drivers/firmware/arm_scmi/raw_mode.c
++++ b/drivers/firmware/arm_scmi/raw_mode.c
+@@ -921,7 +921,7 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
+ rd->raw = raw;
+ filp->private_data = rd;
+
+- return 0;
++ return nonseekable_open(inode, filp);
+ }
+
+ static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
+@@ -950,6 +950,7 @@ static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .write = scmi_dbg_raw_mode_reset_write,
++ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+ };
+
+@@ -959,6 +960,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_fops = {
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
++ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+ };
+
+@@ -975,6 +977,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_async_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
++ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+ };
+
+@@ -998,6 +1001,7 @@ static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_test_dbg_raw_mode_notif_read,
+ .poll = scmi_test_dbg_raw_mode_notif_poll,
++ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+ };
+
+@@ -1021,6 +1025,7 @@ static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_test_dbg_raw_mode_errors_read,
+ .poll = scmi_test_dbg_raw_mode_errors_poll,
++ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+ };
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+index b9a15d51eb5c3..ad44012cc01e2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+@@ -390,6 +390,12 @@ static int vpe_hw_init(void *handle)
+ struct amdgpu_vpe *vpe = &adev->vpe;
+ int ret;
+
++ /* Power on VPE */
++ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
++ AMD_PG_STATE_UNGATE);
++ if (ret)
++ return ret;
++
+ ret = vpe_load_microcode(vpe);
+ if (ret)
+ return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 4f3bfdc75b37d..0afe86bcc932b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -1630,7 +1630,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
+ active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
+ }
+
+- active_rb_bitmap |= global_active_rb_bitmap;
++ active_rb_bitmap &= global_active_rb_bitmap;
+ adev->gfx.config.backend_enable_mask = active_rb_bitmap;
+ adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index 4d7188912edfe..fd4505fa4f670 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -450,10 +450,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
+ {
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(11, 0, 0):
+- return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+- return false;
+ default:
+ return true;
+ }
+@@ -714,7 +712,10 @@ static int soc21_common_early_init(void *handle)
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_JPEG |
+ AMD_PG_SUPPORT_GFX_PG;
+- adev->external_rev_id = adev->rev_id + 0x1;
++ if (adev->rev_id == 0)
++ adev->external_rev_id = 0x1;
++ else
++ adev->external_rev_id = adev->rev_id + 0x10;
+ break;
+ default:
+ /* FIXME: not supported yet */
+@@ -832,10 +833,35 @@ static int soc21_common_suspend(void *handle)
+ return soc21_common_hw_fini(adev);
+ }
+
++static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
++{
++ u32 sol_reg1, sol_reg2;
++
++ /* Will reset for the following suspend abort cases.
++ * 1) Only reset dGPU side.
++ * 2) S3 suspend got aborted and TOS is active.
++ */
++ if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
++ !adev->suspend_complete) {
++ sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
++ msleep(100);
++ sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
++
++ return (sol_reg1 != sol_reg2);
++ }
++
++ return false;
++}
++
+ static int soc21_common_resume(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ if (soc21_need_reset_on_resume(adev)) {
++ dev_info(adev->dev, "S3 suspend aborted, resetting...");
++ soc21_asic_reset(adev);
++ }
++
+ return soc21_common_hw_init(adev);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
+index 8e7b763cfdb7e..fd23cd348552b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
+@@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch)
+
+ WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size);
+
++ ring->wptr = 0;
++
+ data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
+ data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK);
+ WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index c0e71543389a9..c0ae1a97498b5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1997,6 +1997,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
+ while (halt_if_hws_hang)
+ schedule();
++ kfd_hws_hang(dqm);
+ return -ETIME;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 1eb0f82e9dfae..718e533ab46dd 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -6257,19 +6257,16 @@ create_stream_for_sink(struct drm_connector *connector,
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
+
+- if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
++ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
++ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
++ stream->signal == SIGNAL_TYPE_EDP) {
+ //
+ // should decide stream support vsc sdp colorimetry capability
+ // before building vsc info packet
+ //
+- stream->use_vsc_sdp_for_colorimetry = false;
+- if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+- stream->use_vsc_sdp_for_colorimetry =
+- aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
+- } else {
+- if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
+- stream->use_vsc_sdp_for_colorimetry = true;
+- }
++ stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
++ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
++
+ if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
+ tf = TRANSFER_FUNC_GAMMA_22;
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+index 16e72d623630c..08c494a7a21ba 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+@@ -76,10 +76,8 @@ static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,
+
+ static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
+ {
+- struct drm_device *dev = connector->dev;
+-
+- return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+- dev->mode_config.max_height);
++ /* Maximum resolution supported by DWB */
++ return drm_add_modes_noedid(connector, 3840, 2160);
+ }
+
+ static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+index 12f3e8aa46d8d..6ad4f4efec5dd 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+@@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
+ return display_count;
+ }
+
+-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
++static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
++ bool safe_to_lower, bool disable)
+ {
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++ struct pipe_ctx *pipe = safe_to_lower
++ ? &context->res_ctx.pipe_ctx[i]
++ : &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->top_pipe || pipe->prev_odm_pipe)
+ continue;
+- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
+- dc_is_virtual_signal(pipe->stream->signal))) {
++ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
++ !pipe->stream->link_enc)) {
+ if (disable) {
+- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
++ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
++ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
++
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
+ pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+@@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+- dcn316_disable_otg_wa(clk_mgr_base, context, true);
++ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+- dcn316_disable_otg_wa(clk_mgr_base, context, false);
++ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
+
+ update_dispclk = true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+index 5cc7f8da209c5..61986e5cb4919 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+@@ -436,6 +436,15 @@ bool dc_state_add_plane(
+ goto out;
+ }
+
++ if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
++		/* ODM combine could prevent us from supporting more planes;
++		 * we will reset ODM slice count back to 1 when all planes have
++		 * been removed to maximize the number of planes supported when
++ * new planes are added.
++ */
++ resource_update_pipes_for_stream_with_slice_count(
++ state, dc->current_state, dc->res_pool, stream, 1);
++
+ otg_master_pipe = resource_get_otg_master_for_stream(
+ &state->res_ctx, stream);
+ if (otg_master_pipe)
+diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+index f07a4c7e48bc2..52eab8fccb7f1 100644
+--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+@@ -267,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
+-
+- // Setup manual flow control for EOF via TRIG_A
+- optc->funcs->setup_manual_trigger(optc);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+index bb98156b2fa1d..949131bd1ecb2 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+@@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+- if (!en && !adev->in_s0ix)
++ if (!en && !adev->in_s0ix) {
++		/* Add a GFX reset as a workaround just before sending the
++ * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
++ * an invalid state.
++ */
++ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
++ SMU_RESET_MODE_2, NULL);
++ if (ret)
++ return ret;
++
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
++ }
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
+index ebb6d8ebd44eb..1e9259416980e 100644
+--- a/drivers/gpu/drm/ast/ast_dp.c
++++ b/drivers/gpu/drm/ast/ast_dp.c
+@@ -180,6 +180,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
+ {
+ struct ast_device *ast = to_ast_device(dev);
+ u8 video_on_off = on;
++ u32 i = 0;
+
+ // Video On/Off
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
+@@ -192,6 +193,8 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
+ ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
+ // wait 1 ms
+ mdelay(1);
++ if (++i > 200)
++ break;
+ }
+ }
+ }
+diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
+index 871e4e2129d6d..0683a129b3628 100644
+--- a/drivers/gpu/drm/drm_client_modeset.c
++++ b/drivers/gpu/drm/drm_client_modeset.c
+@@ -777,6 +777,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ unsigned int total_modes_count = 0;
+ struct drm_client_offset *offsets;
+ unsigned int connector_count = 0;
++ /* points to modes protected by mode_config.mutex */
+ struct drm_display_mode **modes;
+ struct drm_crtc **crtcs;
+ int i, ret = 0;
+@@ -845,7 +846,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ drm_client_pick_crtcs(client, connectors, connector_count,
+ crtcs, modes, 0, width, height);
+ }
+- mutex_unlock(&dev->mode_config.mutex);
+
+ drm_client_modeset_release(client);
+
+@@ -875,6 +875,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ modeset->y = offset->y;
+ }
+ }
++ mutex_unlock(&dev->mode_config.mutex);
+
+ mutex_unlock(&client->modeset_mutex);
+ out:
+diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
+index c985ebb6831a3..6e36a15284537 100644
+--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
+@@ -2521,7 +2521,7 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
+ if (IS_DG2(i915))
+ intel_cdclk_pcode_pre_notify(state);
+
+- if (pipe == INVALID_PIPE ||
++ if (new_cdclk_state->disable_pipes ||
+ old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+@@ -2553,7 +2553,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
+ if (IS_DG2(i915))
+ intel_cdclk_pcode_post_notify(state);
+
+- if (pipe != INVALID_PIPE &&
++ if (!new_cdclk_state->disable_pipes &&
+ old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+@@ -3036,6 +3036,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
+ return NULL;
+
+ cdclk_state->pipe = INVALID_PIPE;
++ cdclk_state->disable_pipes = false;
+
+ return &cdclk_state->base;
+ }
+@@ -3214,6 +3215,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
+ if (ret)
+ return ret;
+
++ new_cdclk_state->disable_pipes = true;
++
+ drm_dbg_kms(&dev_priv->drm,
+ "Modeset required for cdclk change\n");
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
+index 48fd7d39e0cd9..71bc032bfef16 100644
+--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
++++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
+@@ -51,6 +51,9 @@ struct intel_cdclk_state {
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
++
++ /* update cdclk with pipes disabled */
++ bool disable_pipes;
+ };
+
+ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index 12a29363e5dfe..31aa5d54fdf07 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -4229,7 +4229,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
+ static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
+ const struct intel_crtc_state *crtc_state2)
+ {
++ /*
++ * FIXME the modeset sequence is currently wrong and
++ * can't deal with bigjoiner + port sync at the same time.
++ */
+ return crtc_state1->hw.active && crtc_state2->hw.active &&
++ !crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
+ crtc_state1->output_types == crtc_state2->output_types &&
+ crtc_state1->output_format == crtc_state2->output_format &&
+ crtc_state1->lane_count == crtc_state2->lane_count &&
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 360e90601ff93..4e8545126e246 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -2756,7 +2756,11 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
+ intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
+ int pixel_clock;
+
+- if (has_seamless_m_n(connector))
++ /*
++ * FIXME all joined pipes share the same transcoder.
++ * Need to account for that when updating M/N live.
++ */
++ if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
+ pipe_config->update_m_n = true;
+
+ if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index 4faaf4b3fc53b..925776ba1392e 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -1368,6 +1368,17 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
+ return;
+ }
+
++ /*
++ * FIXME figure out what is wrong with PSR+bigjoiner and
++ * fix it. Presumably something related to the fact that
++ * PSR is a transcoder level feature.
++ */
++ if (crtc_state->bigjoiner_pipes) {
++ drm_dbg_kms(&dev_priv->drm,
++ "PSR disabled due to bigjoiner\n");
++ return;
++ }
++
+ if (CAN_PANEL_REPLAY(intel_dp))
+ crtc_state->has_panel_replay = true;
+ else
+diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
+index eb5bd07439020..f542ee1db1d97 100644
+--- a/drivers/gpu/drm/i915/display/intel_vrr.c
++++ b/drivers/gpu/drm/i915/display/intel_vrr.c
+@@ -117,6 +117,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+ const struct drm_display_info *info = &connector->base.display_info;
+ int vmin, vmax;
+
++ /*
++ * FIXME all joined pipes share the same transcoder.
++ * Need to account for that during VRR toggle/push/etc.
++ */
++ if (crtc_state->bigjoiner_pipes)
++ return;
++
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return;
+
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index fd60e49b8ec4d..792a4c60a20c2 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1295,6 +1295,10 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
+ if (adreno_is_a618(gpu))
+ gpu->ubwc_config.highest_bank_bit = 14;
+
++ if (adreno_is_a619(gpu))
++ /* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */
++ gpu->ubwc_config.highest_bank_bit = 13;
++
+ if (adreno_is_a619_holi(gpu))
+ gpu->ubwc_config.highest_bank_bit = 13;
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+index ef871239adb2a..68fae048a9a83 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+@@ -459,15 +459,15 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
+ &perf->core_clk_rate);
+ debugfs_create_u32("enable_bw_release", 0600, entry,
+ (u32 *)&perf->enable_bw_release);
+- debugfs_create_u32("threshold_low", 0600, entry,
++ debugfs_create_u32("threshold_low", 0400, entry,
+ (u32 *)&perf->perf_cfg->max_bw_low);
+- debugfs_create_u32("threshold_high", 0600, entry,
++ debugfs_create_u32("threshold_high", 0400, entry,
+ (u32 *)&perf->perf_cfg->max_bw_high);
+- debugfs_create_u32("min_core_ib", 0600, entry,
++ debugfs_create_u32("min_core_ib", 0400, entry,
+ (u32 *)&perf->perf_cfg->min_core_ib);
+- debugfs_create_u32("min_llcc_ib", 0600, entry,
++ debugfs_create_u32("min_llcc_ib", 0400, entry,
+ (u32 *)&perf->perf_cfg->min_llcc_ib);
+- debugfs_create_u32("min_dram_ib", 0600, entry,
++ debugfs_create_u32("min_dram_ib", 0400, entry,
+ (u32 *)&perf->perf_cfg->min_dram_ib);
+ debugfs_create_file("perf_mode", 0600, entry,
+ (u32 *)perf, &dpu_core_perf_mode_fops);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+index 946dd0135dffc..6a0a74832fb64 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+@@ -525,14 +525,14 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
+ int ret;
+
+ if (!irq_cb) {
+- DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
+- DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
++ DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ return -EINVAL;
+ }
+
+ if (!dpu_core_irq_is_valid(irq_idx)) {
+- DPU_ERROR("invalid IRQ=[%d, %d]\n",
+- DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
++ DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
+ return -EINVAL;
+ }
+
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index 4c72124ffb5d4..78464c395c3d9 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -598,6 +598,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+ ret = dp_display_usbpd_configure_cb(&pdev->dev);
+ if (ret) { /* link train failed */
+ dp->hpd_state = ST_DISCONNECTED;
++ pm_runtime_put_sync(&pdev->dev);
+ } else {
+ dp->hpd_state = ST_MAINLINK_READY;
+ }
+@@ -655,6 +656,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+ dp_display_host_phy_exit(dp);
+ dp->hpd_state = ST_DISCONNECTED;
+ dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
++ pm_runtime_put_sync(&pdev->dev);
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
+index e3f61c39df69b..80166f702a0db 100644
+--- a/drivers/gpu/drm/msm/msm_fb.c
++++ b/drivers/gpu/drm/msm/msm_fb.c
+@@ -89,7 +89,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+
+ for (i = 0; i < n; i++) {
+ ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
+- drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)",
++ drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n",
+ fb->base.id, i, msm_fb->iova[i], ret);
+ if (ret)
+ return ret;
+@@ -176,7 +176,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ const struct msm_format *format;
+ int ret, i, n;
+
+- drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)",
++ drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)\n",
+ mode_cmd, mode_cmd->width, mode_cmd->height,
+ (char *)&mode_cmd->pixel_format);
+
+@@ -232,7 +232,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+
+ refcount_set(&msm_fb->dirtyfb, 1);
+
+- drm_dbg_state(dev, "create: FB ID: %d (%p)", fb->base.id, fb);
++ drm_dbg_state(dev, "create: FB ID: %d (%p)\n", fb->base.id, fb);
+
+ return fb;
+
+diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
+index 84c21ec2ceeae..af6a6fcb11736 100644
+--- a/drivers/gpu/drm/msm/msm_kms.c
++++ b/drivers/gpu/drm/msm/msm_kms.c
+@@ -149,7 +149,7 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc)
+ struct msm_kms *kms = priv->kms;
+ if (!kms)
+ return -ENXIO;
+- drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
++ drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
+ return vblank_ctrl_queue_work(priv, crtc, true);
+ }
+
+@@ -160,7 +160,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc)
+ struct msm_kms *kms = priv->kms;
+ if (!kms)
+ return;
+- drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
++ drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
+ vblank_ctrl_queue_work(priv, crtc, false);
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+index 4bf486b571013..cb05f7f48a98b 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+@@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
+ return ERR_PTR(-EINVAL);
+ }
+
++static void of_fini(void *p)
++{
++ kfree(p);
++}
++
+ const struct nvbios_source
+ nvbios_of = {
+ .name = "OpenFirmware",
+ .init = of_init,
+- .fini = (void(*)(void *))kfree,
++ .fini = of_fini,
+ .read = of_read,
+ .size = of_size,
+ .rw = false,
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index f38385fe76bbb..b91019cd5acb1 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -502,11 +502,18 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ mapping_set_unevictable(mapping);
+
+ for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
++ /* Can happen if the last fault only partially filled this
++ * section of the pages array before failing. In that case
++ * we skip already filled pages.
++ */
++ if (pages[i])
++ continue;
++
+ pages[i] = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(pages[i])) {
+ ret = PTR_ERR(pages[i]);
+ pages[i] = NULL;
+- goto err_pages;
++ goto err_unlock;
+ }
+ }
+
+@@ -514,7 +521,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
+ NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
+ if (ret)
+- goto err_pages;
++ goto err_unlock;
+
+ ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+ if (ret)
+@@ -537,8 +544,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+
+ err_map:
+ sg_free_table(sgt);
+-err_pages:
+- drm_gem_shmem_put_pages(&bo->base);
+ err_unlock:
+ dma_resv_unlock(obj->resv);
+ err_bo:
+diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
+index 368d26da0d6a2..9febc8b73f09e 100644
+--- a/drivers/gpu/drm/qxl/qxl_release.c
++++ b/drivers/gpu/drm/qxl/qxl_release.c
+@@ -58,16 +58,56 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
+ signed long timeout)
+ {
+ struct qxl_device *qdev;
++ struct qxl_release *release;
++ int count = 0, sc = 0;
++ bool have_drawable_releases;
+ unsigned long cur, end = jiffies + timeout;
+
+ qdev = container_of(fence->lock, struct qxl_device, release_lock);
++ release = container_of(fence, struct qxl_release, base);
++ have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
+
+- if (!wait_event_timeout(qdev->release_event,
+- (dma_fence_is_signaled(fence) ||
+- (qxl_io_notify_oom(qdev), 0)),
+- timeout))
+- return 0;
++retry:
++ sc++;
++
++ if (dma_fence_is_signaled(fence))
++ goto signaled;
++
++ qxl_io_notify_oom(qdev);
++
++ for (count = 0; count < 11; count++) {
++ if (!qxl_queue_garbage_collect(qdev, true))
++ break;
++
++ if (dma_fence_is_signaled(fence))
++ goto signaled;
++ }
++
++ if (dma_fence_is_signaled(fence))
++ goto signaled;
++
++ if (have_drawable_releases || sc < 4) {
++ if (sc > 2)
++ /* back off */
++ usleep_range(500, 1000);
++
++ if (time_after(jiffies, end))
++ return 0;
++
++ if (have_drawable_releases && sc > 300) {
++ DMA_FENCE_WARN(fence,
++ "failed to wait on release %llu after spincount %d\n",
++ fence->context & ~0xf0000000, sc);
++ goto signaled;
++ }
++ goto retry;
++ }
++ /*
++ * yeah, original sync_obj_wait gave up after 3 spins when
++ * have_drawable_releases is not set.
++ */
+
++signaled:
+ cur = jiffies;
+ if (time_after(cur, end))
+ return 0;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index c7d90f96d16a6..0a304706e0132 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -666,11 +666,12 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
+ [vmw_dma_map_populate] = "Caching DMA mappings.",
+ [vmw_dma_map_bind] = "Giving up DMA mappings early."};
+
+- /* TTM currently doesn't fully support SEV encryption. */
+- if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
+- return -EINVAL;
+-
+- if (vmw_force_coherent)
++ /*
++ * When running with SEV we always want dma mappings, because
++	 * otherwise ttm tt pool pages will bounce through swiotlb, running
++ * out of available space.
++ */
++ if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
+ dev_priv->map_mode = vmw_dma_alloc_coherent;
+ else if (vmw_restrict_iommu)
+ dev_priv->map_mode = vmw_dma_map_bind;
+diff --git a/drivers/gpu/drm/xe/xe_display.c b/drivers/gpu/drm/xe/xe_display.c
+index e4db069f0db3f..6ec375c1c4b6c 100644
+--- a/drivers/gpu/drm/xe/xe_display.c
++++ b/drivers/gpu/drm/xe/xe_display.c
+@@ -108,11 +108,6 @@ int xe_display_create(struct xe_device *xe)
+ xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
+
+ drmm_mutex_init(&xe->drm, &xe->sb_lock);
+- drmm_mutex_init(&xe->drm, &xe->display.backlight.lock);
+- drmm_mutex_init(&xe->drm, &xe->display.audio.mutex);
+- drmm_mutex_init(&xe->drm, &xe->display.wm.wm_mutex);
+- drmm_mutex_init(&xe->drm, &xe->display.pps.mutex);
+- drmm_mutex_init(&xe->drm, &xe->display.hdcp.hdcp_mutex);
+ xe->enabled_irq_mask = ~0;
+
+ err = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
+diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
+index 174ed2185481e..a6f43446c779a 100644
+--- a/drivers/gpu/drm/xe/xe_hwmon.c
++++ b/drivers/gpu/drm/xe/xe_hwmon.c
+@@ -288,7 +288,7 @@ xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *a
+ * As y can be < 2, we compute tau4 = (4 | x) << y
+ * and then add 2 when doing the final right shift to account for units
+ */
+- tau4 = ((1 << x_w) | x) << y;
++ tau4 = (u64)((1 << x_w) | x) << y;
+
+ /* val in hwmon interface units (millisec) */
+ out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
+@@ -328,7 +328,7 @@ xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *
+ r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
+ x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
+ y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
+- tau4 = ((1 << x_w) | x) << y;
++ tau4 = (u64)((1 << x_w) | x) << y;
+ max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
+
+ if (val > max_win)
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 5dba58f322f03..d7e10f1311aaf 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -4381,9 +4381,11 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
+ }
+
+ dev_iommu_priv_set(dev, info);
+- ret = device_rbtree_insert(iommu, info);
+- if (ret)
+- goto free;
++ if (pdev && pci_ats_supported(pdev)) {
++ ret = device_rbtree_insert(iommu, info);
++ if (ret)
++ goto free;
++ }
+
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
+ ret = intel_pasid_alloc_table(dev);
+@@ -4410,7 +4412,8 @@ static void intel_iommu_release_device(struct device *dev)
+ struct intel_iommu *iommu = info->iommu;
+
+ mutex_lock(&iommu->iopf_lock);
+- device_rbtree_remove(info);
++ if (dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)))
++ device_rbtree_remove(info);
+ mutex_unlock(&iommu->iopf_lock);
+
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
+diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c
+index cf43e798eca49..44083d01852db 100644
+--- a/drivers/iommu/intel/perfmon.c
++++ b/drivers/iommu/intel/perfmon.c
+@@ -438,7 +438,7 @@ static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
+ iommu_pmu_set_filter(domain, event->attr.config1,
+ IOMMU_PMU_FILTER_DOMAIN, idx,
+ event->attr.config1);
+- iommu_pmu_set_filter(pasid, event->attr.config1,
++ iommu_pmu_set_filter(pasid, event->attr.config2,
+ IOMMU_PMU_FILTER_PASID, idx,
+ event->attr.config1);
+ iommu_pmu_set_filter(ats, event->attr.config2,
+diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
+index ec47ec81f0ecd..4d269df0082fb 100644
+--- a/drivers/iommu/intel/svm.c
++++ b/drivers/iommu/intel/svm.c
+@@ -67,7 +67,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
+ struct page *pages;
+ int irq, ret;
+
+- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
++ pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
+ if (!pages) {
+ pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
+ iommu->name);
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 82c9bb404ccca..4f3c35f1320d4 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1474,7 +1474,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ for (j = 0; j < i; j++)
+ if (r1_bio->bios[j])
+ rdev_dec_pending(conf->mirrors[j].rdev, mddev);
+- free_r1bio(r1_bio);
++ mempool_free(r1_bio, &conf->r1bio_pool);
+ allow_barrier(conf, bio->bi_iter.bi_sector);
+
+ if (bio->bi_opf & REQ_NOWAIT) {
+diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
+index 5741adf09a2ef..559a172ebc6cb 100644
+--- a/drivers/media/cec/core/cec-adap.c
++++ b/drivers/media/cec/core/cec-adap.c
+@@ -1151,20 +1151,6 @@ void cec_received_msg_ts(struct cec_adapter *adap,
+ if (valid_la && min_len) {
+ /* These messages have special length requirements */
+ switch (cmd) {
+- case CEC_MSG_TIMER_STATUS:
+- if (msg->msg[2] & 0x10) {
+- switch (msg->msg[2] & 0xf) {
+- case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
+- case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
+- if (msg->len < 5)
+- valid_la = false;
+- break;
+- }
+- } else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
+- if (msg->len < 5)
+- valid_la = false;
+- }
+- break;
+ case CEC_MSG_RECORD_ON:
+ switch (msg->msg[2]) {
+ case CEC_OP_RECORD_SRC_OWN:
+diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
+index 9fb8995b43a1c..13fa8588e38c1 100644
+--- a/drivers/mmc/host/omap.c
++++ b/drivers/mmc/host/omap.c
+@@ -1119,10 +1119,25 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
+
+ host = slot->host;
+
+- if (slot->vsd)
+- gpiod_set_value(slot->vsd, power_on);
+- if (slot->vio)
+- gpiod_set_value(slot->vio, power_on);
++ if (power_on) {
++ if (slot->vsd) {
++ gpiod_set_value(slot->vsd, power_on);
++ msleep(1);
++ }
++ if (slot->vio) {
++ gpiod_set_value(slot->vio, power_on);
++ msleep(1);
++ }
++ } else {
++ if (slot->vio) {
++ gpiod_set_value(slot->vio, power_on);
++ msleep(50);
++ }
++ if (slot->vsd) {
++ gpiod_set_value(slot->vsd, power_on);
++ msleep(50);
++ }
++ }
+
+ if (slot->pdata->set_power != NULL)
+ slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
+@@ -1259,18 +1274,18 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+ slot->pdata = &host->pdata->slots[id];
+
+ /* Check for some optional GPIO controls */
+- slot->vsd = gpiod_get_index_optional(host->dev, "vsd",
+- id, GPIOD_OUT_LOW);
++ slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd",
++ id, GPIOD_OUT_LOW);
+ if (IS_ERR(slot->vsd))
+ return dev_err_probe(host->dev, PTR_ERR(slot->vsd),
+ "error looking up VSD GPIO\n");
+- slot->vio = gpiod_get_index_optional(host->dev, "vio",
+- id, GPIOD_OUT_LOW);
++ slot->vio = devm_gpiod_get_index_optional(host->dev, "vio",
++ id, GPIOD_OUT_LOW);
+ if (IS_ERR(slot->vio))
+ return dev_err_probe(host->dev, PTR_ERR(slot->vio),
+ "error looking up VIO GPIO\n");
+- slot->cover = gpiod_get_index_optional(host->dev, "cover",
+- id, GPIOD_IN);
++ slot->cover = devm_gpiod_get_index_optional(host->dev, "cover",
++ id, GPIOD_IN);
+ if (IS_ERR(slot->cover))
+ return dev_err_probe(host->dev, PTR_ERR(slot->cover),
+ "error looking up cover switch GPIO\n");
+@@ -1384,13 +1399,6 @@ static int mmc_omap_probe(struct platform_device *pdev)
+ if (IS_ERR(host->virt_base))
+ return PTR_ERR(host->virt_base);
+
+- host->slot_switch = gpiod_get_optional(host->dev, "switch",
+- GPIOD_OUT_LOW);
+- if (IS_ERR(host->slot_switch))
+- return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
+- "error looking up slot switch GPIO\n");
+-
+-
+ INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
+ INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
+
+@@ -1409,6 +1417,12 @@ static int mmc_omap_probe(struct platform_device *pdev)
+ host->dev = &pdev->dev;
+ platform_set_drvdata(pdev, host);
+
++ host->slot_switch = devm_gpiod_get_optional(host->dev, "switch",
++ GPIOD_OUT_LOW);
++ if (IS_ERR(host->slot_switch))
++ return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
++ "error looking up slot switch GPIO\n");
++
+ host->id = pdev->id;
+ host->irq = irq;
+ host->phys_base = res->start;
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 40ae44c9945b1..22b97505fa536 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -998,20 +998,173 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
+ mutex_unlock(&priv->reg_mutex);
+ }
+
+-/* On page 205, section "8.6.3 Frame filtering" of the active standard, IEEE Std
+- * 802.1Q™-2022, it is stated that frames with 01:80:C2:00:00:00-0F as MAC DA
+- * must only be propagated to C-VLAN and MAC Bridge components. That means
+- * VLAN-aware and VLAN-unaware bridges. On the switch designs with CPU ports,
+- * these frames are supposed to be processed by the CPU (software). So we make
+- * the switch only forward them to the CPU port. And if received from a CPU
+- * port, forward to a single port. The software is responsible of making the
+- * switch conform to the latter by setting a single port as destination port on
+- * the special tag.
++/* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL)
++ * of the Open Systems Interconnection basic reference model (OSI/RM) are
++ * described: the medium access control (MAC) and logical link control (LLC)
++ * sublayers. The MAC sublayer is the one facing the physical layer.
+ *
+- * This switch intellectual property cannot conform to this part of the standard
+- * fully. Whilst the REV_UN frame tag covers the remaining :04-0D and :0F MAC
+- * DAs, it also includes :22-FF which the scope of propagation is not supposed
+- * to be restricted for these MAC DAs.
++ * In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A
++ * Bridge component comprises a MAC Relay Entity for interconnecting the Ports
++ * of the Bridge, at least two Ports, and higher layer entities with at least a
++ * Spanning Tree Protocol Entity included.
++ *
++ * Each Bridge Port also functions as an end station and shall provide the MAC
++ * Service to an LLC Entity. Each instance of the MAC Service is provided to a
++ * distinct LLC Entity that supports protocol identification, multiplexing, and
++ * demultiplexing, for protocol data unit (PDU) transmission and reception by
++ * one or more higher layer entities.
++ *
++ * It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC
++ * Entity associated with each Bridge Port is modeled as being directly
++ * connected to the attached Local Area Network (LAN).
++ *
++ * On the switch with CPU port architecture, CPU port functions as Management
++ * Port, and the Management Port functionality is provided by software which
++ * functions as an end station. Software is connected to an IEEE 802 LAN that is
++ * wholly contained within the system that incorporates the Bridge. Software
++ * provides access to the LLC Entity associated with each Bridge Port by the
++ * value of the source port field on the special tag on the frame received by
++ * software.
++ *
++ * We call frames that carry control information to determine the active
++ * topology and current extent of each Virtual Local Area Network (VLAN), i.e.,
++ * spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration
++ * Protocol Data Units (MVRPDUs), and frames from other link constrained
++ * protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and
++ * Link Layer Discovery Protocol (LLDP), link-local frames. They are not
++ * forwarded by a Bridge. Permanently configured entries in the filtering
++ * database (FDB) ensure that such frames are discarded by the Forwarding
++ * Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail:
++ *
++ * Each of the reserved MAC addresses specified in Table 8-1
++ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be
++ * permanently configured in the FDB in C-VLAN components and ERs.
++ *
++ * Each of the reserved MAC addresses specified in Table 8-2
++ * (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently
++ * configured in the FDB in S-VLAN components.
++ *
++ * Each of the reserved MAC addresses specified in Table 8-3
++ * (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in
++ * TPMR components.
++ *
++ * The FDB entries for reserved MAC addresses shall specify filtering for all
++ * Bridge Ports and all VIDs. Management shall not provide the capability to
++ * modify or remove entries for reserved MAC addresses.
++ *
++ * The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of
++ * propagation of PDUs within a Bridged Network, as follows:
++ *
++ * The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no
++ * conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN)
++ * component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward.
++ * PDUs transmitted using this destination address, or any other addresses
++ * that appear in Table 8-1, Table 8-2, and Table 8-3
++ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can
++ * therefore travel no further than those stations that can be reached via a
++ * single individual LAN from the originating station.
++ *
++ * The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03), is an
++ * address that no conformant S-VLAN component, C-VLAN component, or MAC
++ * Bridge can forward; however, this address is relayed by a TPMR component.
++ * PDUs using this destination address, or any of the other addresses that
++ * appear in both Table 8-1 and Table 8-2 but not in Table 8-3
++ * (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by
++ * any TPMRs but will propagate no further than the nearest S-VLAN component,
++ * C-VLAN component, or MAC Bridge.
++ *
++ * The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address
++ * that no conformant C-VLAN component, MAC Bridge can forward; however, it is
++ * relayed by TPMR components and S-VLAN components. PDUs using this
++ * destination address, or any of the other addresses that appear in Table 8-1
++ * but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]),
++ * will be relayed by TPMR components and S-VLAN components but will propagate
++ * no further than the nearest C-VLAN component or MAC Bridge.
++ *
++ * Because the LLC Entity associated with each Bridge Port is provided via CPU
++ * port, we must not filter these frames but forward them to CPU port.
++ *
++ * In a Bridge, the transmission Port is mainly determined by ingress and egress
++ * rules, FDB, and spanning tree Port State functions of the Forwarding Process.
++ * For link-local frames, only CPU port should be designated as destination port
++ * in the FDB, and the other functions of the Forwarding Process must not
++ * interfere with the decision of the transmission Port. We call this process
++ * trapping frames to CPU port.
++ *
++ * Therefore, on the switch with CPU port architecture, link-local frames must
++ * be trapped to CPU port, and certain link-local frames received by a Port of a
++ * Bridge comprising a TPMR component or an S-VLAN component must be excluded
++ * from it.
++ *
++ * A Bridge of the switch with CPU port architecture cannot comprise a Two-Port
++ * MAC Relay (TPMR) component as a TPMR component supports only a subset of the
++ * functionality of a MAC Bridge. A Bridge comprising two Ports (Management Port
++ * doesn't count) of this architecture will either function as a standard MAC
++ * Bridge or a standard VLAN Bridge.
++ *
++ * Therefore, a Bridge of this architecture can only comprise S-VLAN components,
++ * C-VLAN components, or MAC Bridge components. Since there's no TPMR component,
++ * we don't need to relay PDUs using the destination addresses specified on the
++ * Nearest non-TPMR section, and the portion of the Nearest Customer Bridge
++ * section where they must be relayed by TPMR components.
++ *
++ * One option to trap link-local frames to CPU port is to add static FDB entries
++ * with CPU port designated as destination port. However, because
++ * Independent VLAN Learning (IVL) is being used on every VID, each entry only
++ * applies to a single VLAN Identifier (VID). For a Bridge comprising a MAC
++ * Bridge component or a C-VLAN component, there would have to be 16 times 4096
++ * entries. This switch intellectual property can only hold a maximum of 2048
++ * entries. Using this option, there also isn't a mechanism to prevent
++ * link-local frames from being discarded when the spanning tree Port State of
++ * the reception Port is discarding.
++ *
++ * The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4
++ * registers. Whilst this applies to every VID, it doesn't contain all of the
++ * reserved MAC addresses without affecting the remaining Standard Group MAC
++ * Addresses. The REV_UN frame tag utilised using the RGAC4 register covers the
++ * remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination
++ * addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF
++ * destination addresses which may be relayed by MAC Bridges or VLAN Bridges.
++ * The latter option provides better but not complete conformance.
++ *
++ * This switch intellectual property also does not provide a mechanism to trap
++ * link-local frames with specific destination addresses to CPU port by Bridge,
++ * to conform to the filtering rules for the distinct Bridge components.
++ *
++ * Therefore, regardless of the type of the Bridge component, link-local frames
++ * with these destination addresses will be trapped to CPU port:
++ *
++ * 01-80-C2-00-00-[00,01,02,03,0E]
++ *
++ * In a Bridge comprising a MAC Bridge component or a C-VLAN component:
++ *
++ * Link-local frames with these destination addresses won't be trapped to CPU
++ * port which won't conform to IEEE Std 802.1Q-2022:
++ *
++ * 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F]
++ *
++ * In a Bridge comprising an S-VLAN component:
++ *
++ * Link-local frames with these destination addresses will be trapped to CPU
++ * port which won't conform to IEEE Std 802.1Q-2022:
++ *
++ * 01-80-C2-00-00-00
++ *
++ * Link-local frames with these destination addresses won't be trapped to CPU
++ * port which won't conform to IEEE Std 802.1Q-2022:
++ *
++ * 01-80-C2-00-00-[04,05,06,07,08,09,0A]
++ *
++ * To trap link-local frames to CPU port as conformant as this switch
++ * intellectual property can allow, link-local frames are made to be regarded as
++ * Bridge Protocol Data Units (BPDUs). This is because this switch intellectual
++ * property only lets the frames regarded as BPDUs bypass the spanning tree Port
++ * State function of the Forwarding Process.
++ *
++ * The only remaining interference is the ingress rules. When the reception Port
++ * has no PVID assigned on software, VLAN-untagged frames won't be allowed in.
++ * There doesn't seem to be a mechanism on the switch intellectual property to
++ * have link-local frames bypass this function of the Forwarding Process.
+ */
+ static void
+ mt753x_trap_frames(struct mt7530_priv *priv)
+@@ -1019,35 +1172,43 @@ mt753x_trap_frames(struct mt7530_priv *priv)
+ /* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
+ * VLAN-untagged.
+ */
+- mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_EG_TAG_MASK |
+- MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
+- MT753X_BPDU_PORT_FW_MASK,
+- MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+- MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+- MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+- MT753X_BPDU_CPU_ONLY);
++ mt7530_rmw(priv, MT753X_BPC,
++ MT753X_PAE_BPDU_FR | MT753X_PAE_EG_TAG_MASK |
++ MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
++ MT753X_BPDU_PORT_FW_MASK,
++ MT753X_PAE_BPDU_FR |
++ MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++ MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
++ MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++ MT753X_BPDU_CPU_ONLY);
+
+ /* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
+ * them VLAN-untagged.
+ */
+- mt7530_rmw(priv, MT753X_RGAC1, MT753X_R02_EG_TAG_MASK |
+- MT753X_R02_PORT_FW_MASK | MT753X_R01_EG_TAG_MASK |
+- MT753X_R01_PORT_FW_MASK,
+- MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+- MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+- MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+- MT753X_BPDU_CPU_ONLY);
++ mt7530_rmw(priv, MT753X_RGAC1,
++ MT753X_R02_BPDU_FR | MT753X_R02_EG_TAG_MASK |
++ MT753X_R02_PORT_FW_MASK | MT753X_R01_BPDU_FR |
++ MT753X_R01_EG_TAG_MASK | MT753X_R01_PORT_FW_MASK,
++ MT753X_R02_BPDU_FR |
++ MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++ MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
++ MT753X_R01_BPDU_FR |
++ MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++ MT753X_BPDU_CPU_ONLY);
+
+ /* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
+ * them VLAN-untagged.
+ */
+- mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_EG_TAG_MASK |
+- MT753X_R0E_PORT_FW_MASK | MT753X_R03_EG_TAG_MASK |
+- MT753X_R03_PORT_FW_MASK,
+- MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+- MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+- MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+- MT753X_BPDU_CPU_ONLY);
++ mt7530_rmw(priv, MT753X_RGAC2,
++ MT753X_R0E_BPDU_FR | MT753X_R0E_EG_TAG_MASK |
++ MT753X_R0E_PORT_FW_MASK | MT753X_R03_BPDU_FR |
++ MT753X_R03_EG_TAG_MASK | MT753X_R03_PORT_FW_MASK,
++ MT753X_R0E_BPDU_FR |
++ MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++ MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
++ MT753X_R03_BPDU_FR |
++ MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++ MT753X_BPDU_CPU_ONLY);
+ }
+
+ static int
+diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
+index 75bc9043c8c0a..ddefeb69afda1 100644
+--- a/drivers/net/dsa/mt7530.h
++++ b/drivers/net/dsa/mt7530.h
+@@ -65,6 +65,7 @@ enum mt753x_id {
+
+ /* Registers for BPDU and PAE frame control */
+ #define MT753X_BPC 0x24
++#define MT753X_PAE_BPDU_FR BIT(25)
+ #define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22)
+ #define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
+ #define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
+@@ -75,20 +76,24 @@ enum mt753x_id {
+
+ /* Register for :01 and :02 MAC DA frame control */
+ #define MT753X_RGAC1 0x28
++#define MT753X_R02_BPDU_FR BIT(25)
+ #define MT753X_R02_EG_TAG_MASK GENMASK(24, 22)
+ #define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
+ #define MT753X_R02_PORT_FW_MASK GENMASK(18, 16)
+ #define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
++#define MT753X_R01_BPDU_FR BIT(9)
+ #define MT753X_R01_EG_TAG_MASK GENMASK(8, 6)
+ #define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
+ #define MT753X_R01_PORT_FW_MASK GENMASK(2, 0)
+
+ /* Register for :03 and :0E MAC DA frame control */
+ #define MT753X_RGAC2 0x2c
++#define MT753X_R0E_BPDU_FR BIT(25)
+ #define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22)
+ #define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
+ #define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
+ #define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
++#define MT753X_R03_BPDU_FR BIT(9)
+ #define MT753X_R03_EG_TAG_MASK GENMASK(8, 6)
+ #define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
+ #define MT753X_R03_PORT_FW_MASK GENMASK(2, 0)
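Note on the register helpers above: the MT753X_* field macros are built from the standard kernel bitfield helpers, so each mt7530_rmw() call in mt753x_trap_frames() simply composes a mask and a value out of BIT()/GENMASK()/FIELD_PREP(). A minimal sketch of that composition follows; the field codes 4 and 6 are hypothetical placeholders, not the real MT7530_VLAN_EG_UNTAGGED/MT753X_BPDU_CPU_ONLY values.

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>

/* Sketch only: GENMASK(24, 22) builds the contiguous mask 0x01c00000 and
 * FIELD_PREP() shifts a field code into it, e.g.
 * FIELD_PREP(GENMASK(24, 22), 4) == 0x01000000. BIT(25) corresponds to the
 * new *_BPDU_FR flag added by this patch.
 */
static u32 bpc_value_sketch(void)
{
	return BIT(25) |                          /* e.g. MT753X_PAE_BPDU_FR   */
	       FIELD_PREP(GENMASK(24, 22), 4) |   /* hypothetical EG_TAG code  */
	       FIELD_PREP(GENMASK(18, 16), 6);    /* hypothetical PORT_FW code */
}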
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 633b321d7fdd9..4db689372980e 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -362,7 +362,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+ io_sq->bounce_buf_ctrl.next_to_use = 0;
+
+- size = io_sq->bounce_buf_ctrl.buffer_size *
++ size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
+ io_sq->bounce_buf_ctrl.buffers_num;
+
+ dev_node = dev_to_node(ena_dev->dmadev);
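The (size_t) cast added above matters because the buffer_size and buffers_num fields are narrower than size_t: without the cast the product is computed in 32-bit arithmetic and can wrap before it is widened for the allocation size. A minimal stand-alone sketch (not driver code) of the difference:

#include <stdint.h>
#include <stddef.h>

static void size_cast_sketch(void)
{
	uint32_t buffer_size = 70000, buffers_num = 70000;   /* hypothetical values  */
	size_t wrapped = buffer_size * buffers_num;           /* wraps in 32 bits     */
	size_t widened = (size_t)buffer_size * buffers_num;   /* 64-bit math on LP64  */

	(void)wrapped;
	(void)widened;
}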
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 5482015411f2f..95ed32542edfe 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -696,8 +696,11 @@ void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
+ {
+ bool print_once = true;
++ bool is_xdp_ring;
+ u32 i;
+
++ is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
++
+ for (i = 0; i < tx_ring->ring_size; i++) {
+ struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
+
+@@ -717,10 +720,15 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
+
+ ena_unmap_tx_buff(tx_ring, tx_info);
+
+- dev_kfree_skb_any(tx_info->skb);
++ if (is_xdp_ring)
++ xdp_return_frame(tx_info->xdpf);
++ else
++ dev_kfree_skb_any(tx_info->skb);
+ }
+- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+- tx_ring->qid));
++
++ if (!is_xdp_ring)
++ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
++ tx_ring->qid));
+ }
+
+ static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
+@@ -3421,10 +3429,11 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
+ {
+ struct ena_ring *tx_ring;
+ struct ena_ring *rx_ring;
+- int i, budget, rc;
++ int qid, budget, rc;
+ int io_queue_count;
+
+ io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
++
+ /* Make sure the driver doesn't turn the device in other process */
+ smp_rmb();
+
+@@ -3437,27 +3446,29 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
+ if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
+ return;
+
+- budget = ENA_MONITORED_TX_QUEUES;
++ budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES);
+
+- for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
+- tx_ring = &adapter->tx_ring[i];
+- rx_ring = &adapter->rx_ring[i];
++ qid = adapter->last_monitored_tx_qid;
++
++ while (budget) {
++ qid = (qid + 1) % io_queue_count;
++
++ tx_ring = &adapter->tx_ring[qid];
++ rx_ring = &adapter->rx_ring[qid];
+
+ rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
+ if (unlikely(rc))
+ return;
+
+- rc = !ENA_IS_XDP_INDEX(adapter, i) ?
++ rc = !ENA_IS_XDP_INDEX(adapter, qid) ?
+ check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
+ if (unlikely(rc))
+ return;
+
+ budget--;
+- if (!budget)
+- break;
+ }
+
+- adapter->last_monitored_tx_qid = i % io_queue_count;
++ adapter->last_monitored_tx_qid = qid;
+ }
+
+ /* trigger napi schedule after 2 consecutive detections */
+diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
+index fc1c4ef73ba32..34d73c72f7803 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_xdp.c
++++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c
+@@ -89,7 +89,7 @@ int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
+
+ rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
+ if (unlikely(rc))
+- return rc;
++ goto err;
+
+ ena_tx_ctx.req_id = req_id;
+
+@@ -112,7 +112,9 @@ int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
+
+ error_unmap_dma:
+ ena_unmap_tx_buff(tx_ring, tx_info);
++err:
+ tx_info->xdpf = NULL;
++
+ return rc;
+ }
+
+diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
+index 7658a72867675..dd4f0965bbe64 100644
+--- a/drivers/net/ethernet/amd/pds_core/core.c
++++ b/drivers/net/ethernet/amd/pds_core/core.c
+@@ -595,6 +595,16 @@ void pdsc_fw_up(struct pdsc *pdsc)
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
+ }
+
++void pdsc_pci_reset_thread(struct work_struct *work)
++{
++ struct pdsc *pdsc = container_of(work, struct pdsc, pci_reset_work);
++ struct pci_dev *pdev = pdsc->pdev;
++
++ pci_dev_get(pdev);
++ pci_reset_function(pdev);
++ pci_dev_put(pdev);
++}
++
+ static void pdsc_check_pci_health(struct pdsc *pdsc)
+ {
+ u8 fw_status;
+@@ -609,8 +619,8 @@ static void pdsc_check_pci_health(struct pdsc *pdsc)
+ if (fw_status != PDS_RC_BAD_PCI)
+ return;
+
+- pdsc_reset_prepare(pdsc->pdev);
+- pdsc_reset_done(pdsc->pdev);
++ /* prevent deadlock between pdsc_reset_prepare and pdsc_health_thread */
++ queue_work(pdsc->wq, &pdsc->pci_reset_work);
+ }
+
+ void pdsc_health_thread(struct work_struct *work)
+diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
+index 110c4b826b22d..401ff56eba0dc 100644
+--- a/drivers/net/ethernet/amd/pds_core/core.h
++++ b/drivers/net/ethernet/amd/pds_core/core.h
+@@ -197,6 +197,7 @@ struct pdsc {
+ struct pdsc_qcq notifyqcq;
+ u64 last_eid;
+ struct pdsc_viftype *viftype_status;
++ struct work_struct pci_reset_work;
+ };
+
+ /** enum pds_core_dbell_bits - bitwise composition of dbell values.
+@@ -283,9 +284,6 @@ int pdsc_devcmd_init(struct pdsc *pdsc);
+ int pdsc_devcmd_reset(struct pdsc *pdsc);
+ int pdsc_dev_init(struct pdsc *pdsc);
+
+-void pdsc_reset_prepare(struct pci_dev *pdev);
+-void pdsc_reset_done(struct pci_dev *pdev);
+-
+ int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
+ irq_handler_t handler, void *data);
+ void pdsc_intr_free(struct pdsc *pdsc, int index);
+@@ -315,5 +313,6 @@ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
+
+ void pdsc_fw_down(struct pdsc *pdsc);
+ void pdsc_fw_up(struct pdsc *pdsc);
++void pdsc_pci_reset_thread(struct work_struct *work);
+
+ #endif /* _PDSC_H_ */
+diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
+index e65a1632df505..bfb79c5aac391 100644
+--- a/drivers/net/ethernet/amd/pds_core/dev.c
++++ b/drivers/net/ethernet/amd/pds_core/dev.c
+@@ -229,6 +229,9 @@ int pdsc_devcmd_reset(struct pdsc *pdsc)
+ .reset.opcode = PDS_CORE_CMD_RESET,
+ };
+
++ if (!pdsc_is_fw_running(pdsc))
++ return 0;
++
+ return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+ }
+
+diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
+index 0050c5894563b..a375d612d2875 100644
+--- a/drivers/net/ethernet/amd/pds_core/main.c
++++ b/drivers/net/ethernet/amd/pds_core/main.c
+@@ -238,6 +238,7 @@ static int pdsc_init_pf(struct pdsc *pdsc)
+ snprintf(wq_name, sizeof(wq_name), "%s.%d", PDS_CORE_DRV_NAME, pdsc->uid);
+ pdsc->wq = create_singlethread_workqueue(wq_name);
+ INIT_WORK(&pdsc->health_work, pdsc_health_thread);
++ INIT_WORK(&pdsc->pci_reset_work, pdsc_pci_reset_thread);
+ timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
+ pdsc->wdtimer_period = PDSC_WATCHDOG_SECS * HZ;
+
+@@ -468,7 +469,7 @@ static void pdsc_restart_health_thread(struct pdsc *pdsc)
+ mod_timer(&pdsc->wdtimer, jiffies + 1);
+ }
+
+-void pdsc_reset_prepare(struct pci_dev *pdev)
++static void pdsc_reset_prepare(struct pci_dev *pdev)
+ {
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+
+@@ -477,10 +478,11 @@ void pdsc_reset_prepare(struct pci_dev *pdev)
+
+ pdsc_unmap_bars(pdsc);
+ pci_release_regions(pdev);
+- pci_disable_device(pdev);
++ if (pci_is_enabled(pdev))
++ pci_disable_device(pdev);
+ }
+
+-void pdsc_reset_done(struct pci_dev *pdev)
++static void pdsc_reset_done(struct pci_dev *pdev)
+ {
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+ struct device *dev = pdsc->dev;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 39845d556bafc..5e6e32d708e24 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -11526,6 +11526,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ /* VF-reps may need to be re-opened after the PF is re-opened */
+ if (BNXT_PF(bp))
+ bnxt_vf_reps_open(bp);
++ if (bp->ptp_cfg)
++ atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
+ bnxt_ptp_init_rtc(bp, true);
+ bnxt_ptp_cfg_tstamp_filters(bp);
+ return 0;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+index 93f9bd55020f2..195c02dc06830 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -210,6 +210,9 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
+ if (err)
+ return;
+
++ if (edev->ulp_tbl->msix_requested)
++ bnxt_fill_msix_vecs(bp, edev->msix_entries);
++
+ if (aux_priv) {
+ struct auxiliary_device *adev;
+
+@@ -392,12 +395,13 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
+ if (!edev)
+ goto aux_dev_uninit;
+
++ aux_priv->edev = edev;
++
+ ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
+ if (!ulp)
+ goto aux_dev_uninit;
+
+ edev->ulp_tbl = ulp;
+- aux_priv->edev = edev;
+ bp->edev = edev;
+ bnxt_set_edev_info(edev, bp);
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 66203a90f052b..42db213fb69a6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -4721,18 +4721,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
++ }
+
+- /* Set chan/link to backpressure TL3 instead of TL2 */
+- rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
++ /* Set chan/link to backpressure TL3 instead of TL2 */
++ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+
+- /* Disable SQ manager's sticky mode operation (set TM6 = 0)
+- * This sticky mode is known to cause SQ stalls when multiple
+- * SQs are mapped to same SMQ and transmitting pkts at a time.
+- */
+- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+- cfg &= ~BIT_ULL(15);
+- rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+- }
++ /* Disable SQ manager's sticky mode operation (set TM6 = 0)
++ * This sticky mode is known to cause SQ stalls when multiple
++ * SQs are mapped to same SMQ and transmitting pkts at a time.
++ */
++ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
++ cfg &= ~BIT_ULL(15);
++ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+
+ ltdefs = rvu->kpu.lt_def;
+ /* Calibrate X2P bus to check if CGX/LBK links are fine */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+index 1e77bbf5d22a1..1723e9912ae07 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+@@ -382,6 +382,7 @@ static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
+ otx2_qos_read_txschq_cfg_tl(node, cfg);
+ cnt = cfg->static_node_pos[node->level];
+ cfg->schq_contig_list[node->level][cnt] = node->schq;
++ cfg->schq_index_used[node->level][cnt] = true;
+ cfg->schq_contig[node->level]++;
+ cfg->static_node_pos[node->level]++;
+ otx2_qos_read_txschq_cfg_schq(node, cfg);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+index 86f1854698b4e..883c044852f1d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+@@ -95,9 +95,15 @@ static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo *
+ }
+
+ static inline u8
++mlx5e_ptp_metadata_fifo_peek(struct mlx5e_ptp_metadata_fifo *fifo)
++{
++ return fifo->data[fifo->mask & fifo->cc];
++}
++
++static inline void
+ mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo)
+ {
+- return fifo->data[fifo->mask & fifo->cc++];
++ fifo->cc++;
+ }
+
+ static inline void
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+index 34adf8c3f81a0..922bc5b7c10e3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+@@ -83,24 +83,25 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+
+ txq_ix = mlx5e_qid_from_qos(chs, node_qid);
+
+- WARN_ON(node_qid > priv->htb_max_qos_sqs);
+- if (node_qid == priv->htb_max_qos_sqs) {
+- struct mlx5e_sq_stats *stats, **stats_list = NULL;
+-
+- if (priv->htb_max_qos_sqs == 0) {
+- stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
+- sizeof(*stats_list),
+- GFP_KERNEL);
+- if (!stats_list)
+- return -ENOMEM;
+- }
++ WARN_ON(node_qid >= mlx5e_htb_cur_leaf_nodes(priv->htb));
++ if (!priv->htb_qos_sq_stats) {
++ struct mlx5e_sq_stats **stats_list;
++
++ stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
++ sizeof(*stats_list), GFP_KERNEL);
++ if (!stats_list)
++ return -ENOMEM;
++
++ WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
++ }
++
++ if (!priv->htb_qos_sq_stats[node_qid]) {
++ struct mlx5e_sq_stats *stats;
++
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+- if (!stats) {
+- kvfree(stats_list);
++ if (!stats)
+ return -ENOMEM;
+- }
+- if (stats_list)
+- WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
++
+ WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
+ /* Order htb_max_qos_sqs increment after writing the array pointer.
+ * Pairs with smp_load_acquire in en_stats.c.
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+index f675b1926340f..f66bbc8464645 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+@@ -57,6 +57,7 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
+
+ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
+ {
++ mutex_lock(selq->state_lock);
+ WARN_ON_ONCE(selq->is_prepared);
+
+ kvfree(selq->standby);
+@@ -67,6 +68,7 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
+
+ kvfree(selq->standby);
+ selq->standby = NULL;
++ mutex_unlock(selq->state_lock);
+ }
+
+ void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index cc51ce16df14a..93461b0c5703b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -451,6 +451,23 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
+
+ mutex_lock(&priv->state_lock);
+
++	/* If RXFH is configured, changing the number of channels is allowed only if
++ * it does not require resizing the RSS table. This is because the previous
++ * configuration may no longer be compatible with the new RSS table.
++ */
++ if (netif_is_rxfh_configured(priv->netdev)) {
++ int cur_rqt_size = mlx5e_rqt_size(priv->mdev, cur_params->num_channels);
++ int new_rqt_size = mlx5e_rqt_size(priv->mdev, count);
++
++ if (new_rqt_size != cur_rqt_size) {
++ err = -EINVAL;
++ netdev_err(priv->netdev,
++ "%s: RXFH is configured, block changing channels number that affects RSS table size (new: %d, current: %d)\n",
++ __func__, new_rqt_size, cur_rqt_size);
++ goto out;
++ }
++ }
++
+ /* Don't allow changing the number of channels if HTB offload is active,
+ * because the numeration of the QoS SQs will change, while per-queue
+ * qdiscs are attached.
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index c8e8f512803ef..952f1f98138cc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5695,9 +5695,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
+ kfree(priv->tx_rates);
+ kfree(priv->txq2sq);
+ destroy_workqueue(priv->wq);
+- mutex_lock(&priv->state_lock);
+ mlx5e_selq_cleanup(&priv->selq);
+- mutex_unlock(&priv->state_lock);
+ free_cpumask_var(priv->scratchpad.cpumask);
+
+ for (i = 0; i < priv->htb_max_qos_sqs; i++)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index 2fa076b23fbea..e21a3b4128ce8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -398,6 +398,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
+ u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
+
++ mlx5e_ptp_metadata_fifo_pop(&sq->ptpsq->metadata_freelist);
++
+ mlx5e_skb_cb_hwtstamp_init(skb);
+ mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
+ metadata_index);
+@@ -496,9 +498,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+
+ err_drop:
+ stats->dropped++;
+- if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+- mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
+- be32_to_cpu(eseg->flow_table_metadata));
+ dev_kfree_skb_any(skb);
+ mlx5e_tx_flush(sq);
+ }
+@@ -657,7 +656,7 @@ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
+ {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ eseg->flow_table_metadata =
+- cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist));
++ cpu_to_be32(mlx5e_ptp_metadata_fifo_peek(&ptpsq->metadata_freelist));
+ }
+
+ static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index e6bfa7e4f146c..cf085a478e3e4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1664,6 +1664,16 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
+ return err;
+ }
+
++static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
++ struct mlx5_pkt_reformat *p2)
++{
++ return p1->owner == p2->owner &&
++ (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
++ p1->id == p2->id :
++ mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
++ mlx5_fs_dr_action_get_pkt_reformat_id(p2));
++}
++
+ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+ struct mlx5_flow_destination *d2)
+ {
+@@ -1675,8 +1685,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
+ (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
+ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
+- (d1->vport.pkt_reformat->id ==
+- d2->vport.pkt_reformat->id) : true)) ||
++ mlx5_pkt_reformat_cmp(d1->vport.pkt_reformat,
++ d2->vport.pkt_reformat) : true)) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+ d1->ft == d2->ft) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
+@@ -1808,8 +1818,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
+ }
+ trace_mlx5_fs_set_fte(fte, false);
+
++ /* Link newly added rules into the tree. */
+ for (i = 0; i < handle->num_rules; i++) {
+- if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
++ if (!handle->rule[i]->node.parent) {
+ tree_add_node(&handle->rule[i]->node, &fte->node);
+ trace_mlx5_fs_add_rule(handle->rule[i]);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index bccf6e53556c6..131a836c127e3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1480,6 +1480,14 @@ int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
+ if (err)
+ goto err_register;
+
++ err = mlx5_crdump_enable(dev);
++ if (err)
++ mlx5_core_err(dev, "mlx5_crdump_enable failed with error code %d\n", err);
++
++ err = mlx5_hwmon_dev_register(dev);
++ if (err)
++ mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
++
+ mutex_unlock(&dev->intf_state_mutex);
+ return 0;
+
+@@ -1505,7 +1513,10 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
+ int err;
+
+ devl_lock(devlink);
++ devl_register(devlink);
+ err = mlx5_init_one_devl_locked(dev);
++ if (err)
++ devl_unregister(devlink);
+ devl_unlock(devlink);
+ return err;
+ }
+@@ -1517,6 +1528,8 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
+ devl_lock(devlink);
+ mutex_lock(&dev->intf_state_mutex);
+
++ mlx5_hwmon_dev_unregister(dev);
++ mlx5_crdump_disable(dev);
+ mlx5_unregister_device(dev);
+
+ if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+@@ -1534,6 +1547,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
+ mlx5_function_teardown(dev, true);
+ out:
+ mutex_unlock(&dev->intf_state_mutex);
++ devl_unregister(devlink);
+ devl_unlock(devlink);
+ }
+
+@@ -1680,16 +1694,20 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
+ }
+
+ devl_lock(devlink);
++ devl_register(devlink);
++
+ err = mlx5_devlink_params_register(priv_to_devlink(dev));
+- devl_unlock(devlink);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
+ goto query_hca_caps_err;
+ }
+
++ devl_unlock(devlink);
+ return 0;
+
+ query_hca_caps_err:
++ devl_unregister(devlink);
++ devl_unlock(devlink);
+ mlx5_function_disable(dev, true);
+ out:
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+@@ -1702,6 +1720,7 @@ void mlx5_uninit_one_light(struct mlx5_core_dev *dev)
+
+ devl_lock(devlink);
+ mlx5_devlink_params_unregister(priv_to_devlink(dev));
++ devl_unregister(devlink);
+ devl_unlock(devlink);
+ if (dev->state != MLX5_DEVICE_STATE_UP)
+ return;
+@@ -1943,16 +1962,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ goto err_init_one;
+ }
+
+- err = mlx5_crdump_enable(dev);
+- if (err)
+- dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
+-
+- err = mlx5_hwmon_dev_register(dev);
+- if (err)
+- mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
+-
+ pci_save_state(pdev);
+- devlink_register(devlink);
+ return 0;
+
+ err_init_one:
+@@ -1973,16 +1983,9 @@ static void remove_one(struct pci_dev *pdev)
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+- /* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using
+- * devlink notify APIs.
+- * Hence, we must drain them before unregistering the devlink.
+- */
+ mlx5_drain_fw_reset(dev);
+ mlx5_drain_health_wq(dev);
+- devlink_unregister(devlink);
+ mlx5_sriov_disable(pdev, false);
+- mlx5_hwmon_dev_unregister(dev);
+- mlx5_crdump_disable(dev);
+ mlx5_uninit_one(dev);
+ mlx5_pci_close(dev);
+ mlx5_mdev_uninit(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+index 4dcf995cb1a20..6bac8ad70ba60 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+@@ -19,6 +19,7 @@
+ #define MLX5_IRQ_CTRL_SF_MAX 8
+ /* min num of vectors for SFs to be enabled */
+ #define MLX5_IRQ_VEC_COMP_BASE_SF 2
++#define MLX5_IRQ_VEC_COMP_BASE 1
+
+ #define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
+ #define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
+@@ -246,6 +247,7 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
+ return;
+ }
+
++ vecidx -= MLX5_IRQ_VEC_COMP_BASE;
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
+ }
+
+@@ -585,7 +587,7 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
+ struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
+ struct mlx5_irq_pool *pool = table->pcif_pool;
+ struct irq_affinity_desc af_desc;
+- int offset = 1;
++ int offset = MLX5_IRQ_VEC_COMP_BASE;
+
+ if (!pool->xa_num_irqs.max)
+ offset = 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+index 169c2c68ed5c2..e3bf8c7e4baa6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+@@ -95,24 +95,28 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
+ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
+ {
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+- struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
++ struct mlx5_core_dev *mdev = sf_dev->mdev;
++ struct devlink *devlink;
+
+- mlx5_drain_health_wq(sf_dev->mdev);
+- devlink_unregister(devlink);
+- if (mlx5_dev_is_lightweight(sf_dev->mdev))
+- mlx5_uninit_one_light(sf_dev->mdev);
++ devlink = priv_to_devlink(mdev);
++ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
++ mlx5_drain_health_wq(mdev);
++ if (mlx5_dev_is_lightweight(mdev))
++ mlx5_uninit_one_light(mdev);
+ else
+- mlx5_uninit_one(sf_dev->mdev);
+- iounmap(sf_dev->mdev->iseg);
+- mlx5_mdev_uninit(sf_dev->mdev);
++ mlx5_uninit_one(mdev);
++ iounmap(mdev->iseg);
++ mlx5_mdev_uninit(mdev);
+ mlx5_devlink_free(devlink);
+ }
+
+ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
+ {
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
++ struct mlx5_core_dev *mdev = sf_dev->mdev;
+
+- mlx5_unload_one(sf_dev->mdev, false);
++ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
++ mlx5_unload_one(mdev, false);
+ }
+
+ static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
+diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
+index e5ec0a363aff8..31f75b4a67fd7 100644
+--- a/drivers/net/ethernet/micrel/ks8851.h
++++ b/drivers/net/ethernet/micrel/ks8851.h
+@@ -368,7 +368,6 @@ union ks8851_tx_hdr {
+ * @rdfifo: FIFO read callback
+ * @wrfifo: FIFO write callback
+ * @start_xmit: start_xmit() implementation callback
+- * @rx_skb: rx_skb() implementation callback
+ * @flush_tx_work: flush_tx_work() implementation callback
+ *
+ * The @statelock is used to protect information in the structure which may
+@@ -423,8 +422,6 @@ struct ks8851_net {
+ struct sk_buff *txp, bool irq);
+ netdev_tx_t (*start_xmit)(struct sk_buff *skb,
+ struct net_device *dev);
+- void (*rx_skb)(struct ks8851_net *ks,
+- struct sk_buff *skb);
+ void (*flush_tx_work)(struct ks8851_net *ks);
+ };
+
+diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
+index 0bf13b38b8f5b..d4cdf3d4f5525 100644
+--- a/drivers/net/ethernet/micrel/ks8851_common.c
++++ b/drivers/net/ethernet/micrel/ks8851_common.c
+@@ -231,16 +231,6 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
+ rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
+ }
+
+-/**
+- * ks8851_rx_skb - receive skbuff
+- * @ks: The device state.
+- * @skb: The skbuff
+- */
+-static void ks8851_rx_skb(struct ks8851_net *ks, struct sk_buff *skb)
+-{
+- ks->rx_skb(ks, skb);
+-}
+-
+ /**
+ * ks8851_rx_pkts - receive packets from the host
+ * @ks: The device information.
+@@ -309,7 +299,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
+ ks8851_dbg_dumpkkt(ks, rxpkt);
+
+ skb->protocol = eth_type_trans(skb, ks->netdev);
+- ks8851_rx_skb(ks, skb);
++ __netif_rx(skb);
+
+ ks->netdev->stats.rx_packets++;
+ ks->netdev->stats.rx_bytes += rxlen;
+@@ -340,6 +330,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ unsigned long flags;
+ unsigned int status;
+
++ local_bh_disable();
++
+ ks8851_lock(ks, &flags);
+
+ status = ks8851_rdreg16(ks, KS_ISR);
+@@ -416,6 +408,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ if (status & IRQ_LCI)
+ mii_check_link(&ks->mii);
+
++ local_bh_enable();
++
+ return IRQ_HANDLED;
+ }
+
+diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
+index 2a7f298542670..381b9cd285ebd 100644
+--- a/drivers/net/ethernet/micrel/ks8851_par.c
++++ b/drivers/net/ethernet/micrel/ks8851_par.c
+@@ -210,16 +210,6 @@ static void ks8851_wrfifo_par(struct ks8851_net *ks, struct sk_buff *txp,
+ iowrite16_rep(ksp->hw_addr, txp->data, len / 2);
+ }
+
+-/**
+- * ks8851_rx_skb_par - receive skbuff
+- * @ks: The device state.
+- * @skb: The skbuff
+- */
+-static void ks8851_rx_skb_par(struct ks8851_net *ks, struct sk_buff *skb)
+-{
+- netif_rx(skb);
+-}
+-
+ static unsigned int ks8851_rdreg16_par_txqcr(struct ks8851_net *ks)
+ {
+ return ks8851_rdreg16_par(ks, KS_TXQCR);
+@@ -298,7 +288,6 @@ static int ks8851_probe_par(struct platform_device *pdev)
+ ks->rdfifo = ks8851_rdfifo_par;
+ ks->wrfifo = ks8851_wrfifo_par;
+ ks->start_xmit = ks8851_start_xmit_par;
+- ks->rx_skb = ks8851_rx_skb_par;
+
+ #define STD_IRQ (IRQ_LCI | /* Link Change */ \
+ IRQ_RXI | /* RX done */ \
+diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
+index 54f2eac11a631..55f6f9f6d030e 100644
+--- a/drivers/net/ethernet/micrel/ks8851_spi.c
++++ b/drivers/net/ethernet/micrel/ks8851_spi.c
+@@ -298,16 +298,6 @@ static unsigned int calc_txlen(unsigned int len)
+ return ALIGN(len + 4, 4);
+ }
+
+-/**
+- * ks8851_rx_skb_spi - receive skbuff
+- * @ks: The device state
+- * @skb: The skbuff
+- */
+-static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
+-{
+- netif_rx(skb);
+-}
+-
+ /**
+ * ks8851_tx_work - process tx packet(s)
+ * @work: The work structure that was scheduled.
+@@ -435,7 +425,6 @@ static int ks8851_probe_spi(struct spi_device *spi)
+ ks->rdfifo = ks8851_rdfifo_spi;
+ ks->wrfifo = ks8851_wrfifo_spi;
+ ks->start_xmit = ks8851_start_xmit_spi;
+- ks->rx_skb = ks8851_rx_skb_spi;
+ ks->flush_tx_work = ks8851_flush_tx_work_spi;
+
+ #define STD_IRQ (IRQ_LCI | /* Link Change */ \
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+index 3a1b1a1f5a195..60dd2fd603a85 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+@@ -731,7 +731,7 @@ static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
+ bool sgmii = false, inband_aneg = false;
+ int err;
+
+- if (port->conf.inband) {
++ if (conf->inband) {
+ if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
+ conf->portmode == PHY_INTERFACE_MODE_QSGMII)
+ inband_aneg = true; /* Cisco-SGMII in-band-aneg */
+@@ -948,7 +948,7 @@ int sparx5_port_pcs_set(struct sparx5 *sparx5,
+ if (err)
+ return -EINVAL;
+
+- if (port->conf.inband) {
++ if (conf->inband) {
+ /* Enable/disable 1G counters in ASM */
+ spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
+ ASM_PORT_CFG_CSC_STAT_DIS,
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 097a8db0d1d99..7f00fca0c538c 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -830,7 +830,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be16 sport;
+ int err;
+
+- if (!pskb_inet_may_pull(skb))
++ if (!skb_vlan_inet_prepare(skb))
+ return -EINVAL;
+
+ if (!gs4)
+@@ -937,7 +937,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be16 sport;
+ int err;
+
+- if (!pskb_inet_may_pull(skb))
++ if (!skb_vlan_inet_prepare(skb))
+ return -EINVAL;
+
+ if (!gs6)
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index d7ce4a1011ea2..ec14bf2a9af05 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -3768,6 +3768,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
+ struct netlink_ext_ack *extack)
+ {
+ struct virtnet_info *vi = netdev_priv(dev);
++ bool update = false;
+ int i;
+
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+@@ -3775,13 +3776,28 @@ static int virtnet_set_rxfh(struct net_device *dev,
+ return -EOPNOTSUPP;
+
+ if (rxfh->indir) {
++ if (!vi->has_rss)
++ return -EOPNOTSUPP;
++
+ for (i = 0; i < vi->rss_indir_table_size; ++i)
+ vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
++ update = true;
+ }
+- if (rxfh->key)
++
++ if (rxfh->key) {
++		/* If either _F_HASH_REPORT or _F_RSS is negotiated, the
++ * device provides hash calculation capabilities, that is,
++ * hash_key is configured.
++ */
++ if (!vi->has_rss && !vi->has_rss_hash_report)
++ return -EOPNOTSUPP;
++
+ memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
++ update = true;
++ }
+
+- virtnet_commit_rss_command(vi);
++ if (update)
++ virtnet_commit_rss_command(vi);
+
+ return 0;
+ }
+@@ -4686,13 +4702,15 @@ static int virtnet_probe(struct virtio_device *vdev)
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
+ vi->has_rss_hash_report = true;
+
+- if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
++ if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
+ vi->has_rss = true;
+
+- if (vi->has_rss || vi->has_rss_hash_report) {
+ vi->rss_indir_table_size =
+ virtio_cread16(vdev, offsetof(struct virtio_net_config,
+ rss_max_indirection_table_length));
++ }
++
++ if (vi->has_rss || vi->has_rss_hash_report) {
+ vi->rss_key_size =
+ virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
+
+diff --git a/drivers/platform/chrome/cros_ec_uart.c b/drivers/platform/chrome/cros_ec_uart.c
+index 68d80559fddc2..eb5eddeb73f72 100644
+--- a/drivers/platform/chrome/cros_ec_uart.c
++++ b/drivers/platform/chrome/cros_ec_uart.c
+@@ -263,12 +263,6 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
+ if (!ec_dev)
+ return -ENOMEM;
+
+- ret = devm_serdev_device_open(dev, serdev);
+- if (ret) {
+- dev_err(dev, "Unable to open UART device");
+- return ret;
+- }
+-
+ serdev_device_set_drvdata(serdev, ec_dev);
+ init_waitqueue_head(&ec_uart->response.wait_queue);
+
+@@ -280,14 +274,6 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
+ return ret;
+ }
+
+- ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
+- if (ret < 0) {
+- dev_err(dev, "Failed to set up host baud rate (%d)", ret);
+- return ret;
+- }
+-
+- serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
+-
+ /* Initialize ec_dev for cros_ec */
+ ec_dev->phys_name = dev_name(dev);
+ ec_dev->dev = dev;
+@@ -301,6 +287,20 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
+
+ serdev_device_set_client_ops(serdev, &cros_ec_uart_client_ops);
+
++ ret = devm_serdev_device_open(dev, serdev);
++ if (ret) {
++ dev_err(dev, "Unable to open UART device");
++ return ret;
++ }
++
++ ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
++ if (ret < 0) {
++ dev_err(dev, "Failed to set up host baud rate (%d)", ret);
++ return ret;
++ }
++
++ serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
++
+ return cros_ec_register(ec_dev);
+ }
+
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index 1abc62b07d24c..05c38e43f140a 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -1792,7 +1792,7 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
+ if (dev_is_sata(device)) {
+ struct ata_link *link = &device->sata_dev.ap->link;
+
+- rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT,
++ rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT,
+ smp_ata_check_ready_type);
+ } else {
+ msleep(2000);
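The hisi_sas change works because ata_wait_after_reset() expects an absolute deadline in jiffies rather than a relative timeout, so the timeout constant has to be anchored to the current time. A minimal sketch of building such a deadline (hypothetical helper, assuming a millisecond timeout as input):

#include <linux/jiffies.h>

static unsigned long phyup_deadline(unsigned int timeout_ms)
{
	/* Convert a relative timeout into the absolute jiffies value that
	 * deadline-based waiters such as ata_wait_after_reset() expect.
	 */
	return jiffies + msecs_to_jiffies(timeout_ms);
}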
+diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
+index 26e6b3e3af431..dcde55c8ee5de 100644
+--- a/drivers/scsi/qla2xxx/qla_edif.c
++++ b/drivers/scsi/qla2xxx/qla_edif.c
+@@ -1100,7 +1100,7 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (fcport->edif.enable) {
+- if (pcnt > app_req.num_ports)
++ if (pcnt >= app_req.num_ports)
+ break;
+
+ app_reply->elem[pcnt].rekey_count =
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 86210e4dd0d35..b2d02dacaebd9 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -285,6 +285,7 @@ sg_open(struct inode *inode, struct file *filp)
+ int dev = iminor(inode);
+ int flags = filp->f_flags;
+ struct request_queue *q;
++ struct scsi_device *device;
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ int retval;
+@@ -301,11 +302,12 @@ sg_open(struct inode *inode, struct file *filp)
+
+ /* This driver's module count bumped by fops_get in <linux/fs.h> */
+ /* Prevent the device driver from vanishing while we sleep */
+- retval = scsi_device_get(sdp->device);
++ device = sdp->device;
++ retval = scsi_device_get(device);
+ if (retval)
+ goto sg_put;
+
+- retval = scsi_autopm_get_device(sdp->device);
++ retval = scsi_autopm_get_device(device);
+ if (retval)
+ goto sdp_put;
+
+@@ -313,7 +315,7 @@ sg_open(struct inode *inode, struct file *filp)
+ * check if O_NONBLOCK. Permits SCSI commands to be issued
+ * during error recovery. Tread carefully. */
+ if (!((flags & O_NONBLOCK) ||
+- scsi_block_when_processing_errors(sdp->device))) {
++ scsi_block_when_processing_errors(device))) {
+ retval = -ENXIO;
+ /* we are in error recovery for this device */
+ goto error_out;
+@@ -344,7 +346,7 @@ sg_open(struct inode *inode, struct file *filp)
+
+ if (sdp->open_cnt < 1) { /* no existing opens */
+ sdp->sgdebug = 0;
+- q = sdp->device->request_queue;
++ q = device->request_queue;
+ sdp->sg_tablesize = queue_max_segments(q);
+ }
+ sfp = sg_add_sfp(sdp);
+@@ -370,10 +372,11 @@ sg_open(struct inode *inode, struct file *filp)
+ error_mutex_locked:
+ mutex_unlock(&sdp->open_rel_lock);
+ error_out:
+- scsi_autopm_put_device(sdp->device);
++ scsi_autopm_put_device(device);
+ sdp_put:
+- scsi_device_put(sdp->device);
+- goto sg_put;
++ kref_put(&sdp->d_ref, sg_device_destroy);
++ scsi_device_put(device);
++ return retval;
+ }
+
+ /* Release resources associated with a successful sg_open()
+@@ -2207,6 +2210,7 @@ sg_remove_sfp_usercontext(struct work_struct *work)
+ {
+ struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
+ struct sg_device *sdp = sfp->parentdp;
++ struct scsi_device *device = sdp->device;
+ Sg_request *srp;
+ unsigned long iflags;
+
+@@ -2232,8 +2236,8 @@ sg_remove_sfp_usercontext(struct work_struct *work)
+ "sg_remove_sfp: sfp=0x%p\n", sfp));
+ kfree(sfp);
+
+- scsi_device_put(sdp->device);
+ kref_put(&sdp->d_ref, sg_device_destroy);
++ scsi_device_put(device);
+ module_put(THIS_MODULE);
+ }
+
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 045f666b4f12a..32686c79c41d6 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2799,9 +2799,19 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+ r = vhost_get_avail_idx(vq, &avail_idx);
+ if (unlikely(r))
+ return false;
++
+ vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
++ if (vq->avail_idx != vq->last_avail_idx) {
++ /* Since we have updated avail_idx, the following
++ * call to vhost_get_vq_desc() will read available
++ * ring entries. Make sure that read happens after
++ * the avail_idx read.
++ */
++ smp_rmb();
++ return false;
++ }
+
+- return vq->avail_idx == vq->last_avail_idx;
++ return true;
+ }
+ EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
+
+@@ -2838,9 +2848,19 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+ &vq->avail->idx, r);
+ return false;
+ }
++
+ vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
++ if (vq->avail_idx != vq->last_avail_idx) {
++ /* Since we have updated avail_idx, the following
++ * call to vhost_get_vq_desc() will read available
++ * ring entries. Make sure that read happens after
++ * the avail_idx read.
++ */
++ smp_rmb();
++ return true;
++ }
+
+- return vq->avail_idx != vq->last_avail_idx;
++ return false;
+ }
+ EXPORT_SYMBOL_GPL(vhost_enable_notify);
+
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 08102883f560a..ab5a833e7d6ad 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1128,6 +1128,9 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+ if (ret)
+ return ret;
+
++ ret = btrfs_record_root_in_trans(trans, node->root);
++ if (ret)
++ return ret;
+ ret = btrfs_update_delayed_inode(trans, node->root, path, node);
+ return ret;
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 5ceb995709b56..6e2715e3f3aa0 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4476,6 +4476,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
+ struct btrfs_trans_handle *trans;
+ struct btrfs_block_rsv block_rsv;
+ u64 root_flags;
++ u64 qgroup_reserved = 0;
+ int ret;
+
+ down_write(&fs_info->subvol_sem);
+@@ -4520,12 +4521,20 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
+ ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
+ if (ret)
+ goto out_undead;
++ qgroup_reserved = block_rsv.qgroup_rsv_reserved;
+
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out_release;
+ }
++ ret = btrfs_record_root_in_trans(trans, root);
++ if (ret) {
++ btrfs_abort_transaction(trans, ret);
++ goto out_end_trans;
++ }
++ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
++ qgroup_reserved = 0;
+ trans->block_rsv = &block_rsv;
+ trans->bytes_reserved = block_rsv.size;
+
+@@ -4584,7 +4593,9 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
+ ret = btrfs_end_transaction(trans);
+ inode->i_flags |= S_DEAD;
+ out_release:
+- btrfs_subvolume_release_metadata(root, &block_rsv);
++ btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
++ if (qgroup_reserved)
++ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
+ out_undead:
+ if (ret) {
+ spin_lock(&dest->root_item_lock);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index bd19aed66605a..6b93fae74403d 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -603,6 +603,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
+ int ret;
+ dev_t anon_dev;
+ u64 objectid;
++ u64 qgroup_reserved = 0;
+
+ root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
+ if (!root_item)
+@@ -640,13 +641,18 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
+ trans_num_items, false);
+ if (ret)
+ goto out_new_inode_args;
++ qgroup_reserved = block_rsv.qgroup_rsv_reserved;
+
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+- btrfs_subvolume_release_metadata(root, &block_rsv);
+- goto out_new_inode_args;
++ goto out_release_rsv;
+ }
++ ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root);
++ if (ret)
++ goto out;
++ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
++ qgroup_reserved = 0;
+ trans->block_rsv = &block_rsv;
+ trans->bytes_reserved = block_rsv.size;
+ /* Tree log can't currently deal with an inode which is a new root. */
+@@ -757,9 +763,11 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
+ out:
+ trans->block_rsv = NULL;
+ trans->bytes_reserved = 0;
+- btrfs_subvolume_release_metadata(root, &block_rsv);
+-
+ btrfs_end_transaction(trans);
++out_release_rsv:
++ btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
++ if (qgroup_reserved)
++ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
+ out_new_inode_args:
+ btrfs_new_inode_args_destroy(&new_inode_args);
+ out_inode:
+@@ -781,6 +789,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ struct btrfs_pending_snapshot *pending_snapshot;
+ unsigned int trans_num_items;
+ struct btrfs_trans_handle *trans;
++ struct btrfs_block_rsv *block_rsv;
++ u64 qgroup_reserved = 0;
+ int ret;
+
+ /* We do not support snapshotting right now. */
+@@ -817,19 +827,19 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ goto free_pending;
+ }
+
+- btrfs_init_block_rsv(&pending_snapshot->block_rsv,
+- BTRFS_BLOCK_RSV_TEMP);
++ block_rsv = &pending_snapshot->block_rsv;
++ btrfs_init_block_rsv(block_rsv, BTRFS_BLOCK_RSV_TEMP);
+ /*
+ * 1 to add dir item
+ * 1 to add dir index
+ * 1 to update parent inode item
+ */
+ trans_num_items = create_subvol_num_items(inherit) + 3;
+- ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
+- &pending_snapshot->block_rsv,
++ ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root, block_rsv,
+ trans_num_items, false);
+ if (ret)
+ goto free_pending;
++ qgroup_reserved = block_rsv->qgroup_rsv_reserved;
+
+ pending_snapshot->dentry = dentry;
+ pending_snapshot->root = root;
+@@ -842,6 +852,13 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ ret = PTR_ERR(trans);
+ goto fail;
+ }
++ ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root);
++ if (ret) {
++ btrfs_end_transaction(trans);
++ goto fail;
++ }
++ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
++ qgroup_reserved = 0;
+
+ trans->pending_snapshot = pending_snapshot;
+
+@@ -871,7 +888,9 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ if (ret && pending_snapshot->snap)
+ pending_snapshot->snap->anon_dev = 0;
+ btrfs_put_root(pending_snapshot->snap);
+- btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv);
++ btrfs_block_rsv_release(fs_info, block_rsv, (u64)-1, NULL);
++ if (qgroup_reserved)
++ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
+ free_pending:
+ if (pending_snapshot->anon_dev)
+ free_anon_bdev(pending_snapshot->anon_dev);
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index a78c6694959aa..132802bd80999 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -4432,6 +4432,8 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
+ BTRFS_QGROUP_RSV_META_PREALLOC);
+ trace_qgroup_meta_convert(root, num_bytes);
+ qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
++ if (!sb_rdonly(fs_info->sb))
++ add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
+ }
+
+ /*
+diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
+index 603ad1459368c..5260677ad51e2 100644
+--- a/fs/btrfs/root-tree.c
++++ b/fs/btrfs/root-tree.c
+@@ -539,13 +539,3 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+ }
+ return ret;
+ }
+-
+-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+- struct btrfs_block_rsv *rsv)
+-{
+- struct btrfs_fs_info *fs_info = root->fs_info;
+- u64 qgroup_to_release;
+-
+- btrfs_block_rsv_release(fs_info, rsv, (u64)-1, &qgroup_to_release);
+- btrfs_qgroup_convert_reserved_meta(root, qgroup_to_release);
+-}
+diff --git a/fs/btrfs/root-tree.h b/fs/btrfs/root-tree.h
+index 8b2c3859e4647..de0175cbdb1e5 100644
+--- a/fs/btrfs/root-tree.h
++++ b/fs/btrfs/root-tree.h
+@@ -8,8 +8,6 @@ struct fscrypt_str;
+ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv,
+ int nitems, bool use_global_rsv);
+-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+- struct btrfs_block_rsv *rsv);
+ int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ u64 ref_id, u64 dirid, u64 sequence,
+ const struct fscrypt_str *name);
+diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
+index 25b3349595e00..865d4af4b3035 100644
+--- a/fs/btrfs/tests/extent-io-tests.c
++++ b/fs/btrfs/tests/extent-io-tests.c
+@@ -11,6 +11,7 @@
+ #include "btrfs-tests.h"
+ #include "../ctree.h"
+ #include "../extent_io.h"
++#include "../disk-io.h"
+ #include "../btrfs_inode.h"
+
+ #define PROCESS_UNLOCK (1 << 0)
+@@ -105,9 +106,11 @@ static void dump_extent_io_tree(const struct extent_io_tree *tree)
+ }
+ }
+
+-static int test_find_delalloc(u32 sectorsize)
++static int test_find_delalloc(u32 sectorsize, u32 nodesize)
+ {
+- struct inode *inode;
++ struct btrfs_fs_info *fs_info;
++ struct btrfs_root *root = NULL;
++ struct inode *inode = NULL;
+ struct extent_io_tree *tmp;
+ struct page *page;
+ struct page *locked_page = NULL;
+@@ -121,12 +124,27 @@ static int test_find_delalloc(u32 sectorsize)
+
+ test_msg("running find delalloc tests");
+
++ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
++ if (!fs_info) {
++ test_std_err(TEST_ALLOC_FS_INFO);
++ return -ENOMEM;
++ }
++
++ root = btrfs_alloc_dummy_root(fs_info);
++ if (IS_ERR(root)) {
++ test_std_err(TEST_ALLOC_ROOT);
++ ret = PTR_ERR(root);
++ goto out;
++ }
++
+ inode = btrfs_new_test_inode();
+ if (!inode) {
+ test_std_err(TEST_ALLOC_INODE);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto out;
+ }
+ tmp = &BTRFS_I(inode)->io_tree;
++ BTRFS_I(inode)->root = root;
+
+ /*
+ * Passing NULL as we don't have fs_info but tracepoints are not used
+@@ -316,6 +334,8 @@ static int test_find_delalloc(u32 sectorsize)
+ process_page_range(inode, 0, total_dirty - 1,
+ PROCESS_UNLOCK | PROCESS_RELEASE);
+ iput(inode);
++ btrfs_free_dummy_root(root);
++ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+ }
+
+@@ -794,7 +814,7 @@ int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
+
+ test_msg("running extent I/O tests");
+
+- ret = test_find_delalloc(sectorsize);
++ ret = test_find_delalloc(sectorsize, nodesize);
+ if (ret)
+ goto out;
+
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index bf8e64c766b63..f1705ae59e4a9 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -747,14 +747,6 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
+ h->reloc_reserved = reloc_reserved;
+ }
+
+- /*
+- * Now that we have found a transaction to be a part of, convert the
+- * qgroup reservation from prealloc to pertrans. A different transaction
+- * can't race in and free our pertrans out from under us.
+- */
+- if (qgroup_reserved)
+- btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+-
+ got_it:
+ if (!current->journal_info)
+ current->journal_info = h;
+@@ -788,8 +780,15 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
+ * not just freed.
+ */
+ btrfs_end_transaction(h);
+- return ERR_PTR(ret);
++ goto reserve_fail;
+ }
++ /*
++ * Now that we have found a transaction to be a part of, convert the
++ * qgroup reservation from prealloc to pertrans. A different transaction
++ * can't race in and free our pertrans out from under us.
++ */
++ if (qgroup_reserved)
++ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+
+ return h;
+
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index 1340d77124ae4..ee9caf7916fb9 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -795,8 +795,10 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
+ ihold(inode);
+
+ if (wbc->sync_mode == WB_SYNC_NONE &&
+- ceph_inode_to_fs_client(inode)->write_congested)
++ ceph_inode_to_fs_client(inode)->write_congested) {
++ redirty_page_for_writepage(wbc, page);
+ return AOP_WRITEPAGE_ACTIVATE;
++ }
+
+ wait_on_page_fscache(page);
+
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 7fb4aae974124..e8d8e3e633cdb 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -4775,13 +4775,13 @@ int ceph_drop_caps_for_unlink(struct inode *inode)
+
+ doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode,
+ ceph_vinop(inode));
+- spin_lock(&mdsc->cap_unlink_delay_lock);
++ spin_lock(&mdsc->cap_delay_lock);
+ ci->i_ceph_flags |= CEPH_I_FLUSH;
+ if (!list_empty(&ci->i_cap_delay_list))
+ list_del_init(&ci->i_cap_delay_list);
+ list_add_tail(&ci->i_cap_delay_list,
+ &mdsc->cap_unlink_delay_list);
+- spin_unlock(&mdsc->cap_unlink_delay_lock);
++ spin_unlock(&mdsc->cap_delay_lock);
+
+ /*
+ * Fire the work immediately, because the MDS maybe
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 3ab9c268a8bb3..360b686c3c67c 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -2504,7 +2504,7 @@ static void ceph_cap_unlink_work(struct work_struct *work)
+ struct ceph_client *cl = mdsc->fsc->client;
+
+ doutc(cl, "begin\n");
+- spin_lock(&mdsc->cap_unlink_delay_lock);
++ spin_lock(&mdsc->cap_delay_lock);
+ while (!list_empty(&mdsc->cap_unlink_delay_list)) {
+ struct ceph_inode_info *ci;
+ struct inode *inode;
+@@ -2516,15 +2516,15 @@ static void ceph_cap_unlink_work(struct work_struct *work)
+
+ inode = igrab(&ci->netfs.inode);
+ if (inode) {
+- spin_unlock(&mdsc->cap_unlink_delay_lock);
++ spin_unlock(&mdsc->cap_delay_lock);
+ doutc(cl, "on %p %llx.%llx\n", inode,
+ ceph_vinop(inode));
+ ceph_check_caps(ci, CHECK_CAPS_FLUSH);
+ iput(inode);
+- spin_lock(&mdsc->cap_unlink_delay_lock);
++ spin_lock(&mdsc->cap_delay_lock);
+ }
+ }
+- spin_unlock(&mdsc->cap_unlink_delay_lock);
++ spin_unlock(&mdsc->cap_delay_lock);
+ doutc(cl, "done\n");
+ }
+
+@@ -5404,7 +5404,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
+ INIT_LIST_HEAD(&mdsc->cap_wait_list);
+ spin_lock_init(&mdsc->cap_delay_lock);
+ INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
+- spin_lock_init(&mdsc->cap_unlink_delay_lock);
+ INIT_LIST_HEAD(&mdsc->snap_flush_list);
+ spin_lock_init(&mdsc->snap_flush_lock);
+ mdsc->last_cap_flush_tid = 1;
+diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
+index 03f8ff00874f7..b88e804152241 100644
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -461,9 +461,8 @@ struct ceph_mds_client {
+ struct delayed_work delayed_work; /* delayed work */
+ unsigned long last_renew_caps; /* last time we renewed our caps */
+ struct list_head cap_delay_list; /* caps with delayed release */
+- spinlock_t cap_delay_lock; /* protects cap_delay_list */
+ struct list_head cap_unlink_delay_list; /* caps with delayed release for unlink */
+- spinlock_t cap_unlink_delay_lock; /* protects cap_unlink_delay_list */
++ spinlock_t cap_delay_lock; /* protects cap_delay_list and cap_unlink_delay_list */
+ struct list_head snap_flush_list; /* cap_snaps ready to flush */
+ spinlock_t snap_flush_lock;
+
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index ffa4565c275a7..b7f943f33366d 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -634,11 +634,18 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
+ * each file a separate locking class. Let's differentiate on
+ * whether the file has mmap or not for now.
+ *
+- * Both paths of the branch look the same. They're supposed to
++ * For similar reasons, writable and readonly files are given different
++ * lockdep keys, because the writable file /sys/power/resume may call vfs
++ * lookup helpers for arbitrary paths and readonly files can be read by
++ * overlayfs from vfs helpers when sysfs is a lower layer of overlayfs.
++ *
++ * All three cases look the same. They're supposed to
+ * look that way and give @of->mutex different static lockdep keys.
+ */
+ if (has_mmap)
+ mutex_init(&of->mutex);
++ else if (file->f_mode & FMODE_WRITE)
++ mutex_init(&of->mutex);
+ else
+ mutex_init(&of->mutex);
+
+diff --git a/fs/proc/bootconfig.c b/fs/proc/bootconfig.c
+index 902b326e1e560..87dcaae32ff87 100644
+--- a/fs/proc/bootconfig.c
++++ b/fs/proc/bootconfig.c
+@@ -62,12 +62,12 @@ static int __init copy_xbc_key_value_list(char *dst, size_t size)
+ break;
+ dst += ret;
+ }
+- if (ret >= 0 && boot_command_line[0]) {
+- ret = snprintf(dst, rest(dst, end), "# Parameters from bootloader:\n# %s\n",
+- boot_command_line);
+- if (ret > 0)
+- dst += ret;
+- }
++ }
++ if (cmdline_has_extra_options() && ret >= 0 && boot_command_line[0]) {
++ ret = snprintf(dst, rest(dst, end), "# Parameters from bootloader:\n# %s\n",
++ boot_command_line);
++ if (ret > 0)
++ dst += ret;
+ }
+ out:
+ kfree(key);
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index 13a9d7acf8f8e..0ff2491c311d8 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -433,8 +433,8 @@ smb2_close_cached_fid(struct kref *ref)
+ if (cfid->is_open) {
+ rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ cfid->fid.volatile_fid);
+- if (rc != -EBUSY && rc != -EAGAIN)
+- atomic_dec(&cfid->tcon->num_remote_opens);
++ if (rc) /* should we retry on -EBUSY or -EAGAIN? */
++ cifs_dbg(VFS, "close cached dir rc %d\n", rc);
+ }
+
+ free_cached_dir(cfid);
+diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
+index 446225aada50d..8b45b82cd5edc 100644
+--- a/include/acpi/acpi_bus.h
++++ b/include/acpi/acpi_bus.h
+@@ -911,17 +911,19 @@ static inline bool acpi_int_uid_match(struct acpi_device *adev, u64 uid2)
+ * acpi_dev_hid_uid_match - Match device by supplied HID and UID
+ * @adev: ACPI device to match.
+ * @hid2: Hardware ID of the device.
+- * @uid2: Unique ID of the device, pass 0 or NULL to not check _UID.
++ * @uid2: Unique ID of the device, pass NULL to not check _UID.
+ *
+ * Matches HID and UID in @adev with given @hid2 and @uid2. Absence of @uid2
+ * will be treated as a match. If user wants to validate @uid2, it should be
+ * done before calling this function.
+ *
+- * Returns: %true if matches or @uid2 is 0 or NULL, %false otherwise.
++ * Returns: %true if matches or @uid2 is NULL, %false otherwise.
+ */
+ #define acpi_dev_hid_uid_match(adev, hid2, uid2) \
+ (acpi_dev_hid_match(adev, hid2) && \
+- (!(uid2) || acpi_dev_uid_match(adev, uid2)))
++ /* Distinguish integer 0 from NULL @uid2 */ \
++ (_Generic(uid2, ACPI_STR_TYPES(!(uid2)), default: 0) || \
++ acpi_dev_uid_match(adev, uid2)))
+
+ void acpi_dev_clear_dependencies(struct acpi_device *supplier);
+ bool acpi_dev_ready_for_enumeration(const struct acpi_device *device);
+diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
+index ca73940e26df8..e5ee2c694401e 100644
+--- a/include/linux/bootconfig.h
++++ b/include/linux/bootconfig.h
+@@ -10,6 +10,7 @@
+ #ifdef __KERNEL__
+ #include <linux/kernel.h>
+ #include <linux/types.h>
++bool __init cmdline_has_extra_options(void);
+ #else /* !__KERNEL__ */
+ /*
+ * NOTE: This is only for tools/bootconfig, because tools/bootconfig will
+diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
+index e06bad467f55e..c3f9bb6602ba2 100644
+--- a/include/linux/dma-fence.h
++++ b/include/linux/dma-fence.h
+@@ -682,4 +682,11 @@ static inline bool dma_fence_is_container(struct dma_fence *fence)
+ return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
+ }
+
++#define DMA_FENCE_WARN(f, fmt, args...) \
++ do { \
++ struct dma_fence *__ff = (f); \
++ pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
++ ##args); \
++ } while (0)
++
+ #endif /* __LINUX_DMA_FENCE_H */
+diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
+index 147feebd508ca..3f003d5fde534 100644
+--- a/include/linux/irqflags.h
++++ b/include/linux/irqflags.h
+@@ -114,7 +114,7 @@ do { \
+ # define lockdep_softirq_enter() do { } while (0)
+ # define lockdep_softirq_exit() do { } while (0)
+ # define lockdep_hrtimer_enter(__hrtimer) false
+-# define lockdep_hrtimer_exit(__context) do { } while (0)
++# define lockdep_hrtimer_exit(__context) do { (void)(__context); } while (0)
+ # define lockdep_posixtimer_enter() do { } while (0)
+ # define lockdep_posixtimer_exit() do { } while (0)
+ # define lockdep_irq_work_enter(__work) do { } while (0)
+diff --git a/include/linux/node.h b/include/linux/node.h
+index 25b66d705ee2e..dfc004e4bee74 100644
+--- a/include/linux/node.h
++++ b/include/linux/node.h
+@@ -34,6 +34,18 @@ struct access_coordinate {
+ unsigned int write_latency;
+ };
+
++/*
++ * ACCESS_COORDINATE_LOCAL correlates to ACCESS CLASS 0
++ * - access_coordinate between target node and nearest initiator node
++ * ACCESS_COORDINATE_CPU correlates to ACCESS CLASS 1
++ * - access_coordinate between target node and nearest CPU node
++ */
++enum access_coordinate_class {
++ ACCESS_COORDINATE_LOCAL,
++ ACCESS_COORDINATE_CPU,
++ ACCESS_COORDINATE_MAX
++};
++
+ enum cache_indexing {
+ NODE_CACHE_DIRECT_MAP,
+ NODE_CACHE_INDEXED,
+@@ -66,7 +78,7 @@ struct node_cache_attrs {
+ #ifdef CONFIG_HMEM_REPORTING
+ void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs);
+ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
+- unsigned access);
++ enum access_coordinate_class access);
+ #else
+ static inline void node_add_cache(unsigned int nid,
+ struct node_cache_attrs *cache_attrs)
+@@ -75,7 +87,7 @@ static inline void node_add_cache(unsigned int nid,
+
+ static inline void node_set_perf_attrs(unsigned int nid,
+ struct access_coordinate *coord,
+- unsigned access)
++ enum access_coordinate_class access)
+ {
+ }
+ #endif
+@@ -137,7 +149,7 @@ extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk);
+
+ extern int register_memory_node_under_compute_node(unsigned int mem_nid,
+ unsigned int cpu_nid,
+- unsigned access);
++ enum access_coordinate_class access);
+ #else
+ static inline void node_dev_init(void)
+ {
+diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
+index ffe48e69b3f3a..457879938fc19 100644
+--- a/include/linux/u64_stats_sync.h
++++ b/include/linux/u64_stats_sync.h
+@@ -135,10 +135,11 @@ static inline void u64_stats_inc(u64_stats_t *p)
+ p->v++;
+ }
+
+-static inline void u64_stats_init(struct u64_stats_sync *syncp)
+-{
+- seqcount_init(&syncp->seq);
+-}
++#define u64_stats_init(syncp) \
++ do { \
++ struct u64_stats_sync *__s = (syncp); \
++ seqcount_init(&__s->seq); \
++ } while (0)
+
+ static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
+ {
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index 61ebe723ee4d5..facb7a469efad 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -437,6 +437,10 @@ static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
+ refcount_inc(&ifp->refcnt);
+ }
+
++static inline bool in6_ifa_hold_safe(struct inet6_ifaddr *ifp)
++{
++ return refcount_inc_not_zero(&ifp->refcnt);
++}
+
+ /*
+ * compute link-local solicited-node multicast address
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index afd40dce40f3d..d1b07ddbe677e 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -55,7 +55,7 @@ struct unix_sock {
+ struct mutex iolock, bindlock;
+ struct sock *peer;
+ struct list_head link;
+- atomic_long_t inflight;
++ unsigned long inflight;
+ spinlock_t lock;
+ unsigned long gc_flags;
+ #define UNIX_GC_CANDIDATE 0
+diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
+index 7ffa8c192c3f2..eaec5d6caa29d 100644
+--- a/include/net/bluetooth/bluetooth.h
++++ b/include/net/bluetooth/bluetooth.h
+@@ -164,6 +164,8 @@ struct bt_voice {
+ #define BT_ISO_QOS_BIG_UNSET 0xff
+ #define BT_ISO_QOS_BIS_UNSET 0xff
+
++#define BT_ISO_SYNC_TIMEOUT 0x07d0 /* 20 secs */
++
+ struct bt_iso_io_qos {
+ __u32 interval;
+ __u16 latency;
+@@ -583,6 +585,15 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
+ return skb;
+ }
+
++static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
++ sockptr_t src, size_t src_size)
++{
++ if (dst_size > src_size)
++ return -EINVAL;
++
++ return copy_from_sockptr(dst, src, dst_size);
++}
++
+ int bt_to_errno(u16 code);
+ __u8 bt_status(int err);
+
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index 2d746f4c9a0a4..6690939f241a4 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -360,6 +360,39 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+ return pskb_network_may_pull(skb, nhlen);
+ }
+
++/* Variant of pskb_inet_may_pull().
++ */
++static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
++{
++ int nhlen = 0, maclen = ETH_HLEN;
++ __be16 type = skb->protocol;
++
++ /* Essentially this is skb_protocol(skb, true)
++ * And we get MAC len.
++ */
++ if (eth_type_vlan(type))
++ type = __vlan_get_protocol(skb, type, &maclen);
++
++ switch (type) {
++#if IS_ENABLED(CONFIG_IPV6)
++ case htons(ETH_P_IPV6):
++ nhlen = sizeof(struct ipv6hdr);
++ break;
++#endif
++ case htons(ETH_P_IP):
++ nhlen = sizeof(struct iphdr);
++ break;
++ }
++ /* For ETH_P_IPV6/ETH_P_IP we make sure to pull
++ * a base network header in skb->head.
++ */
++ if (!pskb_may_pull(skb, maclen + nhlen))
++ return false;
++
++ skb_set_network_header(skb, maclen);
++ return true;
++}
++
+ static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
+ {
+ const struct ip_tunnel_encap_ops *ops;
+diff --git a/init/main.c b/init/main.c
+index 9e6ab6d593bd8..98fdd93d79a5c 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -485,6 +485,11 @@ static int __init warn_bootconfig(char *str)
+
+ early_param("bootconfig", warn_bootconfig);
+
++bool __init cmdline_has_extra_options(void)
++{
++ return extra_command_line || extra_init_args;
++}
++
+ /* Change NUL term back to "=", to make "param" the whole string. */
+ static void __init repair_env_string(char *param, char *val)
+ {
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 0f52ea80103ef..3fc792dfc6ae7 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -949,6 +949,8 @@ bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags)
+ u64 user_data = req->cqe.user_data;
+ struct io_uring_cqe *cqe;
+
++ lockdep_assert(!io_wq_current_is_worker());
++
+ if (!defer)
+ return __io_post_aux_cqe(ctx, user_data, res, cflags, false);
+
+@@ -1950,6 +1952,29 @@ void io_wq_submit_work(struct io_wq_work *work)
+ goto fail;
+ }
+
++ /*
++ * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the
++ * submitter task context. Final request completions are handed to the
++ * right context; however, this is not the case for auxiliary CQEs,
++ * which are the main means of operation for multishot requests.
++ * Don't allow any multishot execution from io-wq. It's more restrictive
++ * than necessary and also cleaner.
++ */
++ if (req->flags & REQ_F_APOLL_MULTISHOT) {
++ err = -EBADFD;
++ if (!file_can_poll(req->file))
++ goto fail;
++ if (req->file->f_flags & O_NONBLOCK ||
++ req->file->f_mode & FMODE_NOWAIT) {
++ err = -ECANCELED;
++ if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
++ goto fail;
++ return;
++ } else {
++ req->flags &= ~REQ_F_APOLL_MULTISHOT;
++ }
++ }
++
+ if (req->flags & REQ_F_FORCE_ASYNC) {
+ bool opcode_poll = def->pollin || def->pollout;
+
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 5a4001139e288..46ea09e1e3829 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -78,19 +78,6 @@ struct io_sr_msg {
+ */
+ #define MULTISHOT_MAX_RETRY 32
+
+-static inline bool io_check_multishot(struct io_kiocb *req,
+- unsigned int issue_flags)
+-{
+- /*
+- * When ->locked_cq is set we only allow to post CQEs from the original
+- * task context. Usual request completions will be handled in other
+- * generic paths but multipoll may decide to post extra cqes.
+- */
+- return !(issue_flags & IO_URING_F_IOWQ) ||
+- !(req->flags & REQ_F_APOLL_MULTISHOT) ||
+- !req->ctx->task_complete;
+-}
+-
+ int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+ struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
+@@ -837,9 +824,6 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
+ (sr->flags & IORING_RECVSEND_POLL_FIRST))
+ return io_setup_async_msg(req, kmsg, issue_flags);
+
+- if (!io_check_multishot(req, issue_flags))
+- return io_setup_async_msg(req, kmsg, issue_flags);
+-
+ retry_multishot:
+ if (io_do_buffer_select(req)) {
+ void __user *buf;
+@@ -935,9 +919,6 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
+ (sr->flags & IORING_RECVSEND_POLL_FIRST))
+ return -EAGAIN;
+
+- if (!io_check_multishot(req, issue_flags))
+- return -EAGAIN;
+-
+ sock = sock_from_file(req->file);
+ if (unlikely(!sock))
+ return -ENOTSOCK;
+@@ -1274,6 +1255,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
+
+ if (req_has_async_data(req)) {
+ kmsg = req->async_data;
++ kmsg->msg.msg_control_user = sr->msg_control;
+ } else {
+ ret = io_sendmsg_copy_hdr(req, &iomsg);
+ if (ret)
+@@ -1386,8 +1368,6 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
+ struct file *file;
+ int ret, fd;
+
+- if (!io_check_multishot(req, issue_flags))
+- return -EAGAIN;
+ retry:
+ if (!fixed) {
+ fd = __get_unused_fd_flags(accept->flags, accept->nofile);
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index dd6fe3b328f40..c3c154790e452 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -932,8 +932,6 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
+ */
+ if (!file_can_poll(req->file))
+ return -EBADFD;
+- if (issue_flags & IO_URING_F_IOWQ)
+- return -EAGAIN;
+
+ ret = __io_read(req, issue_flags);
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index e6ec3ba4950b4..980908ef958c1 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -3207,7 +3207,8 @@ enum cpu_mitigations {
+ };
+
+ static enum cpu_mitigations cpu_mitigations __ro_after_init =
+- CPU_MITIGATIONS_AUTO;
++ IS_ENABLED(CONFIG_SPECULATION_MITIGATIONS) ? CPU_MITIGATIONS_AUTO :
++ CPU_MITIGATIONS_OFF;
+
+ static int __init mitigations_parse_cmdline(char *arg)
+ {
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 9d9095e817928..65adc815fc6e6 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1567,10 +1567,17 @@ static int check_kprobe_address_safe(struct kprobe *p,
+ jump_label_lock();
+ preempt_disable();
+
+- /* Ensure it is not in reserved area nor out of text */
+- if (!(core_kernel_text((unsigned long) p->addr) ||
+- is_module_text_address((unsigned long) p->addr)) ||
+- in_gate_area_no_mm((unsigned long) p->addr) ||
++ /* Ensure the address is in a text area, and find a module if one exists. */
++ *probed_mod = NULL;
++ if (!core_kernel_text((unsigned long) p->addr)) {
++ *probed_mod = __module_text_address((unsigned long) p->addr);
++ if (!(*probed_mod)) {
++ ret = -EINVAL;
++ goto out;
++ }
++ }
++ /* Ensure it is not in reserved area. */
++ if (in_gate_area_no_mm((unsigned long) p->addr) ||
+ within_kprobe_blacklist((unsigned long) p->addr) ||
+ jump_label_text_reserved(p->addr, p->addr) ||
+ static_call_text_reserved(p->addr, p->addr) ||
+@@ -1580,8 +1587,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
+ goto out;
+ }
+
+- /* Check if 'p' is probing a module. */
+- *probed_mod = __module_text_address((unsigned long) p->addr);
++ /* Get module refcount and reject __init functions for loaded modules. */
+ if (*probed_mod) {
+ /*
+ * We must hold a refcount of the probed module while updating
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index a718067deecee..3aae526cc4aac 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -106,6 +106,12 @@ static void s2idle_enter(void)
+ swait_event_exclusive(s2idle_wait_head,
+ s2idle_state == S2IDLE_STATE_WAKE);
+
++ /*
++ * Kick all CPUs to ensure that they resume their timers and restore
++ * consistent system state.
++ */
++ wake_up_all_idle_cpus();
++
+ cpus_read_unlock();
+
+ raw_spin_lock_irq(&s2idle_lock);
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 2ad234366d21c..faf56d9a9e88e 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1400,7 +1400,6 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+ old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
+ old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
+
+- local_inc(&cpu_buffer->pages_touched);
+ /*
+ * Just make sure we have seen our old_write and synchronize
+ * with any interrupts that come in.
+@@ -1437,8 +1436,9 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+ */
+ local_set(&next_page->page->commit, 0);
+
+- /* Again, either we update tail_page or an interrupt does */
+- (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
++ /* Either we update tail_page or an interrupt does */
++ if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page))
++ local_inc(&cpu_buffer->pages_touched);
+ }
+ }
+
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 7c364b87352ee..52f75c36bbca4 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1670,6 +1670,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
+ return 0;
+ }
+
++#ifdef CONFIG_PERF_EVENTS
+ static ssize_t
+ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+ {
+@@ -1684,6 +1685,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+ }
++#endif
+
+ static ssize_t
+ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+@@ -2152,10 +2154,12 @@ static const struct file_operations ftrace_event_format_fops = {
+ .release = seq_release,
+ };
+
++#ifdef CONFIG_PERF_EVENTS
+ static const struct file_operations ftrace_event_id_fops = {
+ .read = event_id_read,
+ .llseek = default_llseek,
+ };
++#endif
+
+ static const struct file_operations ftrace_event_filter_fops = {
+ .open = tracing_open_file_tr,
+diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
+index bf70850035c76..404dba36bae38 100644
+--- a/lib/checksum_kunit.c
++++ b/lib/checksum_kunit.c
+@@ -594,13 +594,15 @@ static void test_ip_fast_csum(struct kunit *test)
+
+ static void test_csum_ipv6_magic(struct kunit *test)
+ {
+-#if defined(CONFIG_NET)
+ const struct in6_addr *saddr;
+ const struct in6_addr *daddr;
+ unsigned int len;
+ unsigned char proto;
+ __wsum csum;
+
++ if (!IS_ENABLED(CONFIG_NET))
++ return;
++
+ const int daddr_offset = sizeof(struct in6_addr);
+ const int len_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr);
+ const int proto_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr) +
+@@ -618,7 +620,6 @@ static void test_csum_ipv6_magic(struct kunit *test)
+ CHECK_EQ(to_sum16(expected_csum_ipv6_magic[i]),
+ csum_ipv6_magic(saddr, daddr, len, proto, csum));
+ }
+-#endif /* !CONFIG_NET */
+ }
+
+ static struct kunit_case __refdata checksum_test_cases[] = {
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index b95c36765d045..2243cec18ecc8 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -3948,7 +3948,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
+
+ spin_lock_bh(&bat_priv->tt.commit_lock);
+
+- while (true) {
++ while (timeout) {
+ table_size = batadv_tt_local_table_transmit_size(bat_priv);
+ if (packet_size_max >= table_size)
+ break;
+diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
+index 00e02138003ec..efea25eb56ce0 100644
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -105,8 +105,10 @@ void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ if (hdev->req_status == HCI_REQ_PEND) {
+ hdev->req_result = result;
+ hdev->req_status = HCI_REQ_DONE;
+- if (skb)
++ if (skb) {
++ kfree_skb(hdev->req_skb);
+ hdev->req_skb = skb_get(skb);
++ }
+ wake_up_interruptible(&hdev->req_wait_q);
+ }
+ }
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 3e7cd330d731a..3f5f0932330d2 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -1946,10 +1946,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
+
+ switch (optname) {
+ case HCI_DATA_DIR:
+- if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
+@@ -1958,10 +1957,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
+ break;
+
+ case HCI_TIME_STAMP:
+- if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
+@@ -1979,11 +1977,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
+ uf.event_mask[1] = *((u32 *) f->event_mask + 1);
+ }
+
+- len = min_t(unsigned int, len, sizeof(uf));
+- if (copy_from_sockptr(&uf, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&uf, sizeof(uf), optval, len);
++ if (err)
+ break;
+- }
+
+ if (!capable(CAP_NET_RAW)) {
+ uf.type_mask &= hci_sec_filter.type_mask;
+@@ -2042,10 +2038,9 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+ goto done;
+ }
+
+- if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
++ if (err)
+ break;
+- }
+
+ hci_pi(sk)->mtu = opt;
+ break;
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 824ce03bb361b..e1050d7d21a59 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -2611,6 +2611,14 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
+ return filter_policy;
+ }
+
++static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
++ u8 type, u16 interval, u16 window)
++{
++ cp->type = type;
++ cp->interval = cpu_to_le16(interval);
++ cp->window = cpu_to_le16(window);
++}
++
+ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
+ u16 interval, u16 window,
+ u8 own_addr_type, u8 filter_policy)
+@@ -2618,7 +2626,7 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
+ struct hci_cp_le_set_ext_scan_params *cp;
+ struct hci_cp_le_scan_phy_params *phy;
+ u8 data[sizeof(*cp) + sizeof(*phy) * 2];
+- u8 num_phy = 0;
++ u8 num_phy = 0x00;
+
+ cp = (void *)data;
+ phy = (void *)cp->data;
+@@ -2628,28 +2636,64 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
+ cp->own_addr_type = own_addr_type;
+ cp->filter_policy = filter_policy;
+
++ /* Check if PA Sync is in progress then select the PHY based on the
++ * hci_conn.iso_qos.
++ */
++ if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
++ struct hci_cp_le_add_to_accept_list *sent;
++
++ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
++ if (sent) {
++ struct hci_conn *conn;
++
++ conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK,
++ &sent->bdaddr);
++ if (conn) {
++ struct bt_iso_qos *qos = &conn->iso_qos;
++
++ if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
++ qos->bcast.in.phy & BT_ISO_PHY_2M) {
++ cp->scanning_phys |= LE_SCAN_PHY_1M;
++ hci_le_scan_phy_params(phy, type,
++ interval,
++ window);
++ num_phy++;
++ phy++;
++ }
++
++ if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
++ cp->scanning_phys |= LE_SCAN_PHY_CODED;
++ hci_le_scan_phy_params(phy, type,
++ interval * 3,
++ window * 3);
++ num_phy++;
++ phy++;
++ }
++
++ if (num_phy)
++ goto done;
++ }
++ }
++ }
++
+ if (scan_1m(hdev) || scan_2m(hdev)) {
+ cp->scanning_phys |= LE_SCAN_PHY_1M;
+-
+- phy->type = type;
+- phy->interval = cpu_to_le16(interval);
+- phy->window = cpu_to_le16(window);
+-
++ hci_le_scan_phy_params(phy, type, interval, window);
+ num_phy++;
+ phy++;
+ }
+
+ if (scan_coded(hdev)) {
+ cp->scanning_phys |= LE_SCAN_PHY_CODED;
+-
+- phy->type = type;
+- phy->interval = cpu_to_le16(interval);
+- phy->window = cpu_to_le16(window);
+-
++ hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
+ num_phy++;
+ phy++;
+ }
+
++done:
++ if (!num_phy)
++ return -EINVAL;
++
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
+ sizeof(*cp) + sizeof(*phy) * num_phy,
+ data, HCI_CMD_TIMEOUT);
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 04f6572d35f17..a8b05baa8e5a9 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -837,10 +837,10 @@ static struct bt_iso_qos default_qos = {
+ .bcode = {0x00},
+ .options = 0x00,
+ .skip = 0x0000,
+- .sync_timeout = 0x4000,
++ .sync_timeout = BT_ISO_SYNC_TIMEOUT,
+ .sync_cte_type = 0x00,
+ .mse = 0x00,
+- .timeout = 0x4000,
++ .timeout = BT_ISO_SYNC_TIMEOUT,
+ },
+ };
+
+@@ -1430,8 +1430,8 @@ static bool check_ucast_qos(struct bt_iso_qos *qos)
+
+ static bool check_bcast_qos(struct bt_iso_qos *qos)
+ {
+- if (qos->bcast.sync_factor == 0x00)
+- return false;
++ if (!qos->bcast.sync_factor)
++ qos->bcast.sync_factor = 0x01;
+
+ if (qos->bcast.packing > 0x01)
+ return false;
+@@ -1454,6 +1454,9 @@ static bool check_bcast_qos(struct bt_iso_qos *qos)
+ if (qos->bcast.skip > 0x01f3)
+ return false;
+
++ if (!qos->bcast.sync_timeout)
++ qos->bcast.sync_timeout = BT_ISO_SYNC_TIMEOUT;
++
+ if (qos->bcast.sync_timeout < 0x000a || qos->bcast.sync_timeout > 0x4000)
+ return false;
+
+@@ -1463,6 +1466,9 @@ static bool check_bcast_qos(struct bt_iso_qos *qos)
+ if (qos->bcast.mse > 0x1f)
+ return false;
+
++ if (!qos->bcast.timeout)
++ qos->bcast.sync_timeout = BT_ISO_SYNC_TIMEOUT;
++
+ if (qos->bcast.timeout < 0x000a || qos->bcast.timeout > 0x4000)
+ return false;
+
+@@ -1473,7 +1479,7 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+ {
+ struct sock *sk = sock->sk;
+- int len, err = 0;
++ int err = 0;
+ struct bt_iso_qos qos = default_qos;
+ u32 opt;
+
+@@ -1488,10 +1494,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+@@ -1500,10 +1505,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+
+ case BT_PKT_STATUS:
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags);
+@@ -1518,17 +1522,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- len = min_t(unsigned int, sizeof(qos), optlen);
+-
+- if (copy_from_sockptr(&qos, optval, len)) {
+- err = -EFAULT;
+- break;
+- }
+-
+- if (len == sizeof(qos.ucast) && !check_ucast_qos(&qos)) {
+- err = -EINVAL;
++ err = bt_copy_from_sockptr(&qos, sizeof(qos), optval, optlen);
++ if (err)
+ break;
+- }
+
+ iso_pi(sk)->qos = qos;
+ iso_pi(sk)->qos_user_set = true;
+@@ -1543,18 +1539,16 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ }
+
+ if (optlen > sizeof(iso_pi(sk)->base)) {
+- err = -EOVERFLOW;
++ err = -EINVAL;
+ break;
+ }
+
+- len = min_t(unsigned int, sizeof(iso_pi(sk)->base), optlen);
+-
+- if (copy_from_sockptr(iso_pi(sk)->base, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(iso_pi(sk)->base, optlen, optval,
++ optlen);
++ if (err)
+ break;
+- }
+
+- iso_pi(sk)->base_len = len;
++ iso_pi(sk)->base_len = optlen;
+
+ break;
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index ab5a9d42fae71..706d2478ddb33 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4054,8 +4054,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn,
+ return -EPROTO;
+
+ hci_dev_lock(hdev);
+- if (hci_dev_test_flag(hdev, HCI_MGMT) &&
+- !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
++ if (hci_dev_test_flag(hdev, HCI_MGMT))
+ mgmt_device_connected(hdev, hcon, NULL, 0);
+ hci_dev_unlock(hdev);
+
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index ee7a41d6994fc..1eeea5d1306c2 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -726,7 +726,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct l2cap_options opts;
+- int len, err = 0;
++ int err = 0;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+@@ -753,11 +753,9 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+ opts.max_tx = chan->max_tx;
+ opts.txwin_size = chan->tx_win;
+
+- len = min_t(unsigned int, sizeof(opts), optlen);
+- if (copy_from_sockptr(&opts, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opts, sizeof(opts), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
+ err = -EINVAL;
+@@ -800,10 +798,9 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+ break;
+
+ case L2CAP_LM:
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt & L2CAP_LM_FIPS) {
+ err = -EINVAL;
+@@ -884,7 +881,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ struct bt_security sec;
+ struct bt_power pwr;
+ struct l2cap_conn *conn;
+- int len, err = 0;
++ int err = 0;
+ u32 opt;
+ u16 mtu;
+ u8 mode;
+@@ -910,11 +907,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+
+ sec.level = BT_SECURITY_LOW;
+
+- len = min_t(unsigned int, sizeof(sec), optlen);
+- if (copy_from_sockptr(&sec, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&sec, sizeof(sec), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (sec.level < BT_SECURITY_LOW ||
+ sec.level > BT_SECURITY_FIPS) {
+@@ -959,10 +954,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt) {
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+@@ -974,10 +968,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+
+ case BT_FLUSHABLE:
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt > BT_FLUSHABLE_ON) {
+ err = -EINVAL;
+@@ -1009,11 +1002,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+
+ pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
+
+- len = min_t(unsigned int, sizeof(pwr), optlen);
+- if (copy_from_sockptr(&pwr, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&pwr, sizeof(pwr), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (pwr.force_active)
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+@@ -1022,10 +1013,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+
+ case BT_CHANNEL_POLICY:
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ err = -EOPNOTSUPP;
+ break;
+@@ -1054,10 +1044,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&mtu, optval, sizeof(u16))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&mtu, sizeof(mtu), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (chan->mode == L2CAP_MODE_EXT_FLOWCTL &&
+ sk->sk_state == BT_CONNECTED)
+@@ -1085,10 +1074,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&mode, optval, sizeof(u8))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&mode, sizeof(mode), optval, optlen);
++ if (err)
+ break;
+- }
+
+ BT_DBG("mode %u", mode);
+
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index b54e8a530f55a..29aa07e9db9d7 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -629,7 +629,7 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname,
+
+ switch (optname) {
+ case RFCOMM_LM:
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
++ if (bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen)) {
+ err = -EFAULT;
+ break;
+ }
+@@ -664,7 +664,6 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
+ struct sock *sk = sock->sk;
+ struct bt_security sec;
+ int err = 0;
+- size_t len;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+@@ -686,11 +685,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
+
+ sec.level = BT_SECURITY_LOW;
+
+- len = min_t(unsigned int, sizeof(sec), optlen);
+- if (copy_from_sockptr(&sec, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&sec, sizeof(sec), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (sec.level > BT_SECURITY_HIGH) {
+ err = -EINVAL;
+@@ -706,10 +703,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index c736186aba26b..8e4f39b8601cb 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -823,7 +823,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+ {
+ struct sock *sk = sock->sk;
+- int len, err = 0;
++ int err = 0;
+ struct bt_voice voice;
+ u32 opt;
+ struct bt_codecs *codecs;
+@@ -842,10 +842,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+@@ -862,11 +861,10 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+
+ voice.setting = sco_pi(sk)->setting;
+
+- len = min_t(unsigned int, sizeof(voice), optlen);
+- if (copy_from_sockptr(&voice, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&voice, sizeof(voice), optval,
++ optlen);
++ if (err)
+ break;
+- }
+
+ /* Explicitly check for these values */
+ if (voice.setting != BT_VOICE_TRANSPARENT &&
+@@ -889,10 +887,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+
+ case BT_PKT_STATUS:
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags);
+@@ -933,9 +930,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(buffer, optval, optlen)) {
++ err = bt_copy_from_sockptr(buffer, optlen, optval, optlen);
++ if (err) {
+ hci_dev_put(hdev);
+- err = -EFAULT;
+ break;
+ }
+
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index b150c9929b12e..14365b20f1c5c 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -966,6 +966,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+@@ -1266,6 +1268,8 @@ static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index 4876707595781..fe89a056eb06c 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1118,6 +1118,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+@@ -1504,6 +1506,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 16615d107cf06..15c37c8113fc8 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -926,13 +926,11 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
+ peer->rate_last = jiffies;
+ ++peer->n_redirects;
+-#ifdef CONFIG_IP_ROUTE_VERBOSE
+- if (log_martians &&
++ if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
+ peer->n_redirects == ip_rt_redirect_number)
+ net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
+ &ip_hdr(skb)->saddr, inet_iif(skb),
+ &ip_hdr(skb)->daddr, &gw);
+-#endif
+ }
+ out_put_peer:
+ inet_putpeer(peer);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 055230b669cf2..37d48aa073c3c 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2061,9 +2061,10 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
+ if (ipv6_addr_equal(&ifp->addr, addr)) {
+ if (!dev || ifp->idev->dev == dev ||
+ !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
+- result = ifp;
+- in6_ifa_hold(ifp);
+- break;
++ if (in6_ifa_hold_safe(ifp)) {
++ result = ifp;
++ break;
++ }
+ }
+ }
+ }
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 54294f6a8ec51..8184076a3924e 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1375,7 +1375,10 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
+ struct nl_info *info, struct netlink_ext_ack *extack)
+ {
+ struct fib6_table *table = rt->fib6_table;
+- struct fib6_node *fn, *pn = NULL;
++ struct fib6_node *fn;
++#ifdef CONFIG_IPV6_SUBTREES
++ struct fib6_node *pn = NULL;
++#endif
+ int err = -ENOMEM;
+ int allow_create = 1;
+ int replace_required = 0;
+@@ -1399,9 +1402,9 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
+ goto out;
+ }
+
++#ifdef CONFIG_IPV6_SUBTREES
+ pn = fn;
+
+-#ifdef CONFIG_IPV6_SUBTREES
+ if (rt->fib6_src.plen) {
+ struct fib6_node *sn;
+
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 636b360311c53..131f7bb2110d3 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1135,6 +1135,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+@@ -1513,6 +1515,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 3019a4406ca4f..74b63cdb59923 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -1380,8 +1380,9 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
+ if (ct_info.timeout[0]) {
+ if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
+ ct_info.timeout))
+- pr_info_ratelimited("Failed to associated timeout "
+- "policy `%s'\n", ct_info.timeout);
++ OVS_NLERR(log,
++ "Failed to associated timeout policy '%s'",
++ ct_info.timeout);
+ else
+ ct_info.nf_ct_timeout = rcu_dereference(
+ nf_ct_timeout_find(ct_info.ct)->timeout);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 0748e7ea5210e..e37cf913818a1 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -980,11 +980,11 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
+ sk->sk_write_space = unix_write_space;
+ sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
+ sk->sk_destruct = unix_sock_destructor;
+- u = unix_sk(sk);
++ u = unix_sk(sk);
++ u->inflight = 0;
+ u->path.dentry = NULL;
+ u->path.mnt = NULL;
+ spin_lock_init(&u->lock);
+- atomic_long_set(&u->inflight, 0);
+ INIT_LIST_HEAD(&u->link);
+ mutex_init(&u->iolock); /* single task reading lock */
+ mutex_init(&u->bindlock); /* single task binding lock */
+@@ -2604,7 +2604,9 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ }
+ } else if (!(flags & MSG_PEEK)) {
+ skb_unlink(skb, &sk->sk_receive_queue);
+- consume_skb(skb);
++ WRITE_ONCE(u->oob_skb, NULL);
++ if (!WARN_ON_ONCE(skb_unref(skb)))
++ kfree_skb(skb);
+ skb = skb_peek(&sk->sk_receive_queue);
+ }
+ }
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 027c86e804f8a..8734c0c1fc197 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -166,17 +166,18 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
+
+ static void dec_inflight(struct unix_sock *usk)
+ {
+- atomic_long_dec(&usk->inflight);
++ usk->inflight--;
+ }
+
+ static void inc_inflight(struct unix_sock *usk)
+ {
+- atomic_long_inc(&usk->inflight);
++ usk->inflight++;
+ }
+
+ static void inc_inflight_move_tail(struct unix_sock *u)
+ {
+- atomic_long_inc(&u->inflight);
++ u->inflight++;
++
+ /* If this still might be part of a cycle, move it to the end
+ * of the list, so that it's checked even if it was already
+ * passed over
+@@ -234,20 +235,34 @@ void unix_gc(void)
+ * receive queues. Other, non candidate sockets _can_ be
+ * added to queue, so we must make sure only to touch
+ * candidates.
++ *
++ * Embryos, though never candidates themselves, affect which
++ * candidates are reachable by the garbage collector. Before
++ * being added to a listener's queue, an embryo may already
++ * receive data carrying SCM_RIGHTS, potentially making the
++ * passed socket a candidate that is not yet reachable by the
++ * collector. It becomes reachable once the embryo is
++ * enqueued. Therefore, we must ensure that no SCM-laden
++ * embryo appears in a (candidate) listener's queue between
++ * consecutive scan_children() calls.
+ */
+ list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
++ struct sock *sk = &u->sk;
+ long total_refs;
+- long inflight_refs;
+
+- total_refs = file_count(u->sk.sk_socket->file);
+- inflight_refs = atomic_long_read(&u->inflight);
++ total_refs = file_count(sk->sk_socket->file);
+
+- BUG_ON(inflight_refs < 1);
+- BUG_ON(total_refs < inflight_refs);
+- if (total_refs == inflight_refs) {
++ BUG_ON(!u->inflight);
++ BUG_ON(total_refs < u->inflight);
++ if (total_refs == u->inflight) {
+ list_move_tail(&u->link, &gc_candidates);
+ __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
+ __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
++
++ if (sk->sk_state == TCP_LISTEN) {
++ unix_state_lock(sk);
++ unix_state_unlock(sk);
++ }
+ }
+ }
+
+@@ -271,7 +286,7 @@ void unix_gc(void)
+ /* Move cursor to after the current position. */
+ list_move(&cursor, &u->link);
+
+- if (atomic_long_read(&u->inflight) > 0) {
++ if (u->inflight) {
+ list_move_tail(&u->link, &not_cycle_list);
+ __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
+ scan_children(&u->sk, inc_inflight_move_tail, NULL);
+diff --git a/net/unix/scm.c b/net/unix/scm.c
+index 822ce0d0d7915..e92f2fad64105 100644
+--- a/net/unix/scm.c
++++ b/net/unix/scm.c
+@@ -53,12 +53,13 @@ void unix_inflight(struct user_struct *user, struct file *fp)
+ if (s) {
+ struct unix_sock *u = unix_sk(s);
+
+- if (atomic_long_inc_return(&u->inflight) == 1) {
++ if (!u->inflight) {
+ BUG_ON(!list_empty(&u->link));
+ list_add_tail(&u->link, &gc_inflight_list);
+ } else {
+ BUG_ON(list_empty(&u->link));
+ }
++ u->inflight++;
+ /* Paired with READ_ONCE() in wait_for_unix_gc() */
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
+ }
+@@ -75,10 +76,11 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
+ if (s) {
+ struct unix_sock *u = unix_sk(s);
+
+- BUG_ON(!atomic_long_read(&u->inflight));
++ BUG_ON(!u->inflight);
+ BUG_ON(list_empty(&u->link));
+
+- if (atomic_long_dec_and_test(&u->inflight))
++ u->inflight--;
++ if (!u->inflight)
+ list_del_init(&u->link);
+ /* Paired with READ_ONCE() in wait_for_unix_gc() */
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index b78c0e095e221..7d1c0986f9bb3 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -1414,6 +1414,8 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
+ struct xsk_queue **q;
+ int entries;
+
++ if (optlen < sizeof(entries))
++ return -EINVAL;
+ if (copy_from_sockptr(&entries, optval, sizeof(entries)))
+ return -EFAULT;
+
+diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
+index a781e6311810b..e3e260fe002f7 100644
+--- a/tools/testing/selftests/kselftest.h
++++ b/tools/testing/selftests/kselftest.h
+@@ -50,6 +50,7 @@
+ #include <stdarg.h>
+ #include <string.h>
+ #include <stdio.h>
++#include <sys/utsname.h>
+ #endif
+
+ #ifndef ARRAY_SIZE
+@@ -78,6 +79,9 @@
+ #define KSFT_XPASS 3
+ #define KSFT_SKIP 4
+
++#ifndef __noreturn
++#define __noreturn __attribute__((__noreturn__))
++#endif
+ #define __printf(a, b) __attribute__((format(printf, a, b)))
+
+ /* counters */
+@@ -254,13 +258,13 @@ static inline __printf(1, 2) void ksft_test_result_error(const char *msg, ...)
+ va_end(args);
+ }
+
+-static inline int ksft_exit_pass(void)
++static inline __noreturn int ksft_exit_pass(void)
+ {
+ ksft_print_cnts();
+ exit(KSFT_PASS);
+ }
+
+-static inline int ksft_exit_fail(void)
++static inline __noreturn int ksft_exit_fail(void)
+ {
+ ksft_print_cnts();
+ exit(KSFT_FAIL);
+@@ -287,7 +291,7 @@ static inline int ksft_exit_fail(void)
+ ksft_cnt.ksft_xfail + \
+ ksft_cnt.ksft_xskip)
+
+-static inline __printf(1, 2) int ksft_exit_fail_msg(const char *msg, ...)
++static inline __noreturn __printf(1, 2) int ksft_exit_fail_msg(const char *msg, ...)
+ {
+ int saved_errno = errno;
+ va_list args;
+@@ -302,19 +306,19 @@ static inline __printf(1, 2) int ksft_exit_fail_msg(const char *msg, ...)
+ exit(KSFT_FAIL);
+ }
+
+-static inline int ksft_exit_xfail(void)
++static inline __noreturn int ksft_exit_xfail(void)
+ {
+ ksft_print_cnts();
+ exit(KSFT_XFAIL);
+ }
+
+-static inline int ksft_exit_xpass(void)
++static inline __noreturn int ksft_exit_xpass(void)
+ {
+ ksft_print_cnts();
+ exit(KSFT_XPASS);
+ }
+
+-static inline __printf(1, 2) int ksft_exit_skip(const char *msg, ...)
++static inline __noreturn __printf(1, 2) int ksft_exit_skip(const char *msg, ...)
+ {
+ int saved_errno = errno;
+ va_list args;
+@@ -343,4 +347,21 @@ static inline __printf(1, 2) int ksft_exit_skip(const char *msg, ...)
+ exit(KSFT_SKIP);
+ }
+
++static inline int ksft_min_kernel_version(unsigned int min_major,
++ unsigned int min_minor)
++{
++#ifdef NOLIBC
++ ksft_print_msg("NOLIBC: Can't check kernel version: Function not implemented\n");
++ return 0;
++#else
++ unsigned int major, minor;
++ struct utsname info;
++
++ if (uname(&info) || sscanf(info.release, "%u.%u.", &major, &minor) != 2)
++ ksft_exit_fail_msg("Can't parse kernel version\n");
++
++ return major > min_major || (major == min_major && minor >= min_minor);
++#endif
++}
++
+ #endif /* __KSELFTEST_H */
+diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
+index d49dd3ffd0d96..c001dd79179d5 100644
+--- a/tools/testing/selftests/timers/posix_timers.c
++++ b/tools/testing/selftests/timers/posix_timers.c
+@@ -66,7 +66,7 @@ static int check_diff(struct timeval start, struct timeval end)
+ diff = end.tv_usec - start.tv_usec;
+ diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC;
+
+- if (abs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
++ if (llabs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
+ printf("Diff too high: %lld..", diff);
+ return -1;
+ }
+@@ -184,80 +184,71 @@ static int check_timer_create(int which)
+ return 0;
+ }
+
+-int remain;
+-__thread int got_signal;
++static pthread_t ctd_thread;
++static volatile int ctd_count, ctd_failed;
+
+-static void *distribution_thread(void *arg)
++static void ctd_sighandler(int sig)
+ {
+- while (__atomic_load_n(&remain, __ATOMIC_RELAXED));
+- return NULL;
++ if (pthread_self() != ctd_thread)
++ ctd_failed = 1;
++ ctd_count--;
+ }
+
+-static void distribution_handler(int nr)
++static void *ctd_thread_func(void *arg)
+ {
+- if (!__atomic_exchange_n(&got_signal, 1, __ATOMIC_RELAXED))
+- __atomic_fetch_sub(&remain, 1, __ATOMIC_RELAXED);
+-}
+-
+-/*
+- * Test that all running threads _eventually_ receive CLOCK_PROCESS_CPUTIME_ID
+- * timer signals. This primarily tests that the kernel does not favour any one.
+- */
+-static int check_timer_distribution(void)
+-{
+- int err, i;
+- timer_t id;
+- const int nthreads = 10;
+- pthread_t threads[nthreads];
+ struct itimerspec val = {
+ .it_value.tv_sec = 0,
+ .it_value.tv_nsec = 1000 * 1000,
+ .it_interval.tv_sec = 0,
+ .it_interval.tv_nsec = 1000 * 1000,
+ };
++ timer_t id;
+
+- remain = nthreads + 1; /* worker threads + this thread */
+- signal(SIGALRM, distribution_handler);
+- err = timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id);
+- if (err < 0) {
+- ksft_perror("Can't create timer");
+- return -1;
+- }
+- err = timer_settime(id, 0, &val, NULL);
+- if (err < 0) {
+- ksft_perror("Can't set timer");
+- return -1;
+- }
++ /* 10ms to ensure the leader sleeps */
++ usleep(10000);
+
+- for (i = 0; i < nthreads; i++) {
+- err = pthread_create(&threads[i], NULL, distribution_thread,
+- NULL);
+- if (err) {
+- ksft_print_msg("Can't create thread: %s (%d)\n",
+- strerror(errno), errno);
+- return -1;
+- }
+- }
++ ctd_count = 100;
++ if (timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id))
++ return "Can't create timer\n";
++ if (timer_settime(id, 0, &val, NULL))
++ return "Can't set timer\n";
+
+- /* Wait for all threads to receive the signal. */
+- while (__atomic_load_n(&remain, __ATOMIC_RELAXED));
++ while (ctd_count > 0 && !ctd_failed)
++ ;
+
+- for (i = 0; i < nthreads; i++) {
+- err = pthread_join(threads[i], NULL);
+- if (err) {
+- ksft_print_msg("Can't join thread: %s (%d)\n",
+- strerror(errno), errno);
+- return -1;
+- }
+- }
++ if (timer_delete(id))
++ return "Can't delete timer\n";
+
+- if (timer_delete(id)) {
+- ksft_perror("Can't delete timer");
+- return -1;
+- }
++ return NULL;
++}
++
++/*
++ * Test that only the running thread receives the timer signal.
++ */
++static int check_timer_distribution(void)
++{
++ const char *errmsg;
+
+- ksft_test_result_pass("check_timer_distribution\n");
++ signal(SIGALRM, ctd_sighandler);
++
++ errmsg = "Can't create thread\n";
++ if (pthread_create(&ctd_thread, NULL, ctd_thread_func, NULL))
++ goto err;
++
++ errmsg = "Can't join thread\n";
++ if (pthread_join(ctd_thread, (void **)&errmsg) || errmsg)
++ goto err;
++
++ if (!ctd_failed)
++ ksft_test_result_pass("check signal distribution\n");
++ else if (ksft_min_kernel_version(6, 3))
++ ksft_test_result_fail("check signal distribution\n");
++ else
++ ksft_test_result_skip("check signal distribution (old kernel)\n");
+ return 0;
++err:
++ ksft_print_msg("%s", errmsg);
++ return -1;
+ }
+
+ int main(int argc, char **argv)