diff --git a/.mailmap b/.mailmap index 71127b2608d20..bf076bbc36b1e 100644 --- a/.mailmap +++ b/.mailmap @@ -364,6 +364,11 @@ Nicolas Pitre Nicolas Saenz Julienne Nicolas Saenz Julienne Niklas Söderlund +Nikolay Aleksandrov +Nikolay Aleksandrov +Nikolay Aleksandrov +Nikolay Aleksandrov +Nikolay Aleksandrov Oleksandr Natalenko Oleksij Rempel Oleksij Rempel diff --git a/CREDITS b/CREDITS index 2d9da9a7defa6..de7e4dbbc5991 100644 --- a/CREDITS +++ b/CREDITS @@ -1706,6 +1706,10 @@ S: Panoramastrasse 18 S: D-69126 Heidelberg S: Germany +N: Neil Horman +M: nhorman@tuxdriver.com +D: SCTP protocol maintainer. + N: Simon Horman M: horms@verge.net.au D: Renesas ARM/ARM64 SoC maintainer diff --git a/Documentation/admin-guide/quickly-build-trimmed-linux.rst b/Documentation/admin-guide/quickly-build-trimmed-linux.rst index ff4f4cc8522bd..f08149bc53f84 100644 --- a/Documentation/admin-guide/quickly-build-trimmed-linux.rst +++ b/Documentation/admin-guide/quickly-build-trimmed-linux.rst @@ -215,12 +215,14 @@ again. reduce the compile time enormously, especially if you are running an universal kernel from a commodity Linux distribution. - There is a catch: the make target 'localmodconfig' will disable kernel - features you have not directly or indirectly through some program utilized - since you booted the system. You can reduce or nearly eliminate that risk by - using tricks outlined in the reference section; for quick testing purposes - that risk is often negligible, but it is an aspect you want to keep in mind - in case your kernel behaves oddly. + There is a catch: 'localmodconfig' is likely to disable kernel features you + did not use since you booted your Linux -- like drivers for currently + disconnected peripherals or a virtualization software not haven't used yet. + You can reduce or nearly eliminate that risk with tricks the reference + section outlines; but when building a kernel just for quick testing purposes + it is often negligible if such features are missing. But you should keep that + aspect in mind when using a kernel built with this make target, as it might + be the reason why something you only use occasionally stopped working. [:ref:`details`] @@ -271,6 +273,9 @@ again. does nothing at all; in that case you have to manually install your kernel, as outlined in the reference section. + If you are running a immutable Linux distribution, check its documentation + and the web to find out how to install your own kernel there. + [:ref:`details`] .. _another_sbs: @@ -291,29 +296,29 @@ again. version you care about, as git otherwise might retrieve the entire commit history:: - git fetch --shallow-exclude=v6.1 origin - - If you modified the sources (for example by applying a patch), you now need - to discard those modifications; that's because git otherwise will not be able - to switch to the sources of another version due to potential conflicting - changes:: - - git reset --hard + git fetch --shallow-exclude=v6.0 origin - Now checkout the version you are interested in, as explained above:: + Now switch to the version you are interested in -- but be aware the command + used here will discard any modifications you performed, as they would + conflict with the sources you want to checkout:: - git checkout --detach origin/master + git checkout --force --detach origin/master At this point you might want to patch the sources again or set/modify a build - tag, as explained earlier; afterwards adjust the build configuration to the - new codebase and build your next kernel:: + tag, as explained earlier. 
Afterwards adjust the build configuration to the + new codebase using olddefconfig, which will now adjust the configuration file + you prepared earlier using localmodconfig (~/linux/.config) for your next + kernel:: # reminder: if you want to apply patches, do it at this point # reminder: you might want to update your build tag at this point make olddefconfig + + Now build your kernel:: + make -j $(nproc --all) - Install the kernel as outlined above:: + Afterwards install the kernel as outlined above:: command -v installkernel && sudo make modules_install install @@ -584,11 +589,11 @@ versions and individual commits at hand at any time:: curl -L \ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/clone.bundle \ -o linux-stable.git.bundle - git clone clone.bundle ~/linux/ + git clone linux-stable.git.bundle ~/linux/ rm linux-stable.git.bundle cd ~/linux/ - git remote set-url origin - https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git + git remote set-url origin \ + https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git git fetch origin git checkout --detach origin/master diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst index 466c560b0c304..4877563241f3b 100644 --- a/Documentation/admin-guide/sysctl/net.rst +++ b/Documentation/admin-guide/sysctl/net.rst @@ -386,8 +386,8 @@ Default : 0 (for compatibility reasons) txrehash -------- -Controls default hash rethink behaviour on listening socket when SO_TXREHASH -option is set to SOCK_TXREHASH_DEFAULT (i. e. not overridden by setsockopt). +Controls default hash rethink behaviour on socket when SO_TXREHASH option is set +to SOCK_TXREHASH_DEFAULT (i. e. not overridden by setsockopt). If set to 1 (default), hash rethink is performed on listening socket. If set to 0, hash rethink is not performed. diff --git a/Documentation/block/index.rst b/Documentation/block/index.rst index 102953166429b..9fea696f9daa0 100644 --- a/Documentation/block/index.rst +++ b/Documentation/block/index.rst @@ -18,7 +18,6 @@ Block kyber-iosched null_blk pr - request stat switching-sched writeback_cache_control diff --git a/Documentation/block/request.rst b/Documentation/block/request.rst deleted file mode 100644 index 747021e1ffdbd..0000000000000 --- a/Documentation/block/request.rst +++ /dev/null @@ -1,99 +0,0 @@ -============================ -struct request documentation -============================ - -Jens Axboe 27/05/02 - - -.. FIXME: - No idea about what does mean - seems just some noise, so comment it - - 1.0 - Index - - 2.0 Struct request members classification - - 2.1 struct request members explanation - - 3.0 - - - 2.0 - - - -Short explanation of request members -==================================== - -Classification flags: - - = ==================== - D driver member - B block layer member - I I/O scheduler member - = ==================== - -Unless an entry contains a D classification, a device driver must not access -this member. Some members may contain D classifications, but should only be -access through certain macros or functions (eg ->flags). 
- - - -=============================== ======= ======================================= -Member Flag Comment -=============================== ======= ======================================= -struct list_head queuelist BI Organization on various internal - queues - -``void *elevator_private`` I I/O scheduler private data - -unsigned char cmd[16] D Driver can use this for setting up - a cdb before execution, see - blk_queue_prep_rq - -unsigned long flags DBI Contains info about data direction, - request type, etc. - -int rq_status D Request status bits - -kdev_t rq_dev DBI Target device - -int errors DB Error counts - -sector_t sector DBI Target location - -unsigned long hard_nr_sectors B Used to keep sector sane - -unsigned long nr_sectors DBI Total number of sectors in request - -unsigned long hard_nr_sectors B Used to keep nr_sectors sane - -unsigned short nr_phys_segments DB Number of physical scatter gather - segments in a request - -unsigned short nr_hw_segments DB Number of hardware scatter gather - segments in a request - -unsigned int current_nr_sectors DB Number of sectors in first segment - of request - -unsigned int hard_cur_sectors B Used to keep current_nr_sectors sane - -int tag DB TCQ tag, if assigned - -``void *special`` D Free to be used by driver - -``char *buffer`` D Map of first segment, also see - section on bouncing SECTION - -``struct completion *waiting`` D Can be used by driver to get signalled - on request completion - -``struct bio *bio`` DBI First bio in request - -``struct bio *biotail`` DBI Last bio in request - -``struct request_queue *q`` DB Request queue this request belongs to - -``struct request_list *rl`` B Request list this request came from -=============================== ======= ======================================= diff --git a/Documentation/cdrom/index.rst b/Documentation/cdrom/index.rst index e87a8785bc1a7..e9b022d709395 100644 --- a/Documentation/cdrom/index.rst +++ b/Documentation/cdrom/index.rst @@ -1,8 +1,8 @@ .. SPDX-License-Identifier: GPL-2.0 -===== -cdrom -===== +====== +CD-ROM +====== .. toctree:: :maxdepth: 1 diff --git a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml index 9b31f864e071e..71364c6081ff5 100644 --- a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml +++ b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml @@ -32,7 +32,7 @@ properties: maxItems: 1 iommus: - maxItems: 1 + maxItems: 4 power-domains: maxItems: 1 diff --git a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml index e6c1ebfe8a324..130e16d025bcf 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml +++ b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml @@ -82,6 +82,18 @@ properties: Indicates if the DSI controller is driving a panel which needs 2 DSI links. + qcom,master-dsi: + type: boolean + description: | + Indicates if the DSI controller is the master DSI controller when + qcom,dual-dsi-mode enabled. + + qcom,sync-dual-dsi: + type: boolean + description: | + Indicates if the DSI controller needs to sync the other DSI controller + with MIPI DCS commands when qcom,dual-dsi-mode enabled. 
+ assigned-clocks: minItems: 2 maxItems: 4 diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov2685.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov2685.yaml index 8b389314c3526..e2ffe0a9c26b3 100644 --- a/Documentation/devicetree/bindings/media/i2c/ovti,ov2685.yaml +++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov2685.yaml @@ -49,6 +49,7 @@ properties: properties: data-lanes: + minItems: 1 maxItems: 2 required: diff --git a/Documentation/devicetree/bindings/net/can/st,stm32-bxcan.yaml b/Documentation/devicetree/bindings/net/can/st,stm32-bxcan.yaml index 769fa5c27b76e..de1d4298893be 100644 --- a/Documentation/devicetree/bindings/net/can/st,stm32-bxcan.yaml +++ b/Documentation/devicetree/bindings/net/can/st,stm32-bxcan.yaml @@ -21,11 +21,22 @@ properties: st,can-primary: description: - Primary and secondary mode of the bxCAN peripheral is only relevant - if the chip has two CAN peripherals. In that case they share some - of the required logic. + Primary mode of the bxCAN peripheral is only relevant if the chip has + two CAN peripherals in dual CAN configuration. In that case they share + some of the required logic. + Not to be used if the peripheral is in single CAN configuration. To avoid misunderstandings, it should be noted that ST documentation - uses the terms master/slave instead of primary/secondary. + uses the terms master instead of primary. + type: boolean + + st,can-secondary: + description: + Secondary mode of the bxCAN peripheral is only relevant if the chip + has two CAN peripherals in dual CAN configuration. In that case they + share some of the required logic. + Not to be used if the peripheral is in single CAN configuration. + To avoid misunderstandings, it should be noted that ST documentation + uses the terms slave instead of secondary. type: boolean reg: diff --git a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml index 9a64ed6587454..4d5f5cc6d031e 100644 --- a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml +++ b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml @@ -12,10 +12,6 @@ description: cs_sck_delay of 500ns. Ensuring that this SPI timing requirement is observed depends on the SPI bus master driver. -allOf: - - $ref: dsa.yaml#/$defs/ethernet-ports - - $ref: /schemas/spi/spi-peripheral-props.yaml# - maintainers: - Vladimir Oltean @@ -36,6 +32,9 @@ properties: reg: maxItems: 1 + spi-cpha: true + spi-cpol: true + # Optional container node for the 2 internal MDIO buses of the SJA1110 # (one for the internal 100base-T1 PHYs and the other for the single # 100base-TX PHY). The "reg" property does not have physical significance. 
@@ -109,6 +108,30 @@ $defs: 1860, 1880, 1900, 1920, 1940, 1960, 1980, 2000, 2020, 2040, 2060, 2080, 2100, 2120, 2140, 2160, 2180, 2200, 2220, 2240, 2260] +allOf: + - $ref: dsa.yaml#/$defs/ethernet-ports + - $ref: /schemas/spi/spi-peripheral-props.yaml# + - if: + properties: + compatible: + enum: + - nxp,sja1105e + - nxp,sja1105p + - nxp,sja1105q + - nxp,sja1105r + - nxp,sja1105s + - nxp,sja1105t + then: + properties: + spi-cpol: false + required: + - spi-cpha + else: + properties: + spi-cpha: false + required: + - spi-cpol + unevaluatedProperties: false examples: @@ -120,6 +143,7 @@ examples: ethernet-switch@1 { reg = <0x1>; compatible = "nxp,sja1105t"; + spi-cpha; ethernet-ports { #address-cells = <1>; diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-common.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-common.yaml index 9bff8ecb653c0..d91b639ae7ae7 100644 --- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-common.yaml +++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-common.yaml @@ -17,20 +17,11 @@ description: properties: clocks: minItems: 3 - items: - - description: PCIe bridge clock. - - description: PCIe bus clock. - - description: PCIe PHY clock. - - description: Additional required clock entry for imx6sx-pcie, - imx6sx-pcie-ep, imx8mq-pcie, imx8mq-pcie-ep. + maxItems: 4 clock-names: minItems: 3 - items: - - const: pcie - - const: pcie_bus - - enum: [ pcie_phy, pcie_aux ] - - enum: [ pcie_inbound_axi, pcie_aux ] + maxItems: 4 num-lanes: const: 1 diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml index f4a328ec1daa6..ee155ed5f1811 100644 --- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml +++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml @@ -31,6 +31,19 @@ properties: - const: dbi - const: addr_space + clocks: + minItems: 3 + items: + - description: PCIe bridge clock. + - description: PCIe bus clock. + - description: PCIe PHY clock. + - description: Additional required clock entry for imx6sx-pcie, + imx6sx-pcie-ep, imx8mq-pcie, imx8mq-pcie-ep. + + clock-names: + minItems: 3 + maxItems: 4 + interrupts: items: - description: builtin eDMA interrupter. @@ -49,6 +62,31 @@ required: allOf: - $ref: /schemas/pci/snps,dw-pcie-ep.yaml# - $ref: /schemas/pci/fsl,imx6q-pcie-common.yaml# + - if: + properties: + compatible: + enum: + - fsl,imx8mq-pcie-ep + then: + properties: + clocks: + minItems: 4 + clock-names: + items: + - const: pcie + - const: pcie_bus + - const: pcie_phy + - const: pcie_aux + else: + properties: + clocks: + maxItems: 3 + clock-names: + items: + - const: pcie + - const: pcie_bus + - const: pcie_aux + unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml index 2443641754d3f..81bbb8728f0f9 100644 --- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml @@ -40,6 +40,19 @@ properties: - const: dbi - const: config + clocks: + minItems: 3 + items: + - description: PCIe bridge clock. + - description: PCIe bus clock. + - description: PCIe PHY clock. + - description: Additional required clock entry for imx6sx-pcie, + imx6sx-pcie-ep, imx8mq-pcie, imx8mq-pcie-ep. + + clock-names: + minItems: 3 + maxItems: 4 + interrupts: items: - description: builtin MSI controller. 
@@ -77,6 +90,70 @@ required: allOf: - $ref: /schemas/pci/snps,dw-pcie.yaml# - $ref: /schemas/pci/fsl,imx6q-pcie-common.yaml# + - if: + properties: + compatible: + enum: + - fsl,imx6sx-pcie + then: + properties: + clocks: + minItems: 4 + clock-names: + items: + - const: pcie + - const: pcie_bus + - const: pcie_phy + - const: pcie_inbound_axi + + - if: + properties: + compatible: + enum: + - fsl,imx8mq-pcie + then: + properties: + clocks: + minItems: 4 + clock-names: + items: + - const: pcie + - const: pcie_bus + - const: pcie_phy + - const: pcie_aux + + - if: + properties: + compatible: + enum: + - fsl,imx6q-pcie + - fsl,imx6qp-pcie + - fsl,imx7d-pcie + then: + properties: + clocks: + maxItems: 3 + clock-names: + items: + - const: pcie + - const: pcie_bus + - const: pcie_phy + + - if: + properties: + compatible: + enum: + - fsl,imx8mm-pcie + - fsl,imx8mp-pcie + then: + properties: + clocks: + maxItems: 3 + clock-names: + items: + - const: pcie + - const: pcie_bus + - const: pcie_aux unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/sound/tas2562.yaml b/Documentation/devicetree/bindings/sound/tas2562.yaml index a5bb561bfcfba..31a3024ea7898 100644 --- a/Documentation/devicetree/bindings/sound/tas2562.yaml +++ b/Documentation/devicetree/bindings/sound/tas2562.yaml @@ -55,7 +55,9 @@ properties: description: TDM TX current sense time slot. '#sound-dai-cells': - const: 1 + # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward + # compatibility but is deprecated. + enum: [0, 1] required: - compatible @@ -72,7 +74,7 @@ examples: codec: codec@4c { compatible = "ti,tas2562"; reg = <0x4c>; - #sound-dai-cells = <1>; + #sound-dai-cells = <0>; interrupt-parent = <&gpio1>; interrupts = <14>; shutdown-gpios = <&gpio1 15 0>; diff --git a/Documentation/devicetree/bindings/sound/tas2770.yaml b/Documentation/devicetree/bindings/sound/tas2770.yaml index 26088adb9dc24..8908bf1122e96 100644 --- a/Documentation/devicetree/bindings/sound/tas2770.yaml +++ b/Documentation/devicetree/bindings/sound/tas2770.yaml @@ -57,7 +57,9 @@ properties: - 1 # Falling edge '#sound-dai-cells': - const: 1 + # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward + # compatibility but is deprecated. + enum: [0, 1] required: - compatible @@ -74,7 +76,7 @@ examples: codec: codec@41 { compatible = "ti,tas2770"; reg = <0x41>; - #sound-dai-cells = <1>; + #sound-dai-cells = <0>; interrupt-parent = <&gpio1>; interrupts = <14>; reset-gpio = <&gpio1 15 0>; diff --git a/Documentation/devicetree/bindings/sound/tas27xx.yaml b/Documentation/devicetree/bindings/sound/tas27xx.yaml index 8cba01316855d..a876545ec87d8 100644 --- a/Documentation/devicetree/bindings/sound/tas27xx.yaml +++ b/Documentation/devicetree/bindings/sound/tas27xx.yaml @@ -50,7 +50,9 @@ properties: description: TDM TX voltage sense time slot. '#sound-dai-cells': - const: 1 + # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward + # compatibility but is deprecated. 
+ enum: [0, 1] required: - compatible @@ -67,7 +69,7 @@ examples: codec: codec@38 { compatible = "ti,tas2764"; reg = <0x38>; - #sound-dai-cells = <1>; + #sound-dai-cells = <0>; interrupt-parent = <&gpio1>; interrupts = <14>; reset-gpios = <&gpio1 15 0>; diff --git a/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt b/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt index f59125bc79d1b..0b4e21bde5bc1 100644 --- a/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt +++ b/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt @@ -8,7 +8,7 @@ Required properties: "ti,tlv320aic32x6" TLV320AIC3206, TLV320AIC3256 "ti,tas2505" TAS2505, TAS2521 - reg: I2C slave address - - supply-*: Required supply regulators are: + - *-supply: Required supply regulators are: "iov" - digital IO power supply "ldoin" - LDO power supply "dv" - Digital core power supply diff --git a/Documentation/filesystems/ramfs-rootfs-initramfs.rst b/Documentation/filesystems/ramfs-rootfs-initramfs.rst index 164960631925d..447f767c64627 100644 --- a/Documentation/filesystems/ramfs-rootfs-initramfs.rst +++ b/Documentation/filesystems/ramfs-rootfs-initramfs.rst @@ -6,8 +6,7 @@ Ramfs, rootfs and initramfs October 17, 2005 -Rob Landley -============================= +:Author: Rob Landley What is ramfs? -------------- diff --git a/Documentation/filesystems/sharedsubtree.rst b/Documentation/filesystems/sharedsubtree.rst index d83395354250d..1cf56489ed484 100644 --- a/Documentation/filesystems/sharedsubtree.rst +++ b/Documentation/filesystems/sharedsubtree.rst @@ -147,6 +147,7 @@ replicas continue to be exactly same. 3) Setting mount states +----------------------- The mount command (util-linux package) can be used to set mount states:: @@ -612,6 +613,7 @@ replicas continue to be exactly same. 6) Quiz +------- A. What is the result of the following command sequence? @@ -673,6 +675,7 @@ replicas continue to be exactly same. /mnt/1/test be? 7) FAQ +------ Q1. Why is bind mount needed? How is it different from symbolic links? symbolic links can get stale if the destination mount gets @@ -841,6 +844,7 @@ replicas continue to be exactly same. tmp usr tmp usr tmp usr 8) Implementation +----------------- 8A) Datastructure diff --git a/Documentation/fpga/index.rst b/Documentation/fpga/index.rst index f80f95667ca21..43c968871d995 100644 --- a/Documentation/fpga/index.rst +++ b/Documentation/fpga/index.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 ==== -fpga +FPGA ==== .. toctree:: diff --git a/Documentation/locking/index.rst b/Documentation/locking/index.rst index 7003bd5aeff4c..6a9ea96c8bcb7 100644 --- a/Documentation/locking/index.rst +++ b/Documentation/locking/index.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 ======= -locking +Locking ======= .. toctree:: diff --git a/Documentation/netlink/genetlink-legacy.yaml b/Documentation/netlink/genetlink-legacy.yaml index b33541a51d6bb..ac4350498f5e9 100644 --- a/Documentation/netlink/genetlink-legacy.yaml +++ b/Documentation/netlink/genetlink-legacy.yaml @@ -122,6 +122,14 @@ properties: enum: [ u8, u16, u32, u64, s8, s16, s32, s64, string ] len: $ref: '#/$defs/len-or-define' + byte-order: + enum: [ little-endian, big-endian ] + doc: + description: Documentation for the struct member attribute. + type: string + enum: + description: Name of the enum type used for the attribute. 
+ type: string # End genetlink-legacy attribute-sets: diff --git a/Documentation/netlink/specs/ovs_flow.yaml b/Documentation/netlink/specs/ovs_flow.yaml new file mode 100644 index 0000000000000..3b0624c870740 --- /dev/null +++ b/Documentation/netlink/specs/ovs_flow.yaml @@ -0,0 +1,831 @@ +# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) + +name: ovs_flow +version: 1 +protocol: genetlink-legacy + +doc: + OVS flow configuration over generic netlink. + +definitions: + - + name: ovs-header + type: struct + doc: | + Header for OVS Generic Netlink messages. + members: + - + name: dp-ifindex + type: u32 + doc: | + ifindex of local port for datapath (0 to make a request not specific + to a datapath). + - + name: ovs-flow-stats + type: struct + members: + - + name: n-packets + type: u64 + doc: Number of matched packets. + - + name: n-bytes + type: u64 + doc: Number of matched bytes. + - + name: ovs-key-mpls + type: struct + members: + - + name: mpls-lse + type: u32 + byte-order: big-endian + - + name: ovs-key-ipv4 + type: struct + members: + - + name: ipv4-src + type: u32 + byte-order: big-endian + - + name: ipv4-dst + type: u32 + byte-order: big-endian + - + name: ipv4-proto + type: u8 + - + name: ipv4-tos + type: u8 + - + name: ipv4-ttl + type: u8 + - + name: ipv4-frag + type: u8 + enum: ovs-frag-type + - + name: ovs-frag-type + type: enum + entries: + - + name: none + doc: Packet is not a fragment. + - + name: first + doc: Packet is a fragment with offset 0. + - + name: later + doc: Packet is a fragment with nonzero offset. + - + name: any + value: 255 + - + name: ovs-key-tcp + type: struct + members: + - + name: tcp-src + type: u16 + byte-order: big-endian + - + name: tcp-dst + type: u16 + byte-order: big-endian + - + name: ovs-key-udp + type: struct + members: + - + name: udp-src + type: u16 + byte-order: big-endian + - + name: udp-dst + type: u16 + byte-order: big-endian + - + name: ovs-key-sctp + type: struct + members: + - + name: sctp-src + type: u16 + byte-order: big-endian + - + name: sctp-dst + type: u16 + byte-order: big-endian + - + name: ovs-key-icmp + type: struct + members: + - + name: icmp-type + type: u8 + - + name: icmp-code + type: u8 + - + name: ovs-key-ct-tuple-ipv4 + type: struct + members: + - + name: ipv4-src + type: u32 + byte-order: big-endian + - + name: ipv4-dst + type: u32 + byte-order: big-endian + - + name: src-port + type: u16 + byte-order: big-endian + - + name: dst-port + type: u16 + byte-order: big-endian + - + name: ipv4-proto + type: u8 + - + name: ovs-action-push-vlan + type: struct + members: + - + name: vlan_tpid + type: u16 + byte-order: big-endian + doc: Tag protocol identifier (TPID) to push. + - + name: vlan_tci + type: u16 + byte-order: big-endian + doc: Tag control identifier (TCI) to push. + - + name: ovs-ufid-flags + type: flags + entries: + - omit-key + - omit-mask + - omit-actions + - + name: ovs-action-hash + type: struct + members: + - + name: hash-algorithm + type: u32 + doc: Algorithm used to compute hash prior to recirculation. + - + name: hash-basis + type: u32 + doc: Basis used for computing hash. + - + name: ovs-hash-alg + type: enum + doc: | + Data path hash algorithm for computing Datapath hash. The algorithm type only specifies + the fields in a flow will be used as part of the hash. Each datapath is free to use its + own hash algorithm. The hash value will be opaque to the user space daemon. 
+ entries: + - ovs-hash-alg-l4 + + - + name: ovs-action-push-mpls + type: struct + members: + - + name: lse + type: u32 + byte-order: big-endian + doc: | + MPLS label stack entry to push + - + name: ethertype + type: u32 + byte-order: big-endian + doc: | + Ethertype to set in the encapsulating ethernet frame. The only values + ethertype should ever be given are ETH_P_MPLS_UC and ETH_P_MPLS_MC, + indicating MPLS unicast or multicast. Other are rejected. + - + name: ovs-action-add-mpls + type: struct + members: + - + name: lse + type: u32 + byte-order: big-endian + doc: | + MPLS label stack entry to push + - + name: ethertype + type: u32 + byte-order: big-endian + doc: | + Ethertype to set in the encapsulating ethernet frame. The only values + ethertype should ever be given are ETH_P_MPLS_UC and ETH_P_MPLS_MC, + indicating MPLS unicast or multicast. Other are rejected. + - + name: tun-flags + type: u16 + doc: | + MPLS tunnel attributes. + - + name: ct-state-flags + type: flags + entries: + - + name: new + doc: Beginning of a new connection. + - + name: established + doc: Part of an existing connenction + - + name: related + doc: Related to an existing connection. + - + name: reply-dir + doc: Flow is in the reply direction. + - + name: invalid + doc: Could not track the connection. + - + name: tracked + doc: Conntrack has occurred. + - + name: src-nat + doc: Packet's source address/port was mangled by NAT. + - + name: dst-nat + doc: Packet's destination address/port was mangled by NAT. + +attribute-sets: + - + name: flow-attrs + attributes: + - + name: key + type: nest + nested-attributes: key-attrs + doc: | + Nested attributes specifying the flow key. Always present in + notifications. Required for all requests (except dumps). + - + name: actions + type: nest + nested-attributes: action-attrs + doc: | + Nested attributes specifying the actions to take for packets that + match the key. Always present in notifications. Required for + OVS_FLOW_CMD_NEW requests, optional for OVS_FLOW_CMD_SET requests. An + OVS_FLOW_CMD_SET without OVS_FLOW_ATTR_ACTIONS will not modify the + actions. To clear the actions, an OVS_FLOW_ATTR_ACTIONS without any + nested attributes must be given. + - + name: stats + type: binary + struct: ovs-flow-stats + doc: | + Statistics for this flow. Present in notifications if the stats would + be nonzero. Ignored in requests. + - + name: tcp-flags + type: u8 + doc: | + An 8-bit value giving the ORed value of all of the TCP flags seen on + packets in this flow. Only present in notifications for TCP flows, and + only if it would be nonzero. Ignored in requests. + - + name: used + type: u64 + doc: | + A 64-bit integer giving the time, in milliseconds on the system + monotonic clock, at which a packet was last processed for this + flow. Only present in notifications if a packet has been processed for + this flow. Ignored in requests. + - + name: clear + type: flag + doc: | + If present in a OVS_FLOW_CMD_SET request, clears the last-used time, + accumulated TCP flags, and statistics for this flow. Otherwise + ignored in requests. Never present in notifications. + - + name: mask + type: nest + nested-attributes: key-attrs + doc: | + Nested attributes specifying the mask bits for wildcarded flow + match. Mask bit value '1' specifies exact match with corresponding + flow key bit, while mask bit value '0' specifies a wildcarded + match. Omitting attribute is treated as wildcarding all corresponding + fields. Optional for all requests. 
If not present, all flow key bits + are exact match bits. + - + name: probe + type: binary + doc: | + Flow operation is a feature probe, error logging should be suppressed. + - + name: ufid + type: binary + doc: | + A value between 1-16 octets specifying a unique identifier for the + flow. Causes the flow to be indexed by this value rather than the + value of the OVS_FLOW_ATTR_KEY attribute. Optional for all + requests. Present in notifications if the flow was created with this + attribute. + - + name: ufid-flags + type: u32 + enum: ovs-ufid-flags + doc: | + A 32-bit value of ORed flags that provide alternative semantics for + flow installation and retrieval. Optional for all requests. + - + name: pad + type: binary + + - + name: key-attrs + attributes: + - + name: encap + type: nest + nested-attributes: key-attrs + - + name: priority + type: u32 + - + name: in-port + type: u32 + - + name: ethernet + type: binary + doc: struct ovs_key_ethernet + - + name: vlan + type: u16 + byte-order: big-endian + - + name: ethertype + type: u16 + byte-order: big-endian + - + name: ipv4 + type: binary + struct: ovs-key-ipv4 + - + name: ipv6 + type: binary + doc: struct ovs_key_ipv6 + - + name: tcp + type: binary + struct: ovs-key-tcp + - + name: udp + type: binary + struct: ovs-key-udp + - + name: icmp + type: binary + struct: ovs-key-icmp + - + name: icmpv6 + type: binary + struct: ovs-key-icmp + - + name: arp + type: binary + doc: struct ovs_key_arp + - + name: nd + type: binary + doc: struct ovs_key_nd + - + name: skb-mark + type: u32 + - + name: tunnel + type: nest + nested-attributes: tunnel-key-attrs + - + name: sctp + type: binary + struct: ovs-key-sctp + - + name: tcp-flags + type: u16 + byte-order: big-endian + - + name: dp-hash + type: u32 + doc: Value 0 indicates the hash is not computed by the datapath. + - + name: recirc-id + type: u32 + - + name: mpls + type: binary + struct: ovs-key-mpls + - + name: ct-state + type: u32 + enum: ct-state-flags + enum-as-flags: true + - + name: ct-zone + type: u16 + doc: connection tracking zone + - + name: ct-mark + type: u32 + doc: connection tracking mark + - + name: ct-labels + type: binary + doc: 16-octet connection tracking label + - + name: ct-orig-tuple-ipv4 + type: binary + struct: ovs-key-ct-tuple-ipv4 + - + name: ct-orig-tuple-ipv6 + type: binary + doc: struct ovs_key_ct_tuple_ipv6 + - + name: nsh + type: nest + nested-attributes: ovs-nsh-key-attrs + - + name: packet-type + type: u32 + byte-order: big-endian + doc: Should not be sent to the kernel + - + name: nd-extensions + type: binary + doc: Should not be sent to the kernel + - + name: tunnel-info + type: binary + doc: struct ip_tunnel_info + - + name: ipv6-exthdrs + type: binary + doc: struct ovs_key_ipv6_exthdr + - + name: action-attrs + attributes: + - + name: output + type: u32 + doc: ovs port number in datapath + - + name: userspace + type: nest + nested-attributes: userspace-attrs + - + name: set + type: nest + nested-attributes: key-attrs + doc: Replaces the contents of an existing header. The single nested attribute specifies a header to modify and its value. + - + name: push-vlan + type: binary + struct: ovs-action-push-vlan + doc: Push a new outermost 802.1Q or 802.1ad header onto the packet. + - + name: pop-vlan + type: flag + doc: Pop the outermost 802.1Q or 802.1ad header from the packet. + - + name: sample + type: nest + nested-attributes: sample-attrs + doc: | + Probabilistically executes actions, as specified in the nested attributes. 
+ - + name: recirc + type: u32 + doc: recirc id + - + name: hash + type: binary + struct: ovs-action-hash + - + name: push-mpls + type: binary + struct: ovs-action-push-mpls + doc: | + Push a new MPLS label stack entry onto the top of the packets MPLS + label stack. Set the ethertype of the encapsulating frame to either + ETH_P_MPLS_UC or ETH_P_MPLS_MC to indicate the new packet contents. + - + name: pop-mpls + type: u16 + byte-order: big-endian + doc: ethertype + - + name: set-masked + type: nest + nested-attributes: key-attrs + doc: | + Replaces the contents of an existing header. A nested attribute + specifies a header to modify, its value, and a mask. For every bit set + in the mask, the corresponding bit value is copied from the value to + the packet header field, rest of the bits are left unchanged. The + non-masked value bits must be passed in as zeroes. Masking is not + supported for the OVS_KEY_ATTR_TUNNEL attribute. + - + name: ct + type: nest + nested-attributes: ct-attrs + doc: | + Track the connection. Populate the conntrack-related entries + in the flow key. + - + name: trunc + type: u32 + doc: struct ovs_action_trunc is a u32 max length + - + name: push-eth + type: binary + doc: struct ovs_action_push_eth + - + name: pop-eth + type: flag + - + name: ct-clear + type: flag + - + name: push-nsh + type: nest + nested-attributes: ovs-nsh-key-attrs + doc: | + Push NSH header to the packet. + - + name: pop-nsh + type: flag + doc: | + Pop the outermost NSH header off the packet. + - + name: meter + type: u32 + doc: | + Run packet through a meter, which may drop the packet, or modify the + packet (e.g., change the DSCP field) + - + name: clone + type: nest + nested-attributes: action-attrs + doc: | + Make a copy of the packet and execute a list of actions without + affecting the original packet and key. + - + name: check-pkt-len + type: nest + nested-attributes: check-pkt-len-attrs + doc: | + Check the packet length and execute a set of actions if greater than + the specified packet length, else execute another set of actions. + - + name: add-mpls + type: binary + struct: ovs-action-add-mpls + doc: | + Push a new MPLS label stack entry at the start of the packet or at the + start of the l3 header depending on the value of l3 tunnel flag in the + tun_flags field of this OVS_ACTION_ATTR_ADD_MPLS argument. 
+ - + name: dec-ttl + type: nest + nested-attributes: dec-ttl-attrs + - + name: tunnel-key-attrs + attributes: + - + name: id + type: u64 + byte-order: big-endian + value: 0 + - + name: ipv4-src + type: u32 + byte-order: big-endian + - + name: ipv4-dst + type: u32 + byte-order: big-endian + - + name: tos + type: u8 + - + name: ttl + type: u8 + - + name: dont-fragment + type: flag + - + name: csum + type: flag + - + name: oam + type: flag + - + name: geneve-opts + type: binary + sub-type: u32 + - + name: tp-src + type: u16 + byte-order: big-endian + - + name: tp-dst + type: u16 + byte-order: big-endian + - + name: vxlan-opts + type: nest + nested-attributes: vxlan-ext-attrs + - + name: ipv6-src + type: binary + doc: | + struct in6_addr source IPv6 address + - + name: ipv6-dst + type: binary + doc: | + struct in6_addr destination IPv6 address + - + name: pad + type: binary + - + name: erspan-opts + type: binary + doc: | + struct erspan_metadata + - + name: ipv4-info-bridge + type: flag + - + name: check-pkt-len-attrs + attributes: + - + name: pkt-len + type: u16 + - + name: actions-if-greater + type: nest + nested-attributes: action-attrs + - + name: actions-if-less-equal + type: nest + nested-attributes: action-attrs + - + name: sample-attrs + attributes: + - + name: probability + type: u32 + - + name: actions + type: nest + nested-attributes: action-attrs + - + name: userspace-attrs + attributes: + - + name: pid + type: u32 + - + name: userdata + type: binary + - + name: egress-tun-port + type: u32 + - + name: actions + type: flag + - + name: ovs-nsh-key-attrs + attributes: + - + name: base + type: binary + - + name: md1 + type: binary + - + name: md2 + type: binary + - + name: ct-attrs + attributes: + - + name: commit + type: flag + - + name: zone + type: u16 + - + name: mark + type: binary + - + name: labels + type: binary + - + name: helper + type: string + - + name: nat + type: nest + nested-attributes: nat-attrs + - + name: force-commit + type: flag + - + name: eventmask + type: u32 + - + name: timeout + type: string + - + name: nat-attrs + attributes: + - + name: src + type: binary + - + name: dst + type: binary + - + name: ip-min + type: binary + - + name: ip-max + type: binary + - + name: proto-min + type: binary + - + name: proto-max + type: binary + - + name: persistent + type: binary + - + name: proto-hash + type: binary + - + name: proto-random + type: binary + - + name: dec-ttl-attrs + attributes: + - + name: action + type: nest + nested-attributes: action-attrs + - + name: vxlan-ext-attrs + attributes: + - + name: gbp + type: u32 + +operations: + fixed-header: ovs-header + list: + - + name: flow-get + doc: Get / dump OVS flow configuration and state + value: 3 + attribute-set: flow-attrs + do: &flow-get-op + request: + attributes: + - dp-ifindex + - key + - ufid + - ufid-flags + reply: + attributes: + - dp-ifindex + - key + - ufid + - mask + - stats + - actions + dump: *flow-get-op + +mcast-groups: + list: + - + name: ovs_flow diff --git a/Documentation/networking/device_drivers/ethernet/intel/ice.rst b/Documentation/networking/device_drivers/ethernet/intel/ice.rst index 69695e5511f42..e4d065c55ea8d 100644 --- a/Documentation/networking/device_drivers/ethernet/intel/ice.rst +++ b/Documentation/networking/device_drivers/ethernet/intel/ice.rst @@ -84,24 +84,6 @@ Once the VM shuts down, or otherwise releases the VF, the command will complete. 
-Important notes for SR-IOV and Link Aggregation ------------------------------------------------ -Link Aggregation is mutually exclusive with SR-IOV. - -- If Link Aggregation is active, SR-IOV VFs cannot be created on the PF. -- If SR-IOV is active, you cannot set up Link Aggregation on the interface. - -Bridging and MACVLAN are also affected by this. If you wish to use bridging or -MACVLAN with SR-IOV, you must set up bridging or MACVLAN before enabling -SR-IOV. If you are using bridging or MACVLAN in conjunction with SR-IOV, and -you want to remove the interface from the bridge or MACVLAN, you must follow -these steps: - -1. Destroy SR-IOV VFs if they exist -2. Remove the interface from the bridge or MACVLAN -3. Recreate SRIOV VFs as needed - - Additional Features and Configurations ====================================== diff --git a/Documentation/pcmcia/index.rst b/Documentation/pcmcia/index.rst index 7ae1f62fca14f..8067236c51ab4 100644 --- a/Documentation/pcmcia/index.rst +++ b/Documentation/pcmcia/index.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 ====== -pcmcia +PCMCIA ====== .. toctree:: diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst index f73ac9e175a8d..83614cec9328b 100644 --- a/Documentation/process/maintainer-netdev.rst +++ b/Documentation/process/maintainer-netdev.rst @@ -127,13 +127,32 @@ the value of ``Message-ID`` to the URL above. Updating patch status ~~~~~~~~~~~~~~~~~~~~~ -It may be tempting to help the maintainers and update the state of your -own patches when you post a new version or spot a bug. Please **do not** -do that. -Interfering with the patch status on patchwork will only cause confusion. Leave -it to the maintainer to figure out what is the most recent and current -version that should be applied. If there is any doubt, the maintainer -will reply and ask what should be done. +Contributors and reviewers do not have the permissions to update patch +state directly in patchwork. Patchwork doesn't expose much information +about the history of the state of patches, therefore having multiple +people update the state leads to confusion. + +Instead of delegating patchwork permissions netdev uses a simple mail +bot which looks for special commands/lines within the emails sent to +the mailing list. For example to mark a series as Changes Requested +one needs to send the following line anywhere in the email thread:: + + pw-bot: changes-requested + +As a result the bot will set the entire series to Changes Requested. +This may be useful when author discovers a bug in their own series +and wants to prevent it from getting applied. + +The use of the bot is entirely optional, if in doubt ignore its existence +completely. Maintainers will classify and update the state of the patches +themselves. No email should ever be sent to the list with the main purpose +of communicating with the bot, the bot commands should be seen as metadata. + +The use of the bot is restricted to authors of the patches (the ``From:`` +header on patch submission and command must match!), maintainers themselves +and a handful of senior reviewers. 
Bot records its activity here: + + https://patchwork.hopto.org/pw-bot.html Review timelines ~~~~~~~~~~~~~~~~ diff --git a/Documentation/s390/vfio-ap.rst b/Documentation/s390/vfio-ap.rst index d46e98c7c1ec6..bb3f4c4e28856 100644 --- a/Documentation/s390/vfio-ap.rst +++ b/Documentation/s390/vfio-ap.rst @@ -551,7 +551,6 @@ These are the steps: * IOMMU_SUPPORT * S390 * ZCRYPT - * S390_AP_IOMMU * VFIO * KVM diff --git a/Documentation/staging/crc32.rst b/Documentation/staging/crc32.rst index 8a6860f33b4e7..7542220967cb4 100644 --- a/Documentation/staging/crc32.rst +++ b/Documentation/staging/crc32.rst @@ -1,5 +1,5 @@ ================================= -brief tutorial on CRC computation +Brief tutorial on CRC computation ================================= A CRC is a long-division remainder. You add the CRC to the message, diff --git a/Documentation/timers/index.rst b/Documentation/timers/index.rst index df510ad0c989b..983f91f8f023e 100644 --- a/Documentation/timers/index.rst +++ b/Documentation/timers/index.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 ====== -timers +Timers ====== .. toctree:: diff --git a/MAINTAINERS b/MAINTAINERS index 5ced63fcbfb9b..5c301e22ff741 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1677,10 +1677,7 @@ F: drivers/power/reset/arm-versatile-reboot.c F: drivers/soc/versatile/ ARM KOMEDA DRM-KMS DRIVER -M: James (Qian) Wang M: Liviu Dudau -M: Mihail Atanassov -L: Mali DP Maintainers S: Supported T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/display/arm,komeda.yaml @@ -1701,8 +1698,6 @@ F: include/uapi/drm/panfrost_drm.h ARM MALI-DP DRM DRIVER M: Liviu Dudau -M: Brian Starkey -L: Mali DP Maintainers S: Supported T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/display/arm,malidp.yaml @@ -1970,7 +1965,7 @@ F: Documentation/devicetree/bindings/nvmem/apple,efuses.yaml F: Documentation/devicetree/bindings/pci/apple,pcie.yaml F: Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml F: Documentation/devicetree/bindings/power/apple* -F: Documentation/devicetree/bindings/pwm/pwm-apple.yaml +F: Documentation/devicetree/bindings/pwm/apple,s5l-fpwm.yaml F: Documentation/devicetree/bindings/watchdog/apple,wdt.yaml F: arch/arm64/boot/dts/apple/ F: drivers/bluetooth/hci_bcm4377.c @@ -4914,7 +4909,6 @@ F: drivers/media/cec/i2c/ch7322.c CIRRUS LOGIC AUDIO CODEC DRIVERS M: James Schulman M: David Rhodes -M: Lucas Tanure M: Richard Fitzgerald L: alsa-devel@alsa-project.org (moderated for non-subscribers) L: patches@opensource.cirrus.com @@ -6012,7 +6006,7 @@ W: http://www.dialog-semiconductor.com/products F: Documentation/devicetree/bindings/input/da90??-onkey.txt F: Documentation/devicetree/bindings/input/dlg,da72??.txt F: Documentation/devicetree/bindings/mfd/da90*.txt -F: Documentation/devicetree/bindings/mfd/da90*.yaml +F: Documentation/devicetree/bindings/mfd/dlg,da90*.yaml F: Documentation/devicetree/bindings/regulator/da92*.txt F: Documentation/devicetree/bindings/regulator/dlg,da9*.yaml F: Documentation/devicetree/bindings/regulator/slg51000.txt @@ -6211,6 +6205,7 @@ X: Documentation/devicetree/ X: Documentation/driver-api/media/ X: Documentation/firmware-guide/acpi/ X: Documentation/i2c/ +X: Documentation/netlink/ X: Documentation/power/ X: Documentation/spi/ X: Documentation/userspace-api/media/ @@ -7966,6 +7961,12 @@ S: Maintained F: drivers/hwmon/f75375s.c F: include/linux/f75375s.h +FINTEK F81604 USB to 2xCANBUS DEVICE DRIVER +M: Ji-Ze Hong (Peter Hong) +L: linux-can@vger.kernel.org 
+S: Maintained +F: drivers/net/can/usb/f81604.c + FIREWIRE AUDIO DRIVERS and IEC 61883-1/6 PACKET STREAMING ENGINE M: Clemens Ladisch M: Takashi Sakamoto @@ -8158,6 +8159,7 @@ F: include/linux/spi/spi-fsl-dspi.h FREESCALE ENETC ETHERNET DRIVERS M: Claudiu Manoil +M: Vladimir Oltean L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/freescale/enetc/ @@ -10330,9 +10332,8 @@ M: Jesse Brandeburg M: Tony Nguyen L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers) S: Supported -W: http://www.intel.com/support/feedback.htm -W: http://e1000.sourceforge.net/ -Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/ +W: https://www.intel.com/content/www/us/en/support.html +Q: https://patchwork.ozlabs.org/project/intel-wired-lan/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue.git F: Documentation/networking/device_drivers/ethernet/intel/ @@ -14566,6 +14567,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git F: Documentation/devicetree/bindings/net/ F: drivers/connector/ F: drivers/net/ +X: drivers/net/wireless/ F: include/dt-bindings/net/ F: include/linux/etherdevice.h F: include/linux/fcdevice.h @@ -14615,6 +14617,7 @@ B: mailto:netdev@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git F: Documentation/core-api/netlink.rst +F: Documentation/netlink/ F: Documentation/networking/ F: Documentation/process/maintainer-netdev.rst F: Documentation/userspace-api/netlink/ @@ -14629,6 +14632,7 @@ F: include/uapi/linux/netdevice.h F: lib/net_utils.c F: lib/random32.c F: net/ +X: net/bluetooth/ F: tools/net/ F: tools/testing/selftests/net/ @@ -18575,10 +18579,9 @@ F: Documentation/admin-guide/LSM/SafeSetID.rst F: security/safesetid/ SAMSUNG AUDIO (ASoC) DRIVERS -M: Krzysztof Kozlowski M: Sylwester Nawrocki L: alsa-devel@alsa-project.org (moderated for non-subscribers) -S: Supported +S: Maintained B: mailto:linux-samsung-soc@vger.kernel.org F: Documentation/devicetree/bindings/sound/samsung* F: sound/soc/samsung/ @@ -18706,7 +18709,6 @@ F: include/dt-bindings/clock/samsung,*.h F: include/linux/clk/samsung.h SAMSUNG SPI DRIVERS -M: Krzysztof Kozlowski M: Andi Shyti L: linux-spi@vger.kernel.org L: linux-samsung-soc@vger.kernel.org @@ -18845,12 +18847,11 @@ F: drivers/target/ F: include/target/ SCTP PROTOCOL -M: Neil Horman M: Marcelo Ricardo Leitner M: Xin Long L: linux-sctp@vger.kernel.org S: Maintained -W: http://lksctp.sourceforge.net +W: https://github.com/sctp/lksctp-tools/wiki F: Documentation/networking/sctp.rst F: include/linux/sctp.h F: include/net/sctp/ diff --git a/Makefile b/Makefile index 9d765ebcccf1e..3cd9bc2886d64 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 4 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc3 NAME = Hurr durr I'ma ninja sloth # *DOCUMENTATION* diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi index c9e05e3540d64..00bf53f99c294 100644 --- a/arch/arm/boot/dts/stm32f429.dtsi +++ b/arch/arm/boot/dts/stm32f429.dtsi @@ -387,6 +387,7 @@ interrupt-names = "tx", "rx0", "rx1", "sce"; resets = <&rcc STM32F4_APB1_RESET(CAN2)>; clocks = <&rcc 0 STM32F4_APB1_CLOCK(CAN2)>; + st,can-secondary; st,gcan = <&gcan>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi index c8e6c52fb248e..9f65403295ca0 100644 
--- a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi +++ b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi @@ -283,6 +283,88 @@ slew-rate = <2>; }; }; + + can1_pins_a: can1-0 { + pins1 { + pinmux = ; /* CAN1_TX */ + }; + pins2 { + pinmux = ; /* CAN1_RX */ + bias-pull-up; + }; + }; + + can1_pins_b: can1-1 { + pins1 { + pinmux = ; /* CAN1_TX */ + }; + pins2 { + pinmux = ; /* CAN1_RX */ + bias-pull-up; + }; + }; + + can1_pins_c: can1-2 { + pins1 { + pinmux = ; /* CAN1_TX */ + }; + pins2 { + pinmux = ; /* CAN1_RX */ + bias-pull-up; + + }; + }; + + can1_pins_d: can1-3 { + pins1 { + pinmux = ; /* CAN1_TX */ + }; + pins2 { + pinmux = ; /* CAN1_RX */ + bias-pull-up; + + }; + }; + + can2_pins_a: can2-0 { + pins1 { + pinmux = ; /* CAN2_TX */ + }; + pins2 { + pinmux = ; /* CAN2_RX */ + bias-pull-up; + }; + }; + + can2_pins_b: can2-1 { + pins1 { + pinmux = ; /* CAN2_TX */ + }; + pins2 { + pinmux = ; /* CAN2_RX */ + bias-pull-up; + }; + }; + + can3_pins_a: can3-0 { + pins1 { + pinmux = ; /* CAN3_TX */ + }; + pins2 { + pinmux = ; /* CAN3_RX */ + bias-pull-up; + }; + }; + + can3_pins_b: can3-1 { + pins1 { + pinmux = ; /* CAN3_TX */ + }; + pins2 { + pinmux = ; /* CAN3_RX */ + bias-pull-up; + }; + }; }; }; }; diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h index 78d3d4b82c6c2..f4db3e75d75f0 100644 --- a/arch/arm/include/asm/arm_pmuv3.h +++ b/arch/arm/include/asm/arm_pmuv3.h @@ -92,7 +92,7 @@ #define RETURN_READ_PMEVCNTRN(n) \ return read_sysreg(PMEVCNTR##n) -static unsigned long read_pmevcntrn(int n) +static inline unsigned long read_pmevcntrn(int n) { PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN); return 0; @@ -100,14 +100,14 @@ static unsigned long read_pmevcntrn(int n) #define WRITE_PMEVCNTRN(n) \ write_sysreg(val, PMEVCNTR##n) -static void write_pmevcntrn(int n, unsigned long val) +static inline void write_pmevcntrn(int n, unsigned long val) { PMEVN_SWITCH(n, WRITE_PMEVCNTRN); } #define WRITE_PMEVTYPERN(n) \ write_sysreg(val, PMEVTYPER##n) -static void write_pmevtypern(int n, unsigned long val) +static inline void write_pmevtypern(int n, unsigned long val) { PMEVN_SWITCH(n, WRITE_PMEVTYPERN); } diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index 53be7ea6181b3..9d2192156087b 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -308,6 +308,29 @@ static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl, return URC_OK; } +static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl) +{ + unsigned long bytes = 0; + unsigned long insn; + unsigned long result = 0; + + /* + * unwind_get_byte() will advance `ctrl` one instruction at a time, so + * loop until we get an instruction byte where bit 7 is not set. + * + * Note: This decodes a maximum of 4 bytes to output 28 bits data where + * max is 0xfffffff: that will cover a vsp increment of 1073742336, hence + * it is sufficient for unwinding the stack. + */ + do { + insn = unwind_get_byte(ctrl); + result |= (insn & 0x7f) << (bytes * 7); + bytes++; + } while (!!(insn & 0x80) && (bytes != sizeof(result))); + + return result; +} + /* * Execute the current unwind instruction. 
*/ @@ -361,7 +384,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) if (ret) goto error; } else if (insn == 0xb2) { - unsigned long uleb128 = unwind_get_byte(ctrl); + unsigned long uleb128 = unwind_decode_uleb128(ctrl); ctrl->vrs[SP] += 0x204 + (uleb128 << 2); } else { diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c index 67f72ca984b2f..1956b095e699d 100644 --- a/arch/arm/mach-sa1100/jornada720_ssp.c +++ b/arch/arm/mach-sa1100/jornada720_ssp.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * arch/arm/mac-sa1100/jornada720_ssp.c * * Copyright (C) 2006/2007 Kristoffer Ericson @@ -26,6 +26,7 @@ static unsigned long jornada_ssp_flags; /** * jornada_ssp_reverse - reverses input byte + * @byte: input byte to reverse * * we need to reverse all data we receive from the mcu due to its physical location * returns : 01110111 -> 11101110 @@ -46,6 +47,7 @@ EXPORT_SYMBOL(jornada_ssp_reverse); /** * jornada_ssp_byte - waits for ready ssp bus and sends byte + * @byte: input byte to transmit * * waits for fifo buffer to clear and then transmits, if it doesn't then we will * timeout after rounds. Needs mcu running before its called. @@ -77,6 +79,7 @@ EXPORT_SYMBOL(jornada_ssp_byte); /** * jornada_ssp_inout - decide if input is command or trading byte + * @byte: input byte to send (may be %TXDUMMY) * * returns : (jornada_ssp_byte(byte)) on success * : %-ETIMEDOUT on timeout failure diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S index 7483ef8bccda3..62206ef250371 100644 --- a/arch/arm/vfp/entry.S +++ b/arch/arm/vfp/entry.S @@ -23,6 +23,9 @@ @ ENTRY(do_vfp) mov r1, r10 - mov r3, r9 - b vfp_entry + str lr, [sp, #-8]! + add r3, sp, #4 + str r9, [r3] + bl vfp_entry + ldr pc, [sp], #8 ENDPROC(do_vfp) diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index 4d8478264d82b..a4610d0f32152 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S @@ -172,13 +172,14 @@ vfp_hw_state_valid: @ out before setting an FPEXC that @ stops us reading stuff VFPFMXR FPEXC, r1 @ Restore FPEXC last + mov sp, r3 @ we think we have handled things + pop {lr} sub r2, r2, #4 @ Retry current instruction - if Thumb str r2, [sp, #S_PC] @ mode it's two 16-bit instructions, @ else it's one 32-bit instruction, so @ always subtract 4 from the following @ instruction address. - mov lr, r3 @ we think we have handled things local_bh_enable_and_ret: adr r0, . mov r1, #SOFTIRQ_DISABLE_OFFSET @@ -209,8 +210,9 @@ skip: process_exception: DBGSTR "bounce" + mov sp, r3 @ setup for a return to the user code. + pop {lr} mov r2, sp @ nothing stacked - regdump is at TOS - mov lr, r3 @ setup for a return to the user code. 
@ Now call the C code to package up the bounce to the support code @ r0 holds the trigger instruction diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h index d6b51deb7bf0f..18dc2fb3d7b7b 100644 --- a/arch/arm64/include/asm/arm_pmuv3.h +++ b/arch/arm64/include/asm/arm_pmuv3.h @@ -13,7 +13,7 @@ #define RETURN_READ_PMEVCNTRN(n) \ return read_sysreg(pmevcntr##n##_el0) -static unsigned long read_pmevcntrn(int n) +static inline unsigned long read_pmevcntrn(int n) { PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN); return 0; @@ -21,14 +21,14 @@ static unsigned long read_pmevcntrn(int n) #define WRITE_PMEVCNTRN(n) \ write_sysreg(val, pmevcntr##n##_el0) -static void write_pmevcntrn(int n, unsigned long val) +static inline void write_pmevcntrn(int n, unsigned long val) { PMEVN_SWITCH(n, WRITE_PMEVCNTRN); } #define WRITE_PMEVTYPERN(n) \ write_sysreg(val, pmevtyper##n##_el0) -static void write_pmevtypern(int n, unsigned long val) +static inline void write_pmevtypern(int n, unsigned long val) { PMEVN_SWITCH(n, WRITE_PMEVTYPERN); } diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 683ca3af40848..5f6f84837a490 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -126,6 +126,10 @@ #define APPLE_CPU_PART_M1_FIRESTORM_MAX 0x029 #define APPLE_CPU_PART_M2_BLIZZARD 0x032 #define APPLE_CPU_PART_M2_AVALANCHE 0x033 +#define APPLE_CPU_PART_M2_BLIZZARD_PRO 0x034 +#define APPLE_CPU_PART_M2_AVALANCHE_PRO 0x035 +#define APPLE_CPU_PART_M2_BLIZZARD_MAX 0x038 +#define APPLE_CPU_PART_M2_AVALANCHE_MAX 0x039 #define AMPERE_CPU_PART_AMPERE1 0xAC3 @@ -181,6 +185,10 @@ #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX) #define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD) #define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE) +#define MIDR_APPLE_M2_BLIZZARD_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_PRO) +#define MIDR_APPLE_M2_AVALANCHE_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_PRO) +#define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX) +#define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX) #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1) /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 4cd6762bda805..dc3c072e862f1 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -209,6 +209,7 @@ struct kvm_pgtable_visit_ctx { kvm_pte_t old; void *arg; struct kvm_pgtable_mm_ops *mm_ops; + u64 start; u64 addr; u64 end; u32 level; diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index f5bcb0dc62673..7e89968bd2824 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -66,13 +66,10 @@ void mte_sync_tags(pte_t old_pte, pte_t pte) return; /* if PG_mte_tagged is set, tags have already been initialised */ - for (i = 0; i < nr_pages; i++, page++) { - if (!page_mte_tagged(page)) { + for (i = 0; i < nr_pages; i++, page++) + if (!page_mte_tagged(page)) mte_sync_page_tags(page, old_pte, check_swap, pte_is_tagged); - set_page_mte_tagged(page); - } - } /* ensure the tags are visible before the PTE is set */ smp_wmb(); diff --git a/arch/arm64/kernel/vdso.c 
b/arch/arm64/kernel/vdso.c index 0119dc91abb5d..d9e1355730ef5 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -288,7 +288,7 @@ static int aarch32_alloc_kuser_vdso_page(void) memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start, kuser_sz); - aarch32_vectors_page = virt_to_page(vdso_page); + aarch32_vectors_page = virt_to_page((void *)vdso_page); return 0; } diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 1279949599b5f..4c9dcd8fc9395 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -81,26 +81,34 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) fpsimd_kvm_prepare(); + /* + * We will check TIF_FOREIGN_FPSTATE just before entering the + * guest in kvm_arch_vcpu_ctxflush_fp() and override this to + * FP_STATE_FREE if the flag set. + */ vcpu->arch.fp_state = FP_STATE_HOST_OWNED; vcpu_clear_flag(vcpu, HOST_SVE_ENABLED); if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN) vcpu_set_flag(vcpu, HOST_SVE_ENABLED); - /* - * We don't currently support SME guests but if we leave - * things in streaming mode then when the guest starts running - * FPSIMD or SVE code it may generate SME traps so as a - * special case if we are in streaming mode we force the host - * state to be saved now and exit streaming mode so that we - * don't have to handle any SME traps for valid guest - * operations. Do this for ZA as well for now for simplicity. - */ if (system_supports_sme()) { vcpu_clear_flag(vcpu, HOST_SME_ENABLED); if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN) vcpu_set_flag(vcpu, HOST_SME_ENABLED); + /* + * If PSTATE.SM is enabled then save any pending FP + * state and disable PSTATE.SM. If we leave PSTATE.SM + * enabled and the guest does not enable SME via + * CPACR_EL1.SMEN then operations that should be valid + * may generate SME traps from EL1 to EL1 which we + * can't intercept and which would confuse the guest. + * + * Do the same for PSTATE.ZA in the case where there + * is state in the registers which has not already + * been saved, this is very unlikely to happen. + */ if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) { vcpu->arch.fp_state = FP_STATE_FREE; fpsimd_save_and_flush_cpu_state(); diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index c41166f1a1dd7..e78a08a72a3c9 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -177,9 +177,17 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) sve_guest = vcpu_has_sve(vcpu); esr_ec = kvm_vcpu_trap_get_class(vcpu); - /* Don't handle SVE traps for non-SVE vcpus here: */ - if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD) + /* Only handle traps the vCPU can support here: */ + switch (esr_ec) { + case ESR_ELx_EC_FP_ASIMD: + break; + case ESR_ELx_EC_SVE: + if (!sve_guest) + return false; + break; + default: return false; + } /* Valid trap. 
Switch the context: */ diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 3d61bd3e591d2..5282cb9ca4cff 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -58,8 +58,9 @@ struct kvm_pgtable_walk_data { struct kvm_pgtable_walker *walker; + const u64 start; u64 addr; - u64 end; + const u64 end; }; static bool kvm_phys_is_valid(u64 phys) @@ -201,6 +202,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data, .old = READ_ONCE(*ptep), .arg = data->walker->arg, .mm_ops = mm_ops, + .start = data->start, .addr = data->addr, .end = data->end, .level = level, @@ -293,6 +295,7 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size, struct kvm_pgtable_walker *walker) { struct kvm_pgtable_walk_data walk_data = { + .start = ALIGN_DOWN(addr, PAGE_SIZE), .addr = ALIGN_DOWN(addr, PAGE_SIZE), .end = PAGE_ALIGN(walk_data.addr + size), .walker = walker, @@ -349,7 +352,7 @@ int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr, } struct hyp_map_data { - u64 phys; + const u64 phys; kvm_pte_t attr; }; @@ -407,13 +410,12 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte) static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx, struct hyp_map_data *data) { + u64 phys = data->phys + (ctx->addr - ctx->start); kvm_pte_t new; - u64 granule = kvm_granule_size(ctx->level), phys = data->phys; if (!kvm_block_mapping_supported(ctx, phys)) return false; - data->phys += granule; new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level); if (ctx->old == new) return true; @@ -576,7 +578,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt) } struct stage2_map_data { - u64 phys; + const u64 phys; kvm_pte_t attr; u8 owner_id; @@ -794,20 +796,43 @@ static bool stage2_pte_executable(kvm_pte_t pte) return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN); } +static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx, + const struct stage2_map_data *data) +{ + u64 phys = data->phys; + + /* + * Stage-2 walks to update ownership data are communicated to the map + * walker using an invalid PA. Avoid offsetting an already invalid PA, + * which could overflow and make the address valid again. + */ + if (!kvm_phys_is_valid(phys)) + return phys; + + /* + * Otherwise, work out the correct PA based on how far the walk has + * gotten. 
+ */ + return phys + (ctx->addr - ctx->start); +} + static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx, struct stage2_map_data *data) { + u64 phys = stage2_map_walker_phys_addr(ctx, data); + if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1))) return false; - return kvm_block_mapping_supported(ctx, data->phys); + return kvm_block_mapping_supported(ctx, phys); } static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx, struct stage2_map_data *data) { kvm_pte_t new; - u64 granule = kvm_granule_size(ctx->level), phys = data->phys; + u64 phys = stage2_map_walker_phys_addr(ctx, data); + u64 granule = kvm_granule_size(ctx->level); struct kvm_pgtable *pgt = data->mmu->pgt; struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; @@ -841,8 +866,6 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx, stage2_make_pte(ctx, new); - if (kvm_phys_is_valid(phys)) - data->phys += granule; return 0; } diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 64c3aec0d937c..0bd93a5f21ce3 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -204,7 +204,7 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu) * Size Fault at level 0, as if exceeding PARange. * * Non-LPAE guests will only get the external abort, as there - * is no way to to describe the ASF. + * is no way to describe the ASF. */ if (vcpu_el1_is_32bit(vcpu) && !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE)) diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 469d816f356f3..93a47a515c137 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -616,6 +616,10 @@ static const struct midr_range broken_seis[] = { MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX), MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD), MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE), + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO), + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO), + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX), + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX), {}, }; diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c index 08978d0672e7e..7fe8ba1a2851c 100644 --- a/arch/arm64/kvm/vmid.c +++ b/arch/arm64/kvm/vmid.c @@ -47,7 +47,7 @@ static void flush_context(void) int cpu; u64 vmid; - bitmap_clear(vmid_map, 0, NUM_USER_VMIDS); + bitmap_zero(vmid_map, NUM_USER_VMIDS); for_each_possible_cpu(cpu) { vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0); @@ -182,8 +182,7 @@ int __init kvm_arm_vmid_alloc_init(void) */ WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus()); atomic64_set(&vmid_generation, VMID_FIRST_VERSION); - vmid_map = kcalloc(BITS_TO_LONGS(NUM_USER_VMIDS), - sizeof(*vmid_map), GFP_KERNEL); + vmid_map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL); if (!vmid_map) return -ENOMEM; @@ -192,5 +191,5 @@ int __init kvm_arm_vmid_alloc_init(void) void __init kvm_arm_vmid_alloc_free(void) { - kfree(vmid_map); + bitmap_free(vmid_map); } diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index 4aadcfb017545..a7bb20055ce09 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -21,9 +21,10 @@ void copy_highpage(struct page *to, struct page *from) copy_page(kto, kfrom); + if (kasan_hw_tags_enabled()) + page_kasan_tag_reset(to); + if (system_supports_mte() && page_mte_tagged(from)) { - if (kasan_hw_tags_enabled()) - page_kasan_tag_reset(to); /* It's a new page, shouldn't have been tagged yet */ WARN_ON_ONCE(!try_page_mte_tagging(to)); 
mte_copy_page_tags(kto, kfrom); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 9e0db5c387e3a..cb21ccd7940d0 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -480,8 +480,8 @@ static void do_bad_area(unsigned long far, unsigned long esr, } } -#define VM_FAULT_BADMAP 0x010000 -#define VM_FAULT_BADACCESS 0x020000 +#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000) +#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000) static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int mm_flags, unsigned long vm_flags, diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index b9f6908a31bc3..ba468b5f3f0b6 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -858,11 +858,17 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs * } static inline void __user * -get_sigframe(struct ksignal *ksig, size_t frame_size) +get_sigframe(struct ksignal *ksig, struct pt_regs *tregs, size_t frame_size) { unsigned long usp = sigsp(rdusp(), ksig); + unsigned long gap = 0; - return (void __user *)((usp - frame_size) & -8UL); + if (CPU_IS_020_OR_030 && tregs->format == 0xb) { + /* USP is unreliable so use worst-case value */ + gap = 256; + } + + return (void __user *)((usp - gap - frame_size) & -8UL); } static int setup_frame(struct ksignal *ksig, sigset_t *set, @@ -880,7 +886,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, return -EFAULT; } - frame = get_sigframe(ksig, sizeof(*frame) + fsize); + frame = get_sigframe(ksig, tregs, sizeof(*frame) + fsize); if (fsize) err |= copy_to_user (frame + 1, regs + 1, fsize); @@ -952,7 +958,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, return -EFAULT; } - frame = get_sigframe(ksig, sizeof(*frame)); + frame = get_sigframe(ksig, tregs, sizeof(*frame)); if (fsize) err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize); diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index e2950f5db7c9c..e715df5385d6e 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -413,12 +413,12 @@ extern void paging_init (void); * For the 64bit version, the offset is extended by 32bit. 
*/ #define __swp_type(x) ((x).val & 0x1f) -#define __swp_offset(x) ( (((x).val >> 6) & 0x7) | \ - (((x).val >> 8) & ~0x7) ) +#define __swp_offset(x) ( (((x).val >> 5) & 0x7) | \ + (((x).val >> 10) << 3) ) #define __swp_entry(type, offset) ((swp_entry_t) { \ ((type) & 0x1f) | \ - ((offset & 0x7) << 6) | \ - ((offset & ~0x7) << 8) }) + ((offset & 0x7) << 5) | \ + ((offset >> 3) << 10) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) diff --git a/arch/parisc/kernel/kexec.c b/arch/parisc/kernel/kexec.c index 5eb7f30edc1f7..db57345a9dafb 100644 --- a/arch/parisc/kernel/kexec.c +++ b/arch/parisc/kernel/kexec.c @@ -4,6 +4,8 @@ #include #include #include +#include + #include #include diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 85cde5bf04b7a..771b79423bbc2 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -34,8 +34,6 @@ endif BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \ - $(call cc-option,-mno-prefixed) $(call cc-option,-mno-pcrel) \ - $(call cc-option,-mno-mma) \ $(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \ -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ $(LINUXINCLUDE) @@ -71,6 +69,10 @@ BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -nostdinc BOOTARFLAGS := -crD +BOOTCFLAGS += $(call cc-option,-mno-prefixed) \ + $(call cc-option,-mno-pcrel) \ + $(call cc-option,-mno-mma) + ifdef CONFIG_CC_IS_CLANG BOOTCFLAGS += $(CLANG_FLAGS) BOOTAFLAGS += $(CLANG_FLAGS) diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig index 7113f9355165a..ad1872518992e 100644 --- a/arch/powerpc/crypto/Kconfig +++ b/arch/powerpc/crypto/Kconfig @@ -96,7 +96,7 @@ config CRYPTO_AES_PPC_SPE config CRYPTO_AES_GCM_P10 tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)" - depends on PPC64 && CPU_LITTLE_ENDIAN + depends on PPC64 && CPU_LITTLE_ENDIAN && VSX select CRYPTO_LIB_AES select CRYPTO_ALGAPI select CRYPTO_AEAD diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 678b5bdc79b1f..34e14dfd8e042 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -205,7 +205,6 @@ extern void iommu_register_group(struct iommu_table_group *table_group, int pci_domain_number, unsigned long pe_num); extern int iommu_add_device(struct iommu_table_group *table_group, struct device *dev); -extern void iommu_del_device(struct device *dev); extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl, unsigned long entry, unsigned long *hpa, enum dma_data_direction *direction); @@ -229,10 +228,6 @@ static inline int iommu_add_device(struct iommu_table_group *table_group, { return 0; } - -static inline void iommu_del_device(struct device *dev) -{ -} #endif /* !CONFIG_IOMMU_API */ u64 dma_iommu_get_required_mask(struct device *dev); diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 038ce8d9061d1..8920862ffd791 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -144,7 +144,7 @@ static bool dma_iommu_bypass_supported(struct device *dev, u64 mask) /* We support DMA to/from any memory page via the iommu */ int dma_iommu_dma_supported(struct device *dev, u64 mask) { - struct iommu_table *tbl = get_iommu_table_base(dev); + struct iommu_table *tbl; if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) { /* @@ -162,6 +162,8 @@ int 
dma_iommu_dma_supported(struct device *dev, u64 mask) return 1; } + tbl = get_iommu_table_base(dev); + if (!tbl) { dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask); return 0; diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 0089dd49b4cbf..67f0b01e6ff57 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -518,7 +518,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, /* Convert entry to a dma_addr_t */ entry += tbl->it_offset; dma_addr = entry << tbl->it_page_shift; - dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl)); + dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl)); DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n", npages, entry, dma_addr); @@ -905,6 +905,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, unsigned int order; unsigned int nio_pages, io_order; struct page *page; + int tcesize = (1 << tbl->it_page_shift); size = PAGE_ALIGN(size); order = get_order(size); @@ -931,7 +932,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, memset(ret, 0, size); /* Set up tces to cover the allocated range */ - nio_pages = size >> tbl->it_page_shift; + nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift; + io_order = get_iommu_order(size, tbl); mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, mask >> tbl->it_page_shift, io_order, 0); @@ -939,7 +941,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, free_pages((unsigned long)ret, order); return NULL; } - *dma_handle = mapping; + + *dma_handle = mapping | ((u64)ret & (tcesize - 1)); return ret; } @@ -950,7 +953,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size, unsigned int nio_pages; size = PAGE_ALIGN(size); - nio_pages = size >> tbl->it_page_shift; + nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift; iommu_free(tbl, dma_handle, nio_pages); size = PAGE_ALIGN(size); free_pages((unsigned long)vaddr, get_order(size)); @@ -1168,23 +1171,6 @@ int iommu_add_device(struct iommu_table_group *table_group, struct device *dev) } EXPORT_SYMBOL_GPL(iommu_add_device); -void iommu_del_device(struct device *dev) -{ - /* - * Some devices might not have IOMMU table and group - * and we needn't detach them from the associated - * IOMMU groups - */ - if (!device_iommu_mapped(dev)) { - pr_debug("iommu_tce: skipping device %s with no tbl\n", - dev_name(dev)); - return; - } - - iommu_group_remove_device(dev); -} -EXPORT_SYMBOL_GPL(iommu_del_device); - /* * A simple iommu_table_group_ops which only allows reusing the existing * iommu_table. This handles VFIO for POWER7 or the nested KVM. 
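The iommu_alloc_coherent()/iommu_free_coherent() hunks above account for IOMMU (TCE) pages that can be larger than the kernel page size: the number of IO pages is derived from the IOMMU-page-aligned size, and the returned DMA handle keeps the buffer's offset within its TCE page (mirroring "*dma_handle = mapping | ((u64)ret & (tcesize - 1))"). The stand-alone sketch below illustrates that arithmetic under assumed page sizes; the helper names, constants, and values are illustrative only, not the kernel's definitions.

/*
 * Minimal user-space sketch (not kernel code) of the page accounting used
 * when the IOMMU (TCE) page size differs from the kernel page size.
 * KERNEL_PAGE_SHIFT and TCE_PAGE_SHIFT are assumed example values.
 */
#include <stdint.h>
#include <stdio.h>

#define KERNEL_PAGE_SHIFT 12		/* assume 4 KiB kernel pages */
#define TCE_PAGE_SHIFT    16		/* assume 64 KiB IOMMU (TCE) pages */
#define TCE_PAGE_SIZE     (1UL << TCE_PAGE_SHIFT)

/* Round a byte count up to a whole number of IOMMU pages. */
static unsigned long iommu_pages_needed(unsigned long size)
{
	return (size + TCE_PAGE_SIZE - 1) >> TCE_PAGE_SHIFT;
}

/*
 * Build a DMA handle for a coherent allocation: the mapping covers whole
 * TCE pages, and the low bits carry the buffer's offset inside its TCE page.
 */
static uint64_t dma_handle_for(uint64_t tce_mapping, uintptr_t cpu_vaddr)
{
	return tce_mapping | ((uint64_t)cpu_vaddr & (TCE_PAGE_SIZE - 1));
}

int main(void)
{
	unsigned long size = 8UL << KERNEL_PAGE_SHIFT;	/* a 32 KiB buffer */
	uintptr_t vaddr = 0x1000000UL + 0x3000;		/* not TCE aligned */

	/* With 64 KiB TCE pages, even a 32 KiB buffer needs one full IO page. */
	printf("IO pages to map: %lu\n", iommu_pages_needed(size));
	printf("dma_handle: 0x%llx\n",
	       (unsigned long long)dma_handle_for(0x80000000ULL, vaddr));
	return 0;
}

With 4 KiB kernel pages and 64 KiB TCE pages, a 32 KiB buffer still occupies one full IO page, which is why the size is aligned to the IOMMU page size before computing nio_pages in the hunks above.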
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index 85bdd7d3652f8..48e0eaf1ad615 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -93,11 +93,12 @@ static int process_ISA_OF_ranges(struct device_node *isa_node, } inval_range: - if (!phb_io_base_phys) { + if (phb_io_base_phys) { pr_err("no ISA IO ranges or unexpected isa range, mapping 64k\n"); remap_isa_base(phb_io_base_phys, 0x10000); + return 0; } - return 0; + return -EINVAL; } diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 26245aaf12b8b..2297aa764ecdb 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -1040,8 +1040,8 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t entry, unsigned long address, int psize) { struct mm_struct *mm = vma->vm_mm; - unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | - _PAGE_RW | _PAGE_EXEC); + unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY | + _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); unsigned long change = pte_val(entry) ^ pte_val(*ptep); /* diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index e93aefcfb83f2..37043dfc1addd 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -101,6 +101,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) bpf_hdr = jit_data->header; proglen = jit_data->proglen; extra_pass = true; + /* During extra pass, ensure index is reset before repopulating extable entries */ + cgctx.exentry_idx = 0; goto skip_init_ctx; } diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index 0d9b7609c7d56..3e2e252016f7a 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -265,6 +265,7 @@ config CPM2 config FSL_ULI1575 bool "ULI1575 PCIe south bridge support" depends on FSL_SOC_BOOKE || PPC_86xx + depends on PCI select FSL_PCI select GENERIC_ISA_DMA help diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 233a50e65fced..7725492097b62 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -865,28 +865,3 @@ void __init pnv_pci_init(void) /* Configure IOMMU DMA hooks */ set_pci_dma_ops(&dma_iommu_ops); } - -static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - - switch (action) { - case BUS_NOTIFY_DEL_DEVICE: - iommu_del_device(dev); - return 0; - default: - return 0; - } -} - -static struct notifier_block pnv_tce_iommu_bus_nb = { - .notifier_call = pnv_tce_iommu_bus_notifier, -}; - -static int __init pnv_tce_iommu_bus_notifier_init(void) -{ - bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb); - return 0; -} -machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init); diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 7464fa6e41455..918f511837db4 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -91,19 +91,24 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node) static void iommu_pseries_free_group(struct iommu_table_group *table_group, const char *node_name) { - struct iommu_table *tbl; - if (!table_group) return; - tbl = table_group->tables[0]; #ifdef CONFIG_IOMMU_API if (table_group->group) { 
iommu_group_put(table_group->group); BUG_ON(table_group->group); } #endif - iommu_tce_table_put(tbl); + + /* Default DMA window table is at index 0, while DDW at 1. SR-IOV + * adapters only have table on index 1. + */ + if (table_group->tables[0]) + iommu_tce_table_put(table_group->tables[0]); + + if (table_group->tables[1]) + iommu_tce_table_put(table_group->tables[1]); kfree(table_group); } @@ -1695,31 +1700,6 @@ static int __init disable_multitce(char *str) __setup("multitce=", disable_multitce); -static int tce_iommu_bus_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - - switch (action) { - case BUS_NOTIFY_DEL_DEVICE: - iommu_del_device(dev); - return 0; - default: - return 0; - } -} - -static struct notifier_block tce_iommu_bus_nb = { - .notifier_call = tce_iommu_bus_notifier, -}; - -static int __init tce_iommu_bus_notifier_init(void) -{ - bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb); - return 0; -} -machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init); - #ifdef CONFIG_SPAPR_TCE_IOMMU struct iommu_group *pSeries_pci_device_group(struct pci_controller *hose, struct pci_dev *pdev) diff --git a/arch/riscv/kernel/pi/Makefile b/arch/riscv/kernel/pi/Makefile index 5d7cb991f2b86..7b593d44c712a 100644 --- a/arch/riscv/kernel/pi/Makefile +++ b/arch/riscv/kernel/pi/Makefile @@ -22,7 +22,7 @@ KCOV_INSTRUMENT := n $(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \ --remove-section=.note.gnu.property \ - --prefix-alloc-sections=.init + --prefix-alloc-sections=.init.pi $(obj)/%.pi.o: $(obj)/%.o FORCE $(call if_changed,objcopy) diff --git a/arch/riscv/kernel/probes/Makefile b/arch/riscv/kernel/probes/Makefile index c40139e9ca478..8265ff497977c 100644 --- a/arch/riscv/kernel/probes/Makefile +++ b/arch/riscv/kernel/probes/Makefile @@ -4,3 +4,5 @@ obj-$(CONFIG_RETHOOK) += rethook.o rethook_trampoline.o obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_rethook.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_rethook_trampoline.o = $(CC_FLAGS_FTRACE) diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index f03b5697f8e05..e5f9f4677bbfd 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -84,11 +84,8 @@ SECTIONS __init_data_begin = .; INIT_DATA_SECTION(16) - /* Those sections result from the compilation of kernel/pi/string.c */ - .init.pidata : { - *(.init.srodata.cst8*) - *(.init__bug_table*) - *(.init.sdata*) + .init.pi : { + *(.init.pi*) } .init.bss : { diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index db20c1589a98f..6dab9c1be508b 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -469,19 +469,11 @@ config SCHED_SMT config SCHED_MC def_bool n -config SCHED_BOOK - def_bool n - -config SCHED_DRAWER - def_bool n - config SCHED_TOPOLOGY def_bool y prompt "Topology scheduler support" select SCHED_SMT select SCHED_MC - select SCHED_BOOK - select SCHED_DRAWER help Topology scheduler support improves the CPU scheduler's decision making when dealing with machines that have multi-threading, @@ -716,7 +708,6 @@ config EADM_SCH config VFIO_CCW def_tristate n prompt "Support for VFIO-CCW subchannels" - depends on S390_CCW_IOMMU depends on VFIO select VFIO_MDEV help @@ -728,7 +719,7 @@ config VFIO_CCW config VFIO_AP def_tristate n prompt "VFIO support for AP devices" - depends on S390_AP_IOMMU && KVM + depends on KVM depends on VFIO depends on 
ZCRYPT select VFIO_MDEV diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 4ccf66d29fc24..be3bf03bf3618 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -591,8 +591,6 @@ CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_INPUT=y CONFIG_VHOST_NET=m CONFIG_VHOST_VSOCK=m -CONFIG_S390_CCW_IOMMU=y -CONFIG_S390_AP_IOMMU=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y @@ -703,6 +701,7 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y CONFIG_IMA_WRITE_POLICY=y CONFIG_IMA_APPRAISE=y CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor" +CONFIG_INIT_STACK_NONE=y CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 693297a2e8973..769c7eed8b6ac 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -580,8 +580,6 @@ CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_INPUT=y CONFIG_VHOST_NET=m CONFIG_VHOST_VSOCK=m -CONFIG_S390_CCW_IOMMU=y -CONFIG_S390_AP_IOMMU=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y @@ -686,6 +684,7 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y CONFIG_IMA_WRITE_POLICY=y CONFIG_IMA_APPRAISE=y CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor" +CONFIG_INIT_STACK_NONE=y CONFIG_CRYPTO_FIPS=y CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 33a232bb68af9..6f68b39817eff 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -67,6 +67,7 @@ CONFIG_ZFCP=y # CONFIG_MISC_FILESYSTEMS is not set # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_LSM="yama,loadpin,safesetid,integrity" +CONFIG_INIT_STACK_NONE=y # CONFIG_ZLIB_DFLTCC is not set CONFIG_XZ_DEC_MICROLZMA=y CONFIG_PRINTK_TIME=y diff --git a/arch/s390/crypto/chacha-glue.c b/arch/s390/crypto/chacha-glue.c index 7752bd314558e..5fae187f947a0 100644 --- a/arch/s390/crypto/chacha-glue.c +++ b/arch/s390/crypto/chacha-glue.c @@ -82,7 +82,7 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, * it cannot handle a block of data or less, but otherwise * it can handle data of arbitrary size */ - if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20) + if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20 || !MACHINE_HAS_VX) chacha_crypt_generic(state, dst, src, bytes, nrounds); else chacha20_crypt_s390(state, dst, src, bytes, diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index a386070f1d565..3cb9d813f022b 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -112,7 +112,7 @@ struct compat_statfs64 { u32 f_namelen; u32 f_frsize; u32 f_flags; - u32 f_spare[4]; + u32 f_spare[5]; }; /* diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h index 72604f7792c33..f85b50723dd35 100644 --- a/arch/s390/include/uapi/asm/statfs.h +++ b/arch/s390/include/uapi/asm/statfs.h @@ -30,7 +30,7 @@ struct statfs { unsigned int f_namelen; unsigned int f_frsize; unsigned int f_flags; - unsigned int f_spare[4]; + unsigned int f_spare[5]; }; struct statfs64 { @@ -45,7 +45,7 @@ struct statfs64 { unsigned int f_namelen; unsigned int f_frsize; unsigned int f_flags; - unsigned int f_spare[4]; + unsigned int f_spare[5]; }; #endif diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 8983837b35659..6b2a051e1f8a2 100644 --- a/arch/s390/kernel/Makefile +++ 
b/arch/s390/kernel/Makefile @@ -10,6 +10,7 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) # Do not trace early setup code CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_rethook.o = $(CC_FLAGS_FTRACE) endif diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 43de939b7af19..f44f70de96611 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -1935,14 +1935,13 @@ static struct shutdown_action __refdata dump_action = { static void dump_reipl_run(struct shutdown_trigger *trigger) { - unsigned long ipib = (unsigned long) reipl_block_actual; struct lowcore *abs_lc; unsigned int csum; csum = (__force unsigned int) csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0); abs_lc = get_abs_lowcore(); - abs_lc->ipib = ipib; + abs_lc->ipib = __pa(reipl_block_actual); abs_lc->ipib_checksum = csum; put_abs_lowcore(abs_lc); dump_run(trigger); diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 9fd19530c9a56..68adf1de8888b 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -95,7 +95,7 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c static void cpu_thread_map(cpumask_t *dst, unsigned int cpu) { static cpumask_t mask; - int i; + unsigned int max_cpu; cpumask_clear(&mask); if (!cpumask_test_cpu(cpu, &cpu_setup_mask)) @@ -104,9 +104,10 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu) if (topology_mode != TOPOLOGY_MODE_HW) goto out; cpu -= cpu % (smp_cpu_mtid + 1); - for (i = 0; i <= smp_cpu_mtid; i++) { - if (cpumask_test_cpu(cpu + i, &cpu_setup_mask)) - cpumask_set_cpu(cpu + i, &mask); + max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1); + for (; cpu <= max_cpu; cpu++) { + if (cpumask_test_cpu(cpu, &cpu_setup_mask)) + cpumask_set_cpu(cpu, &mask); } out: cpumask_copy(dst, &mask); @@ -123,25 +124,26 @@ static void add_cpus_to_mask(struct topology_core *tl_core, unsigned int core; for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) { - unsigned int rcore; - int lcpu, i; + unsigned int max_cpu, rcore; + int cpu; rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin; - lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift); - if (lcpu < 0) + cpu = smp_find_processor_id(rcore << smp_cpu_mt_shift); + if (cpu < 0) continue; - for (i = 0; i <= smp_cpu_mtid; i++) { - topo = &cpu_topology[lcpu + i]; + max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1); + for (; cpu <= max_cpu; cpu++) { + topo = &cpu_topology[cpu]; topo->drawer_id = drawer->id; topo->book_id = book->id; topo->socket_id = socket->id; topo->core_id = rcore; - topo->thread_id = lcpu + i; + topo->thread_id = cpu; topo->dedicated = tl_core->d; - cpumask_set_cpu(lcpu + i, &drawer->mask); - cpumask_set_cpu(lcpu + i, &book->mask); - cpumask_set_cpu(lcpu + i, &socket->mask); - smp_cpu_set_polarization(lcpu + i, tl_core->pp); + cpumask_set_cpu(cpu, &drawer->mask); + cpumask_set_cpu(cpu, &book->mask); + cpumask_set_cpu(cpu, &socket->mask); + smp_cpu_set_polarization(cpu, tl_core->pp); } } } diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile index dee6f66353b33..a461a950f0518 100644 --- a/arch/um/drivers/Makefile +++ b/arch/um/drivers/Makefile @@ -16,7 +16,8 @@ mconsole-objs := mconsole_kern.o mconsole_user.o hostaudio-objs := hostaudio_kern.o ubd-objs := ubd_kern.o ubd_user.o port-objs := port_kern.o port_user.o -harddog-objs := harddog_kern.o harddog_user.o +harddog-objs := harddog_kern.o +harddog-builtin-$(CONFIG_UML_WATCHDOG) := harddog_user.o harddog_user_exp.o rtc-objs := rtc_kern.o 
rtc_user.o LDFLAGS_pcap.o = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a) @@ -60,6 +61,7 @@ obj-$(CONFIG_PTY_CHAN) += pty.o obj-$(CONFIG_TTY_CHAN) += tty.o obj-$(CONFIG_XTERM_CHAN) += xterm.o xterm_kern.o obj-$(CONFIG_UML_WATCHDOG) += harddog.o +obj-y += $(harddog-builtin-y) $(harddog-builtin-m) obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o obj-$(CONFIG_UML_RANDOM) += random.o obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o diff --git a/arch/um/drivers/harddog.h b/arch/um/drivers/harddog.h new file mode 100644 index 0000000000000..6d9ea60e7133e --- /dev/null +++ b/arch/um/drivers/harddog.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef UM_WATCHDOG_H +#define UM_WATCHDOG_H + +int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock); +void stop_watchdog(int in_fd, int out_fd); +int ping_watchdog(int fd); + +#endif /* UM_WATCHDOG_H */ diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c index e6d4f43deba82..60d1c6cab8a95 100644 --- a/arch/um/drivers/harddog_kern.c +++ b/arch/um/drivers/harddog_kern.c @@ -47,6 +47,7 @@ #include #include #include "mconsole.h" +#include "harddog.h" MODULE_LICENSE("GPL"); @@ -60,8 +61,6 @@ static int harddog_out_fd = -1; * Allow only one person to hold it open */ -extern int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock); - static int harddog_open(struct inode *inode, struct file *file) { int err = -EBUSY; @@ -92,8 +91,6 @@ static int harddog_open(struct inode *inode, struct file *file) return err; } -extern void stop_watchdog(int in_fd, int out_fd); - static int harddog_release(struct inode *inode, struct file *file) { /* @@ -112,8 +109,6 @@ static int harddog_release(struct inode *inode, struct file *file) return 0; } -extern int ping_watchdog(int fd); - static ssize_t harddog_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { diff --git a/arch/um/drivers/harddog_user.c b/arch/um/drivers/harddog_user.c index 070468d22e394..9ed89304975ed 100644 --- a/arch/um/drivers/harddog_user.c +++ b/arch/um/drivers/harddog_user.c @@ -7,6 +7,7 @@ #include #include #include +#include "harddog.h" struct dog_data { int stdin_fd; diff --git a/arch/um/drivers/harddog_user_exp.c b/arch/um/drivers/harddog_user_exp.c new file mode 100644 index 0000000000000..c74d4b815d143 --- /dev/null +++ b/arch/um/drivers/harddog_user_exp.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "harddog.h" + +#if IS_MODULE(CONFIG_UML_WATCHDOG) +EXPORT_SYMBOL(start_watchdog); +EXPORT_SYMBOL(stop_watchdog); +EXPORT_SYMBOL(ping_watchdog); +#endif diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index d096b04bf80e8..9d248703cbddc 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1703,10 +1703,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs) perf_sample_data_init(&data, 0, event->hw.last_period); - if (has_branch_stack(event)) { - data.br_stack = &cpuc->lbr_stack; - data.sample_flags |= PERF_SAMPLE_BRANCH_STACK; - } + if (has_branch_stack(event)) + perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index a2e566e53076e..df88576d6b2a5 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1229,12 +1229,14 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct perf_event *event, bool add) { struct pmu *pmu = event->pmu; + /* * Make sure we get updated with the first PEBS * event. 
It will trigger also during removal, but * that does not hurt: */ - bool update = cpuc->n_pebs == 1; + if (cpuc->n_pebs == 1) + cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW; if (needed_cb != pebs_needs_sched_cb(cpuc)) { if (!needed_cb) @@ -1242,7 +1244,7 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, else perf_sched_cb_dec(pmu); - update = true; + cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW; } /* @@ -1252,24 +1254,13 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, if (x86_pmu.intel_cap.pebs_baseline && add) { u64 pebs_data_cfg; - /* Clear pebs_data_cfg and pebs_record_size for first PEBS. */ - if (cpuc->n_pebs == 1) { - cpuc->pebs_data_cfg = 0; - cpuc->pebs_record_size = sizeof(struct pebs_basic); - } - pebs_data_cfg = pebs_update_adaptive_cfg(event); - - /* Update pebs_record_size if new event requires more data. */ - if (pebs_data_cfg & ~cpuc->pebs_data_cfg) { - cpuc->pebs_data_cfg |= pebs_data_cfg; - adaptive_pebs_record_size_update(); - update = true; - } + /* + * Be sure to update the thresholds when we change the record. + */ + if (pebs_data_cfg & ~cpuc->pebs_data_cfg) + cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW; } - - if (update) - pebs_update_threshold(cpuc); } void intel_pmu_pebs_add(struct perf_event *event) @@ -1326,9 +1317,17 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event) wrmsrl(base + idx, value); } +static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc) +{ + if (cpuc->n_pebs == cpuc->n_large_pebs && + cpuc->n_pebs != cpuc->n_pebs_via_pt) + intel_pmu_drain_pebs_buffer(); +} + void intel_pmu_pebs_enable(struct perf_event *event) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW; struct hw_perf_event *hwc = &event->hw; struct debug_store *ds = cpuc->ds; unsigned int idx = hwc->idx; @@ -1344,11 +1343,22 @@ void intel_pmu_pebs_enable(struct perf_event *event) if (x86_pmu.intel_cap.pebs_baseline) { hwc->config |= ICL_EVENTSEL_ADAPTIVE; - if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) { - wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg); - cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg; + if (pebs_data_cfg != cpuc->active_pebs_data_cfg) { + /* + * drain_pebs() assumes uniform record size; + * hence we need to drain when changing said + * size. 
+ */ + intel_pmu_drain_large_pebs(cpuc); + adaptive_pebs_record_size_update(); + wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg); + cpuc->active_pebs_data_cfg = pebs_data_cfg; } } + if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) { + cpuc->pebs_data_cfg = pebs_data_cfg; + pebs_update_threshold(cpuc); + } if (idx >= INTEL_PMC_IDX_FIXED) { if (x86_pmu.intel_cap.pebs_format < 5) @@ -1391,9 +1401,7 @@ void intel_pmu_pebs_disable(struct perf_event *event) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - if (cpuc->n_pebs == cpuc->n_large_pebs && - cpuc->n_pebs != cpuc->n_pebs_via_pt) - intel_pmu_drain_pebs_buffer(); + intel_pmu_drain_large_pebs(cpuc); cpuc->pebs_enabled &= ~(1ULL << hwc->idx); diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 8fc15ed5e60bb..abf09882f58b6 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -121,6 +121,9 @@ #define PEBS_DATACFG_LBRS BIT_ULL(3) #define PEBS_DATACFG_LBR_SHIFT 24 +/* Steal the highest bit of pebs_data_cfg for SW usage */ +#define PEBS_UPDATE_DS_SW BIT_ULL(63) + /* * Intel "Architectural Performance Monitoring" CPUID * detection/enumeration details: diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 498dc600bd5c8..0d02c4aafa6f4 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -13,7 +13,9 @@ #include +#include #include + #include #include diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index dd61752f4c966..4070a01c11b72 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -17,6 +17,7 @@ CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_early_printk.o = -pg CFLAGS_REMOVE_head64.o = -pg CFLAGS_REMOVE_sev.o = -pg +CFLAGS_REMOVE_rethook.o = -pg endif KASAN_SANITIZE_head$(BITS).o := n diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 4266b64631a46..7e331e8f36929 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -36,6 +36,7 @@ #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e #define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4 0x14e4 #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4 +#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc /* Protect the PCI config register pairs used for SMN. */ static DEFINE_MUTEX(smn_mutex); @@ -79,6 +80,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) }, {} }; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 123bf8b97a4b2..0c9660a07b233 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -253,7 +253,6 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e int nent) { struct kvm_cpuid_entry2 *best; - u64 guest_supported_xcr0 = cpuid_get_supported_xcr0(entries, nent); best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT); if (best) { @@ -292,21 +291,6 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT); } - - /* - * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate - * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's - * requested XCR0 value. 
The enclave's XFRM must be a subset of XCRO - * at the time of EENTER, thus adjust the allowed XFRM by the guest's - * supported XCR0. Similar to XCR0 handling, FP and SSE are forced to - * '1' even on CPUs that don't support XSAVE. - */ - best = cpuid_entry2_find(entries, nent, 0x12, 0x1); - if (best) { - best->ecx &= guest_supported_xcr0 & 0xffffffff; - best->edx &= guest_supported_xcr0 >> 32; - best->ecx |= XFEATURE_MASK_FPSSE; - } } void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c index 0574030b071fb..2261b684a7d44 100644 --- a/arch/x86/kvm/vmx/sgx.c +++ b/arch/x86/kvm/vmx/sgx.c @@ -170,12 +170,19 @@ static int __handle_encls_ecreate(struct kvm_vcpu *vcpu, return 1; } - /* Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM. */ + /* + * Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM. Note + * that the allowed XFRM (XFeature Request Mask) isn't strictly bound + * by the supported XCR0. FP+SSE *must* be set in XFRM, even if XSAVE + * is unsupported, i.e. even if XCR0 itself is completely unsupported. + */ if ((u32)miscselect & ~sgx_12_0->ebx || (u32)attributes & ~sgx_12_1->eax || (u32)(attributes >> 32) & ~sgx_12_1->ebx || (u32)xfrm & ~sgx_12_1->ecx || - (u32)(xfrm >> 32) & ~sgx_12_1->edx) { + (u32)(xfrm >> 32) & ~sgx_12_1->edx || + xfrm & ~(vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE) || + (xfrm & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) { kvm_inject_gp(vcpu, 0); return 1; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ceb7c5e9cf9e9..c0778ca39650f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1446,7 +1446,7 @@ static const u32 msrs_to_save_base[] = { #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, - MSR_IA32_SPEC_CTRL, + MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL, MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, @@ -7155,6 +7155,10 @@ static void kvm_probe_msr_to_save(u32 msr_index) if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) return; break; + case MSR_IA32_TSX_CTRL: + if (!(kvm_get_arch_capabilities() & ARCH_CAP_TSX_CTRL_MSR)) + return; + break; default: break; } diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 27ef53fab6bd7..b3b1e376dce86 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -144,8 +144,8 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array) */ .align 64 .skip 63, 0xcc -SYM_FUNC_START_NOALIGN(zen_untrain_ret); - +SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + ANNOTATE_NOENDBR /* * As executed from zen_untrain_ret, this is: * diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 3cdac0f0055de..8192452d1d2d3 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -261,6 +262,24 @@ static void __init probe_page_size_mask(void) } } +#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \ + .family = 6, \ + .model = _model, \ + } +/* + * INVLPG may not properly flush Global entries + * on these CPUs when PCIDs are enabled. 
+ */ +static const struct x86_cpu_id invlpg_miss_ids[] = { + INTEL_MATCH(INTEL_FAM6_ALDERLAKE ), + INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ), + INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ), + INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ), + INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P), + INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S), + {} +}; + static void setup_pcid(void) { if (!IS_ENABLED(CONFIG_X86_64)) @@ -269,6 +288,12 @@ static void setup_pcid(void) if (!boot_cpu_has(X86_FEATURE_PCID)) return; + if (x86_match_cpu(invlpg_miss_ids)) { + pr_info("Incomplete global flushes, disabling PCID"); + setup_clear_cpu_cap(X86_FEATURE_PCID); + return; + } + if (boot_cpu_has(X86_FEATURE_PGE)) { /* * This can't be cr4_set_bits_and_update_boot() -- the diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index 876d5df157ed9..5c01d7e70d90d 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -343,7 +343,19 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, struct rt_sigframe *frame; int err = 0, sig = ksig->sig; unsigned long sp, ra, tp, ps; + unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler; + unsigned long handler_fdpic_GOT = 0; unsigned int base; + bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) && + (current->personality & FDPIC_FUNCPTRS); + + if (fdpic) { + unsigned long __user *fdpic_func_desc = + (unsigned long __user *)handler; + if (__get_user(handler, &fdpic_func_desc[0]) || + __get_user(handler_fdpic_GOT, &fdpic_func_desc[1])) + return -EFAULT; + } sp = regs->areg[1]; @@ -373,20 +385,26 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (ksig->ka.sa.sa_flags & SA_RESTORER) { - ra = (unsigned long)ksig->ka.sa.sa_restorer; + if (fdpic) { + unsigned long __user *fdpic_func_desc = + (unsigned long __user *)ksig->ka.sa.sa_restorer; + + err |= __get_user(ra, fdpic_func_desc); + } else { + ra = (unsigned long)ksig->ka.sa.sa_restorer; + } } else { /* Create sys_rt_sigreturn syscall in stack frame */ err |= gen_return_code(frame->retcode); - - if (err) { - return -EFAULT; - } ra = (unsigned long) frame->retcode; } - /* + if (err) + return -EFAULT; + + /* * Create signal handler execution context. * Return context not modified until this point. 
*/ @@ -394,8 +412,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, /* Set up registers for signal handler; preserve the threadptr */ tp = regs->threadptr; ps = regs->ps; - start_thread(regs, (unsigned long) ksig->ka.sa.sa_handler, - (unsigned long) frame); + start_thread(regs, handler, (unsigned long)frame); /* Set up a stack frame for a call4 if userspace uses windowed ABI */ if (ps & PS_WOE_MASK) { @@ -413,6 +430,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, regs->areg[base + 4] = (unsigned long) &frame->uc; regs->threadptr = tp; regs->ps = ps; + if (fdpic) + regs->areg[base + 11] = handler_fdpic_GOT; pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n", current->comm, current->pid, sig, frame, regs->pc); diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 2a31b1ab0c9f2..17a7ef86fd0dd 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -56,6 +56,8 @@ EXPORT_SYMBOL(empty_zero_page); */ extern long long __ashrdi3(long long, int); extern long long __ashldi3(long long, int); +extern long long __bswapdi2(long long); +extern int __bswapsi2(int); extern long long __lshrdi3(long long, int); extern int __divsi3(int, int); extern int __modsi3(int, int); @@ -66,6 +68,8 @@ extern unsigned long long __umulsidi3(unsigned int, unsigned int); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__ashrdi3); +EXPORT_SYMBOL(__bswapdi2); +EXPORT_SYMBOL(__bswapsi2); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__divsi3); EXPORT_SYMBOL(__modsi3); diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile index 7ecef0519a27c..c9c2614188f74 100644 --- a/arch/xtensa/lib/Makefile +++ b/arch/xtensa/lib/Makefile @@ -4,7 +4,7 @@ # lib-y += memcopy.o memset.o checksum.o \ - ashldi3.o ashrdi3.o lshrdi3.o \ + ashldi3.o ashrdi3.o bswapdi2.o bswapsi2.o lshrdi3.o \ divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o umulsidi3.o \ usercopy.o strncpy_user.o strnlen_user.o lib-$(CONFIG_PCI) += pci-auto.o diff --git a/arch/xtensa/lib/bswapdi2.S b/arch/xtensa/lib/bswapdi2.S new file mode 100644 index 0000000000000..d8e52e05eba66 --- /dev/null +++ b/arch/xtensa/lib/bswapdi2.S @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */ +#include +#include +#include + +ENTRY(__bswapdi2) + + abi_entry_default + ssai 8 + srli a4, a2, 16 + src a4, a4, a2 + src a4, a4, a4 + src a4, a2, a4 + srli a2, a3, 16 + src a2, a2, a3 + src a2, a2, a2 + src a2, a3, a2 + mov a3, a4 + abi_ret_default + +ENDPROC(__bswapdi2) diff --git a/arch/xtensa/lib/bswapsi2.S b/arch/xtensa/lib/bswapsi2.S new file mode 100644 index 0000000000000..9c1de1344f79a --- /dev/null +++ b/arch/xtensa/lib/bswapsi2.S @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */ +#include +#include +#include + +ENTRY(__bswapsi2) + + abi_entry_default + ssai 8 + srli a3, a2, 16 + src a3, a3, a2 + src a3, a3, a3 + src a2, a2, a3 + abi_ret_default + +ENDPROC(__bswapsi2) diff --git a/block/fops.c b/block/fops.c index d2e6be4e3d1c7..58d0aebc7313a 100644 --- a/block/fops.c +++ b/block/fops.c @@ -678,6 +678,16 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start, return error; } +static int blkdev_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct inode *bd_inode = bdev_file_inode(file); + + if (bdev_read_only(I_BDEV(bd_inode))) + return generic_file_readonly_mmap(file, vma); + + return generic_file_mmap(file, vma); +} + const struct file_operations def_blk_fops = { .open = blkdev_open, 
.release = blkdev_close, @@ -685,7 +695,7 @@ const struct file_operations def_blk_fops = { .read_iter = blkdev_read_iter, .write_iter = blkdev_write_iter, .iopoll = iocb_bio_iopoll, - .mmap = generic_file_mmap, + .mmap = blkdev_mmap, .fsync = blkdev_fsync, .unlocked_ioctl = blkdev_ioctl, #ifdef CONFIG_COMPAT diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index e8492b3a393ab..0800a9d775580 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -516,6 +516,17 @@ static const struct dmi_system_id maingear_laptop[] = { { } }; +static const struct dmi_system_id lg_laptop[] = { + { + .ident = "LG Electronics 17U70P", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), + DMI_MATCH(DMI_BOARD_NAME, "17U70P"), + }, + }, + { } +}; + struct irq_override_cmp { const struct dmi_system_id *system; unsigned char irq; @@ -532,6 +543,7 @@ static const struct irq_override_cmp override_table[] = { { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, { tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, { maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, + { lg_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, }; static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, diff --git a/drivers/base/class.c b/drivers/base/class.c index ac1808d1a2e8f..05d9df90f621b 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -320,6 +320,7 @@ void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class, start_knode = &start->p->knode_class; klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode); iter->type = type; + iter->sp = sp; } EXPORT_SYMBOL_GPL(class_dev_iter_init); @@ -361,6 +362,7 @@ EXPORT_SYMBOL_GPL(class_dev_iter_next); void class_dev_iter_exit(struct class_dev_iter *iter) { klist_iter_exit(&iter->ki); + subsys_put(iter->sp); } EXPORT_SYMBOL_GPL(class_dev_iter_exit); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 9c35c958f2c88..65ecde3e2a5be 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1666,7 +1666,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd) return -EIO; dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir); - if (!dir) { + if (IS_ERR(dir)) { dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n", nbd_name(nbd)); return -EIO; @@ -1692,7 +1692,7 @@ static int nbd_dbg_init(void) struct dentry *dbg_dir; dbg_dir = debugfs_create_dir("nbd", NULL); - if (!dbg_dir) + if (IS_ERR(dbg_dir)) return -EIO; nbd_dbg_dir = dbg_dir; diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h index ea7ac8bca63cf..da1d0542d7e2c 100644 --- a/drivers/block/rnbd/rnbd-proto.h +++ b/drivers/block/rnbd/rnbd-proto.h @@ -241,7 +241,7 @@ static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf) bio_opf = REQ_OP_WRITE; break; case RNBD_OP_FLUSH: - bio_opf = REQ_OP_FLUSH | REQ_PREFLUSH; + bio_opf = REQ_OP_WRITE | REQ_PREFLUSH; break; case RNBD_OP_DISCARD: bio_opf = REQ_OP_DISCARD; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index c7331f5197503..33d3298a0da16 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -1120,6 +1120,11 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq) return ubq->nr_io_ready == ubq->q_depth; } +static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags) +{ + io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags); +} + static void ublk_cancel_queue(struct ublk_queue *ubq) { int i; @@ -1131,8 +1136,8 @@ static void 
ublk_cancel_queue(struct ublk_queue *ubq) struct ublk_io *io = &ubq->ios[i]; if (io->flags & UBLK_IO_FLAG_ACTIVE) - io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, - IO_URING_F_UNLOCKED); + io_uring_cmd_complete_in_task(io->cmd, + ublk_cmd_cancel_cb); } /* all io commands are canceled */ @@ -1281,7 +1286,7 @@ static inline int ublk_check_cmd_op(u32 cmd_op) { u32 ioc_type = _IOC_TYPE(cmd_op); - if (IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u') + if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u') return -EOPNOTSUPP; if (ioc_type != 'u' && ioc_type != 0) diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index 3a34d7c1475bc..52ef44688d38a 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -1319,17 +1319,17 @@ static void nxp_serdev_remove(struct serdev_device *serdev) hci_free_dev(hdev); } -static struct btnxpuart_data w8987_data = { +static struct btnxpuart_data w8987_data __maybe_unused = { .helper_fw_name = NULL, .fw_name = FIRMWARE_W8987, }; -static struct btnxpuart_data w8997_data = { +static struct btnxpuart_data w8997_data __maybe_unused = { .helper_fw_name = FIRMWARE_HELPER, .fw_name = FIRMWARE_W8997, }; -static const struct of_device_id nxpuart_of_match_table[] = { +static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = { { .compatible = "nxp,88w8987-bt", .data = &w8987_data }, { .compatible = "nxp,88w8997-bt", .data = &w8997_data }, { } diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index c10a4aa973736..cd48033b804a3 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -571,6 +571,10 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng); + /* Give back zero bytes, as TPM chip has not yet fully resumed: */ + if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) + return 0; + return tpm_get_random(chip, data, max); } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 4463d00182909..586ca10b0d72e 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -412,6 +412,8 @@ int tpm_pm_suspend(struct device *dev) } suspended: + chip->flags |= TPM_CHIP_FLAG_SUSPENDED; + if (rc) dev_err(dev, "Ignoring error %d while suspending\n", rc); return 0; @@ -429,6 +431,14 @@ int tpm_pm_resume(struct device *dev) if (chip == NULL) return -ENODEV; + chip->flags &= ~TPM_CHIP_FLAG_SUSPENDED; + + /* + * Guarantee that SUSPENDED is written last, so that hwrng does not + * activate before the chip has been fully resumed. 
+ */ + wmb(); + return 0; } EXPORT_SYMBOL_GPL(tpm_pm_resume); diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 7af3898066433..7db3593941eaa 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -122,6 +122,29 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"), }, }, + { + .callback = tpm_tis_disable_irq, + .ident = "ThinkStation P360 Tiny", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P360 Tiny"), + }, + }, + { + .callback = tpm_tis_disable_irq, + .ident = "ThinkPad L490", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L490"), + }, + }, + { + .callback = tpm_tis_disable_irq, + .ident = "UPX-TGL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "AAEON"), + }, + }, {} }; diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 02945d53fcefa..558144fa707ae 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -1209,25 +1209,20 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip) u32 intmask; int rc; - if (chip->ops->clk_enable != NULL) - chip->ops->clk_enable(chip, true); - - /* reenable interrupts that device may have lost or - * BIOS/firmware may have disabled + /* + * Re-enable interrupts that device may have lost or BIOS/firmware may + * have disabled. */ rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq); - if (rc < 0) - goto out; + if (rc < 0) { + dev_err(&chip->dev, "Setting IRQ failed.\n"); + return; + } intmask = priv->int_mask | TPM_GLOBAL_INT_ENABLE; - - tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); - -out: - if (chip->ops->clk_enable != NULL) - chip->ops->clk_enable(chip, false); - - return; + rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); + if (rc < 0) + dev_err(&chip->dev, "Enabling interrupts failed.\n"); } int tpm_tis_resume(struct device *dev) @@ -1235,27 +1230,27 @@ int tpm_tis_resume(struct device *dev) struct tpm_chip *chip = dev_get_drvdata(dev); int ret; - ret = tpm_tis_request_locality(chip, 0); - if (ret < 0) + ret = tpm_chip_start(chip); + if (ret) return ret; if (chip->flags & TPM_CHIP_FLAG_IRQ) tpm_tis_reenable_interrupts(chip); - ret = tpm_pm_resume(dev); - if (ret) - goto out; - /* * TPM 1.2 requires self-test on resume. This function actually returns * an error code but for unknown reason it isn't handled. */ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) tpm1_do_selftest(chip); -out: - tpm_tis_relinquish_locality(chip, 0); - return ret; + tpm_chip_stop(chip); + + ret = tpm_pm_resume(dev); + if (ret) + return ret; + + return 0; } EXPORT_SYMBOL_GPL(tpm_tis_resume); #endif diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 29904395e95f9..b2f05d27167e3 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -975,7 +975,7 @@ static int __init acpi_cpufreq_probe(struct platform_device *pdev) /* don't keep reloading if cpufreq_driver exists */ if (cpufreq_get_current_driver()) - return -EEXIST; + return -ENODEV; pr_debug("%s\n", __func__); diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 1d2cfea9858af..73efbcf5513b2 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -583,7 +583,7 @@ static int __init pcc_cpufreq_probe(struct platform_device *pdev) /* Skip initialization if another cpufreq driver is there. 
*/ if (cpufreq_get_current_driver()) - return -EEXIST; + return -ENODEV; if (acpi_disabled) return -ENODEV; diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index bdbd907884ce7..f332fe7af92ba 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -571,6 +571,7 @@ void read_cdat_data(struct cxl_port *port) /* Don't leave table data allocated on error */ devm_kfree(dev, cdat_table); dev_err(dev, "CDAT data read error\n"); + return; } port->cdat.table = cdat_table + sizeof(__le32); diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index af22be84034bb..538bd677c254a 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -706,21 +706,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, int rcode; if (destination == IEEE1394_ALL_NODES) { - kfree(r); - - return; - } - - if (offset != dev->handler.offset) + // Although the response to the broadcast packet is not necessarily required, the + // fw_send_response() function should still be called to maintain the reference + // counting of the object. In that case, the call simply releases the + // object, decreasing its reference count. + rcode = RCODE_COMPLETE; + } else if (offset != dev->handler.offset) { rcode = RCODE_ADDRESS_ERROR; - else if (tcode != TCODE_WRITE_BLOCK_REQUEST) + } else if (tcode != TCODE_WRITE_BLOCK_REQUEST) { rcode = RCODE_TYPE_ERROR; - else if (fwnet_incoming_packet(dev, payload, length, - source, generation, false) != 0) { + } else if (fwnet_incoming_packet(dev, payload, length, + source, generation, false) != 0) { dev_err(&dev->netdev->dev, "incoming packet failure\n"); rcode = RCODE_CONFLICT_ERROR; - } else + } else { rcode = RCODE_COMPLETE; + } fw_send_response(card, r, rcode); } diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c index 82c64cb9f5316..74363ed7501f6 100644 --- a/drivers/firmware/sysfb_simplefb.c +++ b/drivers/firmware/sysfb_simplefb.c @@ -51,7 +51,8 @@ __init bool sysfb_parse_mode(const struct screen_info *si, * * It's not easily possible to fix this in struct screen_info, * as this could break UAPI. The best solution is to compute - * bits_per_pixel here and ignore lfb_depth. In the loop below, + * bits_per_pixel from the color bits, reserved bits and + * reported lfb_depth, whichever is highest. In the loop below, * ignore simplefb formats with alpha bits, as EFI and VESA * don't specify alpha channels. */ @@ -60,6 +61,7 @@ __init bool sysfb_parse_mode(const struct screen_info *si, si->green_size + si->green_pos, si->blue_size + si->blue_pos), si->rsvd_size + si->rsvd_pos); + bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth); } else { bits_per_pixel = si->lfb_depth; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 981a9cfb63b57..5c7d40873ee20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3757,6 +3757,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags == (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64); + /* APUs with gfx9 onwards don't rely on PCIe atomics; an + * internal path natively supports atomics, so set have_atomics_support to true.
+ */ + else if ((adev->flags & AMD_IS_APU) && + (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) + adev->have_atomics_support = true; else adev->have_atomics_support = !pci_enable_atomic_ops_to_root(adev->pdev, @@ -4506,7 +4512,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) dev_info(adev->dev, "recover vram bo from shadow start\n"); mutex_lock(&adev->shadow_list_lock); list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { - shadow = &vmbo->bo; + /* If vm is compute context or adev is APU, shadow will be NULL */ + if (!vmbo->shadow) + continue; + shadow = vmbo->shadow; + /* No need to recover an evicted BO */ if (shadow->tbo.resource->mem_type != TTM_PL_TT || shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index f52d0ba91a770..a7d250809da99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -582,7 +582,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev) if (r) amdgpu_fence_driver_force_completion(ring); - if (ring->fence_drv.irq_src) + if (!drm_dev_is_unplugged(adev_to_drm(adev)) && + ring->fence_drv.irq_src) amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 9d3a0542c9967..f3f541ba0acaa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -687,9 +687,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r if (r) return r; - r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); - if (r) - goto late_fini; + if (adev->gfx.cp_ecc_error_irq.funcs) { + r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); + if (r) + goto late_fini; + } } else { amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index f5b5ce1051a28..1ec076517c96d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -8152,8 +8152,14 @@ static int gfx_v10_0_set_powergating_state(void *handle, case IP_VERSION(10, 3, 3): case IP_VERSION(10, 3, 6): case IP_VERSION(10, 3, 7): + if (!enable) + amdgpu_gfx_off_ctrl(adev, false); + gfx_v10_cntl_pg(adev, enable); - amdgpu_gfx_off_ctrl(adev, enable); + + if (enable) + amdgpu_gfx_off_ctrl(adev, true); + break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index a9da0486467ac..c4940b6ea1c4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1315,13 +1315,6 @@ static int gfx_v11_0_sw_init(void *handle) if (r) return r; - /* ECC error */ - r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, - GFX_11_0_0__SRCID__CP_ECC_ERROR, - &adev->gfx.cp_ecc_error_irq); - if (r) - return r; - /* FED error */ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT, @@ -4444,7 +4437,6 @@ static int gfx_v11_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -4675,24 +4667,27 @@ static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) uint64_t clock; uint64_t clock_counter_lo, 
clock_counter_hi_pre, clock_counter_hi_after; - amdgpu_gfx_off_ctrl(adev, false); - mutex_lock(&adev->gfx.gpu_clock_mutex); if (amdgpu_sriov_vf(adev)) { + amdgpu_gfx_off_ctrl(adev, false); + mutex_lock(&adev->gfx.gpu_clock_mutex); clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI); clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO); clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI); if (clock_counter_hi_pre != clock_counter_hi_after) clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO); + mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); } else { + preempt_disable(); clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); if (clock_counter_hi_pre != clock_counter_hi_after) clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); + preempt_enable(); } clock = clock_counter_lo | (clock_counter_hi_after << 32ULL); - mutex_unlock(&adev->gfx.gpu_clock_mutex); - amdgpu_gfx_off_ctrl(adev, true); + return clock; } @@ -5158,8 +5153,14 @@ static int gfx_v11_0_set_powergating_state(void *handle, break; case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): + if (!enable) + amdgpu_gfx_off_ctrl(adev, false); + gfx_v11_cntl_pg(adev, enable); - amdgpu_gfx_off_ctrl(adev, enable); + + if (enable) + amdgpu_gfx_off_ctrl(adev, true); + break; default: break; @@ -5897,36 +5898,6 @@ static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev } } -#define CP_ME1_PIPE_INST_ADDR_INTERVAL 0x1 -#define SET_ECC_ME_PIPE_STATE(reg_addr, state) \ - do { \ - uint32_t tmp = RREG32_SOC15_IP(GC, reg_addr); \ - tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, state); \ - WREG32_SOC15_IP(GC, reg_addr, tmp); \ - } while (0) - -static int gfx_v11_0_set_cp_ecc_error_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - uint32_t ecc_irq_state = 0; - uint32_t pipe0_int_cntl_addr = 0; - int i = 0; - - ecc_irq_state = (state == AMDGPU_IRQ_STATE_ENABLE) ? 
1 : 0; - - pipe0_int_cntl_addr = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); - - WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, ecc_irq_state); - - for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) - SET_ECC_ME_PIPE_STATE(pipe0_int_cntl_addr + i * CP_ME1_PIPE_INST_ADDR_INTERVAL, - ecc_irq_state); - - return 0; -} - static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -6341,11 +6312,6 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = { .process = gfx_v11_0_priv_inst_irq, }; -static const struct amdgpu_irq_src_funcs gfx_v11_0_cp_ecc_error_irq_funcs = { - .set = gfx_v11_0_set_cp_ecc_error_state, - .process = amdgpu_gfx_cp_ecc_error_irq, -}; - static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = { .process = gfx_v11_0_rlc_gc_fed_irq, }; @@ -6361,9 +6327,6 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.priv_inst_irq.num_types = 1; adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs; - adev->gfx.cp_ecc_error_irq.num_types = 1; /* CP ECC error */ - adev->gfx.cp_ecc_error_irq.funcs = &gfx_v11_0_cp_ecc_error_irq_funcs; - adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */ adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index adbcd8127c82e..9818743ec4197 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3764,7 +3764,8 @@ static int gfx_v9_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -4002,30 +4003,25 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) clock = clock_lo | (clock_hi << 32ULL); break; case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 2): preempt_disable(); - clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); - hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); - /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over - * roughly every 42 seconds. - */ - if (hi_check != clock_hi) { + if (adev->rev_id >= 0x8) { + clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); + clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); + hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); + } else { + clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); - clock_hi = hi_check; + hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); } - preempt_enable(); - clock = clock_lo | (clock_hi << 32ULL); - break; - case IP_VERSION(9, 2, 2): - preempt_disable(); - clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); - hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over - * roughly every 42 seconds. - */ + * roughly every 42 seconds. 
+ */ if (hi_check != clock_hi) { - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); + if (adev->rev_id >= 0x8) + clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); + else + clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); clock_hi = hi_check; } preempt_enable(); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index d95f9fe8f1c54..4116c112e8a01 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -31,6 +31,8 @@ #include "umc_v8_10.h" #include "athub/athub_3_0_0_sh_mask.h" #include "athub/athub_3_0_0_offset.h" +#include "dcn/dcn_3_2_0_offset.h" +#include "dcn/dcn_3_2_0_sh_mask.h" #include "oss/osssys_6_0_0_offset.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" #include "navi10_enum.h" @@ -546,7 +548,24 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev, static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev) { - return 0; + u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL); + unsigned size; + + if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { + size = AMDGPU_VBIOS_VGA_ALLOCATION; + } else { + u32 viewport; + u32 pitch; + + viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); + pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH); + size = (REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * + REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) * + 4); + } + + return size; } static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index c55e09432e262..1c2292cc5f2ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -54,6 +54,7 @@ static int jpeg_v3_0_early_init(void *handle) switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(3, 1, 1): + case IP_VERSION(3, 1, 2): break; default: harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING); diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 98c826f1f89b0..0fb6013441f07 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -98,6 +98,16 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode = }; /* Sienna Cichlid */ +static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)}, +}; + +static const struct amdgpu_video_codecs sc_video_codecs_encode = { + .codec_count = ARRAY_SIZE(sc_video_codecs_encode_array), + .codec_array = sc_video_codecs_encode_array, +}; + static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, @@ -136,8 +146,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = /* SRIOV Sienna Cichlid, not const since data is controlled by host */ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)}, }; static struct 
amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = @@ -237,12 +247,12 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, } else { if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { if (encode) - *codecs = &nv_video_codecs_encode; + *codecs = &sc_video_codecs_encode; else *codecs = &sc_video_codecs_decode_vcn1; } else { if (encode) - *codecs = &nv_video_codecs_encode; + *codecs = &sc_video_codecs_encode; else *codecs = &sc_video_codecs_decode_vcn0; } @@ -251,14 +261,14 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, case IP_VERSION(3, 0, 16): case IP_VERSION(3, 0, 2): if (encode) - *codecs = &nv_video_codecs_encode; + *codecs = &sc_video_codecs_encode; else *codecs = &sc_video_codecs_decode_vcn0; return 0; case IP_VERSION(3, 1, 1): case IP_VERSION(3, 1, 2): if (encode) - *codecs = &nv_video_codecs_encode; + *codecs = &sc_video_codecs_encode; else *codecs = &yc_video_codecs_decode; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index b3cc04dd86536..9295ac7edd565 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1917,9 +1917,11 @@ static int sdma_v4_0_hw_fini(void *handle) return 0; } - for (i = 0; i < adev->sdma.num_instances; i++) { - amdgpu_irq_put(adev, &adev->sdma.ecc_irq, - AMDGPU_SDMA_IRQ_INSTANCE0 + i); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + amdgpu_irq_put(adev, &adev->sdma.ecc_irq, + AMDGPU_SDMA_IRQ_INSTANCE0 + i); + } } sdma_v4_0_ctx_switch_enable(adev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 744be2a056236..d771625365144 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -711,7 +711,7 @@ static int soc21_common_early_init(void *handle) AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_JPEG; - adev->external_rev_id = adev->rev_id + 0x1; + adev->external_rev_id = adev->rev_id + 0x80; break; default: diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index 40c488b26901b..cc3fe9cac5b53 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -423,3 +423,68 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool PERF_TRACE(); } +static void apply_symclk_on_tx_off_wa(struct dc_link *link) +{ + /* There are use cases where SYMCLK is referenced by OTG. For instance + * for TMDS signal, OTG relies SYMCLK even if TX video output is off. + * However current link interface will power off PHY when disabling link + * output. This will turn off SYMCLK generated by PHY. The workaround is + * to identify such case where SYMCLK is still in use by OTG when we + * power off PHY. When this is detected, we will temporarily power PHY + * back on and move PHY's SYMCLK state to SYMCLK_ON_TX_OFF by calling + * program_pix_clk interface. When OTG is disabled, we will then power + * off PHY by calling disable link output again. 
+ * + * In future dcn generations, we plan to rework transmitter control + * interface so that we could have an option to set SYMCLK ON TX OFF + * state in one step without this workaround + */ + + struct dc *dc = link->ctx->dc; + struct pipe_ctx *pipe_ctx = NULL; + uint8_t i; + + if (link->phy_state.symclk_ref_cnts.otg > 0) { + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) { + pipe_ctx->clock_source->funcs->program_pix_clk( + pipe_ctx->clock_source, + &pipe_ctx->stream_res.pix_clk_params, + dc->link_srv->dp_get_encoding_format( + &pipe_ctx->link_config.dp_link_settings), + &pipe_ctx->pll_settings); + link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; + break; + } + } + } +} + +void dcn314_disable_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + struct dc *dc = link->ctx->dc; + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + struct dmcu *dmcu = dc->res_pool->dmcu; + + if (signal == SIGNAL_TYPE_EDP && + link->dc->hwss.edp_backlight_control) + link->dc->hwss.edp_backlight_control(link, false); + else if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->lock_phy(dmcu); + + link_hwss->disable_link_output(link, link_res, signal); + link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; + /* + * Add the logic to extract BOTH power up and power down sequences + * from enable/disable link output and only call edp panel control + * in enable_link_dp and disable_link_dp once. + */ + if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->unlock_phy(dmcu); + dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); + + apply_symclk_on_tx_off_wa(link); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h index c786d5e6a428e..6d0b62503caa6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h @@ -45,4 +45,6 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on); +void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal); + #endif /* __DC_HWSS_DCN314_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index 5267e901a35c1..a588f46b166f4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -105,7 +105,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .enable_lvds_link_output = dce110_enable_lvds_link_output, .enable_tmds_link_output = dce110_enable_tmds_link_output, .enable_dp_link_output = dce110_enable_dp_link_output, - .disable_link_output = dce110_disable_link_output, + .disable_link_output = dcn314_disable_link_output, .z10_restore = dcn31_z10_restore, .z10_save_init = dcn31_z10_save_init, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index 13c7e7394b1c7..d75248b6cae99 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -810,7 +810,8 @@ static 
void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->SwathHeightY[k], v->SwathHeightC[k], TWait, - v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ? + (v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ || + v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ? mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ &v->DSTXAfterScaler[k], @@ -3310,7 +3311,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->swath_width_chroma_ub_this_state[k], v->SwathHeightYThisState[k], v->SwathHeightCThisState[k], v->TWait, - v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ? + (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ? mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h index 500b3dd6052d9..d98e36a9a09cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h @@ -53,6 +53,7 @@ #define BPP_BLENDED_PIPE 0xffffffff #define MEM_STROBE_FREQ_MHZ 1600 +#define MIN_DCFCLK_FREQ_MHZ 200 #define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0 struct display_mode_lib; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index d4b7da526f0a5..e8b2fc4002a52 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -359,5 +359,8 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un link[i] = stream[i].link; bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing); } + + ret = dpia_validate_usb4_bw(link, bw_needed, num_streams); + return ret; } diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 300e156b924f4..078aaaa531628 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -36,6 +36,8 @@ #define amdgpu_dpm_enable_bapm(adev, e) \ ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) +#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev)) + int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -1460,15 +1462,24 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) { - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - struct smu_context *smu = adev->powerplay.pp_handle; + if (is_support_sw_smu(adev)) { + struct smu_context *smu = adev->powerplay.pp_handle; - if ((is_support_sw_smu(adev) && smu->od_enabled) || - (is_support_sw_smu(adev) && smu->is_apu) || - (!is_support_sw_smu(adev) && hwmgr->od_enabled)) - return true; + return (smu->od_enabled || smu->is_apu); + } else { + struct pp_hwmgr *hwmgr; - return false; + /* + * DPM on some legacy ASICs doesn't carry the od_enabled member, + * as its pp_handle is cast directly from adev.
+ */ + if (amdgpu_dpm_is_legacy_dpm(adev)) + return false; + + hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle; + + return hwmgr->od_enabled; + } } int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 5633c5797e85a..2ddf5198e5c48 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -733,6 +733,24 @@ static int smu_late_init(void *handle) return ret; } + /* + * Explicitly notify the PMFW of the power mode the system is in, + * since the PMFW may boot the ASIC in a different mode. + * For those supporting the ACDC switch via gpio, the PMFW will + * handle the switch automatically. Driver involvement + * is unnecessary. + */ + if (!smu->dc_controlled_by_gpio) { + ret = smu_set_power_source(smu, + adev->pm.ac_power ? SMU_POWER_SOURCE_AC : + SMU_POWER_SOURCE_DC); + if (ret) { + dev_err(adev->dev, "Failed to switch to %s mode!\n", + adev->pm.ac_power ? "AC" : "DC"); + return ret; + } + } + if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) || (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3))) return 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index c4000518dc56d..275f708db6362 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -3413,26 +3413,8 @@ static int navi10_post_smu_init(struct smu_context *smu) return 0; ret = navi10_run_umc_cdr_workaround(smu); - if (ret) { + if (ret) dev_err(adev->dev, "Failed to apply umc cdr workaround!\n"); - return ret; - } - - if (!smu->dc_controlled_by_gpio) { - /* - * For Navi1X, manually switch it to AC mode as PMFW - * may boot it with DC mode. - */ - ret = smu_v11_0_set_power_source(smu, - adev->pm.ac_power ? - SMU_POWER_SOURCE_AC : - SMU_POWER_SOURCE_DC); - if (ret) { - dev_err(adev->dev, "Failed to switch to %s mode!\n", - adev->pm.ac_power ? "AC" : "DC"); - return ret; - } - } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 3d9ff46706fb7..98a33f8ee2095 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -1770,6 +1770,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost, .get_power_limit = smu_v13_0_7_get_power_limit, .set_power_limit = smu_v13_0_set_power_limit, + .set_power_source = smu_v13_0_set_power_source, .get_power_profile_mode = smu_v13_0_7_get_power_profile_mode, .set_power_profile_mode = smu_v13_0_7_set_power_profile_mode, .set_tool_table_location = smu_v13_0_set_tool_table_location, diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 794ffd4a29c5a..f32ce29edba72 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -425,11 +425,12 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, return ERR_PTR(-EIO); /* - * If we don't have IO space at all, use MMIO now and - * assume the chip has MMIO enabled by default (rev 0x20 - * and higher). + * After AST2500, MMIO is enabled by default, and it should be used + * to stay compatible with Arm.
*/ - if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) { + if (pdev->revision >= 0x40) { + ast->ioregs = ast->regs + AST_IO_MM_OFFSET; + } else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) { drm_info(dev, "platform has no IO space, trying MMIO\n"); ast->ioregs = ast->regs + AST_IO_MM_OFFSET; } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 64458982be40c..6bb1b8b27d7a1 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -641,19 +641,27 @@ static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y, static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, size_t len, struct drm_rect *clip) { + u32 line_length = info->fix.line_length; + u32 fb_height = info->var.yres; off_t end = off + len; u32 x1 = 0; - u32 y1 = off / info->fix.line_length; + u32 y1 = off / line_length; u32 x2 = info->var.xres; - u32 y2 = DIV_ROUND_UP(end, info->fix.line_length); + u32 y2 = DIV_ROUND_UP(end, line_length); + + /* Don't allow any of them beyond the bottom bound of display area */ + if (y1 > fb_height) + y1 = fb_height; + if (y2 > fb_height) + y2 = fb_height; if ((y2 - y1) == 1) { /* * We've only written to a single scanline. Try to reduce * the number of horizontal pixels that need an update. */ - off_t bit_off = (off % info->fix.line_length) * 8; - off_t bit_end = (end % info->fix.line_length) * 8; + off_t bit_off = (off % line_length) * 8; + off_t bit_end = (end % line_length) * 8; x1 = bit_off / info->var.bits_per_pixel; x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel); diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 295382cd09b0b..3fd6c733ff4e3 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -221,7 +221,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host, return dsi; } - dsi->dev.of_node = info->node; + device_set_node(&dsi->dev, of_fwnode_handle(info->node)); dsi->channel = info->channel; strlcpy(dsi->name, info->type, sizeof(dsi->name)); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h index 74ea3c26deadc..1a5ae781b56c6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.h +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h @@ -34,11 +34,11 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data, return -ENODEV; } -int g2d_open(struct drm_device *drm_dev, struct drm_file *file) +static inline int g2d_open(struct drm_device *drm_dev, struct drm_file *file) { return 0; } -void g2d_close(struct drm_device *drm_dev, struct drm_file *file) +static inline void g2d_close(struct drm_device *drm_dev, struct drm_file *file) { } #endif diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 06a0ca157e89e..e4f4d2e3fdfeb 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -62,10 +62,11 @@ config DRM_I915_FORCE_PROBE This is the default value for the i915.force_probe module parameter. Using the module parameter overrides this option. - Force probe the i915 for Intel graphics devices that are - recognized but not properly supported by this kernel version. It is - recommended to upgrade to a kernel version with proper support as soon - as it is available. + Force probe the i915 driver for Intel graphics devices that are + recognized but not properly supported by this kernel version. Force + probing an unsupported device taints the kernel. 
It is recommended to + upgrade to a kernel version with proper support as soon as it is + available. It can also be used to block the probe of recognized and fully supported devices. @@ -75,7 +76,8 @@ config DRM_I915_FORCE_PROBE Use "[,,...]" to force probe the i915 for listed devices. For example, "4500" or "4500,4571". - Use "*" to force probe the driver for all known devices. + Use "*" to force probe the driver for all known devices. Not + recommended. Use "!" right before the ID to block the probe of the device. For example, "4500,!4571" forces the probe of 4500 and blocks the probe of diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 40de9f0f171b4..f33164b10292c 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -1028,7 +1028,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane, int ret; if (old_obj) { - const struct intel_crtc_state *crtc_state = + const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, to_intel_crtc(old_plane_state->hw.crtc)); @@ -1043,7 +1043,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane, * This should only fail upon a hung GPU, in which case we * can safely continue. */ - if (intel_crtc_needs_modeset(crtc_state)) { + if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) { ret = i915_sw_fence_await_reservation(&state->commit_ready, old_obj->base.resv, false, 0, diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index f0bace9d98a18..529ee22be872e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -1601,6 +1601,11 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, pipe_config->dsc.slice_count = drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, true); + if (!pipe_config->dsc.slice_count) { + drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n", + pipe_config->dsc.slice_count); + return -EINVAL; + } } else { u16 dsc_max_output_bpp = 0; u8 dsc_dp_slice_count; diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 650232c4892b1..b183efab04a1d 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -204,8 +204,6 @@ bool intel_hdcp2_capable(struct intel_connector *connector) struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - struct intel_gt *gt = dev_priv->media_gt; - struct intel_gsc_uc *gsc = >->uc.gsc; bool capable = false; /* I915 support for HDCP2.2 */ @@ -213,9 +211,13 @@ bool intel_hdcp2_capable(struct intel_connector *connector) return false; /* If MTL+ make sure gsc is loaded and proxy is setup */ - if (intel_hdcp_gsc_cs_required(dev_priv)) - if (!intel_uc_fw_is_running(&gsc->fw)) + if (intel_hdcp_gsc_cs_required(dev_priv)) { + struct intel_gt *gt = dev_priv->media_gt; + struct intel_gsc_uc *gsc = gt ? 
>->uc.gsc : NULL; + + if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) return false; + } /* MEI/GSC interface is solid depending on which is used */ mutex_lock(&dev_priv->display.hdcp.comp_mutex); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c index cf49188db6a6e..e0e793167d61b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c @@ -31,12 +31,14 @@ { FORCEWAKE_MT, 0, 0, "FORCEWAKE" } #define COMMON_GEN9BASE_GLOBAL \ - { GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \ - { GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }, \ { ERROR_GEN6, 0, 0, "ERROR_GEN6" }, \ { DONE_REG, 0, 0, "DONE_REG" }, \ { HSW_GTT_CACHE_EN, 0, 0, "HSW_GTT_CACHE_EN" } +#define GEN9_GLOBAL \ + { GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \ + { GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" } + #define COMMON_GEN12BASE_GLOBAL \ { GEN12_FAULT_TLB_DATA0, 0, 0, "GEN12_FAULT_TLB_DATA0" }, \ { GEN12_FAULT_TLB_DATA1, 0, 0, "GEN12_FAULT_TLB_DATA1" }, \ @@ -142,6 +144,7 @@ static const struct __guc_mmio_reg_descr xe_lpd_gsc_inst_regs[] = { static const struct __guc_mmio_reg_descr default_global_regs[] = { COMMON_BASE_GLOBAL, COMMON_GEN9BASE_GLOBAL, + GEN9_GLOBAL, }; static const struct __guc_mmio_reg_descr default_rc_class_regs[] = { diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 2a012da8ccfaa..edcfb5fe20b24 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -1344,6 +1344,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENODEV; } + if (intel_info->require_force_probe) { + dev_info(&pdev->dev, "Force probing unsupported Device ID %04x, tainting kernel\n", + pdev->device); + add_taint(TAINT_USER, LOCKDEP_STILL_OK); + } + /* Only bind to function 0 of the device. Early generations * used function 1 as a placeholder for multi-head. 
This causes * us confusion instead, especially on the systems where both diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h index 2b3ae84057dfe..bdcd554fc8a80 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h @@ -98,17 +98,17 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = { static const struct dpu_lm_cfg msm8998_lm[] = { LM_BLK("lm_0", LM_0, 0x44000, MIXER_MSM8998_MASK, - &msm8998_lm_sblk, PINGPONG_0, LM_2, DSPP_0), + &msm8998_lm_sblk, PINGPONG_0, LM_1, DSPP_0), LM_BLK("lm_1", LM_1, 0x45000, MIXER_MSM8998_MASK, - &msm8998_lm_sblk, PINGPONG_1, LM_5, DSPP_1), + &msm8998_lm_sblk, PINGPONG_1, LM_0, DSPP_1), LM_BLK("lm_2", LM_2, 0x46000, MIXER_MSM8998_MASK, - &msm8998_lm_sblk, PINGPONG_2, LM_0, 0), + &msm8998_lm_sblk, PINGPONG_2, LM_5, 0), LM_BLK("lm_3", LM_3, 0x47000, MIXER_MSM8998_MASK, &msm8998_lm_sblk, PINGPONG_MAX, 0, 0), LM_BLK("lm_4", LM_4, 0x48000, MIXER_MSM8998_MASK, &msm8998_lm_sblk, PINGPONG_MAX, 0, 0), LM_BLK("lm_5", LM_5, 0x49000, MIXER_MSM8998_MASK, - &msm8998_lm_sblk, PINGPONG_3, LM_1, 0), + &msm8998_lm_sblk, PINGPONG_3, LM_2, 0), }; static const struct dpu_pingpong_cfg msm8998_pp[] = { @@ -134,10 +134,10 @@ static const struct dpu_dspp_cfg msm8998_dspp[] = { }; static const struct dpu_intf_cfg msm8998_intf[] = { - INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25), - INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27), - INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29), - INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_HDMI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31), + INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25), + INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27), + INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29), + INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_HDMI, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31), }; static const struct dpu_perf_cfg msm8998_perf_data = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h index 282d410269ff6..42b0e58624d00 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h @@ -128,10 +128,10 @@ static const struct dpu_dspp_cfg sm8150_dspp[] = { }; static const struct dpu_pingpong_cfg sm8150_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)), PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h index c57400265f288..e3bdfe7b30f1f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h +++ 
b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h @@ -116,10 +116,10 @@ static const struct dpu_lm_cfg sc8180x_lm[] = { }; static const struct dpu_pingpong_cfg sc8180x_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)), PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h index 2c40229ea5159..ed130582873c7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h @@ -129,10 +129,10 @@ static const struct dpu_dspp_cfg sm8250_dspp[] = { }; static const struct dpu_pingpong_cfg sm8250_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)), PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h index 8799ed7571190..a46b11730a4d4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h @@ -80,8 +80,8 @@ static const struct dpu_dspp_cfg sc7180_dspp[] = { }; static const struct dpu_pingpong_cfg sc7180_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te, -1, -1), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te, -1, -1), + PP_BLK("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk, -1, -1), + PP_BLK("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk, -1, -1), }; static const struct dpu_intf_cfg sc7180_intf[] = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h index 6f04d8f85c925..988d820f7ef2e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h @@ -122,7 +122,6 @@ const struct dpu_mdss_cfg dpu_sm6115_cfg = { .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \ BIT(MDP_SSPP_TOP0_INTR2) | \ BIT(MDP_SSPP_TOP0_HIST_INTR) | \ - BIT(MDP_INTF0_INTR) | \ BIT(MDP_INTF1_INTR), }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h index 303492d62a5ca..c9003dcc1a59b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h @@ -112,7 +112,6 @@ const struct dpu_mdss_cfg dpu_qcm2290_cfg = { .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \ BIT(MDP_SSPP_TOP0_INTR2) | \ BIT(MDP_SSPP_TOP0_HIST_INTR) | \ - BIT(MDP_INTF0_INTR) | \ BIT(MDP_INTF1_INTR), }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h 
index ca107ca8de462..4f6a965bcd90b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h @@ -127,22 +127,22 @@ static const struct dpu_dspp_cfg sm8350_dspp[] = { }; static const struct dpu_pingpong_cfg sm8350_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)), - PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)), - PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)), - PP_BLK("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1), - PP_BLK("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1), }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h index 5957de1859844..6b2c7eae71d99 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h @@ -87,10 +87,10 @@ static const struct dpu_dspp_cfg sc7280_dspp[] = { }; static const struct dpu_pingpong_cfg sc7280_pp[] = { - PP_BLK("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1), - PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1), - PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1), - PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1), + PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1), + PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1), + PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1), + PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1), }; static const struct dpu_intf_cfg sc7280_intf[] = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h index 9aab110b8c44d..706d0f13b598e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h @@ -121,18 +121,18 @@ static const struct dpu_dspp_cfg sc8280xp_dspp[] = { }; static const struct dpu_pingpong_cfg sc8280xp_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te, - DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), -1), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te, - DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), -1), - PP_BLK_TE("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk_te, - DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), -1), - PP_BLK_TE("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk_te, - 
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), -1), - PP_BLK_TE("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk_te, - DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1), - PP_BLK_TE("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk_te, - DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1), + PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), -1), + PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), -1), + PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), -1), + PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), -1), + PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1), + PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1), }; static const struct dpu_merge_3d_cfg sc8280xp_merge_3d[] = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h index 02a259b6b4268..4ecb3df5cbc02 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h @@ -128,28 +128,28 @@ static const struct dpu_dspp_cfg sm8450_dspp[] = { }; /* FIXME: interrupts */ static const struct dpu_pingpong_cfg sm8450_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)), - PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)), - PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)), - PP_BLK("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1), - PP_BLK("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1), - PP_BLK("pingpong_6", PINGPONG_6, 0x65800, MERGE_3D_3, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_6", PINGPONG_6, 0x65800, MERGE_3D_3, sc7280_pp_sblk, -1, -1), - PP_BLK("pingpong_7", PINGPONG_7, 0x65c00, MERGE_3D_3, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_7", PINGPONG_7, 0x65c00, MERGE_3D_3, sc7280_pp_sblk, -1, -1), }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h index 9e403034093fd..d0ab351b6a8b9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h @@ -132,28 +132,28 @@ static const struct dpu_dspp_cfg sm8550_dspp[] = { &sm8150_dspp_sblk), }; static const 
struct dpu_pingpong_cfg sm8550_pp[] = { - PP_BLK_DIPHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), -1), - PP_BLK_DIPHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), -1), - PP_BLK_DIPHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), -1), - PP_BLK_DIPHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), -1), - PP_BLK_DIPHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1), - PP_BLK_DIPHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1), - PP_BLK_DIPHER("pingpong_6", PINGPONG_6, 0x66000, MERGE_3D_3, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_6", PINGPONG_6, 0x66000, MERGE_3D_3, sc7280_pp_sblk, -1, -1), - PP_BLK_DIPHER("pingpong_7", PINGPONG_7, 0x66400, MERGE_3D_3, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_7", PINGPONG_7, 0x66400, MERGE_3D_3, sc7280_pp_sblk, -1, -1), }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 03f162af1a50b..5d994bce696f9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -491,7 +491,7 @@ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = { .len = 0x20, .version = 0x20000}, }; -#define PP_BLK_DIPHER(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \ +#define PP_BLK_DITHER(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \ {\ .name = _name, .id = _id, \ .base = _base, .len = 0, \ @@ -587,12 +587,12 @@ static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3}; static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = { { - .pps = 1088 * 1920 * 30, + .pps = 1920 * 1080 * 30, .ot_limit = 2, }, { - .pps = 1088 * 1920 * 60, - .ot_limit = 6, + .pps = 1920 * 1080 * 60, + .ot_limit = 4, }, { .pps = 3840 * 2160 * 30, @@ -705,10 +705,7 @@ static const struct dpu_qos_lut_entry msm8998_qos_linear[] = { {.fl = 10, .lut = 0x1555b}, {.fl = 11, .lut = 0x5555b}, {.fl = 12, .lut = 0x15555b}, - {.fl = 13, .lut = 0x55555b}, - {.fl = 14, .lut = 0}, - {.fl = 1, .lut = 0x1b}, - {.fl = 0, .lut = 0} + {.fl = 0, .lut = 0x55555b} }; static const struct dpu_qos_lut_entry sdm845_qos_linear[] = { @@ -730,9 +727,7 @@ static const struct dpu_qos_lut_entry msm8998_qos_macrotile[] = { {.fl = 10, .lut = 0x1aaff}, {.fl = 11, .lut = 0x5aaff}, {.fl = 12, .lut = 0x15aaff}, - {.fl = 13, .lut = 0x55aaff}, - {.fl = 1, .lut = 0x1aaff}, - {.fl = 0, .lut = 0}, + {.fl = 0, .lut = 0x55aaff}, }; static const struct dpu_qos_lut_entry sc7180_qos_linear[] = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c index 53326f25e40ef..17f3e7e4f1941 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -15,7 +15,7 @@ /* * Register offsets in MDSS 
register file for the interrupt registers - * w.r.t. to the MDP base + * w.r.t. the MDP base */ #define MDP_SSPP_TOP0_OFF 0x0 #define MDP_INTF_0_OFF 0x6A000 @@ -24,20 +24,23 @@ #define MDP_INTF_3_OFF 0x6B800 #define MDP_INTF_4_OFF 0x6C000 #define MDP_INTF_5_OFF 0x6C800 +#define INTF_INTR_EN 0x1c0 +#define INTF_INTR_STATUS 0x1c4 +#define INTF_INTR_CLEAR 0x1c8 #define MDP_AD4_0_OFF 0x7C000 #define MDP_AD4_1_OFF 0x7D000 #define MDP_AD4_INTR_EN_OFF 0x41c #define MDP_AD4_INTR_CLEAR_OFF 0x424 #define MDP_AD4_INTR_STATUS_OFF 0x420 -#define MDP_INTF_0_OFF_REV_7xxx 0x34000 -#define MDP_INTF_1_OFF_REV_7xxx 0x35000 -#define MDP_INTF_2_OFF_REV_7xxx 0x36000 -#define MDP_INTF_3_OFF_REV_7xxx 0x37000 -#define MDP_INTF_4_OFF_REV_7xxx 0x38000 -#define MDP_INTF_5_OFF_REV_7xxx 0x39000 -#define MDP_INTF_6_OFF_REV_7xxx 0x3a000 -#define MDP_INTF_7_OFF_REV_7xxx 0x3b000 -#define MDP_INTF_8_OFF_REV_7xxx 0x3c000 +#define MDP_INTF_0_OFF_REV_7xxx 0x34000 +#define MDP_INTF_1_OFF_REV_7xxx 0x35000 +#define MDP_INTF_2_OFF_REV_7xxx 0x36000 +#define MDP_INTF_3_OFF_REV_7xxx 0x37000 +#define MDP_INTF_4_OFF_REV_7xxx 0x38000 +#define MDP_INTF_5_OFF_REV_7xxx 0x39000 +#define MDP_INTF_6_OFF_REV_7xxx 0x3a000 +#define MDP_INTF_7_OFF_REV_7xxx 0x3b000 +#define MDP_INTF_8_OFF_REV_7xxx 0x3c000 /** * struct dpu_intr_reg - array of DPU register sets diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c index 84ee2efa9c664..b9dddf576c029 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c @@ -56,11 +56,6 @@ #define INTF_TPG_RGB_MAPPING 0x11C #define INTF_PROG_FETCH_START 0x170 #define INTF_PROG_ROT_START 0x174 - -#define INTF_FRAME_LINE_COUNT_EN 0x0A8 -#define INTF_FRAME_COUNT 0x0AC -#define INTF_LINE_COUNT 0x0B0 - #define INTF_MUX 0x25C #define INTF_STATUS 0x26C diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c index 2d28afdf860ef..a3e413d277175 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c @@ -61,6 +61,7 @@ static const struct dpu_wb_cfg *_wb_offset(enum dpu_wb wb, for (i = 0; i < m->wb_count; i++) { if (wb == m->wb[i].id) { b->blk_addr = addr + m->wb[i].base; + b->log_mask = DPU_DBG_MASK_WB; return &m->wb[i]; } } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h index feb9a729844a3..5acd5683d25a4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h @@ -21,9 +21,6 @@ #define HIST_INTR_EN 0x01c #define HIST_INTR_STATUS 0x020 #define HIST_INTR_CLEAR 0x024 -#define INTF_INTR_EN 0x1C0 -#define INTF_INTR_STATUS 0x1C4 -#define INTF_INTR_CLEAR 0x1C8 #define SPLIT_DISPLAY_EN 0x2F4 #define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8 #define DSPP_IGC_COLOR0_RAM_LUTN 0x300 diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c index 6666783e1468e..1245c7aa49df8 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.c +++ b/drivers/gpu/drm/msm/dp/dp_audio.c @@ -593,6 +593,18 @@ static struct hdmi_codec_pdata codec_data = { .i2s = 1, }; +void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio) +{ + struct dp_audio_private *audio_priv; + + audio_priv = container_of(dp_audio, struct dp_audio_private, dp_audio); + + if (audio_priv->audio_pdev) { + platform_device_unregister(audio_priv->audio_pdev); + audio_priv->audio_pdev = NULL; + } +} + int dp_register_audio_driver(struct device *dev, struct dp_audio *dp_audio) { diff 
--git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h index 84e5f4a5d26ba..4ab78880af829 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.h +++ b/drivers/gpu/drm/msm/dp/dp_audio.h @@ -53,6 +53,8 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev, int dp_register_audio_driver(struct device *dev, struct dp_audio *dp_audio); +void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio); + /** * dp_audio_put() * diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 3e13acdfa7e53..99a38dbe51c0f 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -326,6 +326,7 @@ static void dp_display_unbind(struct device *dev, struct device *master, kthread_stop(dp->ev_tsk); dp_power_client_deinit(dp->power); + dp_unregister_audio_driver(dev, dp->audio); dp_aux_unregister(dp->aux); dp->drm_dev = NULL; dp->aux->drm_dev = NULL; diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index d77fa9793c54d..9c45d641b5212 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -155,6 +155,8 @@ static bool can_do_async(struct drm_atomic_state *state, for_each_new_crtc_in_state(state, crtc, crtc_state, i) { if (drm_atomic_crtc_needs_modeset(crtc_state)) return false; + if (!crtc_state->active) + return false; if (++num_crtcs > 1) return false; *async_crtc = crtc; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index db6c4e281d75d..cd39b9d8abdbe 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -219,7 +219,8 @@ static void put_pages(struct drm_gem_object *obj) } } -static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj) +static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj, + unsigned madv) { struct msm_drm_private *priv = obj->dev->dev_private; struct msm_gem_object *msm_obj = to_msm_bo(obj); @@ -227,7 +228,9 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj) msm_gem_assert_locked(obj); - if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { + if (GEM_WARN_ON(msm_obj->madv > madv)) { + DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n", + msm_obj->madv, madv); return ERR_PTR(-EBUSY); } @@ -248,7 +251,7 @@ struct page **msm_gem_pin_pages(struct drm_gem_object *obj) struct page **p; msm_gem_lock(obj); - p = msm_gem_pin_pages_locked(obj); + p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED); msm_gem_unlock(obj); return p; @@ -473,10 +476,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma) msm_gem_assert_locked(obj); - if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) - return -EBUSY; - - pages = msm_gem_pin_pages_locked(obj); + pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED); if (IS_ERR(pages)) return PTR_ERR(pages); @@ -699,13 +699,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) if (obj->import_attach) return ERR_PTR(-ENODEV); - if (GEM_WARN_ON(msm_obj->madv > madv)) { - DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n", - msm_obj->madv, madv); - return ERR_PTR(-EBUSY); - } - - pages = msm_gem_pin_pages_locked(obj); + pages = msm_gem_pin_pages_locked(obj, madv); if (IS_ERR(pages)) return ERR_CAST(pages); diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index aff18c2f600ab..9f5933c75e3df 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ 
-722,7 +722,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_drm_private *priv = dev->dev_private; struct drm_msm_gem_submit *args = data; struct msm_file_private *ctx = file->driver_priv; - struct msm_gem_submit *submit; + struct msm_gem_submit *submit = NULL; struct msm_gpu *gpu = priv->gpu; struct msm_gpu_submitqueue *queue; struct msm_ringbuffer *ring; @@ -769,13 +769,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, out_fence_fd = get_unused_fd_flags(O_CLOEXEC); if (out_fence_fd < 0) { ret = out_fence_fd; - return ret; + goto out_post_unlock; } } submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds); - if (IS_ERR(submit)) - return PTR_ERR(submit); + if (IS_ERR(submit)) { + ret = PTR_ERR(submit); + goto out_post_unlock; + } trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident, args->nr_bos, args->nr_cmds); @@ -962,11 +964,20 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (has_ww_ticket) ww_acquire_fini(&submit->ticket); out_unlock: - if (ret && (out_fence_fd >= 0)) - put_unused_fd(out_fence_fd); mutex_unlock(&queue->lock); out_post_unlock: - msm_gem_submit_put(submit); + if (ret && (out_fence_fd >= 0)) + put_unused_fd(out_fence_fd); + + if (!IS_ERR_OR_NULL(submit)) { + msm_gem_submit_put(submit); + } else { + /* + * If the submit hasn't yet taken ownership of the queue + * then we need to drop the reference ourself: + */ + msm_submitqueue_put(queue); + } if (!IS_ERR_OR_NULL(post_deps)) { for (i = 0; i < args->nr_out_syncobjs; ++i) { kfree(post_deps[i].chain); diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 418e1e06cddef..5cc8d358cc975 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -234,7 +234,12 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) /* Get the pagetable configuration from the domain */ if (adreno_smmu->cookie) ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie); - if (!ttbr1_cfg) + + /* + * If you hit this WARN_ONCE() you are probably missing an entry in + * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c + */ + if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables")) return ERR_PTR(-ENODEV); pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL); @@ -410,7 +415,7 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig struct msm_mmu *mmu; mmu = msm_iommu_new(dev, quirks); - if (IS_ERR(mmu)) + if (IS_ERR_OR_NULL(mmu)) return mmu; iommu = to_msm_iommu(mmu); diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0012.h b/drivers/gpu/drm/nouveau/include/nvif/if0012.h index eb99d84eb8443..16d4ad5023a3e 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/if0012.h +++ b/drivers/gpu/drm/nouveau/include/nvif/if0012.h @@ -2,6 +2,8 @@ #ifndef __NVIF_IF0012_H__ #define __NVIF_IF0012_H__ +#include + union nvif_outp_args { struct nvif_outp_v0 { __u8 version; @@ -63,7 +65,7 @@ union nvif_outp_acquire_args { __u8 hda; __u8 mst; __u8 pad04[4]; - __u8 dpcd[16]; + __u8 dpcd[DP_RECEIVER_CAP_SIZE]; } dp; }; } v0; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h index b7631c1ab2420..4e7f873f66e27 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h @@ -3,6 +3,7 @@ #define __NVKM_DISP_OUTP_H__ #include "priv.h" +#include #include #include #include @@ -42,7 +43,7 @@ struct nvkm_outp { bool aux_pwr_pu; u8 lttpr[6]; u8 lttprs; - u8 dpcd[16]; + u8 
dpcd[DP_RECEIVER_CAP_SIZE]; struct { int dpcd; /* -1, or index into SUPPORTED_LINK_RATES table */ diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c index 4f0ca709c85a4..fc283a4a1522a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c @@ -146,7 +146,7 @@ nvkm_uoutp_mthd_release(struct nvkm_outp *outp, void *argv, u32 argc) } static int -nvkm_uoutp_mthd_acquire_dp(struct nvkm_outp *outp, u8 dpcd[16], +nvkm_uoutp_mthd_acquire_dp(struct nvkm_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE], u8 link_nr, u8 link_bw, bool hda, bool mst) { int ret; diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index fcd5bd7e5e8e9..8c183639603ec 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -309,7 +309,7 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) */ void drm_sched_fault(struct drm_gpu_scheduler *sched) { - if (sched->ready) + if (sched->timeout_wq) mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0); } EXPORT_SYMBOL(drm_sched_fault); diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index ba2f6a4f8c167..7b177b9fbb097 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -507,6 +507,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, {} }; diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 4b292e0504f1a..ffb16beb6c305 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -312,7 +312,7 @@ static int siw_tx_ctrl(struct siw_iwarp_tx *c_tx, struct socket *s, } /* - * 0copy TCP transmit interface: Use do_tcp_sendpages. + * 0copy TCP transmit interface: Use MSG_SPLICE_PAGES. * * Using sendpage to push page by page appears to be less efficient * than using sendmsg, even if data are copied. @@ -323,20 +323,27 @@ static int siw_tx_ctrl(struct siw_iwarp_tx *c_tx, struct socket *s, static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset, size_t size) { + struct bio_vec bvec; + struct msghdr msg = { + .msg_flags = (MSG_MORE | MSG_DONTWAIT | MSG_SENDPAGE_NOTLAST | + MSG_SPLICE_PAGES), + }; struct sock *sk = s->sk; - int i = 0, rv = 0, sent = 0, - flags = MSG_MORE | MSG_DONTWAIT | MSG_SENDPAGE_NOTLAST; + int i = 0, rv = 0, sent = 0; while (size) { size_t bytes = min_t(size_t, PAGE_SIZE - offset, size); if (size + offset <= PAGE_SIZE) - flags = MSG_MORE | MSG_DONTWAIT; + msg.msg_flags &= ~MSG_SENDPAGE_NOTLAST; tcp_rate_check_app_limited(sk); + bvec_set_page(&bvec, page[i], bytes, offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); + try_page_again: lock_sock(sk); - rv = do_tcp_sendpages(sk, page[i], offset, bytes, flags); + rv = tcp_sendmsg_locked(sk, &msg, size); release_sock(sk); if (rv > 0) { diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index db98c3f86e8c8..6de900776e24a 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -417,22 +417,6 @@ config S390_IOMMU help Support for the IOMMU API for s390 PCI devices. 
-config S390_CCW_IOMMU - bool "S390 CCW IOMMU Support" - depends on S390 && CCW || COMPILE_TEST - select IOMMU_API - help - Enables bits of IOMMU API required by VFIO. The iommu_ops - is not implemented as it is not necessary for VFIO. - -config S390_AP_IOMMU - bool "S390 AP IOMMU Support" - depends on S390 && ZCRYPT || COMPILE_TEST - select IOMMU_API - help - Enables bits of IOMMU API required by VFIO. The iommu_ops - is not implemented as it is not necessary for VFIO. - config MTK_IOMMU tristate "MediaTek IOMMU Support" depends on ARCH_MEDIATEK || COMPILE_TEST diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index ae09c627bc844..c71afda79d644 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -517,6 +517,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = { { .compatible = "qcom,qcm2290-smmu-500", .data = &qcom_smmu_500_impl0_data }, { .compatible = "qcom,qdu1000-smmu-500", .data = &qcom_smmu_500_impl0_data }, { .compatible = "qcom,sc7180-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,sc7180-smmu-v2", .data = &qcom_smmu_v2_data }, { .compatible = "qcom,sc7280-smmu-500", .data = &qcom_smmu_500_impl0_data }, { .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data }, { .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data }, @@ -561,5 +562,14 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu) if (match) return qcom_smmu_create(smmu, match->data); + /* + * If you hit this WARN_ON() you are missing an entry in the + * qcom_smmu_impl_of_match[] table, and GPU per-process page- + * tables will be broken. + */ + WARN(of_device_is_compatible(np, "qcom,adreno-smmu"), + "Missing qcom_smmu_impl_of_match entry for: %s", + dev_name(smmu->dev)); + return smmu; } diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c index d5e774d830215..305eb543ba84f 100644 --- a/drivers/leds/trigger/ledtrig-netdev.c +++ b/drivers/leds/trigger/ledtrig-netdev.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include "../leds.h" @@ -37,7 +37,7 @@ */ struct led_netdev_data { - spinlock_t lock; + struct mutex lock; struct delayed_work work; struct notifier_block notifier; @@ -50,16 +50,16 @@ struct led_netdev_data { unsigned int last_activity; unsigned long mode; -#define NETDEV_LED_LINK 0 -#define NETDEV_LED_TX 1 -#define NETDEV_LED_RX 2 -#define NETDEV_LED_MODE_LINKUP 3 + bool carrier_link_up; }; -enum netdev_led_attr { - NETDEV_ATTR_LINK, - NETDEV_ATTR_TX, - NETDEV_ATTR_RX +enum led_trigger_netdev_modes { + TRIGGER_NETDEV_LINK = 0, + TRIGGER_NETDEV_TX, + TRIGGER_NETDEV_RX, + + /* Keep last */ + __TRIGGER_NETDEV_MAX, }; static void set_baseline_state(struct led_netdev_data *trigger_data) @@ -73,10 +73,10 @@ static void set_baseline_state(struct led_netdev_data *trigger_data) if (!led_cdev->blink_brightness) led_cdev->blink_brightness = led_cdev->max_brightness; - if (!test_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode)) + if (!trigger_data->carrier_link_up) { led_set_brightness(led_cdev, LED_OFF); - else { - if (test_bit(NETDEV_LED_LINK, &trigger_data->mode)) + } else { + if (test_bit(TRIGGER_NETDEV_LINK, &trigger_data->mode)) led_set_brightness(led_cdev, led_cdev->blink_brightness); else @@ -85,8 +85,8 @@ static void set_baseline_state(struct led_netdev_data *trigger_data) /* If we are looking for RX/TX start periodically * checking 
stats */ - if (test_bit(NETDEV_LED_TX, &trigger_data->mode) || - test_bit(NETDEV_LED_RX, &trigger_data->mode)) + if (test_bit(TRIGGER_NETDEV_TX, &trigger_data->mode) || + test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode)) schedule_delayed_work(&trigger_data->work, 0); } } @@ -97,9 +97,9 @@ static ssize_t device_name_show(struct device *dev, struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev); ssize_t len; - spin_lock_bh(&trigger_data->lock); + mutex_lock(&trigger_data->lock); len = sprintf(buf, "%s\n", trigger_data->device_name); - spin_unlock_bh(&trigger_data->lock); + mutex_unlock(&trigger_data->lock); return len; } @@ -115,7 +115,7 @@ static ssize_t device_name_store(struct device *dev, cancel_delayed_work_sync(&trigger_data->work); - spin_lock_bh(&trigger_data->lock); + mutex_lock(&trigger_data->lock); if (trigger_data->net_dev) { dev_put(trigger_data->net_dev); @@ -131,15 +131,14 @@ static ssize_t device_name_store(struct device *dev, trigger_data->net_dev = dev_get_by_name(&init_net, trigger_data->device_name); - clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode); + trigger_data->carrier_link_up = false; if (trigger_data->net_dev != NULL) - if (netif_carrier_ok(trigger_data->net_dev)) - set_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode); + trigger_data->carrier_link_up = netif_carrier_ok(trigger_data->net_dev); trigger_data->last_activity = 0; set_baseline_state(trigger_data); - spin_unlock_bh(&trigger_data->lock); + mutex_unlock(&trigger_data->lock); return size; } @@ -147,20 +146,16 @@ static ssize_t device_name_store(struct device *dev, static DEVICE_ATTR_RW(device_name); static ssize_t netdev_led_attr_show(struct device *dev, char *buf, - enum netdev_led_attr attr) + enum led_trigger_netdev_modes attr) { struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev); int bit; switch (attr) { - case NETDEV_ATTR_LINK: - bit = NETDEV_LED_LINK; - break; - case NETDEV_ATTR_TX: - bit = NETDEV_LED_TX; - break; - case NETDEV_ATTR_RX: - bit = NETDEV_LED_RX; + case TRIGGER_NETDEV_LINK: + case TRIGGER_NETDEV_TX: + case TRIGGER_NETDEV_RX: + bit = attr; break; default: return -EINVAL; @@ -170,7 +165,7 @@ static ssize_t netdev_led_attr_show(struct device *dev, char *buf, } static ssize_t netdev_led_attr_store(struct device *dev, const char *buf, - size_t size, enum netdev_led_attr attr) + size_t size, enum led_trigger_netdev_modes attr) { struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev); unsigned long state; @@ -182,14 +177,10 @@ static ssize_t netdev_led_attr_store(struct device *dev, const char *buf, return ret; switch (attr) { - case NETDEV_ATTR_LINK: - bit = NETDEV_LED_LINK; - break; - case NETDEV_ATTR_TX: - bit = NETDEV_LED_TX; - break; - case NETDEV_ATTR_RX: - bit = NETDEV_LED_RX; + case TRIGGER_NETDEV_LINK: + case TRIGGER_NETDEV_TX: + case TRIGGER_NETDEV_RX: + bit = attr; break; default: return -EINVAL; @@ -207,47 +198,22 @@ static ssize_t netdev_led_attr_store(struct device *dev, const char *buf, return size; } -static ssize_t link_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return netdev_led_attr_show(dev, buf, NETDEV_ATTR_LINK); -} - -static ssize_t link_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_LINK); -} - -static DEVICE_ATTR_RW(link); - -static ssize_t tx_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return netdev_led_attr_show(dev, buf, NETDEV_ATTR_TX); -} - -static 
ssize_t tx_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_TX); -} - -static DEVICE_ATTR_RW(tx); - -static ssize_t rx_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return netdev_led_attr_show(dev, buf, NETDEV_ATTR_RX); -} - -static ssize_t rx_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_RX); -} - -static DEVICE_ATTR_RW(rx); +#define DEFINE_NETDEV_TRIGGER(trigger_name, trigger) \ + static ssize_t trigger_name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ + { \ + return netdev_led_attr_show(dev, buf, trigger); \ + } \ + static ssize_t trigger_name##_store(struct device *dev, \ + struct device_attribute *attr, const char *buf, size_t size) \ + { \ + return netdev_led_attr_store(dev, buf, size, trigger); \ + } \ + static DEVICE_ATTR_RW(trigger_name) + +DEFINE_NETDEV_TRIGGER(link, TRIGGER_NETDEV_LINK); +DEFINE_NETDEV_TRIGGER(tx, TRIGGER_NETDEV_TX); +DEFINE_NETDEV_TRIGGER(rx, TRIGGER_NETDEV_RX); static ssize_t interval_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -313,11 +279,13 @@ static int netdev_trig_notify(struct notifier_block *nb, cancel_delayed_work_sync(&trigger_data->work); - spin_lock_bh(&trigger_data->lock); + mutex_lock(&trigger_data->lock); - clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode); + trigger_data->carrier_link_up = false; switch (evt) { case NETDEV_CHANGENAME: + trigger_data->carrier_link_up = netif_carrier_ok(dev); + fallthrough; case NETDEV_REGISTER: if (trigger_data->net_dev) dev_put(trigger_data->net_dev); @@ -330,14 +298,13 @@ static int netdev_trig_notify(struct notifier_block *nb, break; case NETDEV_UP: case NETDEV_CHANGE: - if (netif_carrier_ok(dev)) - set_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode); + trigger_data->carrier_link_up = netif_carrier_ok(dev); break; } set_baseline_state(trigger_data); - spin_unlock_bh(&trigger_data->lock); + mutex_unlock(&trigger_data->lock); return NOTIFY_DONE; } @@ -360,21 +327,21 @@ static void netdev_trig_work(struct work_struct *work) } /* If we are not looking for RX/TX then return */ - if (!test_bit(NETDEV_LED_TX, &trigger_data->mode) && - !test_bit(NETDEV_LED_RX, &trigger_data->mode)) + if (!test_bit(TRIGGER_NETDEV_TX, &trigger_data->mode) && + !test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode)) return; dev_stats = dev_get_stats(trigger_data->net_dev, &temp); new_activity = - (test_bit(NETDEV_LED_TX, &trigger_data->mode) ? + (test_bit(TRIGGER_NETDEV_TX, &trigger_data->mode) ? dev_stats->tx_packets : 0) + - (test_bit(NETDEV_LED_RX, &trigger_data->mode) ? + (test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode) ? 
dev_stats->rx_packets : 0); if (trigger_data->last_activity != new_activity) { led_stop_software_blink(trigger_data->led_cdev); - invert = test_bit(NETDEV_LED_LINK, &trigger_data->mode); + invert = test_bit(TRIGGER_NETDEV_LINK, &trigger_data->mode); interval = jiffies_to_msecs( atomic_read(&trigger_data->interval)); /* base state is ON (link present) */ @@ -398,7 +365,7 @@ static int netdev_trig_activate(struct led_classdev *led_cdev) if (!trigger_data) return -ENOMEM; - spin_lock_init(&trigger_data->lock); + mutex_init(&trigger_data->lock); trigger_data->notifier.notifier_call = netdev_trig_notify; trigger_data->notifier.priority = 10; diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index c2d2792227f86..baf64540dc00a 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -151,6 +151,12 @@ struct dvb_ca_private { /* mutex serializing ioctls */ struct mutex ioctl_mutex; + + /* A mutex used when a device is disconnected */ + struct mutex remove_mutex; + + /* Whether the device is disconnected */ + int exit; }; static void dvb_ca_private_free(struct dvb_ca_private *ca) @@ -187,7 +193,7 @@ static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca); static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 *ebuf, int ecount); static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, - u8 *ebuf, int ecount); + u8 *ebuf, int ecount, int size_write_flag); /** * findstr - Safely find needle in haystack. @@ -370,7 +376,7 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot) ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10); if (ret) return ret; - ret = dvb_ca_en50221_write_data(ca, slot, buf, 2); + ret = dvb_ca_en50221_write_data(ca, slot, buf, 2, CMDREG_SW); if (ret != 2) return -EIO; ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN); @@ -778,11 +784,13 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, * @buf: The data in this buffer is treated as a complete link-level packet to * be written. * @bytes_write: Size of ebuf. + * @size_write_flag: A flag on the Command Register which says whether the link size + * information will be written or not. * * return: Number of bytes written, or < 0 on error. 
*/ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, - u8 *buf, int bytes_write) + u8 *buf, int bytes_write, int size_write_flag) { struct dvb_ca_slot *sl = &ca->slot_info[slot]; int status; @@ -817,7 +825,7 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, /* OK, set HC bit */ status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, - IRQEN | CMDREG_HC); + IRQEN | CMDREG_HC | size_write_flag); if (status) goto exit; @@ -1508,7 +1516,7 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file, mutex_lock(&sl->slot_lock); status = dvb_ca_en50221_write_data(ca, slot, fragbuf, - fraglen + 2); + fraglen + 2, 0); mutex_unlock(&sl->slot_lock); if (status == (fraglen + 2)) { written = 1; @@ -1709,12 +1717,22 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file) dprintk("%s\n", __func__); - if (!try_module_get(ca->pub->owner)) + mutex_lock(&ca->remove_mutex); + + if (ca->exit) { + mutex_unlock(&ca->remove_mutex); + return -ENODEV; + } + + if (!try_module_get(ca->pub->owner)) { + mutex_unlock(&ca->remove_mutex); return -EIO; + } err = dvb_generic_open(inode, file); if (err < 0) { module_put(ca->pub->owner); + mutex_unlock(&ca->remove_mutex); return err; } @@ -1739,6 +1757,7 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file) dvb_ca_private_get(ca); + mutex_unlock(&ca->remove_mutex); return 0; } @@ -1758,6 +1777,8 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file) dprintk("%s\n", __func__); + mutex_lock(&ca->remove_mutex); + /* mark the CA device as closed */ ca->open = 0; dvb_ca_en50221_thread_update_delay(ca); @@ -1768,6 +1789,13 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file) dvb_ca_private_put(ca); + if (dvbdev->users == 1 && ca->exit == 1) { + mutex_unlock(&ca->remove_mutex); + wake_up(&dvbdev->wait_queue); + } else { + mutex_unlock(&ca->remove_mutex); + } + return err; } @@ -1891,6 +1919,7 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, } mutex_init(&ca->ioctl_mutex); + mutex_init(&ca->remove_mutex); if (signal_pending(current)) { ret = -EINTR; @@ -1933,6 +1962,14 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca) dprintk("%s\n", __func__); + mutex_lock(&ca->remove_mutex); + ca->exit = 1; + mutex_unlock(&ca->remove_mutex); + + if (ca->dvbdev->users < 1) + wait_event(ca->dvbdev->wait_queue, + ca->dvbdev->users == 1); + /* shutdown the thread if there was one */ kthread_stop(ca->thread); diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c index 398c86279b5b0..7c4d86bfdd6c9 100644 --- a/drivers/media/dvb-core/dvb_demux.c +++ b/drivers/media/dvb-core/dvb_demux.c @@ -115,12 +115,12 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed, cc = buf[3] & 0x0f; ccok = ((feed->cc + 1) & 0x0f) == cc; - feed->cc = cc; if (!ccok) { set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); dprintk_sect_loss("missed packet: %d instead of %d!\n", cc, (feed->cc + 1) & 0x0f); } + feed->cc = cc; if (buf[1] & 0x40) // PUSI ? 
feed->peslen = 0xfffa; @@ -300,7 +300,6 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, cc = buf[3] & 0x0f; ccok = ((feed->cc + 1) & 0x0f) == cc; - feed->cc = cc; if (buf[3] & 0x20) { /* adaption field present, check for discontinuity_indicator */ @@ -336,6 +335,7 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, feed->pusi_seen = false; dvb_dmx_swfilter_section_new(feed); } + feed->cc = cc; if (buf[1] & 0x40) { /* PUSI=1 (is set), section boundary is here */ diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index cc0a789f09ae5..bc6950a5740f6 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -293,14 +293,22 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe, } if (events->eventw == events->eventr) { - int ret; + struct wait_queue_entry wait; + int ret = 0; if (flags & O_NONBLOCK) return -EWOULDBLOCK; - ret = wait_event_interruptible(events->wait_queue, - dvb_frontend_test_event(fepriv, events)); - + init_waitqueue_entry(&wait, current); + add_wait_queue(&events->wait_queue, &wait); + while (!dvb_frontend_test_event(fepriv, events)) { + wait_woken(&wait, TASK_INTERRUPTIBLE, 0); + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + } + remove_wait_queue(&events->wait_queue, &wait); if (ret < 0) return ret; } @@ -809,15 +817,26 @@ static void dvb_frontend_stop(struct dvb_frontend *fe) dev_dbg(fe->dvb->device, "%s:\n", __func__); + mutex_lock(&fe->remove_mutex); + if (fe->exit != DVB_FE_DEVICE_REMOVED) fe->exit = DVB_FE_NORMAL_EXIT; mb(); - if (!fepriv->thread) + if (!fepriv->thread) { + mutex_unlock(&fe->remove_mutex); return; + } kthread_stop(fepriv->thread); + mutex_unlock(&fe->remove_mutex); + + if (fepriv->dvbdev->users < -1) { + wait_event(fepriv->dvbdev->wait_queue, + fepriv->dvbdev->users == -1); + } + sema_init(&fepriv->sem, 1); fepriv->state = FESTATE_IDLE; @@ -2761,9 +2780,13 @@ static int dvb_frontend_open(struct inode *inode, struct file *file) struct dvb_adapter *adapter = fe->dvb; int ret; + mutex_lock(&fe->remove_mutex); + dev_dbg(fe->dvb->device, "%s:\n", __func__); - if (fe->exit == DVB_FE_DEVICE_REMOVED) - return -ENODEV; + if (fe->exit == DVB_FE_DEVICE_REMOVED) { + ret = -ENODEV; + goto err_remove_mutex; + } if (adapter->mfe_shared == 2) { mutex_lock(&adapter->mfe_lock); @@ -2771,7 +2794,8 @@ static int dvb_frontend_open(struct inode *inode, struct file *file) if (adapter->mfe_dvbdev && !adapter->mfe_dvbdev->writers) { mutex_unlock(&adapter->mfe_lock); - return -EBUSY; + ret = -EBUSY; + goto err_remove_mutex; } adapter->mfe_dvbdev = dvbdev; } @@ -2794,8 +2818,10 @@ static int dvb_frontend_open(struct inode *inode, struct file *file) while (mferetry-- && (mfedev->users != -1 || mfepriv->thread)) { if (msleep_interruptible(500)) { - if (signal_pending(current)) - return -EINTR; + if (signal_pending(current)) { + ret = -EINTR; + goto err_remove_mutex; + } } } @@ -2807,7 +2833,8 @@ static int dvb_frontend_open(struct inode *inode, struct file *file) if (mfedev->users != -1 || mfepriv->thread) { mutex_unlock(&adapter->mfe_lock); - return -EBUSY; + ret = -EBUSY; + goto err_remove_mutex; } adapter->mfe_dvbdev = dvbdev; } @@ -2866,6 +2893,8 @@ static int dvb_frontend_open(struct inode *inode, struct file *file) if (adapter->mfe_shared) mutex_unlock(&adapter->mfe_lock); + + mutex_unlock(&fe->remove_mutex); return ret; err3: @@ -2887,6 +2916,9 @@ static int dvb_frontend_open(struct inode *inode, struct file *file) err0: 
if (adapter->mfe_shared) mutex_unlock(&adapter->mfe_lock); + +err_remove_mutex: + mutex_unlock(&fe->remove_mutex); return ret; } @@ -2897,6 +2929,8 @@ static int dvb_frontend_release(struct inode *inode, struct file *file) struct dvb_frontend_private *fepriv = fe->frontend_priv; int ret; + mutex_lock(&fe->remove_mutex); + dev_dbg(fe->dvb->device, "%s:\n", __func__); if ((file->f_flags & O_ACCMODE) != O_RDONLY) { @@ -2918,10 +2952,18 @@ static int dvb_frontend_release(struct inode *inode, struct file *file) } mutex_unlock(&fe->dvb->mdev_lock); #endif - if (fe->exit != DVB_FE_NO_EXIT) - wake_up(&dvbdev->wait_queue); if (fe->ops.ts_bus_ctrl) fe->ops.ts_bus_ctrl(fe, 0); + + if (fe->exit != DVB_FE_NO_EXIT) { + mutex_unlock(&fe->remove_mutex); + wake_up(&dvbdev->wait_queue); + } else { + mutex_unlock(&fe->remove_mutex); + } + + } else { + mutex_unlock(&fe->remove_mutex); } dvb_frontend_put(fe); @@ -3022,6 +3064,7 @@ int dvb_register_frontend(struct dvb_adapter *dvb, fepriv = fe->frontend_priv; kref_init(&fe->refcount); + mutex_init(&fe->remove_mutex); /* * After initialization, there need to be two references: one diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index 8a2febf33ce28..8bb8dd34c223e 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c @@ -1564,15 +1564,43 @@ static long dvb_net_ioctl(struct file *file, return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl); } +static int locked_dvb_net_open(struct inode *inode, struct file *file) +{ + struct dvb_device *dvbdev = file->private_data; + struct dvb_net *dvbnet = dvbdev->priv; + int ret; + + if (mutex_lock_interruptible(&dvbnet->remove_mutex)) + return -ERESTARTSYS; + + if (dvbnet->exit) { + mutex_unlock(&dvbnet->remove_mutex); + return -ENODEV; + } + + ret = dvb_generic_open(inode, file); + + mutex_unlock(&dvbnet->remove_mutex); + + return ret; +} + static int dvb_net_close(struct inode *inode, struct file *file) { struct dvb_device *dvbdev = file->private_data; struct dvb_net *dvbnet = dvbdev->priv; + mutex_lock(&dvbnet->remove_mutex); + dvb_generic_release(inode, file); - if(dvbdev->users == 1 && dvbnet->exit == 1) + if (dvbdev->users == 1 && dvbnet->exit == 1) { + mutex_unlock(&dvbnet->remove_mutex); wake_up(&dvbdev->wait_queue); + } else { + mutex_unlock(&dvbnet->remove_mutex); + } + return 0; } @@ -1580,7 +1608,7 @@ static int dvb_net_close(struct inode *inode, struct file *file) static const struct file_operations dvb_net_fops = { .owner = THIS_MODULE, .unlocked_ioctl = dvb_net_ioctl, - .open = dvb_generic_open, + .open = locked_dvb_net_open, .release = dvb_net_close, .llseek = noop_llseek, }; @@ -1599,10 +1627,13 @@ void dvb_net_release (struct dvb_net *dvbnet) { int i; + mutex_lock(&dvbnet->remove_mutex); dvbnet->exit = 1; + mutex_unlock(&dvbnet->remove_mutex); + if (dvbnet->dvbdev->users < 1) wait_event(dvbnet->dvbdev->wait_queue, - dvbnet->dvbdev->users==1); + dvbnet->dvbdev->users == 1); dvb_unregister_device(dvbnet->dvbdev); @@ -1621,6 +1652,7 @@ int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet, int i; mutex_init(&dvbnet->ioctl_mutex); + mutex_init(&dvbnet->remove_mutex); dvbnet->demux = dmx; for (i=0; i static DEFINE_MUTEX(dvbdev_mutex); +static LIST_HEAD(dvbdevfops_list); static int dvbdev_debug; module_param(dvbdev_debug, int, 0644); @@ -453,14 +454,15 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, enum dvb_device_type type, int demux_sink_pads) { struct dvb_device *dvbdev; - struct file_operations 
*dvbdevfops; + struct file_operations *dvbdevfops = NULL; + struct dvbdevfops_node *node = NULL, *new_node = NULL; struct device *clsdev; int minor; int id, ret; mutex_lock(&dvbdev_register_lock); - if ((id = dvbdev_get_free_id (adap, type)) < 0){ + if ((id = dvbdev_get_free_id (adap, type)) < 0) { mutex_unlock(&dvbdev_register_lock); *pdvbdev = NULL; pr_err("%s: couldn't find free device id\n", __func__); @@ -468,18 +470,45 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, } *pdvbdev = dvbdev = kzalloc(sizeof(*dvbdev), GFP_KERNEL); - if (!dvbdev){ mutex_unlock(&dvbdev_register_lock); return -ENOMEM; } - dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL); + /* + * When a device of the same type is probe()d more than once, + * the first allocated fops are used. This prevents memory leaks + * that can occur when the same device is probe()d repeatedly. + */ + list_for_each_entry(node, &dvbdevfops_list, list_head) { + if (node->fops->owner == adap->module && + node->type == type && + node->template == template) { + dvbdevfops = node->fops; + break; + } + } - if (!dvbdevfops){ - kfree (dvbdev); - mutex_unlock(&dvbdev_register_lock); - return -ENOMEM; + if (dvbdevfops == NULL) { + dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL); + if (!dvbdevfops) { + kfree(dvbdev); + mutex_unlock(&dvbdev_register_lock); + return -ENOMEM; + } + + new_node = kzalloc(sizeof(struct dvbdevfops_node), GFP_KERNEL); + if (!new_node) { + kfree(dvbdevfops); + kfree(dvbdev); + mutex_unlock(&dvbdev_register_lock); + return -ENOMEM; + } + + new_node->fops = dvbdevfops; + new_node->type = type; + new_node->template = template; + list_add_tail (&new_node->list_head, &dvbdevfops_list); } memcpy(dvbdev, template, sizeof(struct dvb_device)); @@ -490,20 +519,20 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, dvbdev->priv = priv; dvbdev->fops = dvbdevfops; init_waitqueue_head (&dvbdev->wait_queue); - dvbdevfops->owner = adap->module; - list_add_tail (&dvbdev->list_head, &adap->device_list); - down_write(&minor_rwsem); #ifdef CONFIG_DVB_DYNAMIC_MINORS for (minor = 0; minor < MAX_DVB_MINORS; minor++) if (dvb_minors[minor] == NULL) break; - if (minor == MAX_DVB_MINORS) { + if (new_node) { + list_del (&new_node->list_head); + kfree(dvbdevfops); + kfree(new_node); + } list_del (&dvbdev->list_head); - kfree(dvbdevfops); kfree(dvbdev); up_write(&minor_rwsem); mutex_unlock(&dvbdev_register_lock); @@ -512,41 +541,47 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, #else minor = nums2minor(adap->num, type, id); #endif - dvbdev->minor = minor; dvb_minors[minor] = dvb_device_get(dvbdev); up_write(&minor_rwsem); - ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads); if (ret) { pr_err("%s: dvb_register_media_device failed to create the mediagraph\n", __func__); - + if (new_node) { + list_del (&new_node->list_head); + kfree(dvbdevfops); + kfree(new_node); + } dvb_media_device_free(dvbdev); list_del (&dvbdev->list_head); - kfree(dvbdevfops); kfree(dvbdev); mutex_unlock(&dvbdev_register_lock); return ret; } - mutex_unlock(&dvbdev_register_lock); - clsdev = device_create(dvb_class, adap->device, MKDEV(DVB_MAJOR, minor), dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id); if (IS_ERR(clsdev)) { pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n", __func__, adap->num, dnames[type], id, PTR_ERR(clsdev)); + if (new_node) { + list_del (&new_node->list_head); + kfree(dvbdevfops); + 
kfree(new_node); + } dvb_media_device_free(dvbdev); list_del (&dvbdev->list_head); - kfree(dvbdevfops); kfree(dvbdev); + mutex_unlock(&dvbdev_register_lock); return PTR_ERR(clsdev); } + dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n", adap->num, dnames[type], id, minor, minor); + mutex_unlock(&dvbdev_register_lock); return 0; } EXPORT_SYMBOL(dvb_register_device); @@ -575,7 +610,6 @@ static void dvb_free_device(struct kref *ref) { struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref); - kfree (dvbdev->fops); kfree (dvbdev); } @@ -1081,9 +1115,17 @@ static int __init init_dvbdev(void) static void __exit exit_dvbdev(void) { + struct dvbdevfops_node *node, *next; + class_destroy(dvb_class); cdev_del(&dvb_device_cdev); unregister_chrdev_region(MKDEV(DVB_MAJOR, 0), MAX_DVB_MINORS); + + list_for_each_entry_safe(node, next, &dvbdevfops_list, list_head) { + list_del (&node->list_head); + kfree(node->fops); + kfree(node); + } } subsys_initcall(init_dvbdev); diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c index 1f1753f2ab1a3..0782f8377eb2f 100644 --- a/drivers/media/dvb-frontends/mn88443x.c +++ b/drivers/media/dvb-frontends/mn88443x.c @@ -798,7 +798,7 @@ MODULE_DEVICE_TABLE(i2c, mn88443x_i2c_id); static struct i2c_driver mn88443x_driver = { .driver = { .name = "mn88443x", - .of_match_table = of_match_ptr(mn88443x_of_match), + .of_match_table = mn88443x_of_match, }, .probe_new = mn88443x_probe, .remove = mn88443x_remove, diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c index 8287851b5ffdc..d85bfbb77a250 100644 --- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c +++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c @@ -697,7 +697,7 @@ static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num) netup_unidvb_dma_enable(dma, 0); msleep(50); cancel_work_sync(&dma->work); - del_timer(&dma->timeout); + del_timer_sync(&dma->timeout); } static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev) @@ -887,12 +887,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev, ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0), ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1), pci_dev->irq); - if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED, - "netup_unidvb", pci_dev) < 0) { - dev_err(&pci_dev->dev, - "%s(): can't get IRQ %d\n", __func__, pci_dev->irq); - goto irq_request_err; - } + ndev->dma_size = 2 * 188 * NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT; ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev, @@ -933,6 +928,14 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev, dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n"); goto dma_setup_err; } + + if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED, + "netup_unidvb", pci_dev) < 0) { + dev_err(&pci_dev->dev, + "%s(): can't get IRQ %d\n", __func__, pci_dev->irq); + goto dma_setup_err; + } + dev_info(&pci_dev->dev, "netup_unidvb: device has been initialized\n"); return 0; @@ -951,8 +954,6 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev, dma_free_coherent(&pci_dev->dev, ndev->dma_size, ndev->dma_virt, ndev->dma_phys); dma_alloc_err: - free_irq(pci_dev->irq, pci_dev); -irq_request_err: iounmap(ndev->lmmio1); pci_bar1_error: iounmap(ndev->lmmio0); diff --git a/drivers/media/usb/dvb-usb-v2/ce6230.c b/drivers/media/usb/dvb-usb-v2/ce6230.c index 44540de1a2066..d3b5cb4a24daf 100644 --- a/drivers/media/usb/dvb-usb-v2/ce6230.c +++ 
b/drivers/media/usb/dvb-usb-v2/ce6230.c @@ -101,6 +101,10 @@ static int ce6230_i2c_master_xfer(struct i2c_adapter *adap, if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { if (msg[i].addr == ce6230_zl10353_config.demod_address) { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = DEMOD_READ; req.value = msg[i].addr >> 1; req.index = msg[i].buf[0]; @@ -117,6 +121,10 @@ static int ce6230_i2c_master_xfer(struct i2c_adapter *adap, } else { if (msg[i].addr == ce6230_zl10353_config.demod_address) { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = DEMOD_WRITE; req.value = msg[i].addr >> 1; req.index = msg[i].buf[0]; diff --git a/drivers/media/usb/dvb-usb-v2/ec168.c b/drivers/media/usb/dvb-usb-v2/ec168.c index 7ed0ab9e429b1..0e4773fc025c9 100644 --- a/drivers/media/usb/dvb-usb-v2/ec168.c +++ b/drivers/media/usb/dvb-usb-v2/ec168.c @@ -115,6 +115,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], while (i < num) { if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { if (msg[i].addr == ec168_ec100_config.demod_address) { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = READ_DEMOD; req.value = 0; req.index = 0xff00 + msg[i].buf[0]; /* reg */ @@ -131,6 +135,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], } } else { if (msg[i].addr == ec168_ec100_config.demod_address) { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = WRITE_DEMOD; req.value = msg[i].buf[1]; /* val */ req.index = 0xff00 + msg[i].buf[0]; /* reg */ @@ -139,6 +147,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], ret = ec168_ctrl_msg(d, &req); i += 1; } else { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = WRITE_I2C; req.value = msg[i].buf[0]; /* val */ req.index = 0x0100 + msg[i].addr; /* I2C addr */ diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index 795a012d40200..f7884bb56fccf 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -176,6 +176,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], ret = -EOPNOTSUPP; goto err_mutex_unlock; } else if (msg[0].addr == 0x10) { + if (msg[0].len < 1 || msg[1].len < 1) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* method 1 - integrated demod */ if (msg[0].buf[0] == 0x00) { /* return demod page from driver cache */ @@ -189,6 +193,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], ret = rtl28xxu_ctrl_msg(d, &req); } } else if (msg[0].len < 2) { + if (msg[0].len < 1) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* method 2 - old I2C */ req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1); req.index = CMD_I2C_RD; @@ -217,8 +225,16 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], ret = -EOPNOTSUPP; goto err_mutex_unlock; } else if (msg[0].addr == 0x10) { + if (msg[0].len < 1) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* method 1 - integrated demod */ if (msg[0].buf[0] == 0x00) { + if (msg[0].len < 2) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* save demod page for later demod access */ dev->page = msg[0].buf[1]; ret = 0; @@ -231,6 +247,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], ret = rtl28xxu_ctrl_msg(d, &req); } } else if ((msg[0].len < 23) && (!dev->new_i2c_write)) { + if (msg[0].len < 1) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* method 2 - old I2C */ 
req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1); req.index = CMD_I2C_WR; diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c index 7d78ee09be5e1..a31c6f82f4e90 100644 --- a/drivers/media/usb/dvb-usb/az6027.c +++ b/drivers/media/usb/dvb-usb/az6027.c @@ -988,6 +988,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n /* write/read request */ if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) { req = 0xB9; + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff)); value = msg[i].addr + (msg[i].len << 8); length = msg[i + 1].len + 6; @@ -1001,6 +1005,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n /* demod 16bit addr */ req = 0xBD; + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff)); value = msg[i].addr + (2 << 8); length = msg[i].len - 2; @@ -1026,6 +1034,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n } else { req = 0xBD; + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } index = msg[i].buf[0] & 0x00FF; value = msg[i].addr + (1 << 8); length = msg[i].len - 1; diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c index 2756815a780bc..32134be169148 100644 --- a/drivers/media/usb/dvb-usb/digitv.c +++ b/drivers/media/usb/dvb-usb/digitv.c @@ -63,6 +63,10 @@ static int digitv_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num warn("more than 2 i2c messages at a time is not handled yet. TODO."); for (i = 0; i < num; i++) { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } /* write/read request */ if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) { if (digitv_ctrl_msg(d, USB_READ_COFDM, msg[i].buf[0], NULL, 0, diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index 0ca764282c767..8747960e61461 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -946,7 +946,7 @@ static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) for (i = 0; i < 6; i++) { obuf[1] = 0xf0 + i; if (i2c_transfer(&d->i2c_adap, msg, 2) != 2) - break; + return -1; else mac[i] = ibuf[0]; } diff --git a/drivers/media/usb/pvrusb2/Kconfig b/drivers/media/usb/pvrusb2/Kconfig index 9501b10b31aa5..0df10270dbdfc 100644 --- a/drivers/media/usb/pvrusb2/Kconfig +++ b/drivers/media/usb/pvrusb2/Kconfig @@ -37,6 +37,7 @@ config VIDEO_PVRUSB2_DVB bool "pvrusb2 ATSC/DVB support" default y depends on VIDEO_PVRUSB2 && DVB_CORE + depends on VIDEO_PVRUSB2=m || DVB_CORE=y select DVB_LGDT330X if MEDIA_SUBDRV_AUTOSELECT select DVB_S5H1409 if MEDIA_SUBDRV_AUTOSELECT select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c index 38822cedd93a9..c4474d4c44e28 100644 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c @@ -1544,8 +1544,7 @@ static void ttusb_dec_exit_dvb(struct ttusb_dec *dec) dvb_dmx_release(&dec->demux); if (dec->fe) { dvb_unregister_frontend(dec->fe); - if (dec->fe->ops.release) - dec->fe->ops.release(dec->fe); + dvb_frontend_detach(dec->fe); } dvb_unregister_adapter(&dec->adapter); } diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 00c33edb9fb94..d920c41783893 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -264,6 +264,7 @@ static ssize_t 
power_ro_lock_store(struct device *dev, goto out_put; } req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP; + req_to_mmc_queue_req(req)->drv_op_result = -EIO; blk_execute_rq(req, false); ret = req_to_mmc_queue_req(req)->drv_op_result; blk_mq_free_request(req); @@ -651,6 +652,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, idatas[0] = idata; req_to_mmc_queue_req(req)->drv_op = rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; + req_to_mmc_queue_req(req)->drv_op_result = -EIO; req_to_mmc_queue_req(req)->drv_op_data = idatas; req_to_mmc_queue_req(req)->ioc_count = 1; blk_execute_rq(req, false); @@ -722,6 +724,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, } req_to_mmc_queue_req(req)->drv_op = rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; + req_to_mmc_queue_req(req)->drv_op_result = -EIO; req_to_mmc_queue_req(req)->drv_op_data = idata; req_to_mmc_queue_req(req)->ioc_count = n; blk_execute_rq(req, false); @@ -2806,6 +2809,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val) if (IS_ERR(req)) return PTR_ERR(req); req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; + req_to_mmc_queue_req(req)->drv_op_result = -EIO; blk_execute_rq(req, false); ret = req_to_mmc_queue_req(req)->drv_op_result; if (ret >= 0) { @@ -2844,6 +2848,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp) goto out_free; } req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD; + req_to_mmc_queue_req(req)->drv_op_result = -EIO; req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; blk_execute_rq(req, false); err = req_to_mmc_queue_req(req)->drv_op_result; diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index b24aa27da50c4..d2f6250546893 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -540,9 +540,11 @@ static int sdhci_cdns_probe(struct platform_device *pdev) if (host->mmc->caps & MMC_CAP_HW_RESET) { priv->rst_hw = devm_reset_control_get_optional_exclusive(dev, NULL); - if (IS_ERR(priv->rst_hw)) - return dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw), - "reset controller error\n"); + if (IS_ERR(priv->rst_hw)) { + ret = dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw), + "reset controller error\n"); + goto free; + } if (priv->rst_hw) host->mmc_host_ops.card_hw_reset = sdhci_cdns_mmc_hw_reset; } diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index d7c0c0b9e26c5..eebf94604a7fd 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -1634,6 +1634,10 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, if (ret) return ret; + /* HS400/HS400ES require 8 bit bus */ + if (!(host->mmc->caps & MMC_CAP_8_BIT_DATA)) + host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES); + if (mmc_gpio_get_cd(host->mmc) >= 0) host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; @@ -1724,10 +1728,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) host->mmc_host_ops.init_card = usdhc_init_card; } - err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); - if (err) - goto disable_ahb_clk; - if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) sdhci_esdhc_ops.platform_execute_tuning = esdhc_executing_tuning; @@ -1735,15 +1735,13 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; - if (host->mmc->caps & MMC_CAP_8_BIT_DATA && - imx_data->socdata->flags & ESDHC_FLAG_HS400) + if 
(imx_data->socdata->flags & ESDHC_FLAG_HS400) host->mmc->caps2 |= MMC_CAP2_HS400; if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23) host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN; - if (host->mmc->caps & MMC_CAP_8_BIT_DATA && - imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) { + if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) { host->mmc->caps2 |= MMC_CAP2_HS400_ES; host->mmc_host_ops.hs400_enhanced_strobe = esdhc_hs400_enhanced_strobe; @@ -1765,6 +1763,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) goto disable_ahb_clk; } + err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); + if (err) + goto disable_ahb_clk; + sdhci_esdhc_imx_hwinit(host); err = sdhci_add_host(host); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index fce81fafbedd0..007cec23a92f8 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3950,7 +3950,11 @@ static int bond_slave_netdev_event(unsigned long event, unblock_netpoll_tx(); break; case NETDEV_FEAT_CHANGE: - bond_compute_features(bond); + if (!bond->notifier_ctx) { + bond->notifier_ctx = true; + bond_compute_features(bond); + bond->notifier_ctx = false; + } break; case NETDEV_RESEND_IGMP: /* Propagate to master device */ @@ -6345,6 +6349,8 @@ static int bond_init(struct net_device *bond_dev) if (!bond->wq) return -ENOMEM; + bond->notifier_ctx = false; + spin_lock_init(&bond->stats_lock); netdev_lockdep_set_classes(bond_dev); diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 3ceccafd701b2..b190007c01bec 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -95,7 +95,7 @@ config CAN_AT91 config CAN_BXCAN tristate "STM32 Basic Extended CAN (bxCAN) devices" - depends on OF || ARCH_STM32 || COMPILE_TEST + depends on ARCH_STM32 || COMPILE_TEST depends on HAS_IOMEM select CAN_RX_OFFLOAD help diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 199cb200f2bdd..4621266851ed4 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1346,7 +1346,7 @@ static int at91_can_probe(struct platform_device *pdev) return err; } -static int at91_can_remove(struct platform_device *pdev) +static void at91_can_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct at91_priv *priv = netdev_priv(dev); @@ -1362,8 +1362,6 @@ static int at91_can_remove(struct platform_device *pdev) clk_put(priv->clk); free_candev(dev); - - return 0; } static const struct platform_device_id at91_can_id_table[] = { @@ -1381,7 +1379,7 @@ MODULE_DEVICE_TABLE(platform, at91_can_id_table); static struct platform_driver at91_can_driver = { .probe = at91_can_probe, - .remove = at91_can_remove, + .remove_new = at91_can_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = of_match_ptr(at91_can_dt_ids), diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c index e26ccd41e3cba..39de7164bc4e1 100644 --- a/drivers/net/can/bxcan.c +++ b/drivers/net/can/bxcan.c @@ -118,7 +118,7 @@ #define BXCAN_FiR1_REG(b) (0x40 + (b) * 8) #define BXCAN_FiR2_REG(b) (0x44 + (b) * 8) -#define BXCAN_FILTER_ID(primary) (primary ? 0 : 14) +#define BXCAN_FILTER_ID(cfg) ((cfg) == BXCAN_CFG_DUAL_SECONDARY ? 
14 : 0) /* Filter primary register (FMR) bits */ #define BXCAN_FMR_CANSB_MASK GENMASK(13, 8) @@ -135,6 +135,12 @@ enum bxcan_lec_code { BXCAN_LEC_UNUSED }; +enum bxcan_cfg { + BXCAN_CFG_SINGLE = 0, + BXCAN_CFG_DUAL_PRIMARY, + BXCAN_CFG_DUAL_SECONDARY +}; + /* Structure of the message buffer */ struct bxcan_mb { u32 id; /* can identifier */ @@ -167,7 +173,7 @@ struct bxcan_priv { struct regmap *gcan; int tx_irq; int sce_irq; - bool primary; + enum bxcan_cfg cfg; struct clk *clk; spinlock_t rmw_lock; /* lock for read-modify-write operations */ unsigned int tx_head; @@ -202,17 +208,17 @@ static inline void bxcan_rmw(struct bxcan_priv *priv, void __iomem *addr, spin_unlock_irqrestore(&priv->rmw_lock, flags); } -static void bxcan_disable_filters(struct bxcan_priv *priv, bool primary) +static void bxcan_disable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg) { - unsigned int fid = BXCAN_FILTER_ID(primary); + unsigned int fid = BXCAN_FILTER_ID(cfg); u32 fmask = BIT(fid); regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0); } -static void bxcan_enable_filters(struct bxcan_priv *priv, bool primary) +static void bxcan_enable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg) { - unsigned int fid = BXCAN_FILTER_ID(primary); + unsigned int fid = BXCAN_FILTER_ID(cfg); u32 fmask = BIT(fid); /* Filter settings: @@ -680,7 +686,7 @@ static int bxcan_chip_start(struct net_device *ndev) BXCAN_BTR_BRP_MASK | BXCAN_BTR_TS1_MASK | BXCAN_BTR_TS2_MASK | BXCAN_BTR_SJW_MASK, set); - bxcan_enable_filters(priv, priv->primary); + bxcan_enable_filters(priv, priv->cfg); /* Clear all internal status */ priv->tx_head = 0; @@ -806,7 +812,7 @@ static void bxcan_chip_stop(struct net_device *ndev) BXCAN_IER_EPVIE | BXCAN_IER_EWGIE | BXCAN_IER_FOVIE1 | BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 | BXCAN_IER_FFIE0 | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE, 0); - bxcan_disable_filters(priv, priv->primary); + bxcan_disable_filters(priv, priv->cfg); bxcan_enter_sleep_mode(priv); priv->can.state = CAN_STATE_STOPPED; } @@ -931,7 +937,7 @@ static int bxcan_probe(struct platform_device *pdev) struct clk *clk = NULL; void __iomem *regs; struct regmap *gcan; - bool primary; + enum bxcan_cfg cfg; int err, rx_irq, tx_irq, sce_irq; regs = devm_platform_ioremap_resource(pdev, 0); @@ -946,7 +952,13 @@ static int bxcan_probe(struct platform_device *pdev) return PTR_ERR(gcan); } - primary = of_property_read_bool(np, "st,can-primary"); + if (of_property_read_bool(np, "st,can-primary")) + cfg = BXCAN_CFG_DUAL_PRIMARY; + else if (of_property_read_bool(np, "st,can-secondary")) + cfg = BXCAN_CFG_DUAL_SECONDARY; + else + cfg = BXCAN_CFG_SINGLE; + clk = devm_clk_get(dev, NULL); if (IS_ERR(clk)) { dev_err(dev, "failed to get clock\n"); @@ -954,22 +966,16 @@ static int bxcan_probe(struct platform_device *pdev) } rx_irq = platform_get_irq_byname(pdev, "rx0"); - if (rx_irq < 0) { - dev_err(dev, "failed to get rx0 irq\n"); + if (rx_irq < 0) return rx_irq; - } tx_irq = platform_get_irq_byname(pdev, "tx"); - if (tx_irq < 0) { - dev_err(dev, "failed to get tx irq\n"); + if (tx_irq < 0) return tx_irq; - } sce_irq = platform_get_irq_byname(pdev, "sce"); - if (sce_irq < 0) { - dev_err(dev, "failed to get sce irq\n"); + if (sce_irq < 0) return sce_irq; - } ndev = alloc_candev(sizeof(struct bxcan_priv), BXCAN_TX_MB_NUM); if (!ndev) { @@ -992,7 +998,7 @@ static int bxcan_probe(struct platform_device *pdev) priv->clk = clk; priv->tx_irq = tx_irq; priv->sce_irq = sce_irq; - priv->primary = primary; + priv->cfg = cfg; priv->can.clock.freq = 
clk_get_rate(clk); spin_lock_init(&priv->rmw_lock); priv->tx_head = 0; @@ -1027,7 +1033,7 @@ static int bxcan_probe(struct platform_device *pdev) return err; } -static int bxcan_remove(struct platform_device *pdev) +static void bxcan_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct bxcan_priv *priv = netdev_priv(ndev); @@ -1036,7 +1042,6 @@ static int bxcan_remove(struct platform_device *pdev) clk_disable_unprepare(priv->clk); can_rx_offload_del(&priv->offload); free_candev(ndev); - return 0; } static int __maybe_unused bxcan_suspend(struct device *dev) @@ -1088,7 +1093,7 @@ static struct platform_driver bxcan_driver = { .of_match_table = bxcan_of_match, }, .probe = bxcan_probe, - .remove = bxcan_remove, + .remove_new = bxcan_remove, }; module_platform_driver(bxcan_driver); diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 03ccb7cfacaf8..925930b6c4ca5 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -410,7 +410,7 @@ static int c_can_plat_probe(struct platform_device *pdev) return ret; } -static int c_can_plat_remove(struct platform_device *pdev) +static void c_can_plat_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct c_can_priv *priv = netdev_priv(dev); @@ -418,8 +418,6 @@ static int c_can_plat_remove(struct platform_device *pdev) unregister_c_can_dev(dev); pm_runtime_disable(priv->device); free_c_can_dev(dev); - - return 0; } #ifdef CONFIG_PM @@ -487,7 +485,7 @@ static struct platform_driver c_can_plat_driver = { .of_match_table = c_can_of_table, }, .probe = c_can_plat_probe, - .remove = c_can_plat_remove, + .remove_new = c_can_plat_remove, .suspend = c_can_suspend, .resume = c_can_resume, .id_table = c_can_id_table, diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c index 8f6dccd5a5879..22009440a983e 100644 --- a/drivers/net/can/cc770/cc770_isa.c +++ b/drivers/net/can/cc770/cc770_isa.c @@ -285,7 +285,7 @@ static int cc770_isa_probe(struct platform_device *pdev) return err; } -static int cc770_isa_remove(struct platform_device *pdev) +static void cc770_isa_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct cc770_priv *priv = netdev_priv(dev); @@ -303,13 +303,11 @@ static int cc770_isa_remove(struct platform_device *pdev) release_region(port[idx], CC770_IOSIZE); } free_cc770dev(dev); - - return 0; } static struct platform_driver cc770_isa_driver = { .probe = cc770_isa_probe, - .remove = cc770_isa_remove, + .remove_new = cc770_isa_remove, .driver = { .name = KBUILD_MODNAME, }, diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c index 8dcc32e4e30ef..13bcfba05f18f 100644 --- a/drivers/net/can/cc770/cc770_platform.c +++ b/drivers/net/can/cc770/cc770_platform.c @@ -230,7 +230,7 @@ static int cc770_platform_probe(struct platform_device *pdev) return err; } -static int cc770_platform_remove(struct platform_device *pdev) +static void cc770_platform_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct cc770_priv *priv = netdev_priv(dev); @@ -242,8 +242,6 @@ static int cc770_platform_remove(struct platform_device *pdev) mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); - - return 0; } static const struct of_device_id cc770_platform_table[] = { @@ -259,7 +257,7 @@ static struct 
platform_driver cc770_platform_driver = { .of_match_table = cc770_platform_table, }, .probe = cc770_platform_probe, - .remove = cc770_platform_remove, + .remove_new = cc770_platform_remove, }; module_platform_driver(cc770_platform_driver); diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c index a17561d971929..55bb10b157b49 100644 --- a/drivers/net/can/ctucanfd/ctucanfd_platform.c +++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c @@ -86,7 +86,7 @@ static int ctucan_platform_probe(struct platform_device *pdev) * This function frees all the resources allocated to the device. * Return: 0 always */ -static int ctucan_platform_remove(struct platform_device *pdev) +static void ctucan_platform_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ctucan_priv *priv = netdev_priv(ndev); @@ -97,8 +97,6 @@ static int ctucan_platform_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); netif_napi_del(&priv->napi); free_candev(ndev); - - return 0; } static SIMPLE_DEV_PM_OPS(ctucan_platform_pm_ops, ctucan_suspend, ctucan_resume); @@ -113,7 +111,7 @@ MODULE_DEVICE_TABLE(of, ctucan_of_match); static struct platform_driver ctucanfd_driver = { .probe = ctucan_platform_probe, - .remove = ctucan_platform_remove, + .remove_new = ctucan_platform_remove, .driver = { .name = DRV_NAME, .pm = &ctucan_platform_pm_ops, diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c index 241ec636e91fd..f6d05b3ef59ab 100644 --- a/drivers/net/can/dev/skb.c +++ b/drivers/net/can/dev/skb.c @@ -54,7 +54,8 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, /* check flag whether this packet has to be looped back */ if (!(dev->flags & IFF_ECHO) || (skb->protocol != htons(ETH_P_CAN) && - skb->protocol != htons(ETH_P_CANFD))) { + skb->protocol != htons(ETH_P_CANFD) && + skb->protocol != htons(ETH_P_CANXL))) { kfree_skb(skb); return 0; } diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index 6d638c93977bc..ff0fc18baf133 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -2218,7 +2218,7 @@ static int flexcan_probe(struct platform_device *pdev) return err; } -static int flexcan_remove(struct platform_device *pdev) +static void flexcan_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); @@ -2227,8 +2227,6 @@ static int flexcan_remove(struct platform_device *pdev) unregister_flexcandev(dev); pm_runtime_disable(&pdev->dev); free_candev(dev); - - return 0; } static int __maybe_unused flexcan_suspend(struct device *device) @@ -2379,7 +2377,7 @@ static struct platform_driver flexcan_driver = { .of_match_table = flexcan_of_match, }, .probe = flexcan_probe, - .remove = flexcan_remove, + .remove_new = flexcan_remove, .id_table = flexcan_id_table, }; diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index 4bedcc3eea0d6..3174efdae271a 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -1696,7 +1696,7 @@ static int grcan_probe(struct platform_device *ofdev) return err; } -static int grcan_remove(struct platform_device *ofdev) +static void grcan_remove(struct platform_device *ofdev) { struct net_device *dev = platform_get_drvdata(ofdev); struct grcan_priv *priv = netdev_priv(dev); @@ -1706,8 +1706,6 @@ static int grcan_remove(struct platform_device *ofdev) irq_dispose_mapping(dev->irq); netif_napi_del(&priv->napi); free_candev(dev); - - return 
0; } static const struct of_device_id grcan_match[] = { @@ -1726,7 +1724,7 @@ static struct platform_driver grcan_driver = { .of_match_table = grcan_match, }, .probe = grcan_probe, - .remove = grcan_remove, + .remove_new = grcan_remove, }; module_platform_driver(grcan_driver); diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 07eaf724a5727..1d6642c94f2f5 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -1013,15 +1013,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) return ret; } -static int ifi_canfd_plat_remove(struct platform_device *pdev) +static void ifi_canfd_plat_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); unregister_candev(ndev); platform_set_drvdata(pdev, NULL); free_candev(ndev); - - return 0; } static const struct of_device_id ifi_canfd_of_table[] = { @@ -1036,7 +1034,7 @@ static struct platform_driver ifi_canfd_plat_driver = { .of_match_table = ifi_canfd_of_table, }, .probe = ifi_canfd_plat_probe, - .remove = ifi_canfd_plat_remove, + .remove_new = ifi_canfd_plat_remove, }; module_platform_driver(ifi_canfd_plat_driver); diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 0732a50921418..d048ea565b892 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -2023,7 +2023,7 @@ static int ican3_probe(struct platform_device *pdev) return ret; } -static int ican3_remove(struct platform_device *pdev) +static void ican3_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ican3_dev *mod = netdev_priv(ndev); @@ -2042,8 +2042,6 @@ static int ican3_remove(struct platform_device *pdev) iounmap(mod->dpm); free_candev(ndev); - - return 0; } static struct platform_driver ican3_driver = { @@ -2051,7 +2049,7 @@ static struct platform_driver ican3_driver = { .name = DRV_NAME, }, .probe = ican3_probe, - .remove = ican3_remove, + .remove_new = ican3_remove, }; module_platform_driver(ican3_driver); diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 53e8a914c88b5..be189edb256ce 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -71,10 +71,12 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); #define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14) /* Shared receive buffer registers */ #define KVASER_PCIEFD_SRB_BASE 0x1f200 +#define KVASER_PCIEFD_SRB_FIFO_LAST_REG (KVASER_PCIEFD_SRB_BASE + 0x1f4) #define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200) #define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204) #define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c) #define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210) +#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214) #define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218) /* EPCS flash controller registers */ #define KVASER_PCIEFD_SPI_BASE 0x1fc00 @@ -111,6 +113,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); /* DMA support */ #define KVASER_PCIEFD_SRB_STAT_DMA BIT(24) +/* SRB current packet level */ +#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK 0xff + /* DMA Enable */ #define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0) @@ -526,7 +531,7 @@ static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TAE | 
KVASER_PCIEFD_KCAN_IRQ_TAL | KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP | - KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD; + KVASER_PCIEFD_KCAN_IRQ_TAR; iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); @@ -554,6 +559,8 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) mode |= KVASER_PCIEFD_KCAN_MODE_LOM; + else + mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM; mode |= KVASER_PCIEFD_KCAN_MODE_EEN; mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; @@ -572,7 +579,7 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can) spin_lock_irqsave(&can->lock, irq); iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); - iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD, + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); @@ -615,7 +622,7 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); - iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD, + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); @@ -719,6 +726,7 @@ static int kvaser_pciefd_stop(struct net_device *netdev) iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); del_timer(&can->bec_poll_timer); } + can->can.state = CAN_STATE_STOPPED; close_candev(netdev); return ret; @@ -1007,8 +1015,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) SET_NETDEV_DEV(netdev, &pcie->pci->dev); iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); - iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | - KVASER_PCIEFD_KCAN_IRQ_TFD, + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); pcie->can[i] = can; @@ -1058,6 +1065,7 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) { int i; u32 srb_status; + u32 srb_packet_count; dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT]; /* Disable the DMA */ @@ -1085,6 +1093,15 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) KVASER_PCIEFD_SRB_CMD_RDB1, pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + /* Empty Rx FIFO */ + srb_packet_count = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG) & + KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK; + while (srb_packet_count) { + /* Drop current packet in FIFO */ + ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG); + srb_packet_count--; + } + srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG); if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) { dev_err(&pcie->pci->dev, "DMA not idle before enabling\n"); @@ -1425,9 +1442,6 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie, cmd = KVASER_PCIEFD_KCAN_CMD_AT; cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); - - iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD, - can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET && p->header[0] & KVASER_PCIEFD_SPACK_IRM && cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) && @@ -1714,15 +1728,6 @@ static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF) netdev_err(can->can.dev, "Tx FIFO overflow\n"); - if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) { - u8 count = 
ioread32(can->reg_base + - KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff; - - if (count == 0) - iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH, - can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG); - } - if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP) netdev_err(can->can.dev, "Fail to change bittiming, when not in reset mode\n"); @@ -1824,6 +1829,11 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, if (err) goto err_teardown_can_ctrls; + err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler, + IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie); + if (err) + goto err_teardown_can_ctrls; + iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); @@ -1844,11 +1854,6 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); - err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler, - IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie); - if (err) - goto err_teardown_can_ctrls; - err = kvaser_pciefd_reg_candev(pcie); if (err) goto err_free_irq; @@ -1856,6 +1861,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, return 0; err_free_irq: + /* Disable PCI interrupts */ + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG); free_irq(pcie->pci->irq, pcie); err_teardown_can_ctrls: diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c index 9c1dcf838006d..94dc826441136 100644 --- a/drivers/net/can/m_can/m_can_platform.c +++ b/drivers/net/can/m_can/m_can_platform.c @@ -164,7 +164,7 @@ static __maybe_unused int m_can_resume(struct device *dev) return m_can_class_resume(dev); } -static int m_can_plat_remove(struct platform_device *pdev) +static void m_can_plat_remove(struct platform_device *pdev) { struct m_can_plat_priv *priv = platform_get_drvdata(pdev); struct m_can_classdev *mcan_class = &priv->cdev; @@ -172,8 +172,6 @@ static int m_can_plat_remove(struct platform_device *pdev) m_can_class_unregister(mcan_class); m_can_class_free_dev(mcan_class->net); - - return 0; } static int __maybe_unused m_can_runtime_suspend(struct device *dev) @@ -223,7 +221,7 @@ static struct platform_driver m_can_plat_driver = { .pm = &m_can_pmops, }, .probe = m_can_plat_probe, - .remove = m_can_plat_remove, + .remove_new = m_can_plat_remove, }; module_platform_driver(m_can_plat_driver); diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index b0ed798ae70fe..4837df6efa926 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -349,7 +349,7 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) return err; } -static int mpc5xxx_can_remove(struct platform_device *ofdev) +static void mpc5xxx_can_remove(struct platform_device *ofdev) { const struct of_device_id *match; const struct mpc5xxx_can_data *data; @@ -365,8 +365,6 @@ static int mpc5xxx_can_remove(struct platform_device *ofdev) iounmap(priv->reg_base); irq_dispose_mapping(dev->irq); free_candev(dev); - - return 0; } #ifdef CONFIG_PM @@ -437,7 +435,7 @@ static struct platform_driver mpc5xxx_can_driver = { .of_match_table = mpc5xxx_can_table, }, .probe = mpc5xxx_can_probe, - .remove = mpc5xxx_can_remove, + .remove_new = mpc5xxx_can_remove, #ifdef CONFIG_PM .suspend = mpc5xxx_can_suspend, .resume = mpc5xxx_can_resume, diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index cc43c9c5e38c5..f5aa5dbacaf21 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -824,7 +824,7 @@ static 
int rcar_can_probe(struct platform_device *pdev) return err; } -static int rcar_can_remove(struct platform_device *pdev) +static void rcar_can_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct rcar_can_priv *priv = netdev_priv(ndev); @@ -832,7 +832,6 @@ static int rcar_can_remove(struct platform_device *pdev) unregister_candev(ndev); netif_napi_del(&priv->napi); free_candev(ndev); - return 0; } static int __maybe_unused rcar_can_suspend(struct device *dev) @@ -908,7 +907,7 @@ static struct platform_driver rcar_can_driver = { .pm = &rcar_can_pm_ops, }, .probe = rcar_can_probe, - .remove = rcar_can_remove, + .remove_new = rcar_can_remove, }; module_platform_driver(rcar_can_driver); diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 963c42f437559..e4d7489134399 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -2078,7 +2078,7 @@ static int rcar_canfd_probe(struct platform_device *pdev) return err; } -static int rcar_canfd_remove(struct platform_device *pdev) +static void rcar_canfd_remove(struct platform_device *pdev) { struct rcar_canfd_global *gpriv = platform_get_drvdata(pdev); u32 ch; @@ -2096,8 +2096,6 @@ static int rcar_canfd_remove(struct platform_device *pdev) clk_disable_unprepare(gpriv->clkp); reset_control_assert(gpriv->rstc1); reset_control_assert(gpriv->rstc2); - - return 0; } static int __maybe_unused rcar_canfd_suspend(struct device *dev) @@ -2130,7 +2128,7 @@ static struct platform_driver rcar_canfd_driver = { .pm = &rcar_canfd_pm_ops, }, .probe = rcar_canfd_probe, - .remove = rcar_canfd_remove, + .remove_new = rcar_canfd_remove, }; module_platform_driver(rcar_canfd_driver); diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c index db3e767d5320f..fca5a9a1d8579 100644 --- a/drivers/net/can/sja1000/sja1000_isa.c +++ b/drivers/net/can/sja1000/sja1000_isa.c @@ -223,7 +223,7 @@ static int sja1000_isa_probe(struct platform_device *pdev) return err; } -static int sja1000_isa_remove(struct platform_device *pdev) +static void sja1000_isa_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sja1000_priv *priv = netdev_priv(dev); @@ -241,13 +241,11 @@ static int sja1000_isa_remove(struct platform_device *pdev) release_region(port[idx], SJA1000_IOSIZE); } free_sja1000dev(dev); - - return 0; } static struct platform_driver sja1000_isa_driver = { .probe = sja1000_isa_probe, - .remove = sja1000_isa_remove, + .remove_new = sja1000_isa_remove, .driver = { .name = DRV_NAME, }, diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index 6779d5357069f..b4889b5746e55 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -317,19 +317,17 @@ static int sp_probe(struct platform_device *pdev) return err; } -static int sp_remove(struct platform_device *pdev) +static void sp_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); unregister_sja1000dev(dev); free_sja1000dev(dev); - - return 0; } static struct platform_driver sp_driver = { .probe = sp_probe, - .remove = sp_remove, + .remove_new = sp_remove, .driver = { .name = DRV_NAME, .of_match_table = sp_of_table, diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index c72f505d29fee..bd25137062c5e 100644 --- a/drivers/net/can/softing/softing_main.c +++ 
b/drivers/net/can/softing/softing_main.c @@ -729,7 +729,7 @@ static const struct attribute_group softing_pdev_group = { /* * platform driver */ -static int softing_pdev_remove(struct platform_device *pdev) +static void softing_pdev_remove(struct platform_device *pdev) { struct softing *card = platform_get_drvdata(pdev); int j; @@ -747,7 +747,6 @@ static int softing_pdev_remove(struct platform_device *pdev) iounmap(card->dpram); kfree(card); - return 0; } static int softing_pdev_probe(struct platform_device *pdev) @@ -855,7 +854,7 @@ static struct platform_driver softing_driver = { .name = KBUILD_MODNAME, }, .probe = softing_pdev_probe, - .remove = softing_pdev_remove, + .remove_new = softing_pdev_remove, }; module_platform_driver(softing_driver); diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 2b78f9197681b..0827830bbf28c 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -791,14 +791,12 @@ static const struct of_device_id sun4ican_of_match[] = { MODULE_DEVICE_TABLE(of, sun4ican_of_match); -static int sun4ican_remove(struct platform_device *pdev) +static void sun4ican_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); unregister_netdev(dev); free_candev(dev); - - return 0; } static int sun4ican_probe(struct platform_device *pdev) @@ -901,7 +899,7 @@ static struct platform_driver sun4i_can_driver = { .of_match_table = sun4ican_of_match, }, .probe = sun4ican_probe, - .remove = sun4ican_remove, + .remove_new = sun4ican_remove, }; module_platform_driver(sun4i_can_driver); diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 27700f72eac25..9bab0b4cc4499 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -963,7 +963,7 @@ static int ti_hecc_probe(struct platform_device *pdev) return err; } -static int ti_hecc_remove(struct platform_device *pdev) +static void ti_hecc_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ti_hecc_priv *priv = netdev_priv(ndev); @@ -973,8 +973,6 @@ static int ti_hecc_remove(struct platform_device *pdev) clk_put(priv->clk); can_rx_offload_del(&priv->offload); free_candev(ndev); - - return 0; } #ifdef CONFIG_PM @@ -1028,7 +1026,7 @@ static struct platform_driver ti_hecc_driver = { .of_match_table = ti_hecc_dt_ids, }, .probe = ti_hecc_probe, - .remove = ti_hecc_remove, + .remove_new = ti_hecc_remove, .suspend = ti_hecc_suspend, .resume = ti_hecc_resume, }; diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index 445504ababced..58fcd2b348207 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -38,6 +38,18 @@ config CAN_ETAS_ES58X To compile this driver as a module, choose M here: the module will be called etas_es58x. +config CAN_F81604 + tristate "Fintek F81604 USB to 2CAN interface" + help + This driver supports the Fintek F81604 USB to 2CAN interface. + The device can support CAN2.0A/B protocol and also support + 2 output pins to control external terminator (optional). + + To compile this driver as a module, choose M here: the module will + be called f81604. + + (see also https://www.fintek.com.tw). 
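The CAN_F81604 entry above only covers building the driver; each of the two F81604 channels then registers as an ordinary SocketCAN network interface. As a minimal sketch of how such an interface is driven from user space (the interface name "can0", the 500 kbit/s bitrate and the bring-up command in the comment are illustrative assumptions, not something this Kconfig entry specifies):

/* Minimal SocketCAN sender, assuming the F81604 channel came up as "can0",
 * e.g. after: ip link set can0 up type can bitrate 500000
 */
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
	struct sockaddr_can addr = { 0 };
	struct can_frame frame = { 0 };
	struct ifreq ifr = { 0 };
	int s;

	/* open a raw CAN socket and resolve the interface index */
	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0)
		return 1;

	strcpy(ifr.ifr_name, "can0");
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
		return 1;

	addr.can_family = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* send one standard-ID frame with two data bytes */
	frame.can_id = 0x123;
	frame.len = 2;
	frame.data[0] = 0xde;
	frame.data[1] = 0xad;
	if (write(s, &frame, sizeof(frame)) != sizeof(frame))
		return 1;

	close(s);
	return 0;
}

The adapter's second channel would appear as a separate CAN interface (typically the next canX name) and is used the same way.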
+ config CAN_GS_USB tristate "Geschwister Schneider UG and candleLight compatible interfaces" help diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile index 1ea16be5743b4..8b11088e9a595 100644 --- a/drivers/net/can/usb/Makefile +++ b/drivers/net/can/usb/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o obj-$(CONFIG_CAN_ESD_USB) += esd_usb.o obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x/ +obj-$(CONFIG_CAN_F81604) += f81604.o obj-$(CONFIG_CAN_GS_USB) += gs_usb.o obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/ obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o diff --git a/drivers/net/can/usb/f81604.c b/drivers/net/can/usb/f81604.c new file mode 100644 index 0000000000000..ec8cef7fd2d53 --- /dev/null +++ b/drivers/net/can/usb/f81604.c @@ -0,0 +1,1201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Fintek F81604 USB-to-2CAN controller driver. + * + * Copyright (C) 2023 Ji-Ze Hong (Peter Hong) + */ +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +/* vendor and product id */ +#define F81604_VENDOR_ID 0x2c42 +#define F81604_PRODUCT_ID 0x1709 +#define F81604_CAN_CLOCK (12 * MEGA) +#define F81604_MAX_DEV 2 +#define F81604_SET_DEVICE_RETRY 10 + +#define F81604_USB_TIMEOUT 2000 +#define F81604_SET_GET_REGISTER 0xA0 +#define F81604_PORT_OFFSET 0x1000 +#define F81604_MAX_RX_URBS 4 + +#define F81604_CMD_DATA 0x00 + +#define F81604_DLC_LEN_MASK GENMASK(3, 0) +#define F81604_DLC_EFF_BIT BIT(7) +#define F81604_DLC_RTR_BIT BIT(6) + +#define F81604_SFF_SHIFT 5 +#define F81604_EFF_SHIFT 3 + +#define F81604_BRP_MASK GENMASK(5, 0) +#define F81604_SJW_MASK GENMASK(7, 6) + +#define F81604_SEG1_MASK GENMASK(3, 0) +#define F81604_SEG2_MASK GENMASK(6, 4) + +#define F81604_CLEAR_ALC 0 +#define F81604_CLEAR_ECC 1 +#define F81604_CLEAR_OVERRUN 2 + +/* device setting */ +#define F81604_CTRL_MODE_REG 0x80 +#define F81604_TX_ONESHOT (0x03 << 3) +#define F81604_TX_NORMAL (0x01 << 3) +#define F81604_RX_AUTO_RELEASE_BUF BIT(1) +#define F81604_INT_WHEN_CHANGE BIT(0) + +#define F81604_TERMINATOR_REG 0x105 +#define F81604_CAN0_TERM BIT(2) +#define F81604_CAN1_TERM BIT(3) + +#define F81604_TERMINATION_DISABLED CAN_TERMINATION_DISABLED +#define F81604_TERMINATION_ENABLED 120 + +/* SJA1000 registers - manual section 6.4 (Pelican Mode) */ +#define F81604_SJA1000_MOD 0x00 +#define F81604_SJA1000_CMR 0x01 +#define F81604_SJA1000_IR 0x03 +#define F81604_SJA1000_IER 0x04 +#define F81604_SJA1000_ALC 0x0B +#define F81604_SJA1000_ECC 0x0C +#define F81604_SJA1000_RXERR 0x0E +#define F81604_SJA1000_TXERR 0x0F +#define F81604_SJA1000_ACCC0 0x10 +#define F81604_SJA1000_ACCM0 0x14 +#define F81604_MAX_FILTER_CNT 4 + +/* Common registers - manual section 6.5 */ +#define F81604_SJA1000_BTR0 0x06 +#define F81604_SJA1000_BTR1 0x07 +#define F81604_SJA1000_BTR1_SAMPLE_TRIPLE BIT(7) +#define F81604_SJA1000_OCR 0x08 +#define F81604_SJA1000_CDR 0x1F + +/* mode register */ +#define F81604_SJA1000_MOD_RM 0x01 +#define F81604_SJA1000_MOD_LOM 0x02 +#define F81604_SJA1000_MOD_STM 0x04 + +/* commands */ +#define F81604_SJA1000_CMD_CDO 0x08 + +/* interrupt sources */ +#define F81604_SJA1000_IRQ_BEI 0x80 +#define F81604_SJA1000_IRQ_ALI 0x40 +#define F81604_SJA1000_IRQ_EPI 0x20 +#define F81604_SJA1000_IRQ_DOI 0x08 +#define F81604_SJA1000_IRQ_EI 0x04 +#define F81604_SJA1000_IRQ_TI 0x02 +#define F81604_SJA1000_IRQ_RI 0x01 +#define F81604_SJA1000_IRQ_ALL 0xFF +#define F81604_SJA1000_IRQ_OFF 0x00 + +/* status register content */ +#define F81604_SJA1000_SR_BS 0x80 
+#define F81604_SJA1000_SR_ES 0x40 +#define F81604_SJA1000_SR_TCS 0x08 + +/* ECC register */ +#define F81604_SJA1000_ECC_SEG 0x1F +#define F81604_SJA1000_ECC_DIR 0x20 +#define F81604_SJA1000_ECC_BIT 0x00 +#define F81604_SJA1000_ECC_FORM 0x40 +#define F81604_SJA1000_ECC_STUFF 0x80 +#define F81604_SJA1000_ECC_MASK 0xc0 + +/* ALC register */ +#define F81604_SJA1000_ALC_MASK 0x1f + +/* table of devices that work with this driver */ +static const struct usb_device_id f81604_table[] = { + { USB_DEVICE(F81604_VENDOR_ID, F81604_PRODUCT_ID) }, + {} /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, f81604_table); + +static const struct ethtool_ops f81604_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, +}; + +static const u16 f81604_termination[] = { F81604_TERMINATION_DISABLED, + F81604_TERMINATION_ENABLED }; + +struct f81604_priv { + struct net_device *netdev[F81604_MAX_DEV]; +}; + +struct f81604_port_priv { + struct can_priv can; + struct net_device *netdev; + struct sk_buff *echo_skb; + + unsigned long clear_flags; + struct work_struct clear_reg_work; + + struct usb_device *dev; + struct usb_interface *intf; + + struct usb_anchor urbs_anchor; +}; + +/* Interrupt endpoint data format: + * Byte 0: Status register. + * Byte 1: Interrupt register. + * Byte 2: Interrupt enable register. + * Byte 3: Arbitration lost capture(ALC) register. + * Byte 4: Error code capture(ECC) register. + * Byte 5: Error warning limit register. + * Byte 6: RX error counter register. + * Byte 7: TX error counter register. + * Byte 8: Reserved. + */ +struct f81604_int_data { + u8 sr; + u8 isrc; + u8 ier; + u8 alc; + u8 ecc; + u8 ewlr; + u8 rxerr; + u8 txerr; + u8 val; +} __packed __aligned(4); + +struct f81604_sff { + __be16 id; + u8 data[CAN_MAX_DLEN]; +} __packed __aligned(2); + +struct f81604_eff { + __be32 id; + u8 data[CAN_MAX_DLEN]; +} __packed __aligned(2); + +struct f81604_can_frame { + u8 cmd; + + /* According for F81604 DLC define: + * bit 3~0: data length (0~8) + * bit6: is RTR flag. + * bit7: is EFF frame. 
+ */ + u8 dlc; + + union { + struct f81604_sff sff; + struct f81604_eff eff; + }; +} __packed __aligned(2); + +static const u8 bulk_in_addr[F81604_MAX_DEV] = { 2, 4 }; +static const u8 bulk_out_addr[F81604_MAX_DEV] = { 1, 3 }; +static const u8 int_in_addr[F81604_MAX_DEV] = { 1, 3 }; + +static int f81604_write(struct usb_device *dev, u16 reg, u8 data) +{ + int ret; + + ret = usb_control_msg_send(dev, 0, F81604_SET_GET_REGISTER, + USB_TYPE_VENDOR | USB_DIR_OUT, 0, reg, + &data, sizeof(data), F81604_USB_TIMEOUT, + GFP_KERNEL); + if (ret) + dev_err(&dev->dev, "%s: reg: %x data: %x failed: %pe\n", + __func__, reg, data, ERR_PTR(ret)); + + return ret; +} + +static int f81604_read(struct usb_device *dev, u16 reg, u8 *data) +{ + int ret; + + ret = usb_control_msg_recv(dev, 0, F81604_SET_GET_REGISTER, + USB_TYPE_VENDOR | USB_DIR_IN, 0, reg, data, + sizeof(*data), F81604_USB_TIMEOUT, + GFP_KERNEL); + + if (ret < 0) + dev_err(&dev->dev, "%s: reg: %x failed: %pe\n", __func__, reg, + ERR_PTR(ret)); + + return ret; +} + +static int f81604_update_bits(struct usb_device *dev, u16 reg, u8 mask, + u8 data) +{ + int ret; + u8 tmp; + + ret = f81604_read(dev, reg, &tmp); + if (ret) + return ret; + + tmp &= ~mask; + tmp |= (mask & data); + + return f81604_write(dev, reg, tmp); +} + +static int f81604_sja1000_write(struct f81604_port_priv *priv, u16 reg, + u8 data) +{ + int port = priv->netdev->dev_port; + int real_reg; + + real_reg = reg + F81604_PORT_OFFSET * port + F81604_PORT_OFFSET; + return f81604_write(priv->dev, real_reg, data); +} + +static int f81604_sja1000_read(struct f81604_port_priv *priv, u16 reg, + u8 *data) +{ + int port = priv->netdev->dev_port; + int real_reg; + + real_reg = reg + F81604_PORT_OFFSET * port + F81604_PORT_OFFSET; + return f81604_read(priv->dev, real_reg, data); +} + +static int f81604_set_reset_mode(struct f81604_port_priv *priv) +{ + int ret, i; + u8 tmp; + + /* disable interrupts */ + ret = f81604_sja1000_write(priv, F81604_SJA1000_IER, + F81604_SJA1000_IRQ_OFF); + if (ret) + return ret; + + for (i = 0; i < F81604_SET_DEVICE_RETRY; i++) { + ret = f81604_sja1000_read(priv, F81604_SJA1000_MOD, &tmp); + if (ret) + return ret; + + /* check reset bit */ + if (tmp & F81604_SJA1000_MOD_RM) { + priv->can.state = CAN_STATE_STOPPED; + return 0; + } + + /* reset chip */ + ret = f81604_sja1000_write(priv, F81604_SJA1000_MOD, + F81604_SJA1000_MOD_RM); + if (ret) + return ret; + } + + return -EPERM; +} + +static int f81604_set_normal_mode(struct f81604_port_priv *priv) +{ + u8 tmp, ier = 0; + u8 mod_reg = 0; + int ret, i; + + for (i = 0; i < F81604_SET_DEVICE_RETRY; i++) { + ret = f81604_sja1000_read(priv, F81604_SJA1000_MOD, &tmp); + if (ret) + return ret; + + /* check reset bit */ + if ((tmp & F81604_SJA1000_MOD_RM) == 0) { + priv->can.state = CAN_STATE_ERROR_ACTIVE; + /* enable interrupts, RI handled by bulk-in */ + ier = F81604_SJA1000_IRQ_ALL & ~F81604_SJA1000_IRQ_RI; + if (!(priv->can.ctrlmode & + CAN_CTRLMODE_BERR_REPORTING)) + ier &= ~F81604_SJA1000_IRQ_BEI; + + return f81604_sja1000_write(priv, F81604_SJA1000_IER, + ier); + } + + /* set chip to normal mode */ + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + mod_reg |= F81604_SJA1000_MOD_LOM; + if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK) + mod_reg |= F81604_SJA1000_MOD_STM; + + ret = f81604_sja1000_write(priv, F81604_SJA1000_MOD, mod_reg); + if (ret) + return ret; + } + + return -EPERM; +} + +static int f81604_chipset_init(struct f81604_port_priv *priv) +{ + int i, ret; + + /* set clock divider and output control 
register */ + ret = f81604_sja1000_write(priv, F81604_SJA1000_CDR, + CDR_CBP | CDR_PELICAN); + if (ret) + return ret; + + /* set acceptance filter (accept all) */ + for (i = 0; i < F81604_MAX_FILTER_CNT; ++i) { + ret = f81604_sja1000_write(priv, F81604_SJA1000_ACCC0 + i, 0); + if (ret) + return ret; + } + + for (i = 0; i < F81604_MAX_FILTER_CNT; ++i) { + ret = f81604_sja1000_write(priv, F81604_SJA1000_ACCM0 + i, + 0xFF); + if (ret) + return ret; + } + + return f81604_sja1000_write(priv, F81604_SJA1000_OCR, + OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL | + OCR_MODE_NORMAL); +} + +static void f81604_process_rx_packet(struct net_device *netdev, + struct f81604_can_frame *frame) +{ + struct net_device_stats *stats = &netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + if (frame->cmd != F81604_CMD_DATA) + return; + + skb = alloc_can_skb(netdev, &cf); + if (!skb) { + stats->rx_dropped++; + return; + } + + cf->len = can_cc_dlc2len(frame->dlc & F81604_DLC_LEN_MASK); + + if (frame->dlc & F81604_DLC_EFF_BIT) { + cf->can_id = get_unaligned_be32(&frame->eff.id) >> + F81604_EFF_SHIFT; + cf->can_id |= CAN_EFF_FLAG; + + if (!(frame->dlc & F81604_DLC_RTR_BIT)) + memcpy(cf->data, frame->eff.data, cf->len); + } else { + cf->can_id = get_unaligned_be16(&frame->sff.id) >> + F81604_SFF_SHIFT; + + if (!(frame->dlc & F81604_DLC_RTR_BIT)) + memcpy(cf->data, frame->sff.data, cf->len); + } + + if (frame->dlc & F81604_DLC_RTR_BIT) + cf->can_id |= CAN_RTR_FLAG; + else + stats->rx_bytes += cf->len; + + stats->rx_packets++; + netif_rx(skb); +} + +static void f81604_read_bulk_callback(struct urb *urb) +{ + struct f81604_can_frame *frame = urb->transfer_buffer; + struct net_device *netdev = urb->context; + int ret; + + if (!netif_device_present(netdev)) + return; + + if (urb->status) + netdev_info(netdev, "%s: URB aborted %pe\n", __func__, + ERR_PTR(urb->status)); + + switch (urb->status) { + case 0: /* success */ + break; + + case -ENOENT: + case -EPIPE: + case -EPROTO: + case -ESHUTDOWN: + return; + + default: + goto resubmit_urb; + } + + if (urb->actual_length != sizeof(*frame)) { + netdev_warn(netdev, "URB length %u not equal to %zu\n", + urb->actual_length, sizeof(*frame)); + goto resubmit_urb; + } + + f81604_process_rx_packet(netdev, frame); + +resubmit_urb: + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret == -ENODEV) + netif_device_detach(netdev); + else if (ret) + netdev_err(netdev, + "%s: failed to resubmit read bulk urb: %pe\n", + __func__, ERR_PTR(ret)); +} + +static void f81604_handle_tx(struct f81604_port_priv *priv, + struct f81604_int_data *data) +{ + struct net_device *netdev = priv->netdev; + struct net_device_stats *stats = &netdev->stats; + + /* transmission buffer released */ + if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT && + !(data->sr & F81604_SJA1000_SR_TCS)) { + stats->tx_errors++; + can_free_echo_skb(netdev, 0, NULL); + } else { + /* transmission complete */ + stats->tx_bytes += can_get_echo_skb(netdev, 0, NULL); + stats->tx_packets++; + } + + netif_wake_queue(netdev); +} + +static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv, + struct f81604_int_data *data) +{ + enum can_state can_state = priv->can.state; + struct net_device *netdev = priv->netdev; + struct net_device_stats *stats = &netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + /* Note: ALC/ECC will not auto clear by read here, must be cleared by + * read register (via clear_reg_work). 
+ */ + + skb = alloc_can_err_skb(netdev, &cf); + if (skb) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = data->txerr; + cf->data[7] = data->rxerr; + } + + if (data->isrc & F81604_SJA1000_IRQ_DOI) { + /* data overrun interrupt */ + netdev_dbg(netdev, "data overrun interrupt\n"); + + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + } + + stats->rx_over_errors++; + stats->rx_errors++; + + set_bit(F81604_CLEAR_OVERRUN, &priv->clear_flags); + } + + if (data->isrc & F81604_SJA1000_IRQ_EI) { + /* error warning interrupt */ + netdev_dbg(netdev, "error warning interrupt\n"); + + if (data->sr & F81604_SJA1000_SR_BS) + can_state = CAN_STATE_BUS_OFF; + else if (data->sr & F81604_SJA1000_SR_ES) + can_state = CAN_STATE_ERROR_WARNING; + else + can_state = CAN_STATE_ERROR_ACTIVE; + } + + if (data->isrc & F81604_SJA1000_IRQ_BEI) { + /* bus error interrupt */ + netdev_dbg(netdev, "bus error interrupt\n"); + + priv->can.can_stats.bus_error++; + stats->rx_errors++; + + if (skb) { + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + /* set error type */ + switch (data->ecc & F81604_SJA1000_ECC_MASK) { + case F81604_SJA1000_ECC_BIT: + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + case F81604_SJA1000_ECC_FORM: + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case F81604_SJA1000_ECC_STUFF: + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + default: + break; + } + + /* set error location */ + cf->data[3] = data->ecc & F81604_SJA1000_ECC_SEG; + + /* Error occurred during transmission? */ + if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0) + cf->data[2] |= CAN_ERR_PROT_TX; + } + + set_bit(F81604_CLEAR_ECC, &priv->clear_flags); + } + + if (data->isrc & F81604_SJA1000_IRQ_EPI) { + if (can_state == CAN_STATE_ERROR_PASSIVE) + can_state = CAN_STATE_ERROR_WARNING; + else + can_state = CAN_STATE_ERROR_PASSIVE; + + /* error passive interrupt */ + netdev_dbg(netdev, "error passive interrupt: %d\n", can_state); + } + + if (data->isrc & F81604_SJA1000_IRQ_ALI) { + /* arbitration lost interrupt */ + netdev_dbg(netdev, "arbitration lost interrupt\n"); + + priv->can.can_stats.arbitration_lost++; + + if (skb) { + cf->can_id |= CAN_ERR_LOSTARB; + cf->data[0] = data->alc & F81604_SJA1000_ALC_MASK; + } + + set_bit(F81604_CLEAR_ALC, &priv->clear_flags); + } + + if (can_state != priv->can.state) { + enum can_state tx_state, rx_state; + + tx_state = data->txerr >= data->rxerr ? can_state : 0; + rx_state = data->txerr <= data->rxerr ? 
can_state : 0; + + can_change_state(netdev, cf, tx_state, rx_state); + + if (can_state == CAN_STATE_BUS_OFF) + can_bus_off(netdev); + } + + if (priv->clear_flags) + schedule_work(&priv->clear_reg_work); + + if (skb) + netif_rx(skb); +} + +static void f81604_read_int_callback(struct urb *urb) +{ + struct f81604_int_data *data = urb->transfer_buffer; + struct net_device *netdev = urb->context; + struct f81604_port_priv *priv; + int ret; + + priv = netdev_priv(netdev); + + if (!netif_device_present(netdev)) + return; + + if (urb->status) + netdev_info(netdev, "%s: Int URB aborted: %pe\n", __func__, + ERR_PTR(urb->status)); + + switch (urb->status) { + case 0: /* success */ + break; + + case -ENOENT: + case -EPIPE: + case -EPROTO: + case -ESHUTDOWN: + return; + + default: + goto resubmit_urb; + } + + /* handle Errors */ + if (data->isrc & (F81604_SJA1000_IRQ_DOI | F81604_SJA1000_IRQ_EI | + F81604_SJA1000_IRQ_BEI | F81604_SJA1000_IRQ_EPI | + F81604_SJA1000_IRQ_ALI)) + f81604_handle_can_bus_errors(priv, data); + + /* handle TX */ + if (priv->can.state != CAN_STATE_BUS_OFF && + (data->isrc & F81604_SJA1000_IRQ_TI)) + f81604_handle_tx(priv, data); + +resubmit_urb: + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret == -ENODEV) + netif_device_detach(netdev); + else if (ret) + netdev_err(netdev, "%s: failed to resubmit int urb: %pe\n", + __func__, ERR_PTR(ret)); +} + +static void f81604_unregister_urbs(struct f81604_port_priv *priv) +{ + usb_kill_anchored_urbs(&priv->urbs_anchor); +} + +static int f81604_register_urbs(struct f81604_port_priv *priv) +{ + struct net_device *netdev = priv->netdev; + struct f81604_int_data *int_data; + int id = netdev->dev_port; + struct urb *int_urb; + int rx_urb_cnt; + int ret; + + for (rx_urb_cnt = 0; rx_urb_cnt < F81604_MAX_RX_URBS; ++rx_urb_cnt) { + struct f81604_can_frame *frame; + struct urb *rx_urb; + + rx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!rx_urb) { + ret = -ENOMEM; + break; + } + + frame = kmalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + usb_free_urb(rx_urb); + ret = -ENOMEM; + break; + } + + usb_fill_bulk_urb(rx_urb, priv->dev, + usb_rcvbulkpipe(priv->dev, bulk_in_addr[id]), + frame, sizeof(*frame), + f81604_read_bulk_callback, netdev); + + rx_urb->transfer_flags |= URB_FREE_BUFFER; + usb_anchor_urb(rx_urb, &priv->urbs_anchor); + + ret = usb_submit_urb(rx_urb, GFP_KERNEL); + if (ret) { + usb_unanchor_urb(rx_urb); + usb_free_urb(rx_urb); + break; + } + + /* Drop reference, USB core will take care of freeing it */ + usb_free_urb(rx_urb); + } + + if (rx_urb_cnt == 0) { + netdev_warn(netdev, "%s: submit rx urb failed: %pe\n", + __func__, ERR_PTR(ret)); + + goto error; + } + + int_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!int_urb) { + ret = -ENOMEM; + goto error; + } + + int_data = kmalloc(sizeof(*int_data), GFP_KERNEL); + if (!int_data) { + usb_free_urb(int_urb); + ret = -ENOMEM; + goto error; + } + + usb_fill_int_urb(int_urb, priv->dev, + usb_rcvintpipe(priv->dev, int_in_addr[id]), int_data, + sizeof(*int_data), f81604_read_int_callback, netdev, + 1); + + int_urb->transfer_flags |= URB_FREE_BUFFER; + usb_anchor_urb(int_urb, &priv->urbs_anchor); + + ret = usb_submit_urb(int_urb, GFP_KERNEL); + if (ret) { + usb_unanchor_urb(int_urb); + usb_free_urb(int_urb); + + netdev_warn(netdev, "%s: submit int urb failed: %pe\n", + __func__, ERR_PTR(ret)); + goto error; + } + + /* Drop reference, USB core will take care of freeing it */ + usb_free_urb(int_urb); + + return 0; + +error: + f81604_unregister_urbs(priv); + return ret; +} + +static int 
f81604_start(struct net_device *netdev) +{ + struct f81604_port_priv *priv = netdev_priv(netdev); + int ret; + u8 mode; + u8 tmp; + + mode = F81604_RX_AUTO_RELEASE_BUF | F81604_INT_WHEN_CHANGE; + + /* Set TR/AT mode */ + if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + mode |= F81604_TX_ONESHOT; + else + mode |= F81604_TX_NORMAL; + + ret = f81604_sja1000_write(priv, F81604_CTRL_MODE_REG, mode); + if (ret) + return ret; + + /* set reset mode */ + ret = f81604_set_reset_mode(priv); + if (ret) + return ret; + + ret = f81604_chipset_init(priv); + if (ret) + return ret; + + /* Clear error counters and error code capture */ + ret = f81604_sja1000_write(priv, F81604_SJA1000_TXERR, 0); + if (ret) + return ret; + + ret = f81604_sja1000_write(priv, F81604_SJA1000_RXERR, 0); + if (ret) + return ret; + + /* Read clear for ECC/ALC/IR register */ + ret = f81604_sja1000_read(priv, F81604_SJA1000_ECC, &tmp); + if (ret) + return ret; + + ret = f81604_sja1000_read(priv, F81604_SJA1000_ALC, &tmp); + if (ret) + return ret; + + ret = f81604_sja1000_read(priv, F81604_SJA1000_IR, &tmp); + if (ret) + return ret; + + ret = f81604_register_urbs(priv); + if (ret) + return ret; + + ret = f81604_set_normal_mode(priv); + if (ret) { + f81604_unregister_urbs(priv); + return ret; + } + + return 0; +} + +static int f81604_set_bittiming(struct net_device *dev) +{ + struct f81604_port_priv *priv = netdev_priv(dev); + struct can_bittiming *bt = &priv->can.bittiming; + u8 btr0, btr1; + int ret; + + btr0 = FIELD_PREP(F81604_BRP_MASK, bt->brp - 1) | + FIELD_PREP(F81604_SJW_MASK, bt->sjw - 1); + + btr1 = FIELD_PREP(F81604_SEG1_MASK, + bt->prop_seg + bt->phase_seg1 - 1) | + FIELD_PREP(F81604_SEG2_MASK, bt->phase_seg2 - 1); + + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + btr1 |= F81604_SJA1000_BTR1_SAMPLE_TRIPLE; + + ret = f81604_sja1000_write(priv, F81604_SJA1000_BTR0, btr0); + if (ret) { + netdev_warn(dev, "%s: Set BTR0 failed: %pe\n", __func__, + ERR_PTR(ret)); + return ret; + } + + ret = f81604_sja1000_write(priv, F81604_SJA1000_BTR1, btr1); + if (ret) { + netdev_warn(dev, "%s: Set BTR1 failed: %pe\n", __func__, + ERR_PTR(ret)); + return ret; + } + + return 0; +} + +static int f81604_set_mode(struct net_device *netdev, enum can_mode mode) +{ + int ret; + + switch (mode) { + case CAN_MODE_START: + ret = f81604_start(netdev); + if (!ret && netif_queue_stopped(netdev)) + netif_wake_queue(netdev); + break; + + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +static void f81604_write_bulk_callback(struct urb *urb) +{ + struct net_device *netdev = urb->context; + + if (!netif_device_present(netdev)) + return; + + if (urb->status) + netdev_info(netdev, "%s: Tx URB error: %pe\n", __func__, + ERR_PTR(urb->status)); +} + +static void f81604_clear_reg_work(struct work_struct *work) +{ + struct f81604_port_priv *priv; + u8 tmp; + + priv = container_of(work, struct f81604_port_priv, clear_reg_work); + + /* dummy read for clear Arbitration lost capture(ALC) register. */ + if (test_and_clear_bit(F81604_CLEAR_ALC, &priv->clear_flags)) + f81604_sja1000_read(priv, F81604_SJA1000_ALC, &tmp); + + /* dummy read for clear Error code capture(ECC) register. */ + if (test_and_clear_bit(F81604_CLEAR_ECC, &priv->clear_flags)) + f81604_sja1000_read(priv, F81604_SJA1000_ECC, &tmp); + + /* dummy write for clear data overrun flag. 
*/ + if (test_and_clear_bit(F81604_CLEAR_OVERRUN, &priv->clear_flags)) + f81604_sja1000_write(priv, F81604_SJA1000_CMR, + F81604_SJA1000_CMD_CDO); +} + +static netdev_tx_t f81604_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct can_frame *cf = (struct can_frame *)skb->data; + struct f81604_port_priv *priv = netdev_priv(netdev); + struct net_device_stats *stats = &netdev->stats; + struct f81604_can_frame *frame; + struct urb *write_urb; + int ret; + + if (can_dev_dropped_skb(netdev, skb)) + return NETDEV_TX_OK; + + netif_stop_queue(netdev); + + write_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!write_urb) + goto nomem_urb; + + frame = kzalloc(sizeof(*frame), GFP_ATOMIC); + if (!frame) + goto nomem_buf; + + usb_fill_bulk_urb(write_urb, priv->dev, + usb_sndbulkpipe(priv->dev, + bulk_out_addr[netdev->dev_port]), + frame, sizeof(*frame), f81604_write_bulk_callback, + priv->netdev); + + write_urb->transfer_flags |= URB_FREE_BUFFER; + + frame->cmd = F81604_CMD_DATA; + frame->dlc = cf->len; + + if (cf->can_id & CAN_RTR_FLAG) + frame->dlc |= F81604_DLC_RTR_BIT; + + if (cf->can_id & CAN_EFF_FLAG) { + u32 id = (cf->can_id & CAN_EFF_MASK) << F81604_EFF_SHIFT; + + put_unaligned_be32(id, &frame->eff.id); + + frame->dlc |= F81604_DLC_EFF_BIT; + + if (!(cf->can_id & CAN_RTR_FLAG)) + memcpy(&frame->eff.data, cf->data, cf->len); + } else { + u32 id = (cf->can_id & CAN_SFF_MASK) << F81604_SFF_SHIFT; + + put_unaligned_be16(id, &frame->sff.id); + + if (!(cf->can_id & CAN_RTR_FLAG)) + memcpy(&frame->sff.data, cf->data, cf->len); + } + + can_put_echo_skb(skb, netdev, 0, 0); + + ret = usb_submit_urb(write_urb, GFP_ATOMIC); + if (ret) { + netdev_err(netdev, "%s: failed to resubmit tx bulk urb: %pe\n", + __func__, ERR_PTR(ret)); + + can_free_echo_skb(netdev, 0, NULL); + stats->tx_dropped++; + stats->tx_errors++; + + if (ret == -ENODEV) + netif_device_detach(netdev); + else + netif_wake_queue(netdev); + } + + /* let usb core take care of this urb */ + usb_free_urb(write_urb); + + return NETDEV_TX_OK; + +nomem_buf: + usb_free_urb(write_urb); + +nomem_urb: + dev_kfree_skb(skb); + stats->tx_dropped++; + stats->tx_errors++; + netif_wake_queue(netdev); + + return NETDEV_TX_OK; +} + +static int f81604_get_berr_counter(const struct net_device *netdev, + struct can_berr_counter *bec) +{ + struct f81604_port_priv *priv = netdev_priv(netdev); + u8 txerr, rxerr; + int ret; + + ret = f81604_sja1000_read(priv, F81604_SJA1000_TXERR, &txerr); + if (ret) + return ret; + + ret = f81604_sja1000_read(priv, F81604_SJA1000_RXERR, &rxerr); + if (ret) + return ret; + + bec->txerr = txerr; + bec->rxerr = rxerr; + + return 0; +} + +/* Open USB device */ +static int f81604_open(struct net_device *netdev) +{ + int ret; + + ret = open_candev(netdev); + if (ret) + return ret; + + ret = f81604_start(netdev); + if (ret) { + if (ret == -ENODEV) + netif_device_detach(netdev); + + close_candev(netdev); + return ret; + } + + netif_start_queue(netdev); + return 0; +} + +/* Close USB device */ +static int f81604_close(struct net_device *netdev) +{ + struct f81604_port_priv *priv = netdev_priv(netdev); + + f81604_set_reset_mode(priv); + + netif_stop_queue(netdev); + cancel_work_sync(&priv->clear_reg_work); + close_candev(netdev); + + f81604_unregister_urbs(priv); + + return 0; +} + +static const struct net_device_ops f81604_netdev_ops = { + .ndo_open = f81604_open, + .ndo_stop = f81604_close, + .ndo_start_xmit = f81604_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +static const struct can_bittiming_const 
f81604_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 64, + .brp_inc = 1, +}; + +/* Called by the usb core when driver is unloaded or device is removed */ +static void f81604_disconnect(struct usb_interface *intf) +{ + struct f81604_priv *priv = usb_get_intfdata(intf); + int i; + + for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) { + if (!priv->netdev[i]) + continue; + + unregister_netdev(priv->netdev[i]); + free_candev(priv->netdev[i]); + } +} + +static int __f81604_set_termination(struct usb_device *dev, int idx, u16 term) +{ + u8 mask, data = 0; + + if (idx == 0) + mask = F81604_CAN0_TERM; + else + mask = F81604_CAN1_TERM; + + if (term) + data = mask; + + return f81604_update_bits(dev, F81604_TERMINATOR_REG, mask, data); +} + +static int f81604_set_termination(struct net_device *netdev, u16 term) +{ + struct f81604_port_priv *port_priv = netdev_priv(netdev); + + ASSERT_RTNL(); + + return __f81604_set_termination(port_priv->dev, netdev->dev_port, + term); +} + +static int f81604_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *dev = interface_to_usbdev(intf); + struct net_device *netdev; + struct f81604_priv *priv; + int i, ret; + + priv = devm_kzalloc(&intf->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + usb_set_intfdata(intf, priv); + + for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) { + ret = __f81604_set_termination(dev, i, 0); + if (ret) { + dev_err(&intf->dev, + "Setting termination of CH#%d failed: %pe\n", + i, ERR_PTR(ret)); + return ret; + } + } + + for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) { + struct f81604_port_priv *port_priv; + + netdev = alloc_candev(sizeof(*port_priv), 1); + if (!netdev) { + dev_err(&intf->dev, "Couldn't alloc candev: %d\n", i); + ret = -ENOMEM; + + goto failure_cleanup; + } + + port_priv = netdev_priv(netdev); + + INIT_WORK(&port_priv->clear_reg_work, f81604_clear_reg_work); + init_usb_anchor(&port_priv->urbs_anchor); + + port_priv->intf = intf; + port_priv->dev = dev; + port_priv->netdev = netdev; + port_priv->can.clock.freq = F81604_CAN_CLOCK; + + port_priv->can.termination_const = f81604_termination; + port_priv->can.termination_const_cnt = + ARRAY_SIZE(f81604_termination); + port_priv->can.bittiming_const = &f81604_bittiming_const; + port_priv->can.do_set_bittiming = f81604_set_bittiming; + port_priv->can.do_set_mode = f81604_set_mode; + port_priv->can.do_set_termination = f81604_set_termination; + port_priv->can.do_get_berr_counter = f81604_get_berr_counter; + port_priv->can.ctrlmode_supported = + CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | + CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_PRESUME_ACK; + + netdev->ethtool_ops = &f81604_ethtool_ops; + netdev->netdev_ops = &f81604_netdev_ops; + netdev->flags |= IFF_ECHO; + netdev->dev_port = i; + + SET_NETDEV_DEV(netdev, &intf->dev); + + ret = register_candev(netdev); + if (ret) { + netdev_err(netdev, "register CAN device failed: %pe\n", + ERR_PTR(ret)); + free_candev(netdev); + + goto failure_cleanup; + } + + priv->netdev[i] = netdev; + } + + return 0; + +failure_cleanup: + f81604_disconnect(intf); + return ret; +} + +static struct usb_driver f81604_driver = { + .name = KBUILD_MODNAME, + .probe = f81604_probe, + .disconnect = f81604_disconnect, + .id_table = f81604_table, +}; + +module_usb_driver(f81604_driver); + +MODULE_AUTHOR("Ji-Ze Hong (Peter Hong) "); +MODULE_DESCRIPTION("Fintek F81604 
USB to 2xCANBUS"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 43c812ea1de02..797c69a0314d7 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1898,20 +1898,18 @@ static int xcan_probe(struct platform_device *pdev) * This function frees all the resources allocated to the device. * Return: 0 always */ -static int xcan_remove(struct platform_device *pdev) +static void xcan_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); unregister_candev(ndev); pm_runtime_disable(&pdev->dev); free_candev(ndev); - - return 0; } static struct platform_driver xcan_driver = { .probe = xcan_probe, - .remove = xcan_remove, + .remove_new = xcan_remove, .driver = { .name = DRIVER_NAME, .pm = &xcan_dev_pm_ops, diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index cbe8318753471..b560e73c14caf 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1462,7 +1462,6 @@ int lan9303_remove(struct lan9303 *chip) /* assert reset to the whole device to prevent it from doing anything */ gpiod_set_value_cansleep(chip->reset_gpio, 1); - gpiod_unexport(chip->reset_gpio); return 0; } diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index f56fca1b1a222..84d502589f8e3 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -28,13 +28,13 @@ static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) { - regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0); + regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0); } static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, bool set) { - regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset), + regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset), bits, set ? bits : 0); } @@ -941,7 +941,6 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port) { u8 learn[DSA_MAX_PORTS]; int first, index, cnt; - struct ksz_port *p; const u16 *regs; regs = dev->info->regs; @@ -955,9 +954,6 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port) cnt = dev->info->port_cnt; } for (index = first; index < cnt; index++) { - p = &dev->ports[index]; - if (!p->on) - continue; ksz_pread8(dev, index, regs[P_STP_CTRL], &learn[index]); if (!(learn[index] & PORT_LEARN_DISABLE)) ksz_pwrite8(dev, index, regs[P_STP_CTRL], @@ -965,9 +961,6 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port) } ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true); for (index = first; index < cnt; index++) { - p = &dev->ports[index]; - if (!p->on) - continue; if (!(learn[index] & PORT_LEARN_DISABLE)) ksz_pwrite8(dev, index, regs[P_STP_CTRL], learn[index]); } @@ -1338,25 +1331,14 @@ void ksz8_config_cpu_port(struct dsa_switch *ds) ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true); - p = &dev->ports[dev->cpu_port]; - p->on = 1; - ksz8_port_setup(dev, dev->cpu_port, true); for (i = 0; i < dev->phy_port_cnt; i++) { - p = &dev->ports[i]; - ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED); - - /* Last port may be disabled. 
*/ - if (i == dev->phy_port_cnt) - break; - p->on = 1; } for (i = 0; i < dev->phy_port_cnt; i++) { p = &dev->ports[i]; - if (!p->on) - continue; + if (!ksz_is_ksz88x3(dev)) { ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote); if (remote & KSZ8_PORT_FIBER_MODE) @@ -1425,14 +1407,14 @@ int ksz8_setup(struct dsa_switch *ds) ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true); /* Enable aggressive back off algorithm in half duplex mode. */ - regmap_update_bits(dev->regmap[0], REG_SW_CTRL_1, + regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_1, SW_AGGR_BACKOFF, SW_AGGR_BACKOFF); /* * Make sure unicast VLAN boundary is set as default and * enable no excessive collision drop. */ - regmap_update_bits(dev->regmap[0], REG_SW_CTRL_2, + regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_2, UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP, UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP); diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c index 3698112138b78..fd6e2e69a42ae 100644 --- a/drivers/net/dsa/microchip/ksz8863_smi.c +++ b/drivers/net/dsa/microchip/ksz8863_smi.c @@ -104,6 +104,7 @@ static const struct regmap_config ksz8863_regmap_config[] = { .cache_type = REGCACHE_NONE, .lock = ksz_regmap_lock, .unlock = ksz_regmap_unlock, + .max_register = U8_MAX, }, { .name = "#16", @@ -113,6 +114,7 @@ static const struct regmap_config ksz8863_regmap_config[] = { .cache_type = REGCACHE_NONE, .lock = ksz_regmap_lock, .unlock = ksz_regmap_unlock, + .max_register = U8_MAX, }, { .name = "#32", @@ -122,11 +124,14 @@ static const struct regmap_config ksz8863_regmap_config[] = { .cache_type = REGCACHE_NONE, .lock = ksz_regmap_lock, .unlock = ksz_regmap_unlock, + .max_register = U8_MAX, } }; static int ksz8863_smi_probe(struct mdio_device *mdiodev) { + struct device *ddev = &mdiodev->dev; + const struct ksz_chip_data *chip; struct regmap_config rc; struct ksz_device *dev; int ret; @@ -136,9 +141,15 @@ static int ksz8863_smi_probe(struct mdio_device *mdiodev) if (!dev) return -ENOMEM; - for (i = 0; i < ARRAY_SIZE(ksz8863_regmap_config); i++) { + chip = device_get_match_data(ddev); + if (!chip) + return -EINVAL; + + for (i = 0; i < __KSZ_NUM_REGMAPS; i++) { rc = ksz8863_regmap_config[i]; rc.lock_arg = &dev->regmap_mutex; + rc.wr_table = chip->wr_table; + rc.rd_table = chip->rd_table; dev->regmap[i] = devm_regmap_init(&mdiodev->dev, ®map_smi[i], dev, &rc); diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index bf13d47c26cf7..3019f54049fcd 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -21,25 +21,25 @@ static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) { - regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0); + regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0); } static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, bool set) { - regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset), + regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset), bits, set ? bits : 0); } static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set) { - regmap_update_bits(dev->regmap[2], addr, bits, set ? bits : 0); + regmap_update_bits(ksz_regmap_32(dev), addr, bits, set ? 
bits : 0); } static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset, u32 bits, bool set) { - regmap_update_bits(dev->regmap[2], PORT_CTRL_ADDR(port, offset), + regmap_update_bits(ksz_regmap_32(dev), PORT_CTRL_ADDR(port, offset), bits, set ? bits : 0); } @@ -52,7 +52,7 @@ int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu) frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; - return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, + return regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK, frame_size); } @@ -60,7 +60,7 @@ static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev) { unsigned int val; - return regmap_read_poll_timeout(dev->regmap[0], REG_SW_VLAN_CTRL, + return regmap_read_poll_timeout(ksz_regmap_8(dev), REG_SW_VLAN_CTRL, val, !(val & VLAN_START), 10, 1000); } @@ -147,7 +147,7 @@ static int ksz9477_wait_alu_ready(struct ksz_device *dev) { unsigned int val; - return regmap_read_poll_timeout(dev->regmap[2], REG_SW_ALU_CTRL__4, + return regmap_read_poll_timeout(ksz_regmap_32(dev), REG_SW_ALU_CTRL__4, val, !(val & ALU_START), 10, 1000); } @@ -155,7 +155,7 @@ static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev) { unsigned int val; - return regmap_read_poll_timeout(dev->regmap[2], + return regmap_read_poll_timeout(ksz_regmap_32(dev), REG_SW_ALU_STAT_CTRL__4, val, !(val & ALU_STAT_START), 10, 1000); @@ -170,7 +170,7 @@ int ksz9477_reset_switch(struct ksz_device *dev) ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true); /* turn off SPI DO Edge select */ - regmap_update_bits(dev->regmap[0], REG_SW_GLOBAL_SERIAL_CTRL_0, + regmap_update_bits(ksz_regmap_8(dev), REG_SW_GLOBAL_SERIAL_CTRL_0, SPI_AUTO_EDGE_DETECTION, 0); /* default configuration */ @@ -213,7 +213,7 @@ void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt) data |= (addr << MIB_COUNTER_INDEX_S); ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data); - ret = regmap_read_poll_timeout(dev->regmap[2], + ret = regmap_read_poll_timeout(ksz_regmap_32(dev), PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4), val, !(val & MIB_COUNTER_READ), 10, 1000); /* failed to read MIB. 
get out of loop */ @@ -346,7 +346,7 @@ void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port) const u16 *regs = dev->info->regs; u8 data; - regmap_update_bits(dev->regmap[0], REG_SW_LUE_CTRL_2, + regmap_update_bits(ksz_regmap_8(dev), REG_SW_LUE_CTRL_2, SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S, SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S); @@ -1165,7 +1165,7 @@ int ksz9477_setup(struct dsa_switch *ds) ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true); /* Now we can configure default MTU value */ - ret = regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, REG_SW_MTU_MASK, + ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK, VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); if (ret) return ret; diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c index 97a317263a2f4..497be833f7073 100644 --- a/drivers/net/dsa/microchip/ksz9477_i2c.c +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@ -24,7 +24,7 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c) if (!dev) return -ENOMEM; - for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) { + for (i = 0; i < __KSZ_NUM_REGMAPS; i++) { rc = ksz9477_regmap_config[i]; rc.lock_arg = &dev->regmap_mutex; dev->regmap[i] = devm_regmap_init_i2c(i2c, &rc); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index a4428be5f483c..768f649d2f401 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -1075,6 +1075,45 @@ static const struct regmap_access_table ksz9896_register_set = { .n_yes_ranges = ARRAY_SIZE(ksz9896_valid_regs), }; +static const struct regmap_range ksz8873_valid_regs[] = { + regmap_reg_range(0x00, 0x01), + /* global control register */ + regmap_reg_range(0x02, 0x0f), + + /* port registers */ + regmap_reg_range(0x10, 0x1d), + regmap_reg_range(0x1e, 0x1f), + regmap_reg_range(0x20, 0x2d), + regmap_reg_range(0x2e, 0x2f), + regmap_reg_range(0x30, 0x39), + regmap_reg_range(0x3f, 0x3f), + + /* advanced control registers */ + regmap_reg_range(0x60, 0x6f), + regmap_reg_range(0x70, 0x75), + regmap_reg_range(0x76, 0x78), + regmap_reg_range(0x79, 0x7a), + regmap_reg_range(0x7b, 0x83), + regmap_reg_range(0x8e, 0x99), + regmap_reg_range(0x9a, 0xa5), + regmap_reg_range(0xa6, 0xa6), + regmap_reg_range(0xa7, 0xaa), + regmap_reg_range(0xab, 0xae), + regmap_reg_range(0xaf, 0xba), + regmap_reg_range(0xbb, 0xbc), + regmap_reg_range(0xbd, 0xbd), + regmap_reg_range(0xc0, 0xc0), + regmap_reg_range(0xc2, 0xc2), + regmap_reg_range(0xc3, 0xc3), + regmap_reg_range(0xc4, 0xc4), + regmap_reg_range(0xc6, 0xc6), +}; + +static const struct regmap_access_table ksz8873_register_set = { + .yes_ranges = ksz8873_valid_regs, + .n_yes_ranges = ARRAY_SIZE(ksz8873_valid_regs), +}; + const struct ksz_chip_data ksz_switch_chips[] = { [KSZ8563] = { .chip_id = KSZ8563_CHIP_ID, @@ -1214,6 +1253,8 @@ const struct ksz_chip_data ksz_switch_chips[] = { .supports_mii = {false, false, true}, .supports_rmii = {false, false, true}, .internal_phy = {true, true, false}, + .wr_table = &ksz8873_register_set, + .rd_table = &ksz8873_register_set, }, [KSZ9477] = { @@ -2095,7 +2136,7 @@ static int ksz_setup(struct dsa_switch *ds) } /* set broadcast storm protection 10% rate */ - regmap_update_bits(dev->regmap[1], regs[S_BROADCAST_CTRL], + regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL], BROADCAST_STORM_RATE, (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100); @@ -2106,7 +2147,7 @@ static int ksz_setup(struct dsa_switch *ds) 
ds->num_tx_queues = dev->info->num_tx_queues; - regmap_update_bits(dev->regmap[0], regs[S_MULTICAST_CTRL], + regmap_update_bits(ksz_regmap_8(dev), regs[S_MULTICAST_CTRL], MULTICAST_STORM_DISABLE, MULTICAST_STORM_DISABLE); ksz_init_mib_timer(dev); @@ -2156,7 +2197,7 @@ static int ksz_setup(struct dsa_switch *ds) } /* start switch */ - regmap_update_bits(dev->regmap[0], regs[S_START_CTRL], + regmap_update_bits(ksz_regmap_8(dev), regs[S_START_CTRL], SW_START, SW_START); return 0; diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 8abecaf6089ef..5aa58aac3e074 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -22,6 +22,13 @@ struct ksz_device; struct ksz_port; +enum ksz_regmap_width { + KSZ_REGMAP_8, + KSZ_REGMAP_16, + KSZ_REGMAP_32, + __KSZ_NUM_REGMAPS, +}; + struct vlan_table { u32 table[3]; }; @@ -101,7 +108,6 @@ struct ksz_port { int stp_state; struct phy_device phydev; - u32 on:1; /* port is not disabled by hardware */ u32 fiber:1; /* port is fiber */ u32 force:1; u32 read:1; /* read MIB counters in background */ @@ -137,7 +143,7 @@ struct ksz_device { const struct ksz_dev_ops *dev_ops; struct device *dev; - struct regmap *regmap[3]; + struct regmap *regmap[__KSZ_NUM_REGMAPS]; void *priv; int irq; @@ -377,11 +383,25 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit); extern const struct ksz_chip_data ksz_switch_chips[]; /* Common register access functions */ +static inline struct regmap *ksz_regmap_8(struct ksz_device *dev) +{ + return dev->regmap[KSZ_REGMAP_8]; +} + +static inline struct regmap *ksz_regmap_16(struct ksz_device *dev) +{ + return dev->regmap[KSZ_REGMAP_16]; +} + +static inline struct regmap *ksz_regmap_32(struct ksz_device *dev) +{ + return dev->regmap[KSZ_REGMAP_32]; +} static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val) { unsigned int value; - int ret = regmap_read(dev->regmap[0], reg, &value); + int ret = regmap_read(ksz_regmap_8(dev), reg, &value); if (ret) dev_err(dev->dev, "can't read 8bit reg: 0x%x %pe\n", reg, @@ -394,7 +414,7 @@ static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val) static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val) { unsigned int value; - int ret = regmap_read(dev->regmap[1], reg, &value); + int ret = regmap_read(ksz_regmap_16(dev), reg, &value); if (ret) dev_err(dev->dev, "can't read 16bit reg: 0x%x %pe\n", reg, @@ -407,7 +427,7 @@ static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val) static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val) { unsigned int value; - int ret = regmap_read(dev->regmap[2], reg, &value); + int ret = regmap_read(ksz_regmap_32(dev), reg, &value); if (ret) dev_err(dev->dev, "can't read 32bit reg: 0x%x %pe\n", reg, @@ -422,7 +442,7 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val) u32 value[2]; int ret; - ret = regmap_bulk_read(dev->regmap[2], reg, value, 2); + ret = regmap_bulk_read(ksz_regmap_32(dev), reg, value, 2); if (ret) dev_err(dev->dev, "can't read 64bit reg: 0x%x %pe\n", reg, ERR_PTR(ret)); @@ -436,7 +456,7 @@ static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value) { int ret; - ret = regmap_write(dev->regmap[0], reg, value); + ret = regmap_write(ksz_regmap_8(dev), reg, value); if (ret) dev_err(dev->dev, "can't write 8bit reg: 0x%x %pe\n", reg, ERR_PTR(ret)); @@ -448,7 +468,7 @@ static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value) { int ret; - 
ret = regmap_write(dev->regmap[1], reg, value); + ret = regmap_write(ksz_regmap_16(dev), reg, value); if (ret) dev_err(dev->dev, "can't write 16bit reg: 0x%x %pe\n", reg, ERR_PTR(ret)); @@ -460,7 +480,7 @@ static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value) { int ret; - ret = regmap_write(dev->regmap[2], reg, value); + ret = regmap_write(ksz_regmap_32(dev), reg, value); if (ret) dev_err(dev->dev, "can't write 32bit reg: 0x%x %pe\n", reg, ERR_PTR(ret)); @@ -473,7 +493,7 @@ static inline int ksz_rmw16(struct ksz_device *dev, u32 reg, u16 mask, { int ret; - ret = regmap_update_bits(dev->regmap[1], reg, mask, value); + ret = regmap_update_bits(ksz_regmap_16(dev), reg, mask, value); if (ret) dev_err(dev->dev, "can't rmw 16bit reg 0x%x: %pe\n", reg, ERR_PTR(ret)); @@ -486,7 +506,7 @@ static inline int ksz_rmw32(struct ksz_device *dev, u32 reg, u32 mask, { int ret; - ret = regmap_update_bits(dev->regmap[2], reg, mask, value); + ret = regmap_update_bits(ksz_regmap_32(dev), reg, mask, value); if (ret) dev_err(dev->dev, "can't rmw 32bit reg 0x%x: %pe\n", reg, ERR_PTR(ret)); @@ -503,12 +523,19 @@ static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value) val[0] = swab32(value & 0xffffffffULL); val[1] = swab32(value >> 32ULL); - return regmap_bulk_write(dev->regmap[2], reg, val, 2); + return regmap_bulk_write(ksz_regmap_32(dev), reg, val, 2); } static inline int ksz_rmw8(struct ksz_device *dev, int offset, u8 mask, u8 val) { - return regmap_update_bits(dev->regmap[0], offset, mask, val); + int ret; + + ret = regmap_update_bits(ksz_regmap_8(dev), offset, mask, val); + if (ret) + dev_err(dev->dev, "can't rmw 8bit reg 0x%x: %pe\n", offset, + ERR_PTR(ret)); + + return ret; } static inline int ksz_pread8(struct ksz_device *dev, int port, int offset, @@ -549,12 +576,20 @@ static inline int ksz_pwrite32(struct ksz_device *dev, int port, int offset, data); } -static inline void ksz_prmw8(struct ksz_device *dev, int port, int offset, - u8 mask, u8 val) +static inline int ksz_prmw8(struct ksz_device *dev, int port, int offset, + u8 mask, u8 val) { - regmap_update_bits(dev->regmap[0], - dev->dev_ops->get_port_addr(port, offset), - mask, val); + int ret; + + ret = regmap_update_bits(ksz_regmap_8(dev), + dev->dev_ops->get_port_addr(port, offset), + mask, val); + if (ret) + dev_err(dev->dev, "can't rmw 8bit reg 0x%x: %pe\n", + dev->dev_ops->get_port_addr(port, offset), + ERR_PTR(ret)); + + return ret; } static inline void ksz_regmap_lock(void *__mtx) @@ -709,9 +744,9 @@ static inline int is_lan937x(struct ksz_device *dev) #define KSZ_REGMAP_TABLE(ksz, swp, regbits, regpad, regalign) \ static const struct regmap_config ksz##_regmap_config[] = { \ - KSZ_REGMAP_ENTRY(8, swp, (regbits), (regpad), (regalign)), \ - KSZ_REGMAP_ENTRY(16, swp, (regbits), (regpad), (regalign)), \ - KSZ_REGMAP_ENTRY(32, swp, (regbits), (regpad), (regalign)), \ + [KSZ_REGMAP_8] = KSZ_REGMAP_ENTRY(8, swp, (regbits), (regpad), (regalign)), \ + [KSZ_REGMAP_16] = KSZ_REGMAP_ENTRY(16, swp, (regbits), (regpad), (regalign)), \ + [KSZ_REGMAP_32] = KSZ_REGMAP_ENTRY(32, swp, (regbits), (regpad), (regalign)), \ } #endif diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c index 96c52e8fb51b6..279338451621b 100644 --- a/drivers/net/dsa/microchip/ksz_spi.c +++ b/drivers/net/dsa/microchip/ksz_spi.c @@ -63,7 +63,7 @@ static int ksz_spi_probe(struct spi_device *spi) else regmap_config = ksz9477_regmap_config; - for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) { + for (i = 0; i < 
__KSZ_NUM_REGMAPS; i++) { rc = regmap_config[i]; rc.lock_arg = &dev->regmap_mutex; rc.wr_table = chip->wr_table; diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c index 399a3905e6cad..b479a628b1ae5 100644 --- a/drivers/net/dsa/microchip/lan937x_main.c +++ b/drivers/net/dsa/microchip/lan937x_main.c @@ -20,13 +20,13 @@ static int lan937x_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) { - return regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0); + return regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0); } static int lan937x_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, bool set) { - return regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset), + return regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset), bits, set ? bits : 0); } @@ -86,7 +86,7 @@ static int lan937x_internal_phy_write(struct ksz_device *dev, int addr, int reg, if (ret < 0) return ret; - ret = regmap_read_poll_timeout(dev->regmap[1], REG_VPHY_IND_CTRL__2, + ret = regmap_read_poll_timeout(ksz_regmap_16(dev), REG_VPHY_IND_CTRL__2, value, !(value & VPHY_IND_BUSY), 10, 1000); if (ret < 0) { @@ -116,7 +116,7 @@ static int lan937x_internal_phy_read(struct ksz_device *dev, int addr, int reg, if (ret < 0) return ret; - ret = regmap_read_poll_timeout(dev->regmap[1], REG_VPHY_IND_CTRL__2, + ret = regmap_read_poll_timeout(ksz_regmap_16(dev), REG_VPHY_IND_CTRL__2, value, !(value & VPHY_IND_BUSY), 10, 1000); if (ret < 0) { diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 64a2f2f83735d..5bbe95fa951c2 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -841,29 +841,38 @@ static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port, } } +static int mv88e6xxx_mac_prepare(struct dsa_switch *ds, int port, + unsigned int mode, phy_interface_t interface) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err = 0; + + /* In inband mode, the link may come up at any time while the link + * is not forced down. Force the link down while we reconfigure the + * interface mode. + */ + if (mode == MLO_AN_INBAND && + chip->ports[port].interface != interface && + chip->info->ops->port_set_link) { + mv88e6xxx_reg_lock(chip); + err = chip->info->ops->port_set_link(chip, port, + LINK_FORCED_DOWN); + mv88e6xxx_reg_unlock(chip); + } + + return err; +} + static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state) { struct mv88e6xxx_chip *chip = ds->priv; - struct mv88e6xxx_port *p; int err = 0; - p = &chip->ports[port]; - mv88e6xxx_reg_lock(chip); if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) { - /* In inband mode, the link may come up at any time while the - * link is not forced down. Force the link down while we - * reconfigure the interface mode. 
- */ - if (mode == MLO_AN_INBAND && - p->interface != state->interface && - chip->info->ops->port_set_link) - chip->info->ops->port_set_link(chip, port, - LINK_FORCED_DOWN); - err = mv88e6xxx_port_config_interface(chip, port, state->interface); if (err && err != -EOPNOTSUPP) @@ -880,24 +889,38 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, err = 0; } +err_unlock: + mv88e6xxx_reg_unlock(chip); + + if (err && err != -EOPNOTSUPP) + dev_err(ds->dev, "p%d: failed to configure MAC/PCS\n", port); +} + +static int mv88e6xxx_mac_finish(struct dsa_switch *ds, int port, + unsigned int mode, phy_interface_t interface) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err = 0; + /* Undo the forced down state above after completing configuration * irrespective of its state on entry, which allows the link to come * up in the in-band case where there is no separate SERDES. Also * ensure that the link can come up if the PPU is in use and we are * in PHY mode (we treat the PPU as an effective in-band mechanism.) */ + mv88e6xxx_reg_lock(chip); + if (chip->info->ops->port_set_link && - ((mode == MLO_AN_INBAND && p->interface != state->interface) || + ((mode == MLO_AN_INBAND && + chip->ports[port].interface != interface) || (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port)))) - chip->info->ops->port_set_link(chip, port, LINK_UNFORCED); + err = chip->info->ops->port_set_link(chip, port, LINK_UNFORCED); - p->interface = state->interface; - -err_unlock: mv88e6xxx_reg_unlock(chip); - if (err && err != -EOPNOTSUPP) - dev_err(ds->dev, "p%d: failed to configure MAC/PCS\n", port); + chip->ports[port].interface = interface; + + return err; } static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port, @@ -7002,7 +7025,9 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_teardown = mv88e6xxx_port_teardown, .phylink_get_caps = mv88e6xxx_get_caps, .phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state, + .phylink_mac_prepare = mv88e6xxx_mac_prepare, .phylink_mac_config = mv88e6xxx_mac_config, + .phylink_mac_finish = mv88e6xxx_mac_finish, .phylink_mac_an_restart = mv88e6xxx_serdes_pcs_an_restart, .phylink_mac_link_down = mv88e6xxx_mac_link_down, .phylink_mac_link_up = mv88e6xxx_mac_link_up, diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index aec9d4fd20e36..d19b6303b91f0 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -276,7 +276,7 @@ /* Offset 0x10: Extended Port Control Command */ #define MV88E6393X_PORT_EPC_CMD 0x10 #define MV88E6393X_PORT_EPC_CMD_BUSY 0x8000 -#define MV88E6393X_PORT_EPC_CMD_WRITE 0x0300 +#define MV88E6393X_PORT_EPC_CMD_WRITE 0x3000 #define MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE 0x02 /* Offset 0x11: Extended Port Control Data */ diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index cfb3faeaa5bfa..030738fef60e5 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1021,7 +1021,6 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) for (port = 0; port < felix->info->num_ports; port++) { struct ocelot_port *ocelot_port = ocelot->ports[port]; struct phylink_pcs *phylink_pcs; - struct mdio_device *mdio_device; if (dsa_is_unused_port(felix->ds, port)) continue; @@ -1029,16 +1028,10 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL) continue; - mdio_device = mdio_device_create(felix->imdio, port); - if 
(IS_ERR(mdio_device)) + phylink_pcs = lynx_pcs_create_mdiodev(felix->imdio, port); + if (IS_ERR(phylink_pcs)) continue; - phylink_pcs = lynx_pcs_create(mdio_device); - if (!phylink_pcs) { - mdio_device_free(mdio_device); - continue; - } - felix->pcs[port] = phylink_pcs; dev_info(dev, "Found PCS at internal MDIO address %d\n", port); @@ -1054,14 +1047,9 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot) for (port = 0; port < ocelot->num_phys_ports; port++) { struct phylink_pcs *phylink_pcs = felix->pcs[port]; - struct mdio_device *mdio_device; - - if (!phylink_pcs) - continue; - mdio_device = lynx_get_mdio_device(phylink_pcs); - mdio_device_free(mdio_device); - lynx_pcs_destroy(phylink_pcs); + if (phylink_pcs) + lynx_pcs_destroy(phylink_pcs); } mdiobus_unregister(felix->imdio); mdiobus_free(felix->imdio); diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index 96d4972a62f0f..15003b2af264d 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -912,7 +912,6 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot) for (port = 0; port < felix->info->num_ports; port++) { struct ocelot_port *ocelot_port = ocelot->ports[port]; struct phylink_pcs *phylink_pcs; - struct mdio_device *mdio_device; int addr = port + 4; if (dsa_is_unused_port(felix->ds, port)) @@ -921,16 +920,10 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot) if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL) continue; - mdio_device = mdio_device_create(felix->imdio, addr); - if (IS_ERR(mdio_device)) + phylink_pcs = lynx_pcs_create_mdiodev(felix->imdio, addr); + if (IS_ERR(phylink_pcs)) continue; - phylink_pcs = lynx_pcs_create(mdio_device); - if (!phylink_pcs) { - mdio_device_free(mdio_device); - continue; - } - felix->pcs[port] = phylink_pcs; dev_info(dev, "Found PCS at internal MDIO address %d\n", addr); @@ -946,14 +939,9 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot) for (port = 0; port < ocelot->num_phys_ports; port++) { struct phylink_pcs *phylink_pcs = felix->pcs[port]; - struct mdio_device *mdio_device; - - if (!phylink_pcs) - continue; - mdio_device = lynx_get_mdio_device(phylink_pcs); - mdio_device_free(mdio_device); - lynx_pcs_destroy(phylink_pcs); + if (phylink_pcs) + lynx_pcs_destroy(phylink_pcs); } /* mdiobus_unregister and mdiobus_free handled by devres */ diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c index 919027cf20124..c37d2e5372302 100644 --- a/drivers/net/dsa/rzn1_a5psw.c +++ b/drivers/net/dsa/rzn1_a5psw.c @@ -120,6 +120,22 @@ static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable) a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable); } +static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable) +{ + u32 mask = A5PSW_PORT_ENA_TX(port); + u32 reg = enable ? mask : 0; + + /* Even though the port TX is disabled through TXENA bit in the + * PORT_ENA register, it can still send BPDUs. This depends on the tag + * configuration added when sending packets from the CPU port to the + * switch port. Indeed, when using forced forwarding without filtering, + * even disabled ports will be able to send packets that are tagged. + * This allows to implement STP support when ports are in a state where + * forwarding traffic should be stopped but BPDUs should still be sent. 
+ */ + a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg); +} + static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable) { u32 port_ena = 0; @@ -292,6 +308,22 @@ static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) return 0; } +static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn) +{ + u32 mask = A5PSW_INPUT_LEARN_DIS(port); + u32 reg = !learn ? mask : 0; + + a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg); +} + +static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block) +{ + u32 mask = A5PSW_INPUT_LEARN_BLOCK(port); + u32 reg = block ? mask : 0; + + a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg); +} + static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port, bool set) { @@ -308,6 +340,14 @@ static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port, a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports); } +static void a5psw_port_set_standalone(struct a5psw *a5psw, int port, + bool standalone) +{ + a5psw_port_learning_set(a5psw, port, !standalone); + a5psw_flooding_set_resolution(a5psw, port, !standalone); + a5psw_port_mgmtfwd_set(a5psw, port, standalone); +} + static int a5psw_port_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, bool *tx_fwd_offload, @@ -323,8 +363,7 @@ static int a5psw_port_bridge_join(struct dsa_switch *ds, int port, } a5psw->br_dev = bridge.dev; - a5psw_flooding_set_resolution(a5psw, port, true); - a5psw_port_mgmtfwd_set(a5psw, port, false); + a5psw_port_set_standalone(a5psw, port, false); return 0; } @@ -334,8 +373,7 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port, { struct a5psw *a5psw = ds->priv; - a5psw_flooding_set_resolution(a5psw, port, false); - a5psw_port_mgmtfwd_set(a5psw, port, true); + a5psw_port_set_standalone(a5psw, port, true); /* No more ports bridged */ if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT)) @@ -344,28 +382,35 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port, static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { - u32 mask = A5PSW_INPUT_LEARN_DIS(port) | A5PSW_INPUT_LEARN_BLOCK(port); + bool learning_enabled, rx_enabled, tx_enabled; struct a5psw *a5psw = ds->priv; - u32 reg = 0; switch (state) { case BR_STATE_DISABLED: case BR_STATE_BLOCKING: - reg |= A5PSW_INPUT_LEARN_DIS(port); - reg |= A5PSW_INPUT_LEARN_BLOCK(port); - break; case BR_STATE_LISTENING: - reg |= A5PSW_INPUT_LEARN_DIS(port); + rx_enabled = false; + tx_enabled = false; + learning_enabled = false; break; case BR_STATE_LEARNING: - reg |= A5PSW_INPUT_LEARN_BLOCK(port); + rx_enabled = false; + tx_enabled = false; + learning_enabled = true; break; case BR_STATE_FORWARDING: - default: + rx_enabled = true; + tx_enabled = true; + learning_enabled = true; break; + default: + dev_err(ds->dev, "invalid STP state: %d\n", state); + return; } - a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg); + a5psw_port_learning_set(a5psw, port, learning_enabled); + a5psw_port_rx_block_set(a5psw, port, !rx_enabled); + a5psw_port_tx_enable(a5psw, port, tx_enabled); } static void a5psw_port_fast_age(struct dsa_switch *ds, int port) @@ -673,7 +718,7 @@ static int a5psw_setup(struct dsa_switch *ds) } /* Configure management port */ - reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_DISCARD; + reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE; a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg); /* Set pattern 0 to forward all frame to mgmt port */ @@ -722,13 +767,15 @@ static int a5psw_setup(struct dsa_switch *ds) 
if (dsa_port_is_unused(dp)) continue; - /* Enable egress flooding for CPU port */ - if (dsa_port_is_cpu(dp)) + /* Enable egress flooding and learning for CPU port */ + if (dsa_port_is_cpu(dp)) { a5psw_flooding_set_resolution(a5psw, port, true); + a5psw_port_learning_set(a5psw, port, true); + } - /* Enable management forward only for user ports */ + /* Enable standalone mode for user ports */ if (dsa_port_is_user(dp)) - a5psw_port_mgmtfwd_set(a5psw, port, true); + a5psw_port_set_standalone(a5psw, port, true); } return 0; diff --git a/drivers/net/dsa/rzn1_a5psw.h b/drivers/net/dsa/rzn1_a5psw.h index c67abd49c013d..b869192eef3f7 100644 --- a/drivers/net/dsa/rzn1_a5psw.h +++ b/drivers/net/dsa/rzn1_a5psw.h @@ -19,6 +19,7 @@ #define A5PSW_PORT_OFFSET(port) (0x400 * (port)) #define A5PSW_PORT_ENA 0x8 +#define A5PSW_PORT_ENA_TX(port) BIT(port) #define A5PSW_PORT_ENA_RX_SHIFT 16 #define A5PSW_PORT_ENA_TX_RX(port) (BIT((port) + A5PSW_PORT_ENA_RX_SHIFT) | \ BIT(port)) @@ -36,7 +37,7 @@ #define A5PSW_INPUT_LEARN_BLOCK(p) BIT(p) #define A5PSW_MGMT_CFG 0x20 -#define A5PSW_MGMT_CFG_DISCARD BIT(7) +#define A5PSW_MGMT_CFG_ENABLE BIT(6) #define A5PSW_MODE_CFG 0x24 #define A5PSW_MODE_STATS_RESET BIT(31) diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index d2f4358cc5503..ba3e7aa1a28fb 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -66,8 +66,10 @@ static int max_interrupt_work = 20; #include #include #include - #include + +#include + #include #include diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c index 82f94b1635bf8..5267e9dcd87ef 100644 --- a/drivers/net/ethernet/3com/3c589_cs.c +++ b/drivers/net/ethernet/3com/3c589_cs.c @@ -195,6 +195,7 @@ static int tc589_probe(struct pcmcia_device *link) { struct el3_private *lp; struct net_device *dev; + int ret; dev_dbg(&link->dev, "3c589_attach()\n"); @@ -218,7 +219,15 @@ static int tc589_probe(struct pcmcia_device *link) dev->ethtool_ops = &netdev_ethtool_ops; - return tc589_config(link); + ret = tc589_config(link); + if (ret) + goto err_free_netdev; + + return 0; + +err_free_netdev: + free_netdev(dev); + return ret; } static void tc589_detach(struct pcmcia_device *link) diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index cb04a3071f92d..7d89ec1cf273d 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -50,6 +50,7 @@ static const char version2[] = #include #include #include +#include #include diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c index ae10b7de41e8f..22ca804b2e95d 100644 --- a/drivers/net/ethernet/8390/smc-ultra.c +++ b/drivers/net/ethernet/8390/smc-ultra.c @@ -64,6 +64,7 @@ static const char version[] = #include #include #include +#include #include #include diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c index 9a36667d00b65..ffd639477dfcb 100644 --- a/drivers/net/ethernet/8390/wd.c +++ b/drivers/net/ethernet/8390/wd.c @@ -35,6 +35,7 @@ static const char version[] = #include #include #include +#include #include diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 66e3af73ec41a..190ff1bcd94e1 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -1036,10 +1036,6 @@ static struct net_device_ops altera_tse_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -static void 
alt_tse_mac_an_restart(struct phylink_config *config) -{ -} - static void alt_tse_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { @@ -1096,7 +1092,6 @@ static struct phylink_pcs *alt_tse_select_pcs(struct phylink_config *config, } static const struct phylink_mac_ops alt_tse_phylink_ops = { - .mac_an_restart = alt_tse_mac_an_restart, .mac_config = alt_tse_mac_config, .mac_link_down = alt_tse_mac_link_down, .mac_link_up = alt_tse_mac_link_up, diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index 8971665a4b2ac..6cf38180cc019 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -59,6 +59,7 @@ static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@c #include #include #include +#include #include #include diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h index d820ae03a9660..0e244f0e25fd3 100644 --- a/drivers/net/ethernet/arc/emac.h +++ b/drivers/net/ethernet/arc/emac.h @@ -220,6 +220,6 @@ static inline void arc_reg_clr(struct arc_emac_priv *priv, int reg, int mask) int arc_mdio_probe(struct arc_emac_priv *priv); int arc_mdio_remove(struct arc_emac_priv *priv); int arc_emac_probe(struct net_device *ndev, int interface); -int arc_emac_remove(struct net_device *ndev); +void arc_emac_remove(struct net_device *ndev); #endif /* ARC_EMAC_H */ diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c index 800620b8f10d4..ce3147e886a10 100644 --- a/drivers/net/ethernet/arc/emac_arc.c +++ b/drivers/net/ethernet/arc/emac_arc.c @@ -61,11 +61,11 @@ static int emac_arc_probe(struct platform_device *pdev) static int emac_arc_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); - int err; - err = arc_emac_remove(ndev); + arc_emac_remove(ndev); free_netdev(ndev); - return err; + + return 0; } static const struct of_device_id emac_arc_dt_ids[] = { diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index ba0646b3b122e..2b427d8a18315 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -1008,7 +1008,7 @@ int arc_emac_probe(struct net_device *ndev, int interface) } EXPORT_SYMBOL_GPL(arc_emac_probe); -int arc_emac_remove(struct net_device *ndev) +void arc_emac_remove(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); @@ -1019,8 +1019,6 @@ int arc_emac_remove(struct net_device *ndev) if (!IS_ERR(priv->clk)) clk_disable_unprepare(priv->clk); - - return 0; } EXPORT_SYMBOL_GPL(arc_emac_remove); diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index 1c9ca3bcb8714..509101112279d 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -248,9 +248,8 @@ static int emac_rockchip_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct rockchip_priv_data *priv = netdev_priv(ndev); - int err; - err = arc_emac_remove(ndev); + arc_emac_remove(ndev); clk_disable_unprepare(priv->refclk); @@ -261,7 +260,7 @@ static int emac_rockchip_remove(struct platform_device *pdev) clk_disable_unprepare(priv->macclk); free_netdev(ndev); - return err; + return 0; } static struct platform_driver emac_rockchip_driver = { diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index f28ffc31df220..eca0c92c0c84d 100644 
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -3450,7 +3450,7 @@ static int bcmgenet_open(struct net_device *dev) return ret; } -static void bcmgenet_netif_stop(struct net_device *dev) +static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy) { struct bcmgenet_priv *priv = netdev_priv(dev); @@ -3465,6 +3465,8 @@ static void bcmgenet_netif_stop(struct net_device *dev) /* Disable MAC transmit. TX DMA disabled must be done before this */ umac_enable_set(priv, CMD_TX_EN, false); + if (stop_phy) + phy_stop(dev->phydev); bcmgenet_disable_rx_napi(priv); bcmgenet_intr_disable(priv); @@ -3485,7 +3487,7 @@ static int bcmgenet_close(struct net_device *dev) netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); - bcmgenet_netif_stop(dev); + bcmgenet_netif_stop(dev, false); /* Really kill the PHY state machine and disconnect from it */ phy_disconnect(dev->phydev); @@ -4303,7 +4305,7 @@ static int bcmgenet_suspend(struct device *d) netif_device_detach(dev); - bcmgenet_netif_stop(dev); + bcmgenet_netif_stop(dev, true); if (!device_may_wakeup(d)) phy_suspend(dev->phydev); diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 9ed3d1ab2ca58..285d3825cad30 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -719,12 +719,10 @@ static int cn23xx_setup_pf_mbox(struct octeon_device *oct) for (i = 0; i < oct->sriov_info.max_vfs; i++) { q_no = i * oct->sriov_info.rings_per_vf; - mbox = vmalloc(sizeof(*mbox)); + mbox = vzalloc(sizeof(*mbox)); if (!mbox) goto free_mbox; - memset(mbox, 0, sizeof(struct octeon_mbox)); - spin_lock_init(&mbox->lock); mbox->oct_dev = oct; diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c index fda49404968c3..b3bd2767d3ddf 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c @@ -279,12 +279,10 @@ static int cn23xx_setup_vf_mbox(struct octeon_device *oct) { struct octeon_mbox *mbox = NULL; - mbox = vmalloc(sizeof(*mbox)); + mbox = vzalloc(sizeof(*mbox)); if (!mbox) return 1; - memset(mbox, 0, sizeof(struct octeon_mbox)); - spin_lock_init(&mbox->lock); mbox->oct_dev = oct; diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 06a0c00af99c7..276c32c3926a7 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -72,6 +72,8 @@ #include #include +#include + #include #include #if ALLOW_DMA diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c index b1871e6c40069..cb70855e2b9ae 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c @@ -273,7 +273,7 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac, mac->pcs = lynx_pcs_create(mdiodev); if (!mac->pcs) { netdev_err(mac->net_dev, "lynx_pcs_create() failed\n"); - put_device(&mdiodev->dev); + mdio_device_free(mdiodev); return -ENOMEM; } @@ -286,10 +286,9 @@ static void dpaa2_pcs_destroy(struct dpaa2_mac *mac) if (phylink_pcs) { struct mdio_device *mdio = lynx_get_mdio_device(phylink_pcs); - struct device *dev = &mdio->dev; lynx_pcs_destroy(phylink_pcs); - put_device(dev); + mdio_device_free(mdio); mac->pcs = NULL; } } diff --git 
a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 7cd22d370caa3..1416262d42967 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -863,7 +863,6 @@ static int enetc_imdio_create(struct enetc_pf *pf) struct device *dev = &pf->si->pdev->dev; struct enetc_mdio_priv *mdio_priv; struct phylink_pcs *phylink_pcs; - struct mdio_device *mdio_device; struct mii_bus *bus; int err; @@ -889,17 +888,9 @@ static int enetc_imdio_create(struct enetc_pf *pf) goto free_mdio_bus; } - mdio_device = mdio_device_create(bus, 0); - if (IS_ERR(mdio_device)) { - err = PTR_ERR(mdio_device); - dev_err(dev, "cannot create mdio device (%d)\n", err); - goto unregister_mdiobus; - } - - phylink_pcs = lynx_pcs_create(mdio_device); - if (!phylink_pcs) { - mdio_device_free(mdio_device); - err = -ENOMEM; + phylink_pcs = lynx_pcs_create_mdiodev(bus, 0); + if (IS_ERR(phylink_pcs)) { + err = PTR_ERR(phylink_pcs); dev_err(dev, "cannot create lynx pcs (%d)\n", err); goto unregister_mdiobus; } @@ -918,13 +909,8 @@ static int enetc_imdio_create(struct enetc_pf *pf) static void enetc_imdio_remove(struct enetc_pf *pf) { - struct mdio_device *mdio_device; - - if (pf->pcs) { - mdio_device = lynx_get_mdio_device(pf->pcs); - mdio_device_free(mdio_device); + if (pf->pcs) lynx_pcs_destroy(pf->pcs); - } if (pf->imdio) { mdiobus_unregister(pf->imdio); mdiobus_free(pf->imdio); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index cd215ab20ff95..632bb4d589d76 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1011,24 +1011,6 @@ static void fec_enet_enable_ring(struct net_device *ndev) } } -static void fec_enet_reset_skb(struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - struct fec_enet_priv_tx_q *txq; - int i, j; - - for (i = 0; i < fep->num_tx_queues; i++) { - txq = fep->tx_queue[i]; - - for (j = 0; j < txq->bd.ring_size; j++) { - if (txq->tx_skbuff[j]) { - dev_kfree_skb_any(txq->tx_skbuff[j]); - txq->tx_skbuff[j] = NULL; - } - } - } -} - /* * This function is called to start or restart the FEC during a link * change, transmit timeout, or to reconfigure the FEC. The network @@ -1071,9 +1053,6 @@ fec_restart(struct net_device *ndev) fec_enet_enable_ring(ndev); - /* Reset tx SKB buffers. */ - fec_enet_reset_skb(ndev); - /* Enable MII mode */ if (fep->full_duplex == DUPLEX_FULL) { /* FD enable */ @@ -3798,7 +3777,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free < MAX_SKB_FRAGS + 1) { netdev_err(fep->netdev, "NOT enough BD for SG!\n"); - xdp_return_frame(frame); return -EBUSY; } @@ -3835,6 +3813,11 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, index = fec_enet_get_bd_index(last_bdp, &txq->bd); txq->tx_skbuff[index] = NULL; + /* Make sure the updates to rest of the descriptor are performed before + * transferring ownership. + */ + dma_wmb(); + /* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end. */ @@ -3844,8 +3827,14 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, /* If this was the last BD in the ring, start at the beginning again. */ bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); + /* Make sure the update to bdp are performed before txq->bd.cur. 
*/ + dma_wmb(); + txq->bd.cur = bdp; + /* Trigger transmission start */ + writel(0, txq->bd.reg_desc_active); + return 0; } @@ -3874,12 +3863,6 @@ static int fec_enet_xdp_xmit(struct net_device *dev, sent_frames++; } - /* Make sure the update to bdp and tx_skbuff are performed. */ - wmb(); - - /* Trigger transmission start */ - writel(0, txq->bd.reg_desc_active); - __netif_tx_unlock(nq); return sent_frames; @@ -4031,6 +4014,11 @@ static int fec_enet_init(struct net_device *ndev) ndev->hw_features = ndev->features; + if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME)) + ndev->xdp_features = NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT; + fec_restart(ndev); if (fep->quirks & FEC_QUIRK_MIB_CLEAR) @@ -4478,9 +4466,11 @@ fec_drv_remove(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; int ret; - ret = pm_runtime_resume_and_get(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) - return ret; + dev_err(&pdev->dev, + "Failed to resume device in remove callback (%pe)\n", + ERR_PTR(ret)); cancel_work_sync(&fep->tx_timeout_work); fec_ptp_stop(pdev); @@ -4493,8 +4483,13 @@ fec_drv_remove(struct platform_device *pdev) of_phy_deregister_fixed_link(np); of_node_put(fep->phy_node); - clk_disable_unprepare(fep->clk_ahb); - clk_disable_unprepare(fep->clk_ipg); + /* After pm_runtime_get_sync() failed, the clks are still off, so skip + * disabling them again. + */ + if (ret >= 0) { + clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_ipg); + } pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index caa00c72aeeba..8fb70db63b8b8 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -31,6 +31,7 @@ // Minimum amount of time between queue kicks in msec (10 seconds) #define MIN_TX_TIMEOUT_GAP (1000 * 10) +#define DQO_TX_MAX 0x3FFFF const char gve_version_str[] = GVE_VERSION; static const char gve_version_prefix[] = GVE_VERSION_PREFIX; @@ -2047,6 +2048,10 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) goto err; } + /* Big TCP is only supported on DQ*/ + if (!gve_is_gqi(priv)) + netif_set_tso_max_size(priv->dev, DQO_TX_MAX); + priv->num_registered_pages = 0; priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK; /* gvnic has one Notification Block per MSI-x vector, except for the diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index b76143bfd5947..3c09e66ba1ab1 100644 --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -8,6 +8,7 @@ #include "gve_adminq.h" #include "gve_utils.h" #include "gve_dqo.h" +#include #include #include #include @@ -646,6 +647,9 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx, goto drop; } + if (unlikely(ipv6_hopopt_jumbo_remove(skb))) + goto drop; + num_buffer_descs = gve_num_buffer_descs_needed(skb); } else { num_buffer_descs = gve_num_buffer_descs_needed(skb); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c index cbbab5b2b402b..b85c412683ddc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -331,9 +331,25 @@ static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw) return head == 
hw->cmq.csq.next_to_use; } -static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, +static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout) +{ + static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = { + {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS}, + }; + u32 i; + + for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout_map); i++) + if (cmdq_tx_timeout_map[i].opcode == opcode) + return cmdq_tx_timeout_map[i].tx_timeout; + + return tx_timeout; +} + +static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, u16 opcode, bool *is_completed) { + u32 cmdq_tx_timeout = hclge_get_cmdq_tx_timeout(opcode, + hw->cmq.tx_timeout); u32 timeout = 0; do { @@ -343,7 +359,7 @@ static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, } udelay(1); timeout++; - } while (timeout < hw->cmq.tx_timeout); + } while (timeout < cmdq_tx_timeout); } static int hclge_comm_cmd_convert_err_code(u16 desc_ret) @@ -407,7 +423,8 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw, * if multi descriptors to be sent, use the first one to check */ if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag))) - hclge_comm_wait_for_resp(hw, &is_completed); + hclge_comm_wait_for_resp(hw, le16_to_cpu(desc->opcode), + &is_completed); if (!is_completed) ret = -EBADE; @@ -529,7 +546,7 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw) cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM; /* Setup Tx write back timeout */ - cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT; + cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT; /* Setup queue rings */ ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h index de72ecbfd5ad8..18f1b4bf362da 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h @@ -54,7 +54,8 @@ #define HCLGE_COMM_NIC_SW_RST_RDY BIT(HCLGE_COMM_NIC_SW_RST_RDY_B) #define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3 #define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024 -#define HCLGE_COMM_CMDQ_TX_TIMEOUT 30000 +#define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT 30000 +#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS 500000 enum hclge_opcode_type { /* Generic commands */ @@ -360,6 +361,11 @@ struct hclge_comm_caps_bit_map { u16 local_bit; }; +struct hclge_cmdq_tx_timeout_map { + u32 opcode; + u32 tx_timeout; +}; + struct hclge_comm_firmware_compat_cmd { __le32 compat; u8 rsv[20]; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 4c3e90a1c4d07..d385ffc218766 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -130,7 +130,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = { .name = "tx_bd_queue", .cmd = HNAE3_DBG_CMD_TX_BD, .dentry = HNS3_DBG_DENTRY_TX_BD, - .buf_len = HNS3_DBG_READ_LEN_4MB, + .buf_len = HNS3_DBG_READ_LEN_5MB, .init = hns3_dbg_bd_file_init, }, { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h index 97578eabb7d8b..4a5ef8a90a104 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h @@ -10,6 +10,7 @@ #define HNS3_DBG_READ_LEN_128KB 0x20000 #define HNS3_DBG_READ_LEN_1MB 0x100000 #define HNS3_DBG_READ_LEN_4MB 0x400000 +#define HNS3_DBG_READ_LEN_5MB 0x500000 
#define HNS3_DBG_WRITE_LEN 1024 #define HNS3_DBG_DATA_STR_LEN 32 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 4fb5406c1951d..2689b108f7df7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -8053,12 +8053,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle) /* If it is not PF reset or FLR, the firmware will disable the MAC, * so it only need to stop phy here. */ - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && - hdev->reset_type != HNAE3_FUNC_RESET && - hdev->reset_type != HNAE3_FLR_RESET) { - hclge_mac_stop_phy(hdev); - hclge_update_link_status(hdev); - return; + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { + hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE, + HCLGE_PFC_DISABLE); + if (hdev->reset_type != HNAE3_FUNC_RESET && + hdev->reset_type != HNAE3_FLR_RESET) { + hclge_mac_stop_phy(hdev); + hclge_update_link_status(hdev); + return; + } } hclge_reset_tqp(handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 4a33f65190e2b..922c0da3660c7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -171,8 +171,8 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, - u8 pfc_bitmap) +int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, + u8 pfc_bitmap) { struct hclge_desc desc; struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 68f28a98e380b..dd6f1fd486cf2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -164,6 +164,9 @@ struct hclge_bp_to_qs_map_cmd { u32 rsvd1; }; +#define HCLGE_PFC_DISABLE 0 +#define HCLGE_PFC_TX_RX_DISABLE 0 + struct hclge_pfc_en_cmd { u8 tx_rx_en_bitmap; u8 pri_en_bitmap; @@ -235,6 +238,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); void hclge_tm_pfc_info_update(struct hclge_dev *hdev); int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); int hclge_tm_init_hw(struct hclge_dev *hdev, bool init); +int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, + u8 pfc_bitmap); int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index f240462503419..dd08989a4c7c1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1436,7 +1436,10 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev) * might happen in case reset assertion was made by PF. Yes, this also * means we might end up waiting bit more even for VF reset. 
*/ - msleep(5000); + if (hdev->reset_type == HNAE3_VF_FULL_RESET) + msleep(5000); + else + msleep(500); return 0; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index bd7ef59b1f2e7..771a3c909c459 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4198,7 +4198,7 @@ void e1000e_reset(struct e1000_adapter *adapter) /** * e1000e_trigger_lsc - trigger an LSC interrupt - * @adapter: + * @adapter: board private structure * * Fire a link status change interrupt to start the watchdog. **/ diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 9afbbdac35903..7c0578b5457b9 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -2238,11 +2238,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, iavf_process_config(adapter); adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; - /* Request VLAN offload settings */ - if (VLAN_V2_ALLOWED(adapter)) - iavf_set_vlan_offload_features(adapter, 0, - netdev->features); - iavf_set_queue_vlan_tag_loc(adapter); was_mac_changed = !ether_addr_equal(netdev->dev_addr, diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 5d89392f969bf..817977e3039d5 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -18,6 +18,7 @@ ice-y := ice_main.o \ ice_txrx_lib.o \ ice_txrx.o \ ice_fltr.o \ + ice_irq.o \ ice_pf_vsi_vlan_ops.o \ ice_vsi_vlan_ops.o \ ice_vsi_vlan_lib.o \ diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index aa32111afd6ed..b4bca1d964a94 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -74,6 +75,7 @@ #include "ice_lag.h" #include "ice_vsi_vlan_ops.h" #include "ice_gnss.h" +#include "ice_irq.h" #define ICE_BAR0 0 #define ICE_REQ_DESC_MULTIPLE 32 @@ -103,11 +105,6 @@ #define ICE_Q_WAIT_RETRY_LIMIT 10 #define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT) #define ICE_MAX_LG_RSS_QS 256 -#define ICE_RES_VALID_BIT 0x8000 -#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) -#define ICE_RES_RDMA_VEC_ID (ICE_RES_MISC_VEC_ID - 1) -/* All VF control VSIs share the same IRQ, so assign a unique ID for them */ -#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1) #define ICE_INVAL_Q_INDEX 0xffff #define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */ @@ -245,12 +242,6 @@ struct ice_tc_cfg { struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS]; }; -struct ice_res_tracker { - u16 num_entries; - u16 end; - u16 list[]; -}; - struct ice_qs_cfg { struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */ unsigned long *pf_map; @@ -348,7 +339,9 @@ struct ice_vsi { u32 rx_buf_failed; u32 rx_page_failed; u16 num_q_vectors; - u16 base_vector; /* IRQ base for OS reserved vectors */ + /* tell if only dynamic irq allocation is allowed */ + bool irq_dyn_alloc; + enum ice_vsi_type type; u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ @@ -479,6 +472,7 @@ struct ice_q_vector { char name[ICE_INT_NAME_STR_LEN]; u16 total_events; /* net_dim(): number of interrupts processed */ + struct msi_map irq; } ____cacheline_internodealigned_in_smp; enum ice_pf_flags { @@ -539,7 +533,7 @@ struct ice_pf { /* OS 
reserved IRQ details */ struct msix_entry *msix_entries; - struct ice_res_tracker *irq_tracker; + struct ice_irq_tracker irq_tracker; /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the * number of MSIX vectors needed for all SR-IOV VFs from the number of * MSIX vectors allowed on this PF. @@ -583,8 +577,7 @@ struct ice_pf { u32 hw_csum_rx_error; u32 oicr_err_reg; - u16 oicr_idx; /* Other interrupt cause MSIX vector index */ - u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */ + struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */ u16 max_pf_txqs; /* Total Tx queues PF wide */ u16 max_pf_rxqs; /* Total Rx queues PF wide */ u16 num_lan_msix; /* Total MSIX vectors for base driver */ @@ -670,7 +663,7 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, struct ice_q_vector *q_vector) { u32 vector = (vsi && q_vector) ? q_vector->reg_idx : - ((struct ice_pf *)hw->back)->oicr_idx; + ((struct ice_pf *)hw->back)->oicr_irq.index; int itr = ICE_ITR_NONE; u32 val; @@ -821,25 +814,6 @@ static inline bool ice_is_switchdev_running(struct ice_pf *pf) return pf->switchdev.is_running; } -/** - * ice_set_sriov_cap - enable SRIOV in PF flags - * @pf: PF struct - */ -static inline void ice_set_sriov_cap(struct ice_pf *pf) -{ - if (pf->hw.func_caps.common_cap.sr_iov_1_1) - set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); -} - -/** - * ice_clear_sriov_cap - disable SRIOV in PF flags - * @pf: PF struct - */ -static inline void ice_clear_sriov_cap(struct ice_pf *pf) -{ - clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); -} - #define ICE_FD_STAT_CTR_BLOCK_COUNT 256 #define ICE_FD_STAT_PF_IDX(base_idx) \ ((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 838d9b274d685..63d3e1dcbba55 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1087,7 +1087,7 @@ struct ice_aqc_get_phy_caps { #define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2) #define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3) #define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4) -#define ICE_PHY_TYPE_HIGH_MAX_INDEX 5 +#define ICE_PHY_TYPE_HIGH_MAX_INDEX 4 struct ice_aqc_get_phy_caps_data { __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index fba178e076009..cca0e753f38ff 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -596,7 +596,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) { struct net_device *netdev; struct ice_pf *pf; - int base_idx, i; + int i; if (!vsi || vsi->type != ICE_VSI_PF) return 0; @@ -613,10 +613,9 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) if (unlikely(!netdev->rx_cpu_rmap)) return -EINVAL; - base_idx = vsi->base_vector; ice_for_each_q_vector(vsi, i) if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, - pf->msix_entries[base_idx + i].vector)) { + vsi->q_vectors[i]->irq.virq)) { ice_free_cpu_rx_rmap(vsi); return -EINVAL; } diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 1911d644dfa8d..4a12316f7b464 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -103,10 +103,10 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) { struct ice_pf *pf = vsi->back; struct ice_q_vector *q_vector; + int err; /* allocate q_vector */ - q_vector = 
devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector), - GFP_KERNEL); + q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); if (!q_vector) return -ENOMEM; @@ -118,9 +118,34 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) q_vector->rx.itr_mode = ITR_DYNAMIC; q_vector->tx.type = ICE_TX_CONTAINER; q_vector->rx.type = ICE_RX_CONTAINER; + q_vector->irq.index = -ENOENT; - if (vsi->type == ICE_VSI_VF) + if (vsi->type == ICE_VSI_VF) { + q_vector->reg_idx = ice_calc_vf_reg_idx(vsi->vf, q_vector); goto out; + } else if (vsi->type == ICE_VSI_CTRL && vsi->vf) { + struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi); + + if (ctrl_vsi) { + if (unlikely(!ctrl_vsi->q_vectors)) { + err = -ENOENT; + goto err_free_q_vector; + } + + q_vector->irq = ctrl_vsi->q_vectors[0]->irq; + goto skip_alloc; + } + } + + q_vector->irq = ice_alloc_irq(pf, vsi->irq_dyn_alloc); + if (q_vector->irq.index < 0) { + err = -ENOMEM; + goto err_free_q_vector; + } + +skip_alloc: + q_vector->reg_idx = q_vector->irq.index; + /* only set affinity_mask if the CPU is online */ if (cpu_online(v_idx)) cpumask_set_cpu(v_idx, &q_vector->affinity_mask); @@ -137,6 +162,11 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) vsi->q_vectors[v_idx] = q_vector; return 0; + +err_free_q_vector: + kfree(q_vector); + + return err; } /** @@ -168,7 +198,19 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) if (vsi->netdev) netif_napi_del(&q_vector->napi); - devm_kfree(dev, q_vector); + /* release MSIX interrupt if q_vector had interrupt allocated */ + if (q_vector->irq.index < 0) + goto free_q_vector; + + /* only free last VF ctrl vsi interrupt */ + if (vsi->type == ICE_VSI_CTRL && vsi->vf && + ice_get_vf_ctrl_vsi(pf, vsi)) + goto free_q_vector; + + ice_free_irq(pf, q_vector->irq); + +free_q_vector: + kfree(q_vector); vsi->q_vectors[v_idx] = NULL; } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index c6d4926f0fcf5..850db8e0e6b00 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -932,10 +932,9 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN || first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) || skb->priority != TC_PRIO_CONTROL) { - first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M; + first->vid &= ~VLAN_PRIO_MASK; /* Mask the lower 3 bits to set the 802.1p priority */ - first->tx_flags |= (skb->priority & 0x7) << - ICE_TX_FLAGS_VLAN_PR_S; + first->vid |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; /* if this is not already set it means a VLAN 0 + priority needs * to be offloaded */ diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index f6dd3f8fd936e..ad0a007b73988 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -10,16 +10,15 @@ #include "ice_tc_lib.h" /** - * ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC + * ice_eswitch_add_vf_sp_rule - add adv rule with VF's VSI index * @pf: pointer to PF struct * @vf: pointer to VF struct - * @mac: VF's MAC address * * This function adds advanced rule that forwards packets with - * VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue. + * VF's VSI index to the corresponding switchdev ctrl VSI queue. 
*/ -int -ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac) +static int +ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf) { struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; struct ice_adv_rule_info rule_info = { 0 }; @@ -32,76 +31,41 @@ ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac) if (!list) return -ENOMEM; - list[0].type = ICE_MAC_OFOS; - ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac); - eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr); + ice_rule_add_src_vsi_metadata(list); - rule_info.sw_act.flag |= ICE_FLTR_TX; + rule_info.sw_act.flag = ICE_FLTR_TX; rule_info.sw_act.vsi_handle = ctrl_vsi->idx; rule_info.sw_act.fltr_act = ICE_FWD_TO_Q; - rule_info.rx = false; rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id + ctrl_vsi->rxq_map[vf->vf_id]; rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE; rule_info.flags_info.act_valid = true; rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN; + rule_info.src_vsi = vf->lan_vsi_idx; err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, - vf->repr->mac_rule); + &vf->repr->sp_rule); if (err) - dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d", + dev_err(ice_pf_to_dev(pf), "Unable to add VF slow-path rule in switchdev mode for VF %d", vf->vf_id); - else - vf->repr->rule_added = true; kfree(list); return err; } /** - * ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC - * @vf: pointer to vF struct - * - * This function replays VF's MAC rule after reset. - */ -void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) -{ - int err; - - if (!ice_is_switchdev_running(vf->pf)) - return; - - if (is_valid_ether_addr(vf->hw_lan_addr)) { - err = ice_eswitch_add_vf_mac_rule(vf->pf, vf, - vf->hw_lan_addr); - if (err) { - dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d\n, error %d\n", - vf->hw_lan_addr, vf->vf_id, err); - return; - } - vf->num_mac++; - - ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr); - } -} - -/** - * ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC + * ice_eswitch_del_vf_sp_rule - delete adv rule with VF's VSI index * @vf: pointer to the VF struct * - * Delete the advanced rule that was used to forward packets with the VF's MAC - * address (src MAC) to the corresponding switchdev ctrl VSI queue. + * Delete the advanced rule that was used to forward packets with the VF's VSI + * index to the corresponding switchdev ctrl VSI queue. 
*/ -void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) +static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf) { - if (!ice_is_switchdev_running(vf->pf)) - return; - - if (!vf->repr->rule_added) + if (!vf->repr) return; - ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule); - vf->repr->rule_added = false; + ice_rem_adv_rule_by_id(&vf->pf->hw, &vf->repr->sp_rule); } /** @@ -237,6 +201,7 @@ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi) ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); metadata_dst_free(vf->repr->dst); vf->repr->dst = NULL; + ice_eswitch_del_vf_sp_rule(vf); ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI); @@ -264,25 +229,30 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf) vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL); if (!vf->repr->dst) { - ice_fltr_add_mac_and_broadcast(vsi, - vf->hw_lan_addr, + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, + ICE_FWD_TO_VSI); + goto err; + } + + if (ice_eswitch_add_vf_sp_rule(pf, vf)) { + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI); goto err; } if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) { - ice_fltr_add_mac_and_broadcast(vsi, - vf->hw_lan_addr, + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI); + ice_eswitch_del_vf_sp_rule(vf); metadata_dst_free(vf->repr->dst); vf->repr->dst = NULL; goto err; } if (ice_vsi_add_vlan_zero(vsi)) { - ice_fltr_add_mac_and_broadcast(vsi, - vf->hw_lan_addr, + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI); + ice_eswitch_del_vf_sp_rule(vf); metadata_dst_free(vf->repr->dst); vf->repr->dst = NULL; ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index 6a413331572b6..b18bf83a2f5b2 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -20,11 +20,6 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf); void ice_eswitch_update_repr(struct ice_vsi *vsi); void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf); -int -ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, - const u8 *mac); -void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf); -void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf); void ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off); @@ -34,15 +29,6 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev); static inline void ice_eswitch_release(struct ice_pf *pf) { } static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { } -static inline void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) { } -static inline void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) { } - -static inline int -ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, - const u8 *mac) -{ - return -EOPNOTSUPP; -} static inline void ice_eswitch_set_target_vsi(struct sk_buff *skb, diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index f86e814354a31..8d5cbbd0b3d5a 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -4,6 +4,7 @@ /* ethtool support for ice */ #include "ice.h" +#include "ice_ethtool.h" #include "ice_flow.h" #include "ice_fltr.h" #include "ice_lib.h" @@ -956,7 +957,7 @@ static u64 ice_intr_test(struct net_device *netdev) netdev_info(netdev, "interrupt test\n"); 
- wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_idx), + wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_irq.index), GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M | GLINT_DYN_CTL_SWINT_TRIG_M); @@ -1658,15 +1659,26 @@ ice_mask_min_supported_speeds(struct ice_hw *hw, *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G; } -#define ice_ethtool_advertise_link_mode(aq_link_speed, ethtool_link_mode) \ - do { \ - if (req_speeds & (aq_link_speed) || \ - (!req_speeds && \ - (advert_phy_type_lo & phy_type_mask_lo || \ - advert_phy_type_hi & phy_type_mask_hi))) \ - ethtool_link_ksettings_add_link_mode(ks, advertising,\ - ethtool_link_mode); \ - } while (0) +/** + * ice_linkmode_set_bit - set link mode bit + * @phy_to_ethtool: PHY type to ethtool link mode struct to set + * @ks: ethtool link ksettings struct to fill out + * @req_speeds: speed requested by user + * @advert_phy_type: advertised PHY type + * @phy_type: PHY type + */ +static void +ice_linkmode_set_bit(const struct ice_phy_type_to_ethtool *phy_to_ethtool, + struct ethtool_link_ksettings *ks, u32 req_speeds, + u64 advert_phy_type, u32 phy_type) +{ + linkmode_set_bit(phy_to_ethtool->link_mode, ks->link_modes.supported); + + if (req_speeds & phy_to_ethtool->aq_link_speed || + (!req_speeds && advert_phy_type & BIT(phy_type))) + linkmode_set_bit(phy_to_ethtool->link_mode, + ks->link_modes.advertising); +} /** * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes @@ -1682,11 +1694,10 @@ ice_phy_type_to_ethtool(struct net_device *netdev, struct ice_pf *pf = vsi->back; u64 advert_phy_type_lo = 0; u64 advert_phy_type_hi = 0; - u64 phy_type_mask_lo = 0; - u64 phy_type_mask_hi = 0; u64 phy_types_high = 0; u64 phy_types_low = 0; - u16 req_speeds; + u32 req_speeds; + u32 i; req_speeds = vsi->port_info->phy.link_info.req_speeds; @@ -1743,272 +1754,22 @@ ice_phy_type_to_ethtool(struct net_device *netdev, advert_phy_type_hi = vsi->port_info->phy.phy_type_high; } - ethtool_link_ksettings_zero_link_mode(ks, supported); - ethtool_link_ksettings_zero_link_mode(ks, advertising); - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_100BASE_TX | - ICE_PHY_TYPE_LOW_100M_SGMII; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 100baseT_Full); - - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100MB, - 100baseT_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_T | - ICE_PHY_TYPE_LOW_1G_SGMII; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 1000baseT_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB, - 1000baseT_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_KX; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 1000baseKX_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB, - 1000baseKX_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_SX | - ICE_PHY_TYPE_LOW_1000BASE_LX; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 1000baseX_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB, - 1000baseX_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_T; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 2500baseT_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB, - 2500baseT_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_X | - ICE_PHY_TYPE_LOW_2500BASE_KX; - if (phy_types_low & phy_type_mask_lo) { - 
ethtool_link_ksettings_add_link_mode(ks, supported, - 2500baseX_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB, - 2500baseX_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_5GBASE_T | - ICE_PHY_TYPE_LOW_5GBASE_KR; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 5000baseT_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_5GB, - 5000baseT_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_T | - ICE_PHY_TYPE_LOW_10G_SFI_DA | - ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | - ICE_PHY_TYPE_LOW_10G_SFI_C2C; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 10000baseT_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, - 10000baseT_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_KR_CR1; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 10000baseKR_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, - 10000baseKR_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_SR; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 10000baseSR_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, - 10000baseSR_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_LR; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 10000baseLR_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, - 10000baseLR_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_T | - ICE_PHY_TYPE_LOW_25GBASE_CR | - ICE_PHY_TYPE_LOW_25GBASE_CR_S | - ICE_PHY_TYPE_LOW_25GBASE_CR1 | - ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC | - ICE_PHY_TYPE_LOW_25G_AUI_C2C; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 25000baseCR_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB, - 25000baseCR_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_SR | - ICE_PHY_TYPE_LOW_25GBASE_LR; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 25000baseSR_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB, - 25000baseSR_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_KR | - ICE_PHY_TYPE_LOW_25GBASE_KR_S | - ICE_PHY_TYPE_LOW_25GBASE_KR1; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 25000baseKR_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB, - 25000baseKR_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_KR4; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 40000baseKR4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, - 40000baseKR4_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_CR4 | - ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC | - ICE_PHY_TYPE_LOW_40G_XLAUI; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 40000baseCR4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, - 40000baseCR4_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_SR4; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 40000baseSR4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, - 40000baseSR4_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_LR4; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 
40000baseLR4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, - 40000baseLR4_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_CR2 | - ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC | - ICE_PHY_TYPE_LOW_50G_LAUI2 | - ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC | - ICE_PHY_TYPE_LOW_50G_AUI2 | - ICE_PHY_TYPE_LOW_50GBASE_CP | - ICE_PHY_TYPE_LOW_50GBASE_SR | - ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC | - ICE_PHY_TYPE_LOW_50G_AUI1; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 50000baseCR2_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB, - 50000baseCR2_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_KR2 | - ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 50000baseKR2_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB, - 50000baseKR2_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_SR2 | - ICE_PHY_TYPE_LOW_50GBASE_LR2 | - ICE_PHY_TYPE_LOW_50GBASE_FR | - ICE_PHY_TYPE_LOW_50GBASE_LR; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 50000baseSR2_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB, - 50000baseSR2_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_CR4 | - ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | - ICE_PHY_TYPE_LOW_100G_CAUI4 | - ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | - ICE_PHY_TYPE_LOW_100G_AUI4 | - ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4; - phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | - ICE_PHY_TYPE_HIGH_100G_CAUI2 | - ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | - ICE_PHY_TYPE_HIGH_100G_AUI2; - if (phy_types_low & phy_type_mask_lo || - phy_types_high & phy_type_mask_hi) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 100000baseCR4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, - 100000baseCR4_Full); - } - - if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 100000baseCR2_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, - 100000baseCR2_Full); - } - - if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 100000baseSR4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, - 100000baseSR4_Full); - } - - if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 100000baseSR2_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, - 100000baseSR2_Full); - } - - phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 | - ICE_PHY_TYPE_LOW_100GBASE_DR; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 100000baseLR4_ER4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, - 100000baseLR4_ER4_Full); - } + linkmode_zero(ks->link_modes.supported); + linkmode_zero(ks->link_modes.advertising); - phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 | - ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4; - if (phy_types_low & phy_type_mask_lo) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 100000baseKR4_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, - 100000baseKR4_Full); + for (i = 0; i < BITS_PER_TYPE(u64); i++) { + if (phy_types_low & BIT_ULL(i)) + ice_linkmode_set_bit(&phy_type_low_lkup[i], ks, + req_speeds, advert_phy_type_lo, + i); } - if (phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) { - ethtool_link_ksettings_add_link_mode(ks, supported, - 
100000baseKR2_Full); - ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, - 100000baseKR2_Full); + for (i = 0; i < BITS_PER_TYPE(u64); i++) { + if (phy_types_high & BIT_ULL(i)) + ice_linkmode_set_bit(&phy_type_high_lkup[i], ks, + req_speeds, advert_phy_type_hi, + i); } - } #define TEST_SET_BITS_TIMEOUT 50 diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h new file mode 100644 index 0000000000000..b403ee79cd5e4 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _ICE_ETHTOOL_H_ +#define _ICE_ETHTOOL_H_ + +struct ice_phy_type_to_ethtool { + u64 aq_link_speed; + u8 link_mode; +}; + +/* Macro to make PHY type to Ethtool link mode table entry. + * The index is the PHY type. + */ +#define ICE_PHY_TYPE(LINK_SPEED, ETHTOOL_LINK_MODE) {\ + .aq_link_speed = ICE_AQ_LINK_SPEED_##LINK_SPEED, \ + .link_mode = ETHTOOL_LINK_MODE_##ETHTOOL_LINK_MODE##_BIT, \ +} + +/* Lookup table mapping PHY type low to link speed and Ethtool link modes. + * Array index corresponds to HW PHY type bit, see + * ice_adminq_cmd.h:ICE_PHY_TYPE_LOW_*. + */ +static const struct ice_phy_type_to_ethtool +phy_type_low_lkup[] = { + [0] = ICE_PHY_TYPE(100MB, 100baseT_Full), + [1] = ICE_PHY_TYPE(100MB, 100baseT_Full), + [2] = ICE_PHY_TYPE(1000MB, 1000baseT_Full), + [3] = ICE_PHY_TYPE(1000MB, 1000baseX_Full), + [4] = ICE_PHY_TYPE(1000MB, 1000baseX_Full), + [5] = ICE_PHY_TYPE(1000MB, 1000baseKX_Full), + [6] = ICE_PHY_TYPE(1000MB, 1000baseT_Full), + [7] = ICE_PHY_TYPE(2500MB, 2500baseT_Full), + [8] = ICE_PHY_TYPE(2500MB, 2500baseX_Full), + [9] = ICE_PHY_TYPE(2500MB, 2500baseX_Full), + [10] = ICE_PHY_TYPE(5GB, 5000baseT_Full), + [11] = ICE_PHY_TYPE(5GB, 5000baseT_Full), + [12] = ICE_PHY_TYPE(10GB, 10000baseT_Full), + [13] = ICE_PHY_TYPE(10GB, 10000baseCR_Full), + [14] = ICE_PHY_TYPE(10GB, 10000baseSR_Full), + [15] = ICE_PHY_TYPE(10GB, 10000baseLR_Full), + [16] = ICE_PHY_TYPE(10GB, 10000baseKR_Full), + [17] = ICE_PHY_TYPE(10GB, 10000baseCR_Full), + [18] = ICE_PHY_TYPE(10GB, 10000baseKR_Full), + [19] = ICE_PHY_TYPE(25GB, 25000baseCR_Full), + [20] = ICE_PHY_TYPE(25GB, 25000baseCR_Full), + [21] = ICE_PHY_TYPE(25GB, 25000baseCR_Full), + [22] = ICE_PHY_TYPE(25GB, 25000baseCR_Full), + [23] = ICE_PHY_TYPE(25GB, 25000baseSR_Full), + [24] = ICE_PHY_TYPE(25GB, 25000baseSR_Full), + [25] = ICE_PHY_TYPE(25GB, 25000baseKR_Full), + [26] = ICE_PHY_TYPE(25GB, 25000baseKR_Full), + [27] = ICE_PHY_TYPE(25GB, 25000baseKR_Full), + [28] = ICE_PHY_TYPE(25GB, 25000baseSR_Full), + [29] = ICE_PHY_TYPE(25GB, 25000baseCR_Full), + [30] = ICE_PHY_TYPE(40GB, 40000baseCR4_Full), + [31] = ICE_PHY_TYPE(40GB, 40000baseSR4_Full), + [32] = ICE_PHY_TYPE(40GB, 40000baseLR4_Full), + [33] = ICE_PHY_TYPE(40GB, 40000baseKR4_Full), + [34] = ICE_PHY_TYPE(40GB, 40000baseSR4_Full), + [35] = ICE_PHY_TYPE(40GB, 40000baseCR4_Full), + [36] = ICE_PHY_TYPE(50GB, 50000baseCR2_Full), + [37] = ICE_PHY_TYPE(50GB, 50000baseSR2_Full), + [38] = ICE_PHY_TYPE(50GB, 50000baseSR2_Full), + [39] = ICE_PHY_TYPE(50GB, 50000baseKR2_Full), + [40] = ICE_PHY_TYPE(50GB, 50000baseSR2_Full), + [41] = ICE_PHY_TYPE(50GB, 50000baseCR2_Full), + [42] = ICE_PHY_TYPE(50GB, 50000baseSR2_Full), + [43] = ICE_PHY_TYPE(50GB, 50000baseCR2_Full), + [44] = ICE_PHY_TYPE(50GB, 50000baseCR_Full), + [45] = ICE_PHY_TYPE(50GB, 50000baseSR_Full), + [46] = ICE_PHY_TYPE(50GB, 50000baseLR_ER_FR_Full), + [47] = ICE_PHY_TYPE(50GB, 
50000baseLR_ER_FR_Full), + [48] = ICE_PHY_TYPE(50GB, 50000baseKR_Full), + [49] = ICE_PHY_TYPE(50GB, 50000baseSR_Full), + [50] = ICE_PHY_TYPE(50GB, 50000baseCR_Full), + [51] = ICE_PHY_TYPE(100GB, 100000baseCR4_Full), + [52] = ICE_PHY_TYPE(100GB, 100000baseSR4_Full), + [53] = ICE_PHY_TYPE(100GB, 100000baseLR4_ER4_Full), + [54] = ICE_PHY_TYPE(100GB, 100000baseKR4_Full), + [55] = ICE_PHY_TYPE(100GB, 100000baseCR4_Full), + [56] = ICE_PHY_TYPE(100GB, 100000baseCR4_Full), + [57] = ICE_PHY_TYPE(100GB, 100000baseSR4_Full), + [58] = ICE_PHY_TYPE(100GB, 100000baseCR4_Full), + [59] = ICE_PHY_TYPE(100GB, 100000baseCR4_Full), + [60] = ICE_PHY_TYPE(100GB, 100000baseKR4_Full), + [61] = ICE_PHY_TYPE(100GB, 100000baseCR2_Full), + [62] = ICE_PHY_TYPE(100GB, 100000baseSR2_Full), + [63] = ICE_PHY_TYPE(100GB, 100000baseLR4_ER4_Full), +}; + +/* Lookup table mapping PHY type high to link speed and Ethtool link modes. + * Array index corresponds to HW PHY type bit, see + * ice_adminq_cmd.h:ICE_PHY_TYPE_HIGH_* + */ +static const struct ice_phy_type_to_ethtool +phy_type_high_lkup[] = { + [0] = ICE_PHY_TYPE(100GB, 100000baseKR2_Full), + [1] = ICE_PHY_TYPE(100GB, 100000baseSR2_Full), + [2] = ICE_PHY_TYPE(100GB, 100000baseCR2_Full), + [3] = ICE_PHY_TYPE(100GB, 100000baseSR2_Full), + [4] = ICE_PHY_TYPE(100GB, 100000baseCR2_Full), +}; + +#endif /* !_ICE_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index e6bc2285071e2..145b27f2a4ce8 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -229,20 +229,34 @@ void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos) EXPORT_SYMBOL_GPL(ice_get_qos_params); /** - * ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver + * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver * @pf: board private structure to initialize */ -static int ice_reserve_rdma_qvector(struct ice_pf *pf) +static int ice_alloc_rdma_qvectors(struct ice_pf *pf) { if (ice_is_rdma_ena(pf)) { - int index; - - index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix, - ICE_RES_RDMA_VEC_ID); - if (index < 0) - return index; - pf->num_avail_sw_msix -= pf->num_rdma_msix; - pf->rdma_base_vector = (u16)index; + int i; + + pf->msix_entries = kcalloc(pf->num_rdma_msix, + sizeof(*pf->msix_entries), + GFP_KERNEL); + if (!pf->msix_entries) + return -ENOMEM; + + /* RDMA is the only user of pf->msix_entries array */ + pf->rdma_base_vector = 0; + + for (i = 0; i < pf->num_rdma_msix; i++) { + struct msix_entry *entry = &pf->msix_entries[i]; + struct msi_map map; + + map = ice_alloc_irq(pf, false); + if (map.index < 0) + break; + + entry->entry = map.index; + entry->vector = map.virq; + } } return 0; } @@ -253,9 +267,21 @@ static int ice_reserve_rdma_qvector(struct ice_pf *pf) */ static void ice_free_rdma_qvector(struct ice_pf *pf) { - pf->num_avail_sw_msix -= pf->num_rdma_msix; - ice_free_res(pf->irq_tracker, pf->rdma_base_vector, - ICE_RES_RDMA_VEC_ID); + int i; + + if (!pf->msix_entries) + return; + + for (i = 0; i < pf->num_rdma_msix; i++) { + struct msi_map map; + + map.index = pf->msix_entries[i].entry; + map.virq = pf->msix_entries[i].vector; + ice_free_irq(pf, map); + } + + kfree(pf->msix_entries); + pf->msix_entries = NULL; } /** @@ -357,7 +383,7 @@ int ice_init_rdma(struct ice_pf *pf) } /* Reserve vector resources */ - ret = ice_reserve_rdma_qvector(pf); + ret = ice_alloc_rdma_qvectors(pf); if (ret < 0) { dev_err(dev, "failed to reserve vectors for RDMA\n"); 
goto err_reserve_rdma_qvector; diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c new file mode 100644 index 0000000000000..ad82ff7d19957 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_irq.c @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023, Intel Corporation. */ + +#include "ice.h" +#include "ice_lib.h" +#include "ice_irq.h" + +/** + * ice_init_irq_tracker - initialize interrupt tracker + * @pf: board private structure + * @max_vectors: maximum number of vectors that tracker can hold + * @num_static: number of preallocated interrupts + */ +static void +ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors, + unsigned int num_static) +{ + pf->irq_tracker.num_entries = max_vectors; + pf->irq_tracker.num_static = num_static; + xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC); +} + +/** + * ice_deinit_irq_tracker - free xarray tracker + * @pf: board private structure + */ +static void ice_deinit_irq_tracker(struct ice_pf *pf) +{ + xa_destroy(&pf->irq_tracker.entries); +} + +/** + * ice_free_irq_res - free a block of resources + * @pf: board private structure + * @index: starting index previously returned by ice_get_res + */ +static void ice_free_irq_res(struct ice_pf *pf, u16 index) +{ + struct ice_irq_entry *entry; + + entry = xa_erase(&pf->irq_tracker.entries, index); + kfree(entry); +} + +/** + * ice_get_irq_res - get an interrupt resource + * @pf: board private structure + * @dyn_only: force entry to be dynamically allocated + * + * Allocate new irq entry in the free slot of the tracker. Since xarray + * is used, always allocate new entry at the lowest possible index. Set + * proper allocation limit for maximum tracker entries. + * + * Returns allocated irq entry or NULL on failure. + */ +static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) +{ + struct xa_limit limit = { .max = pf->irq_tracker.num_entries, + .min = 0 }; + unsigned int num_static = pf->irq_tracker.num_static; + struct ice_irq_entry *entry; + unsigned int index; + int ret; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return NULL; + + /* skip preallocated entries if the caller says so */ + if (dyn_only) + limit.min = num_static; + + ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit, + GFP_KERNEL); + + if (ret) { + kfree(entry); + entry = NULL; + } else { + entry->index = index; + entry->dynamic = index >= num_static; + } + + return entry; +} + +/** + * ice_reduce_msix_usage - Reduce usage of MSI-X vectors + * @pf: board private structure + * @v_remain: number of remaining MSI-X vectors to be distributed + * + * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled. + * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of + * remaining vectors. 
+ */ +static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain) +{ + int v_rdma; + + if (!ice_is_rdma_ena(pf)) { + pf->num_lan_msix = v_remain; + return; + } + + /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */ + v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; + + if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) { + dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n"); + clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); + + pf->num_rdma_msix = 0; + pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; + } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || + (v_remain - v_rdma < v_rdma)) { + /* Support minimum RDMA and give remaining vectors to LAN MSIX + */ + pf->num_rdma_msix = ICE_MIN_RDMA_MSIX; + pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX; + } else { + /* Split remaining MSIX with RDMA after accounting for AEQ MSIX + */ + pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + + ICE_RDMA_NUM_AEQ_MSIX; + pf->num_lan_msix = v_remain - pf->num_rdma_msix; + } +} + +/** + * ice_ena_msix_range - Request a range of MSIX vectors from the OS + * @pf: board private structure + * + * Compute the number of MSIX vectors wanted and request from the OS. Adjust + * device usage if there are not enough vectors. Return the number of vectors + * reserved or negative on failure. + */ +static int ice_ena_msix_range(struct ice_pf *pf) +{ + int num_cpus, hw_num_msix, v_other, v_wanted, v_actual; + struct device *dev = ice_pf_to_dev(pf); + int err; + + hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors; + num_cpus = num_online_cpus(); + + /* LAN miscellaneous handler */ + v_other = ICE_MIN_LAN_OICR_MSIX; + + /* Flow Director */ + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) + v_other += ICE_FDIR_MSIX; + + /* switchdev */ + v_other += ICE_ESWITCH_MSIX; + + v_wanted = v_other; + + /* LAN traffic */ + pf->num_lan_msix = num_cpus; + v_wanted += pf->num_lan_msix; + + /* RDMA auxiliary driver */ + if (ice_is_rdma_ena(pf)) { + pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; + v_wanted += pf->num_rdma_msix; + } + + if (v_wanted > hw_num_msix) { + int v_remain; + + dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n", + v_wanted, hw_num_msix); + + if (hw_num_msix < ICE_MIN_MSIX) { + err = -ERANGE; + goto exit_err; + } + + v_remain = hw_num_msix - v_other; + if (v_remain < ICE_MIN_LAN_TXRX_MSIX) { + v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX; + v_remain = ICE_MIN_LAN_TXRX_MSIX; + } + + ice_reduce_msix_usage(pf, v_remain); + v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other; + + dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n", + pf->num_lan_msix); + if (ice_is_rdma_ena(pf)) + dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n", + pf->num_rdma_msix); + } + + /* actually reserve the vectors */ + v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted, + PCI_IRQ_MSIX); + if (v_actual < 0) { + dev_err(dev, "unable to reserve MSI-X vectors\n"); + err = v_actual; + goto exit_err; + } + + if (v_actual < v_wanted) { + dev_warn(dev, "not enough OS MSI-X vectors. 
requested = %d, obtained = %d\n", + v_wanted, v_actual); + + if (v_actual < ICE_MIN_MSIX) { + /* error if we can't get minimum vectors */ + pci_free_irq_vectors(pf->pdev); + err = -ERANGE; + goto exit_err; + } else { + int v_remain = v_actual - v_other; + + if (v_remain < ICE_MIN_LAN_TXRX_MSIX) + v_remain = ICE_MIN_LAN_TXRX_MSIX; + + ice_reduce_msix_usage(pf, v_remain); + + dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", + pf->num_lan_msix); + + if (ice_is_rdma_ena(pf)) + dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", + pf->num_rdma_msix); + } + } + + return v_actual; + +exit_err: + pf->num_rdma_msix = 0; + pf->num_lan_msix = 0; + return err; +} + +/** + * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme + * @pf: board private structure + */ +void ice_clear_interrupt_scheme(struct ice_pf *pf) +{ + pci_free_irq_vectors(pf->pdev); + ice_deinit_irq_tracker(pf); +} + +/** + * ice_init_interrupt_scheme - Determine proper interrupt scheme + * @pf: board private structure to initialize + */ +int ice_init_interrupt_scheme(struct ice_pf *pf) +{ + int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; + int vectors, max_vectors; + + vectors = ice_ena_msix_range(pf); + + if (vectors < 0) + return -ENOMEM; + + if (pci_msix_can_alloc_dyn(pf->pdev)) + max_vectors = total_vectors; + else + max_vectors = vectors; + + ice_init_irq_tracker(pf, max_vectors, vectors); + + return 0; +} + +/** + * ice_alloc_irq - Allocate new interrupt vector + * @pf: board private structure + * @dyn_only: force dynamic allocation of the interrupt + * + * Allocate new interrupt vector for a given owner id. + * return struct msi_map with interrupt details and track + * allocated interrupt appropriately. + * + * This function reserves new irq entry from the irq_tracker. + * if according to the tracker information all interrupts that + * were allocated with ice_pci_alloc_irq_vectors are already used + * and dynamically allocated interrupts are supported then new + * interrupt will be allocated with pci_msix_alloc_irq_at. + * + * Some callers may only support dynamically allocated interrupts. + * This is indicated with dyn_only flag. + * + * On failure, return map with negative .index. The caller + * is expected to check returned map index. + * + */ +struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only) +{ + int sriov_base_vector = pf->sriov_base_vector; + struct msi_map map = { .index = -ENOENT }; + struct device *dev = ice_pf_to_dev(pf); + struct ice_irq_entry *entry; + + entry = ice_get_irq_res(pf, dyn_only); + if (!entry) + return map; + + /* fail if we're about to violate SRIOV vectors space */ + if (sriov_base_vector && entry->index >= sriov_base_vector) + goto exit_free_res; + + if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) { + map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL); + if (map.index < 0) + goto exit_free_res; + dev_dbg(dev, "allocated new irq at index %d\n", map.index); + } else { + map.index = entry->index; + map.virq = pci_irq_vector(pf->pdev, map.index); + } + + return map; + +exit_free_res: + dev_err(dev, "Could not allocate irq at idx %d\n", entry->index); + ice_free_irq_res(pf, entry->index); + return map; +} + +/** + * ice_free_irq - Free interrupt vector + * @pf: board private structure + * @map: map with interrupt details + * + * Remove allocated interrupt from the interrupt tracker. If interrupt was + * allocated dynamically, free respective interrupt vector. 
+ */ +void ice_free_irq(struct ice_pf *pf, struct msi_map map) +{ + struct ice_irq_entry *entry; + + entry = xa_load(&pf->irq_tracker.entries, map.index); + + if (!entry) { + dev_err(ice_pf_to_dev(pf), "Failed to get MSIX interrupt entry at index %d", + map.index); + return; + } + + dev_dbg(ice_pf_to_dev(pf), "Free irq at index %d\n", map.index); + + if (entry->dynamic) + pci_msix_free_irq(pf->pdev, map); + + ice_free_irq_res(pf, map.index); +} + +/** + * ice_get_max_used_msix_vector - Get the max used interrupt vector + * @pf: board private structure + * + * Return index of maximum used interrupt vectors with respect to the + * beginning of the MSIX table. Take into account that some interrupts + * may have been dynamically allocated after MSIX was initially enabled. + */ +int ice_get_max_used_msix_vector(struct ice_pf *pf) +{ + unsigned long start, index, max_idx; + void *entry; + + /* Treat all preallocated interrupts as used */ + start = pf->irq_tracker.num_static; + max_idx = start - 1; + + xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) { + if (index > max_idx) + max_idx = index; + } + + return max_idx; +} diff --git a/drivers/net/ethernet/intel/ice/ice_irq.h b/drivers/net/ethernet/intel/ice/ice_irq.h new file mode 100644 index 0000000000000..f35efc08575ef --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_irq.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2023, Intel Corporation. */ + +#ifndef _ICE_IRQ_H_ +#define _ICE_IRQ_H_ + +struct ice_irq_entry { + unsigned int index; + bool dynamic; /* allocation type flag */ +}; + +struct ice_irq_tracker { + struct xarray entries; + u16 num_entries; /* total vectors available */ + u16 num_static; /* preallocated entries */ +}; + +int ice_init_interrupt_scheme(struct ice_pf *pf); +void ice_clear_interrupt_scheme(struct ice_pf *pf); + +struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only); +void ice_free_irq(struct ice_pf *pf, struct msi_map map); +int ice_get_max_used_msix_vector(struct ice_pf *pf); + +#endif diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index ee5b36941ba35..5a7753bda3245 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -6,15 +6,6 @@ #include "ice.h" #include "ice_lag.h" -/** - * ice_lag_nop_handler - no-op Rx handler to disable LAG - * @pskb: pointer to skb pointer - */ -rx_handler_result_t ice_lag_nop_handler(struct sk_buff __always_unused **pskb) -{ - return RX_HANDLER_PASS; -} - /** * ice_lag_set_primary - set PF LAG state as Primary * @lag: LAG info struct @@ -158,7 +149,6 @@ ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info) lag->upper_netdev = upper; } - ice_clear_sriov_cap(pf); ice_clear_rdma_cap(pf); lag->bonded = true; @@ -205,7 +195,6 @@ ice_lag_unlink(struct ice_lag *lag, } lag->peer_netdev = NULL; - ice_set_sriov_cap(pf); ice_set_rdma_cap(pf); lag->bonded = false; lag->role = ICE_LAG_NONE; @@ -229,7 +218,6 @@ static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev) if (lag->upper_netdev) { dev_put(lag->upper_netdev); lag->upper_netdev = NULL; - ice_set_sriov_cap(pf); ice_set_rdma_cap(pf); } /* perform some cleanup in case we come back */ diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h index 51b5cf467ce24..2c373676c42f2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.h +++ b/drivers/net/ethernet/intel/ice/ice_lag.h @@ -25,63 +25,9 @@ struct ice_lag 
{ struct notifier_block notif_block; u8 bonded:1; /* currently bonded */ u8 primary:1; /* this is primary */ - u8 handler:1; /* did we register a rx_netdev_handler */ - /* each thing blocking bonding will increment this value by one. - * If this value is zero, then bonding is allowed. - */ - u16 dis_lag; u8 role; }; int ice_init_lag(struct ice_pf *pf); void ice_deinit_lag(struct ice_pf *pf); -rx_handler_result_t ice_lag_nop_handler(struct sk_buff **pskb); - -/** - * ice_disable_lag - increment LAG disable count - * @lag: LAG struct - */ -static inline void ice_disable_lag(struct ice_lag *lag) -{ - /* If LAG this PF is not already disabled, disable it */ - rtnl_lock(); - if (!netdev_is_rx_handler_busy(lag->netdev)) { - if (!netdev_rx_handler_register(lag->netdev, - ice_lag_nop_handler, - NULL)) - lag->handler = true; - } - rtnl_unlock(); - lag->dis_lag++; -} - -/** - * ice_enable_lag - decrement disable count for a PF - * @lag: LAG struct - * - * Decrement the disable counter for a port, and if that count reaches - * zero, then remove the no-op Rx handler from that netdev - */ -static inline void ice_enable_lag(struct ice_lag *lag) -{ - if (lag->dis_lag) - lag->dis_lag--; - if (!lag->dis_lag && lag->handler) { - rtnl_lock(); - netdev_rx_handler_unregister(lag->netdev); - rtnl_unlock(); - lag->handler = false; - } -} - -/** - * ice_is_lag_dis - is LAG disabled - * @lag: LAG struct - * - * Return true if bonding is disabled - */ -static inline bool ice_is_lag_dis(struct ice_lag *lag) -{ - return !!(lag->dis_lag); -} #endif /* _ICE_LAG_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 450317dfcca73..5ddb95d1073a1 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1370,190 +1370,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags) return ret; } -/** - * ice_free_res - free a block of resources - * @res: pointer to the resource - * @index: starting index previously returned by ice_get_res - * @id: identifier to track owner - * - * Returns number of resources freed - */ -int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) -{ - int count = 0; - int i; - - if (!res || index >= res->end) - return -EINVAL; - - id |= ICE_RES_VALID_BIT; - for (i = index; i < res->end && res->list[i] == id; i++) { - res->list[i] = 0; - count++; - } - - return count; -} - -/** - * ice_search_res - Search the tracker for a block of resources - * @res: pointer to the resource - * @needed: size of the block needed - * @id: identifier to track owner - * - * Returns the base item index of the block, or -ENOMEM for error - */ -static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) -{ - u16 start = 0, end = 0; - - if (needed > res->end) - return -ENOMEM; - - id |= ICE_RES_VALID_BIT; - - do { - /* skip already allocated entries */ - if (res->list[end++] & ICE_RES_VALID_BIT) { - start = end; - if ((start + needed) > res->end) - break; - } - - if (end == (start + needed)) { - int i = start; - - /* there was enough, so assign it to the requestor */ - while (i != end) - res->list[i++] = id; - - return start; - } - } while (end < res->end); - - return -ENOMEM; -} - -/** - * ice_get_free_res_count - Get free count from a resource tracker - * @res: Resource tracker instance - */ -static u16 ice_get_free_res_count(struct ice_res_tracker *res) -{ - u16 i, count = 0; - - for (i = 0; i < res->end; i++) - if (!(res->list[i] & ICE_RES_VALID_BIT)) - count++; - - return count; -} - -/** - 
* ice_get_res - get a block of resources - * @pf: board private structure - * @res: pointer to the resource - * @needed: size of the block needed - * @id: identifier to track owner - * - * Returns the base item index of the block, or negative for error - */ -int -ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) -{ - if (!res || !pf) - return -EINVAL; - - if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { - dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n", - needed, res->num_entries, id); - return -EINVAL; - } - - return ice_search_res(res, needed, id); -} - -/** - * ice_get_vf_ctrl_res - Get VF control VSI resource - * @pf: pointer to the PF structure - * @vsi: the VSI to allocate a resource for - * - * Look up whether another VF has already allocated the control VSI resource. - * If so, re-use this resource so that we share it among all VFs. - * - * Otherwise, allocate the resource and return it. - */ -static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi) -{ - struct ice_vf *vf; - unsigned int bkt; - int base; - - rcu_read_lock(); - ice_for_each_vf_rcu(pf, bkt, vf) { - if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { - base = pf->vsi[vf->ctrl_vsi_idx]->base_vector; - rcu_read_unlock(); - return base; - } - } - rcu_read_unlock(); - - return ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors, - ICE_RES_VF_CTRL_VEC_ID); -} - -/** - * ice_vsi_setup_vector_base - Set up the base vector for the given VSI - * @vsi: ptr to the VSI - * - * This should only be called after ice_vsi_alloc_def() which allocates the - * corresponding SW VSI structure and initializes num_queue_pairs for the - * newly allocated VSI. - * - * Returns 0 on success or negative on failure - */ -static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - struct device *dev; - u16 num_q_vectors; - int base; - - dev = ice_pf_to_dev(pf); - /* SRIOV doesn't grab irq_tracker entries for each VSI */ - if (vsi->type == ICE_VSI_VF) - return 0; - if (vsi->type == ICE_VSI_CHNL) - return 0; - - if (vsi->base_vector) { - dev_dbg(dev, "VSI %d has non-zero base vector %d\n", - vsi->vsi_num, vsi->base_vector); - return -EEXIST; - } - - num_q_vectors = vsi->num_q_vectors; - /* reserve slots from OS requested IRQs */ - if (vsi->type == ICE_VSI_CTRL && vsi->vf) { - base = ice_get_vf_ctrl_res(pf, vsi); - } else { - base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, - vsi->idx); - } - - if (base < 0) { - dev_err(dev, "%d MSI-X interrupts available. 
%s %d failed to get %d MSI-X vectors\n", - ice_get_free_res_count(pf->irq_tracker), - ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors); - return -ENOENT; - } - vsi->base_vector = (u16)base; - pf->num_avail_sw_msix -= num_q_vectors; - - return 0; -} - /** * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI * @vsi: the VSI having rings deallocated @@ -2409,50 +2225,6 @@ static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) ice_vsi_set_dcb_tc_cfg(vsi); } -/** - * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors - * @vsi: VSI to set the q_vectors register index on - */ -static int -ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi) -{ - u16 i; - - if (!vsi || !vsi->q_vectors) - return -EINVAL; - - ice_for_each_q_vector(vsi, i) { - struct ice_q_vector *q_vector = vsi->q_vectors[i]; - - if (!q_vector) { - dev_err(ice_pf_to_dev(vsi->back), "Failed to set reg_idx on q_vector %d VSI %d\n", - i, vsi->vsi_num); - goto clear_reg_idx; - } - - if (vsi->type == ICE_VSI_VF) { - struct ice_vf *vf = vsi->vf; - - q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector); - } else { - q_vector->reg_idx = - q_vector->v_idx + vsi->base_vector; - } - } - - return 0; - -clear_reg_idx: - ice_for_each_q_vector(vsi, i) { - struct ice_q_vector *q_vector = vsi->q_vectors[i]; - - if (q_vector) - q_vector->reg_idx = 0; - } - - return -EINVAL; -} - /** * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling * @vsi: the VSI being configured @@ -2611,37 +2383,6 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) vsi->agg_node->num_vsis); } -/** - * ice_free_vf_ctrl_res - Free the VF control VSI resource - * @pf: pointer to PF structure - * @vsi: the VSI to free resources for - * - * Check if the VF control VSI resource is still in use. If no VF is using it - * any more, release the VSI resource. Otherwise, leave it to be cleaned up - * once no other VF uses it. - */ -static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi) -{ - struct ice_vf *vf; - unsigned int bkt; - - rcu_read_lock(); - ice_for_each_vf_rcu(pf, bkt, vf) { - if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { - rcu_read_unlock(); - return; - } - } - rcu_read_unlock(); - - /* No other VFs left that have control VSI. It is now safe to reclaim - * SW interrupts back to the common pool. 
- */ - ice_free_res(pf->irq_tracker, vsi->base_vector, - ICE_RES_VF_CTRL_VEC_ID); - pf->num_avail_sw_msix += vsi->num_q_vectors; -} - static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; @@ -2728,14 +2469,6 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) if (ret) goto unroll_vsi_init; - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto unroll_alloc_q_vector; - - ret = ice_vsi_set_q_vectors_reg_idx(vsi); - if (ret) - goto unroll_vector_base; - ret = ice_vsi_alloc_rings(vsi); if (ret) goto unroll_vector_base; @@ -2745,6 +2478,8 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) goto unroll_vector_base; ice_vsi_map_rings_to_vectors(vsi); + vsi->stat_offsets_loaded = false; + if (ice_is_xdp_ena_vsi(vsi)) { ret = ice_vsi_determine_xdp_res(vsi); if (ret) @@ -2786,13 +2521,12 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) if (ret) goto unroll_alloc_q_vector; - ret = ice_vsi_set_q_vectors_reg_idx(vsi); - if (ret) - goto unroll_vector_base; - ret = ice_vsi_alloc_ring_stats(vsi); if (ret) goto unroll_vector_base; + + vsi->stat_offsets_loaded = false; + /* Do not exit if configuring RSS had an issue, at least * receive traffic on first queue. Hence no need to capture * return value @@ -2822,8 +2556,6 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) unroll_vector_base: /* reclaim SW interrupts back to the common pool */ - ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); - pf->num_avail_sw_msix += vsi->num_q_vectors; unroll_alloc_q_vector: ice_vsi_free_q_vectors(vsi); unroll_vsi_init: @@ -2915,14 +2647,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi) * many interrupts each VF needs. SR-IOV MSIX resources are also * cleared in the same manner. 
*/ - if (vsi->type == ICE_VSI_CTRL && vsi->vf) { - ice_free_vf_ctrl_res(pf, vsi); - } else if (vsi->type != ICE_VSI_VF) { - /* reclaim SW interrupts back to the common pool */ - ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); - pf->num_avail_sw_msix += vsi->num_q_vectors; - vsi->base_vector = 0; - } if (vsi->type == ICE_VSI_VF && vsi->agg_node && vsi->agg_node->valid) @@ -2988,8 +2712,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) return vsi; err_vsi_cfg: - if (params->type == ICE_VSI_VF) - ice_enable_lag(pf->lag); ice_vsi_free(vsi); return NULL; @@ -3039,7 +2761,6 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) void ice_vsi_free_irq(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - int base = vsi->base_vector; int i; if (!vsi->q_vectors || !vsi->irqs_ready) @@ -3053,10 +2774,9 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) ice_free_cpu_rx_rmap(vsi); ice_for_each_q_vector(vsi, i) { - u16 vector = i + base; int irq_num; - irq_num = pf->msix_entries[vector].vector; + irq_num = vsi->q_vectors[i]->irq.virq; /* free only the irqs that were actually requested */ if (!vsi->q_vectors[i] || @@ -3188,7 +2908,6 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) */ void ice_vsi_dis_irq(struct ice_vsi *vsi) { - int base = vsi->base_vector; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; u32 val; @@ -3235,7 +2954,7 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) return; ice_for_each_q_vector(vsi, i) - synchronize_irq(pf->msix_entries[i + base].vector); + synchronize_irq(vsi->q_vectors[i]->irq.virq); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 75221478f2dc0..e985766e6bb5f 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -104,11 +104,6 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked); void ice_vsi_decfg(struct ice_vsi *vsi); void ice_dis_vsi(struct ice_vsi *vsi, bool locked); -int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id); - -int -ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id); - int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags); int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a1f7c8edc22f3..62e91512aeabc 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2490,7 +2490,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) { int q_vectors = vsi->num_q_vectors; struct ice_pf *pf = vsi->back; - int base = vsi->base_vector; struct device *dev; int rx_int_idx = 0; int tx_int_idx = 0; @@ -2501,7 +2500,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) for (vector = 0; vector < q_vectors; vector++) { struct ice_q_vector *q_vector = vsi->q_vectors[vector]; - irq_num = pf->msix_entries[base + vector].vector; + irq_num = q_vector->irq.virq; if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, @@ -2555,9 +2554,8 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) return 0; free_q_irqs: - while (vector) { - vector--; - irq_num = pf->msix_entries[base + vector].vector; + while (vector--) { + irq_num = vsi->q_vectors[vector]->irq.virq; if (!IS_ENABLED(CONFIG_RFS_ACCEL)) irq_set_affinity_notifier(irq_num, NULL); irq_set_affinity_hint(irq_num, NULL); @@ -3047,7 +3045,7 @@ static void 
ice_ena_misc_vector(struct ice_pf *pf) wr32(hw, PFINT_OICR_ENA, val); /* SW_ITR_IDX = 0, but don't change INTENA */ - wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), + wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); } @@ -3234,6 +3232,7 @@ static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) */ static void ice_free_irq_msix_misc(struct ice_pf *pf) { + int misc_irq_num = pf->oicr_irq.virq; struct ice_hw *hw = &pf->hw; ice_dis_ctrlq_interrupts(hw); @@ -3242,14 +3241,10 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) wr32(hw, PFINT_OICR_ENA, 0); ice_flush(hw); - if (pf->msix_entries) { - synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); - devm_free_irq(ice_pf_to_dev(pf), - pf->msix_entries[pf->oicr_idx].vector, pf); - } + synchronize_irq(misc_irq_num); + devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf); - pf->num_avail_sw_msix += 1; - ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); + ice_free_irq(pf, pf->oicr_irq); } /** @@ -3295,7 +3290,8 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - int oicr_idx, err = 0; + struct msi_map oicr_irq; + int err = 0; if (!pf->int_name[0]) snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", @@ -3309,30 +3305,26 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) goto skip_req_irq; /* reserve one vector in irq_tracker for misc interrupts */ - oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); - if (oicr_idx < 0) - return oicr_idx; - - pf->num_avail_sw_msix -= 1; - pf->oicr_idx = (u16)oicr_idx; - - err = devm_request_threaded_irq(dev, - pf->msix_entries[pf->oicr_idx].vector, - ice_misc_intr, ice_misc_intr_thread_fn, - 0, pf->int_name, pf); + oicr_irq = ice_alloc_irq(pf, false); + if (oicr_irq.index < 0) + return oicr_irq.index; + + pf->oicr_irq = oicr_irq; + err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr, + ice_misc_intr_thread_fn, 0, + pf->int_name, pf); if (err) { dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n", pf->int_name, err); - ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); - pf->num_avail_sw_msix += 1; + ice_free_irq(pf, pf->oicr_irq); return err; } skip_req_irq: ice_ena_misc_vector(pf); - ice_ena_ctrlq_interrupts(hw, pf->oicr_idx); - wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), + ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index); + wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index), ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); ice_flush(hw); @@ -3900,224 +3892,6 @@ static int ice_init_pf(struct ice_pf *pf) return 0; } -/** - * ice_reduce_msix_usage - Reduce usage of MSI-X vectors - * @pf: board private structure - * @v_remain: number of remaining MSI-X vectors to be distributed - * - * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled. - * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of - * remaining vectors. 
- */ -static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain) -{ - int v_rdma; - - if (!ice_is_rdma_ena(pf)) { - pf->num_lan_msix = v_remain; - return; - } - - /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */ - v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; - - if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) { - dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n"); - clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); - - pf->num_rdma_msix = 0; - pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; - } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || - (v_remain - v_rdma < v_rdma)) { - /* Support minimum RDMA and give remaining vectors to LAN MSIX */ - pf->num_rdma_msix = ICE_MIN_RDMA_MSIX; - pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX; - } else { - /* Split remaining MSIX with RDMA after accounting for AEQ MSIX - */ - pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + - ICE_RDMA_NUM_AEQ_MSIX; - pf->num_lan_msix = v_remain - pf->num_rdma_msix; - } -} - -/** - * ice_ena_msix_range - Request a range of MSIX vectors from the OS - * @pf: board private structure - * - * Compute the number of MSIX vectors wanted and request from the OS. Adjust - * device usage if there are not enough vectors. Return the number of vectors - * reserved or negative on failure. - */ -static int ice_ena_msix_range(struct ice_pf *pf) -{ - int num_cpus, hw_num_msix, v_other, v_wanted, v_actual; - struct device *dev = ice_pf_to_dev(pf); - int err, i; - - hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors; - num_cpus = num_online_cpus(); - - /* LAN miscellaneous handler */ - v_other = ICE_MIN_LAN_OICR_MSIX; - - /* Flow Director */ - if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) - v_other += ICE_FDIR_MSIX; - - /* switchdev */ - v_other += ICE_ESWITCH_MSIX; - - v_wanted = v_other; - - /* LAN traffic */ - pf->num_lan_msix = num_cpus; - v_wanted += pf->num_lan_msix; - - /* RDMA auxiliary driver */ - if (ice_is_rdma_ena(pf)) { - pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; - v_wanted += pf->num_rdma_msix; - } - - if (v_wanted > hw_num_msix) { - int v_remain; - - dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n", - v_wanted, hw_num_msix); - - if (hw_num_msix < ICE_MIN_MSIX) { - err = -ERANGE; - goto exit_err; - } - - v_remain = hw_num_msix - v_other; - if (v_remain < ICE_MIN_LAN_TXRX_MSIX) { - v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX; - v_remain = ICE_MIN_LAN_TXRX_MSIX; - } - - ice_reduce_msix_usage(pf, v_remain); - v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other; - - dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n", - pf->num_lan_msix); - if (ice_is_rdma_ena(pf)) - dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n", - pf->num_rdma_msix); - } - - pf->msix_entries = devm_kcalloc(dev, v_wanted, - sizeof(*pf->msix_entries), GFP_KERNEL); - if (!pf->msix_entries) { - err = -ENOMEM; - goto exit_err; - } - - for (i = 0; i < v_wanted; i++) - pf->msix_entries[i].entry = i; - - /* actually reserve the vectors */ - v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, - ICE_MIN_MSIX, v_wanted); - if (v_actual < 0) { - dev_err(dev, "unable to reserve MSI-X vectors\n"); - err = v_actual; - goto msix_err; - } - - if (v_actual < v_wanted) { - dev_warn(dev, "not enough OS MSI-X vectors. 
requested = %d, obtained = %d\n", - v_wanted, v_actual); - - if (v_actual < ICE_MIN_MSIX) { - /* error if we can't get minimum vectors */ - pci_disable_msix(pf->pdev); - err = -ERANGE; - goto msix_err; - } else { - int v_remain = v_actual - v_other; - - if (v_remain < ICE_MIN_LAN_TXRX_MSIX) - v_remain = ICE_MIN_LAN_TXRX_MSIX; - - ice_reduce_msix_usage(pf, v_remain); - - dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", - pf->num_lan_msix); - - if (ice_is_rdma_ena(pf)) - dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", - pf->num_rdma_msix); - } - } - - return v_actual; - -msix_err: - devm_kfree(dev, pf->msix_entries); - -exit_err: - pf->num_rdma_msix = 0; - pf->num_lan_msix = 0; - return err; -} - -/** - * ice_dis_msix - Disable MSI-X interrupt setup in OS - * @pf: board private structure - */ -static void ice_dis_msix(struct ice_pf *pf) -{ - pci_disable_msix(pf->pdev); - devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); - pf->msix_entries = NULL; -} - -/** - * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme - * @pf: board private structure - */ -static void ice_clear_interrupt_scheme(struct ice_pf *pf) -{ - ice_dis_msix(pf); - - if (pf->irq_tracker) { - devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); - pf->irq_tracker = NULL; - } -} - -/** - * ice_init_interrupt_scheme - Determine proper interrupt scheme - * @pf: board private structure to initialize - */ -static int ice_init_interrupt_scheme(struct ice_pf *pf) -{ - int vectors; - - vectors = ice_ena_msix_range(pf); - - if (vectors < 0) - return vectors; - - /* set up vector assignment tracking */ - pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf), - struct_size(pf->irq_tracker, list, vectors), - GFP_KERNEL); - if (!pf->irq_tracker) { - ice_dis_msix(pf); - return -ENOMEM; - } - - /* populate SW interrupts pool with number of OS granted IRQs. */ - pf->num_avail_sw_msix = (u16)vectors; - pf->irq_tracker->num_entries = (u16)vectors; - pf->irq_tracker->end = pf->irq_tracker->num_entries; - - return 0; -} - /** * ice_is_wol_supported - check if WoL is supported * @hw: pointer to hardware info diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index 02a4e1cf624e9..6a9364761165d 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -47,6 +47,7 @@ enum ice_protocol_type { ICE_L2TPV3, ICE_VLAN_EX, ICE_VLAN_IN, + ICE_HW_METADATA, ICE_VXLAN_GPE, ICE_SCTP_IL, ICE_PROTOCOL_LAST @@ -115,17 +116,7 @@ enum ice_prot_id { #define ICE_L2TPV3_HW 104 #define ICE_UDP_OF_HW 52 /* UDP Tunnels */ -#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel and VLAN type */ -#define ICE_MDID_SIZE 2 - -#define ICE_TUN_FLAG_MDID 21 -#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID) -#define ICE_TUN_FLAG_MASK 0xFF - -#define ICE_VLAN_FLAG_MDID 20 -#define ICE_VLAN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_VLAN_FLAG_MDID) -#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000 #define ICE_TUN_FLAG_FV_IND 2 @@ -230,6 +221,191 @@ struct ice_nvgre_hdr { __be32 tni_flow; }; +/* Metadata information + * + * Not all MDIDs can be used by switch block. It depends on package version. + * + * MDID 16 (Rx offset) + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | A | B | Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * A = Source port where the transaction came from (3b). + * + * B = Destination TC of the packet. The TC is relative to a port (5b). 
+ * + * MDID 17 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | PTYPE | Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * PTYPE = Encodes the packet type (10b). + * + * MDID 18 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Packet length | R | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Packet length = Length of the packet in bytes + * (packet always carriers CRC) (14b). + * R = Reserved (2b). + * + * MDID 19 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Source VSI | Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Source VSI = Source VSI of packet loopbacked in switch (for egress) (10b). + */ +#define ICE_MDID_SOURCE_VSI_MASK GENMASK(9, 0) + +/* + * MDID 20 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |A|B|C|D|E|F|R|R|G|H|I|J|K|L|M|N| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * A = DSI - set for DSI RX pkts. + * B = ipsec_decrypted - invalid on NIC. + * C = marker - this is a marker packet. + * D = from_network - for TX sets to 0 + * for RX: + * * 1 - packet is from external link + * * 0 - packet source is from internal + * E = source_interface_is_rx - reflect the physical interface from where the + * packet was received: + * * 1 - Rx + * * 0 - Tx + * F = from_mng - The bit signals that the packet's origin is the management. + * G = ucast - Outer L2 MAC address is unicast. + * H = mcast - Outer L2 MAC address is multicast. + * I = bcast - Outer L2 MAC address is broadcast. + * J = second_outer_mac_present - 2 outer MAC headers are present in the packet. + * K = STAG or BVLAN - Outer L2 header has STAG (ethernet type 0x88a8) or + * BVLAN (ethernet type 0x88a8). + * L = ITAG - Outer L2 header has ITAG *ethernet type 0x88e7) + * M = EVLAN (0x8100) - Outer L2 header has EVLAN (ethernet type 0x8100) + * N = EVLAN (0x9100) - Outer L2 header has EVLAN (ethernet type 0x9100) + */ +#define ICE_PKT_VLAN_STAG BIT(12) +#define ICE_PKT_VLAN_ITAG BIT(13) +#define ICE_PKT_VLAN_EVLAN (BIT(14) | BIT(15)) +#define ICE_PKT_VLAN_MASK (ICE_PKT_VLAN_STAG | ICE_PKT_VLAN_ITAG | \ + ICE_PKT_VLAN_EVLAN) +/* MDID 21 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |A|B|C|D|E|F|G|H|I|J|R|R|K|L|M|N| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * A = VLAN (0x8100) - Outer L2 header has VLAN (ethernet type 0x8100) + * B = NSHoE - Outer L2 header has NSH (ethernet type 0x894f) + * C = MPLS (0x8847) - There is at least 1 MPLS tag in the outer header + * (ethernet type 0x8847) + * D = MPLS (0x8848) - There is at least 1 MPLS tag in the outer header + * (ethernet type 0x8848) + * E = multi MPLS - There is more than a single MPLS tag in the outer header + * F = inner MPLS - There is inner MPLS tag in the packet + * G = tunneled MAC - Set if the packet includes a tunneled MAC + * H = tunneled VLAN - Same as VLAN, but for a tunneled header + * I = pkt_is_frag - Packet is fragmented (ipv4 or ipv6) + * J = ipv6_ext - The packet has routing or destination ipv6 extension in inner + * or outer ipv6 headers + * K = RoCE - UDP packet detected as RoCEv2 + * L = UDP_XSUM_0 - Set to 1 if L4 checksum is 0 in a UDP packet + * M = ESP - This is a ESP packet + * N = NAT_ESP - This is a ESP packet encapsulated in UDP NAT + */ +#define ICE_PKT_TUNNEL_MAC BIT(6) +#define ICE_PKT_TUNNEL_VLAN BIT(7) +#define ICE_PKT_TUNNEL_MASK (ICE_PKT_TUNNEL_MAC | ICE_PKT_TUNNEL_VLAN) + +/* MDID 22 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |A|B|C|D|E|F| G |H|I|J| K |L|M| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * A = fin - fin flag in tcp header + * B = sync - sync flag in tcp header + * C = rst - rst flag in tcp header + * D = psh - psh flag in tcp header 
+ * E = ack - ack flag in tcp header + * F = urg - urg flag in tcp header + * G = tunnel type (3b) - Flags used to decode tunnel type: + * * b000 - not a VXLAN/Geneve/GRE tunnel + * * b001 - VXLAN-GPE + * * b010 - VXLAN (non-GPE) + * * b011 - Geneve + * * b100 - GRE (no key, no xsum) + * * b101 - GREK (key, no xsum) + * * b110 - GREC (no key, xsum) + * * b111 - GREKC (key, xsum) + * H = UDP_GRE - Packet is UDP (VXLAN or VLAN_GPE or Geneve or MPLSoUDP or GRE) + * tunnel + * I = OAM - VXLAN/Geneve/tunneled NSH packet with the OAM bit set + * J = tunneled NSH - Packet has NSHoGRE or NSHoUDP + * K = switch (2b) - Direction on switch + * * b00 - normal + * * b01 - TX force only LAN + * * b10 - TX disable LAN + * * b11 - direct to VSI + * L = swpe - Represents SWPE bit in TX command + * M = sw_cmd - Switch command + * + * MDID 23 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |A|B|C|D| R |E|F|R| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * A = MAC error - Produced by MAC according to L2 error conditions + * B = PPRS no offload - FIFO overflow in PPRS or any problematic condition in + * PPRS ANA + * C = abort - Set when malicious packet is detected + * D = partial analysis - ANA's analysing got cut in the middle + * (header > 504B etc.) + * E = FLM - Flow director hit indication + * F = FDLONG - Flow direector long bucket indication + * + */ +#define ICE_MDID_SIZE 2 +#define ICE_META_DATA_ID_HW 255 + +enum ice_hw_metadata_id { + ICE_SOURCE_PORT_MDID = 16, + ICE_PTYPE_MDID = 17, + ICE_PACKET_LENGTH_MDID = 18, + ICE_SOURCE_VSI_MDID = 19, + ICE_PKT_VLAN_MDID = 20, + ICE_PKT_TUNNEL_MDID = 21, + ICE_PKT_TCP_MDID = 22, + ICE_PKT_ERROR_MDID = 23, +}; + +enum ice_hw_metadata_offset { + ICE_SOURCE_PORT_MDID_OFFSET = ICE_MDID_SIZE * ICE_SOURCE_PORT_MDID, + ICE_PTYPE_MDID_OFFSET = ICE_MDID_SIZE * ICE_PTYPE_MDID, + ICE_PACKET_LENGTH_MDID_OFFSET = ICE_MDID_SIZE * ICE_PACKET_LENGTH_MDID, + ICE_SOURCE_VSI_MDID_OFFSET = ICE_MDID_SIZE * ICE_SOURCE_VSI_MDID, + ICE_PKT_VLAN_MDID_OFFSET = ICE_MDID_SIZE * ICE_PKT_VLAN_MDID, + ICE_PKT_TUNNEL_MDID_OFFSET = ICE_MDID_SIZE * ICE_PKT_TUNNEL_MDID, + ICE_PKT_TCP_MDID_OFFSET = ICE_MDID_SIZE * ICE_PKT_TCP_MDID, + ICE_PKT_ERROR_MDID_OFFSET = ICE_MDID_SIZE * ICE_PKT_ERROR_MDID, +}; + +enum ice_pkt_flags { + ICE_PKT_FLAGS_VLAN = 0, + ICE_PKT_FLAGS_TUNNEL = 1, + ICE_PKT_FLAGS_TCP = 2, + ICE_PKT_FLAGS_ERROR = 3, +}; + +struct ice_hw_metadata { + __be16 source_port; + __be16 ptype; + __be16 packet_length; + __be16 source_vsi; + __be16 flags[4]; +}; + union ice_prot_hdr { struct ice_ether_hdr eth_hdr; struct ice_ethtype_hdr ethertype; @@ -243,6 +419,7 @@ union ice_prot_hdr { struct ice_udp_gtp_hdr gtp_hdr; struct ice_pppoe_hdr pppoe_hdr; struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr; + struct ice_hw_metadata metadata; }; /* This is mapping table entry that maps every word within a given protocol diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index ac6f06f9a2ed0..d4b6c997141dd 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -911,7 +911,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) spin_unlock(&tx->lock); /* wait for potentially outstanding interrupt to complete */ - synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); + synchronize_irq(pf->oicr_irq.virq); ice_ptp_flush_tx_tracker(pf, tx); diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index fd1f8b0ad0ab4..e30e12321abd4 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -298,14 +298,6 @@ static int ice_repr_add(struct ice_vf *vf) if (!repr) return -ENOMEM; -#ifdef CONFIG_ICE_SWITCHDEV - repr->mac_rule = kzalloc(sizeof(*repr->mac_rule), GFP_KERNEL); - if (!repr->mac_rule) { - err = -ENOMEM; - goto err_alloc_rule; - } -#endif - repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv)); if (!repr->netdev) { err = -ENOMEM; @@ -351,11 +343,6 @@ static int ice_repr_add(struct ice_vf *vf) free_netdev(repr->netdev); repr->netdev = NULL; err_alloc: -#ifdef CONFIG_ICE_SWITCHDEV - kfree(repr->mac_rule); - repr->mac_rule = NULL; -err_alloc_rule: -#endif kfree(repr); vf->repr = NULL; return err; @@ -376,10 +363,6 @@ static void ice_repr_rem(struct ice_vf *vf) ice_devlink_destroy_vf_port(vf); free_netdev(vf->repr->netdev); vf->repr->netdev = NULL; -#ifdef CONFIG_ICE_SWITCHDEV - kfree(vf->repr->mac_rule); - vf->repr->mac_rule = NULL; -#endif kfree(vf->repr); vf->repr = NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h index 378a45bfa2564..9c2a6f496b3b9 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.h +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -13,9 +13,8 @@ struct ice_repr { struct net_device *netdev; struct metadata_dst *dst; #ifdef CONFIG_ICE_SWITCHDEV - /* info about slow path MAC rule */ - struct ice_rule_query_data *mac_rule; - u8 rule_added; + /* info about slow path rule */ + struct ice_rule_query_data sp_rule; #endif }; diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index f1dca59bd8449..2ea6d24977a68 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -135,18 +135,9 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) */ static int ice_sriov_free_msix_res(struct ice_pf *pf) { - struct ice_res_tracker *res; - if (!pf) return -EINVAL; - res = pf->irq_tracker; - if (!res) - return -EINVAL; - - /* give back irq_tracker resources used */ - WARN_ON(pf->sriov_base_vector < res->num_entries); - pf->sriov_base_vector = 0; return 0; @@ -409,29 +400,6 @@ int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) q_vector->v_idx + 1; } -/** - * ice_get_max_valid_res_idx - Get the max valid resource index - * @res: pointer to the resource to find the max valid index for - * - * Start from the end of the ice_res_tracker and return right when we find the - * first res->list entry with the ICE_RES_VALID_BIT set. This function is only - * valid for SR-IOV because it is the only consumer that manipulates the - * res->end and this is always called when res->end is set to res->num_entries. 
- */ -static int ice_get_max_valid_res_idx(struct ice_res_tracker *res) -{ - int i; - - if (!res) - return -EINVAL; - - for (i = res->num_entries - 1; i >= 0; i--) - if (res->list[i] & ICE_RES_VALID_BIT) - return i; - - return 0; -} - /** * ice_sriov_set_msix_res - Set any used MSIX resources * @pf: pointer to PF structure @@ -450,7 +418,7 @@ static int ice_get_max_valid_res_idx(struct ice_res_tracker *res) static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) { u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; - int vectors_used = pf->irq_tracker->num_entries; + int vectors_used = ice_get_max_used_msix_vector(pf); int sriov_base_vector; sriov_base_vector = total_vectors - num_msix_needed; @@ -490,7 +458,7 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) */ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) { - int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); + int vectors_used = ice_get_max_used_msix_vector(pf); u16 num_msix_per_vf, num_txq, num_rxq, avail_qs; int msix_avail_per_vf, msix_avail_for_sriov; struct device *dev = ice_pf_to_dev(pf); @@ -501,12 +469,9 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) if (!num_vfs) return -EINVAL; - if (max_valid_res_idx < 0) - return -ENOSPC; - /* determine MSI-X resources per VF */ msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - - pf->irq_tracker->num_entries; + vectors_used; msix_avail_per_vf = msix_avail_for_sriov / num_vfs; if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) { num_msix_per_vf = ICE_NUM_VF_MSIX_MED; @@ -871,7 +836,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) int ret; /* Disable global interrupt 0 so we don't try to handle the VFLR. */ - wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), + wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); set_bit(ICE_OICR_INTR_DIS, pf->state); ice_flush(hw); @@ -1014,8 +979,6 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!num_vfs) { if (!pci_vfs_assigned(pdev)) { ice_free_vfs(pf); - if (pf->lag) - ice_enable_lag(pf->lag); return 0; } @@ -1027,8 +990,6 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) if (err) return err; - if (pf->lag) - ice_disable_lag(pf->lag); return num_vfs; } @@ -1171,7 +1132,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) if (!vf) return -EINVAL; - ret = ice_check_vf_ready_for_cfg(vf); + ret = ice_check_vf_ready_for_reset(vf); if (ret) goto out_put_vf; @@ -1286,7 +1247,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto out_put_vf; } - ret = ice_check_vf_ready_for_cfg(vf); + ret = ice_check_vf_ready_for_reset(vf); if (ret) goto out_put_vf; @@ -1340,7 +1301,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) return -EOPNOTSUPP; } - ret = ice_check_vf_ready_for_cfg(vf); + ret = ice_check_vf_ready_for_reset(vf); if (ret) goto out_put_vf; @@ -1653,7 +1614,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, if (!vf) return -EINVAL; - ret = ice_check_vf_ready_for_cfg(vf); + ret = ice_check_vf_ready_for_reset(vf); if (ret) goto out_put_vf; diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 46b36851af460..2ea9e1ae55174 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -4540,6 +4540,11 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 
num_items, return status; } +#define ICE_PROTOCOL_ENTRY(id, ...) { \ + .prot_type = id, \ + .offs = {__VA_ARGS__}, \ +} + /* This is mapping table entry that maps every word within a given protocol * structure to the real byte offset as per the specification of that * protocol header. @@ -4550,29 +4555,38 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, * structure is added to that union. */ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = { - { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } }, - { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } }, - { ICE_ETYPE_OL, { 0 } }, - { ICE_ETYPE_IL, { 0 } }, - { ICE_VLAN_OFOS, { 2, 0 } }, - { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } }, - { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } }, - { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, - 26, 28, 30, 32, 34, 36, 38 } }, - { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, - 26, 28, 30, 32, 34, 36, 38 } }, - { ICE_TCP_IL, { 0, 2 } }, - { ICE_UDP_OF, { 0, 2 } }, - { ICE_UDP_ILOS, { 0, 2 } }, - { ICE_VXLAN, { 8, 10, 12, 14 } }, - { ICE_GENEVE, { 8, 10, 12, 14 } }, - { ICE_NVGRE, { 0, 2, 4, 6 } }, - { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } }, - { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } }, - { ICE_PPPOE, { 0, 2, 4, 6 } }, - { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } }, - { ICE_VLAN_EX, { 2, 0 } }, - { ICE_VLAN_IN, { 2, 0 } }, + ICE_PROTOCOL_ENTRY(ICE_MAC_OFOS, 0, 2, 4, 6, 8, 10, 12), + ICE_PROTOCOL_ENTRY(ICE_MAC_IL, 0, 2, 4, 6, 8, 10, 12), + ICE_PROTOCOL_ENTRY(ICE_ETYPE_OL, 0), + ICE_PROTOCOL_ENTRY(ICE_ETYPE_IL, 0), + ICE_PROTOCOL_ENTRY(ICE_VLAN_OFOS, 2, 0), + ICE_PROTOCOL_ENTRY(ICE_IPV4_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18), + ICE_PROTOCOL_ENTRY(ICE_IPV4_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18), + ICE_PROTOCOL_ENTRY(ICE_IPV6_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, + 20, 22, 24, 26, 28, 30, 32, 34, 36, 38), + ICE_PROTOCOL_ENTRY(ICE_IPV6_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, + 22, 24, 26, 28, 30, 32, 34, 36, 38), + ICE_PROTOCOL_ENTRY(ICE_TCP_IL, 0, 2), + ICE_PROTOCOL_ENTRY(ICE_UDP_OF, 0, 2), + ICE_PROTOCOL_ENTRY(ICE_UDP_ILOS, 0, 2), + ICE_PROTOCOL_ENTRY(ICE_VXLAN, 8, 10, 12, 14), + ICE_PROTOCOL_ENTRY(ICE_GENEVE, 8, 10, 12, 14), + ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6), + ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22), + ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14), + ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6), + ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10), + ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0), + ICE_PROTOCOL_ENTRY(ICE_VLAN_IN, 2, 0), + ICE_PROTOCOL_ENTRY(ICE_HW_METADATA, + ICE_SOURCE_PORT_MDID_OFFSET, + ICE_PTYPE_MDID_OFFSET, + ICE_PACKET_LENGTH_MDID_OFFSET, + ICE_SOURCE_VSI_MDID_OFFSET, + ICE_PKT_VLAN_MDID_OFFSET, + ICE_PKT_TUNNEL_MDID_OFFSET, + ICE_PKT_TCP_MDID_OFFSET, + ICE_PKT_ERROR_MDID_OFFSET), }; static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { @@ -4597,6 +4611,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { { ICE_L2TPV3, ICE_L2TPV3_HW }, { ICE_VLAN_EX, ICE_VLAN_OF_HW }, { ICE_VLAN_IN, ICE_VLAN_OL_HW }, + { ICE_HW_METADATA, ICE_META_DATA_ID_HW }, }; /** @@ -5255,71 +5270,6 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, return status; } -/** - * ice_tun_type_match_word - determine if tun type needs a match mask - * @tun_type: tunnel type - * @mask: mask to be used for the tunnel - */ -static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask) -{ - switch (tun_type) { - case 
ICE_SW_TUN_GENEVE: - case ICE_SW_TUN_VXLAN: - case ICE_SW_TUN_NVGRE: - case ICE_SW_TUN_GTPU: - case ICE_SW_TUN_GTPC: - *mask = ICE_TUN_FLAG_MASK; - return true; - - default: - *mask = 0; - return false; - } -} - -/** - * ice_add_special_words - Add words that are not protocols, such as metadata - * @rinfo: other information regarding the rule e.g. priority and action info - * @lkup_exts: lookup word structure - * @dvm_ena: is double VLAN mode enabled - */ -static int -ice_add_special_words(struct ice_adv_rule_info *rinfo, - struct ice_prot_lkup_ext *lkup_exts, bool dvm_ena) -{ - u16 mask; - - /* If this is a tunneled packet, then add recipe index to match the - * tunnel bit in the packet metadata flags. - */ - if (ice_tun_type_match_word(rinfo->tun_type, &mask)) { - if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) { - u8 word = lkup_exts->n_val_words++; - - lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW; - lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF; - lkup_exts->field_mask[word] = mask; - } else { - return -ENOSPC; - } - } - - if (rinfo->vlan_type != 0 && dvm_ena) { - if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) { - u8 word = lkup_exts->n_val_words++; - - lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW; - lkup_exts->fv_words[word].off = ICE_VLAN_FLAG_MDID_OFF; - lkup_exts->field_mask[word] = - ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK; - } else { - return -ENOSPC; - } - } - - return 0; -} - /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule * @hw: pointer to hardware structure * @rinfo: other information regarding the rule e.g. priority and action info @@ -5433,13 +5383,6 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (status) goto err_unroll; - /* Create any special protocol/offset pairs, such as looking at tunnel - * bits by extracting metadata - */ - status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw)); - if (status) - goto err_unroll; - /* Group match words into recipes using preferred recipe grouping * criteria. 
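/* Editor's note: a small usage sketch, not part of the patch. With
 * ice_add_special_words() removed above, callers that want to match on packet
 * metadata now append an ICE_HW_METADATA lookup element themselves, using the
 * ice_rule_add_*_metadata() helpers added a little further below (the TC
 * flower hunks later in this patch do exactly that). For reference, the masks
 * defined earlier evaluate to ICE_PKT_VLAN_MASK = BIT(12) | BIT(13) | BIT(14)
 * | BIT(15) = 0xF000 (MDID 20) and ICE_PKT_TUNNEL_MASK = BIT(6) | BIT(7) =
 * 0x00C0 (MDID 21). The rule-info fields and the final "added entry" argument
 * of ice_add_adv_rule() are filled from context here and are illustrative.
 */
static int example_add_tunnel_rule(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_adv_lkup_elem lkups[2] = {};
	struct ice_adv_rule_info rinfo = {};
	struct ice_rule_query_data added = {};
	u16 cnt = 0;

	/* protocol lookups (outer MACs, IPs, UDP ports, ...) would fill
	 * lkups[cnt++] here
	 */

	/* additionally match "packet arrived through a tunnel" in metadata */
	ice_rule_add_tunnel_metadata(&lkups[cnt++]);

	rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
	rinfo.sw_act.vsi_handle = vsi_handle;

	return ice_add_adv_rule(hw, lkups, cnt, &rinfo, &added);
}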
*/ @@ -5725,6 +5668,10 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * was already checked when search for the dummy packet */ type = lkups[i].type; + /* metadata isn't present in the packet */ + if (type == ICE_HW_METADATA) + continue; + for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) { if (type == offsets[j].type) { offset = offsets[j].offset; @@ -5860,16 +5807,21 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, /** * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type + * @hw: pointer to hw structure * @vlan_type: VLAN tag type * @pkt: dummy packet to fill in * @offsets: offset info for the dummy packet */ static int -ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt, +ice_fill_adv_packet_vlan(struct ice_hw *hw, u16 vlan_type, u8 *pkt, const struct ice_dummy_pkt_offsets *offsets) { u16 i; + /* Check if there is something to do */ + if (!vlan_type || !ice_is_dvm_ena(hw)) + return 0; + /* Find VLAN header and insert VLAN TPID */ for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) { if (offsets[i].type == ICE_VLAN_OFOS || @@ -5888,6 +5840,15 @@ ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt, return -EIO; } +static bool ice_rules_equal(const struct ice_adv_rule_info *first, + const struct ice_adv_rule_info *second) +{ + return first->sw_act.flag == second->sw_act.flag && + first->tun_type == second->tun_type && + first->vlan_type == second->vlan_type && + first->src_vsi == second->src_vsi; +} + /** * ice_find_adv_rule_entry - Search a rule entry * @hw: pointer to the hardware structure @@ -5921,9 +5882,7 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, lkups_matched = false; break; } - if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag && - rinfo->tun_type == list_itr->rule_info.tun_type && - rinfo->vlan_type == list_itr->rule_info.vlan_type && + if (ice_rules_equal(rinfo, &list_itr->rule_info) && lkups_matched) return list_itr; } @@ -6039,6 +5998,26 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, return status; } +void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup) +{ + lkup->type = ICE_HW_METADATA; + lkup->m_u.metadata.flags[ICE_PKT_FLAGS_TUNNEL] = + cpu_to_be16(ICE_PKT_TUNNEL_MASK); +} + +void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup) +{ + lkup->type = ICE_HW_METADATA; + lkup->m_u.metadata.flags[ICE_PKT_FLAGS_VLAN] = + cpu_to_be16(ICE_PKT_VLAN_MASK); +} + +void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup) +{ + lkup->type = ICE_HW_METADATA; + lkup->m_u.metadata.source_vsi = cpu_to_be16(ICE_MDID_SOURCE_VSI_MASK); +} + /** * ice_add_adv_rule - helper function to create an advanced switch rule * @hw: pointer to the hardware structure @@ -6120,7 +6099,10 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) rinfo->sw_act.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); - if (rinfo->sw_act.flag & ICE_FLTR_TX) + + if (rinfo->src_vsi) + rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi); + else rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle); status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid); @@ -6189,19 +6171,20 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, goto err_ice_add_adv_rule; } - /* set the rule LOOKUP type based on caller specified 'Rx' - * instead of hardcoding it to be either LOOKUP_TX/RX + /* If there is no matching criteria for direction there + * is only one difference between Rx and Tx: + * 
- get switch id base on VSI number from source field (Tx) + * - get switch id base on port number (Rx) * - * for 'Rx' set the source to be the port number - * for 'Tx' set the source to be the source HW VSI number (determined - * by caller) + * If matching on direction metadata is chose rule direction is + * extracted from type value set here. */ - if (rinfo->rx) { - s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); - s_rule->src = cpu_to_le16(hw->port_info->lport); - } else { + if (rinfo->sw_act.flag & ICE_FLTR_TX) { s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); s_rule->src = cpu_to_le16(rinfo->sw_act.src); + } else { + s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); + s_rule->src = cpu_to_le16(hw->port_info->lport); } s_rule->recipe_id = cpu_to_le16(rid); @@ -6211,22 +6194,16 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (status) goto err_ice_add_adv_rule; - if (rinfo->tun_type != ICE_NON_TUN && - rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) { - status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, - s_rule->hdr_data, - profile->offsets); - if (status) - goto err_ice_add_adv_rule; - } + status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, s_rule->hdr_data, + profile->offsets); + if (status) + goto err_ice_add_adv_rule; - if (rinfo->vlan_type != 0 && ice_is_dvm_ena(hw)) { - status = ice_fill_adv_packet_vlan(rinfo->vlan_type, - s_rule->hdr_data, - profile->offsets); - if (status) - goto err_ice_add_adv_rule; - } + status = ice_fill_adv_packet_vlan(hw, rinfo->vlan_type, + s_rule->hdr_data, + profile->offsets); + if (status) + goto err_ice_add_adv_rule; status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, rule_buf_sz, 1, ice_aqc_opc_add_sw_rules, @@ -6469,13 +6446,6 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, return -EIO; } - /* Create any special protocol/offset pairs, such as looking at tunnel - * bits by extracting metadata - */ - status = ice_add_special_words(rinfo, &lkup_exts, ice_is_dvm_ena(hw)); - if (status) - return status; - rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type); /* If did not find a recipe that match the existing criteria */ if (rid == ICE_MAX_NUM_RECIPES) diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 68d8e8a6a1896..c84b56fe84a5e 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -10,7 +10,6 @@ #define ICE_DFLT_VSI_INVAL 0xff #define ICE_FLTR_RX BIT(0) #define ICE_FLTR_TX BIT(1) -#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX) #define ICE_VSI_INVAL_ID 0xffff #define ICE_INVAL_Q_HANDLE 0xFFFF @@ -187,12 +186,13 @@ struct ice_adv_rule_flags_info { }; struct ice_adv_rule_info { + /* Store metadata values in rule info */ enum ice_sw_tunnel_type tun_type; - struct ice_sw_act_ctrl sw_act; - u32 priority; - u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */ - u16 fltr_rule_id; u16 vlan_type; + u16 fltr_rule_id; + u32 priority; + u16 src_vsi; + struct ice_sw_act_ctrl sw_act; struct ice_adv_rule_flags_info flags_info; }; @@ -342,6 +342,9 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id); /* Switch/bridge related commands */ +void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup); +void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup); +void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup); int ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 
lkups_cnt, struct ice_adv_rule_info *rinfo, diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index d1a31f236d26a..b54052ef6050d 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -54,6 +54,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) lkups_cnt++; + /* is VLAN TPID specified */ + if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID) + lkups_cnt++; + /* is CVLAN specified? */ if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) lkups_cnt++; @@ -80,6 +84,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, ICE_TC_FLWR_FIELD_SRC_L4_PORT)) lkups_cnt++; + /* matching for tunneled packets in metadata */ + if (fltr->tunnel_type != TNL_LAST) + lkups_cnt++; + return lkups_cnt; } @@ -320,6 +328,10 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, i++; } + /* always fill matching on tunneled packets in metadata */ + ice_rule_add_tunnel_metadata(&list[i]); + i++; + return i; } @@ -390,10 +402,6 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, /* copy VLAN info */ if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) { - vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid); - rule_info->vlan_type = - ice_check_supported_vlan_tpid(vlan_tpid); - if (flags & ICE_TC_FLWR_FIELD_CVLAN) list[i].type = ICE_VLAN_EX; else @@ -418,6 +426,15 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, i++; } + if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID) { + vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid); + rule_info->vlan_type = + ice_check_supported_vlan_tpid(vlan_tpid); + + ice_rule_add_vlan_metadata(&list[i]); + i++; + } + if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) { list[i].type = ICE_VLAN_IN; @@ -698,12 +715,10 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) { rule_info.sw_act.flag |= ICE_FLTR_RX; rule_info.sw_act.src = hw->pf_id; - rule_info.rx = true; rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE; } else { rule_info.sw_act.flag |= ICE_FLTR_TX; rule_info.sw_act.src = vsi->idx; - rule_info.rx = false; rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE; } @@ -910,7 +925,6 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, rule_info.sw_act.vsi_handle = dest_vsi->idx; rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; rule_info.sw_act.src = hw->pf_id; - rule_info.rx = true; dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n", tc_fltr->action.fwd.tc.tc_class, rule_info.sw_act.vsi_handle, lkups_cnt); @@ -921,7 +935,6 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, rule_info.sw_act.vsi_handle = dest_vsi->idx; rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE; rule_info.sw_act.src = hw->pf_id; - rule_info.rx = true; dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n", tc_fltr->action.fwd.q.queue, tc_fltr->action.fwd.q.hw_queue, lkups_cnt); @@ -929,7 +942,6 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, case ICE_DROP_PACKET: rule_info.sw_act.flag |= ICE_FLTR_RX; rule_info.sw_act.src = hw->pf_id; - rule_info.rx = true; rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; break; default: @@ -1460,8 +1472,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, VLAN_PRIO_MASK); } - if (match.mask->vlan_tpid) + if (match.mask->vlan_tpid) { 
headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid; + fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_TPID; + } } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h index 8d5e22ac7023c..8bbc1a62bdb1c 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h @@ -33,6 +33,7 @@ #define ICE_TC_FLWR_FIELD_L2TPV3_SESSID BIT(26) #define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27) #define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28) +#define ICE_TC_FLWR_FIELD_VLAN_TPID BIT(29) #define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 4fcf2d07eb853..059bd911c51d8 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1664,8 +1664,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; - td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> - ICE_TX_FLAGS_VLAN_S; + td_tag = first->vid; } dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); @@ -1998,7 +1997,7 @@ ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first) * VLAN offloads exclusively so we only care about the VLAN ID here */ if (skb_vlan_tag_present(skb)) { - first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S; + first->vid = skb_vlan_tag_get(skb); if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2) first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN; else @@ -2388,8 +2387,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | (ICE_TX_CTX_DESC_IL2TAG2 << ICE_TXD_CTX_QW1_CMD_S)); - offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> - ICE_TX_FLAGS_VLAN_S; + offload.cd_l2tag2 = first->vid; } /* set up TSO offload */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index fff0efe28373a..166413fc33f48 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -127,10 +127,6 @@ static inline int ice_skb_pad(void) #define ICE_TX_FLAGS_IPV6 BIT(6) #define ICE_TX_FLAGS_TUNNEL BIT(7) #define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(8) -#define ICE_TX_FLAGS_VLAN_M 0xffff0000 -#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000 -#define ICE_TX_FLAGS_VLAN_PR_S 29 -#define ICE_TX_FLAGS_VLAN_S 16 #define ICE_XDP_PASS 0 #define ICE_XDP_CONSUMED BIT(0) @@ -182,8 +178,9 @@ struct ice_tx_buf { unsigned int gso_segs; unsigned int nr_frags; /* used for mbuf XDP */ }; - u32 type:16; /* &ice_tx_buf_type */ - u32 tx_flags:16; + u32 tx_flags:12; + u32 type:4; /* &ice_tx_buf_type */ + u32 vid:16; DEFINE_DMA_UNMAP_LEN(len); DEFINE_DMA_UNMAP_ADDR(dma); }; diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 89fd6982df093..b26ce4425f455 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -185,6 +185,25 @@ int ice_check_vf_ready_for_cfg(struct ice_vf *vf) return 0; } +/** + * ice_check_vf_ready_for_reset - check if VF is ready to be reset + * @vf: VF to check if it's ready to be reset + * + * The purpose of this function is to ensure that the VF is not in reset, + * disabled, and is both initialized and active, thus enabling us to safely + * initialize another reset. 
+ */ +int ice_check_vf_ready_for_reset(struct ice_vf *vf) +{ + int ret; + + ret = ice_check_vf_ready_for_cfg(vf); + if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + ret = -EAGAIN; + + return ret; +} + /** * ice_trigger_vf_reset - Reset a VF on HW * @vf: pointer to the VF structure @@ -670,8 +689,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) */ ice_vf_clear_all_promisc_modes(vf, vsi); - ice_eswitch_del_vf_mac_rule(vf); - ice_vf_fdir_exit(vf); ice_vf_fdir_init(vf); /* clean VF control VSI when resetting VF since it should be setup @@ -697,7 +714,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) } ice_eswitch_update_repr(vsi); - ice_eswitch_replay_vf_mac_rule(vf); /* if the VF has been reset allow it to come up again */ ice_mbx_clear_malvf(&vf->mbx_info); @@ -1310,3 +1326,35 @@ void ice_vf_set_initialized(struct ice_vf *vf) set_bit(ICE_VF_STATE_INIT, vf->vf_states); memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps)); } + +/** + * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer + * @pf: the PF private structure + * @vsi: pointer to the VSI + * + * Return first found VF control VSI other than the vsi + * passed by parameter. This function is used to determine + * whether new resources have to be allocated for control VSI + * or they can be shared with existing one. + * + * Return found VF control VSI pointer other itself. Return + * NULL Otherwise. + * + */ +struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi) +{ + struct ice_vsi *ctrl_vsi = NULL; + struct ice_vf *vf; + unsigned int bkt; + + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) { + if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; + break; + } + } + + rcu_read_unlock(); + return ctrl_vsi; +} diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index e3cda6fb71ab1..67172fdd9bc27 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -215,6 +215,7 @@ u16 ice_get_num_vfs(struct ice_pf *pf); struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf); bool ice_is_vf_disabled(struct ice_vf *vf); int ice_check_vf_ready_for_cfg(struct ice_vf *vf); +int ice_check_vf_ready_for_reset(struct ice_vf *vf); void ice_set_vf_state_dis(struct ice_vf *vf); bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf); void @@ -226,6 +227,7 @@ int ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m); int ice_reset_vf(struct ice_vf *vf, u32 flags); void ice_reset_all_vfs(struct ice_pf *pf); +struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi); #else /* CONFIG_PCI_IOV */ static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id) { @@ -290,6 +292,12 @@ static inline int ice_reset_vf(struct ice_vf *vf, u32 flags) static inline void ice_reset_all_vfs(struct ice_pf *pf) { } + +static inline struct ice_vsi * +ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi) +{ + return NULL; +} #endif /* !CONFIG_PCI_IOV */ #endif /* _ICE_VF_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 97243c616d5d6..efbc2968a7bf6 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -3730,7 +3730,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) for (i = 0; i < al->num_elements; i++) { u8 *mac_addr = al->list[i].addr; - int result; if (!is_unicast_ether_addr(mac_addr) || 
ether_addr_equal(mac_addr, vf->hw_lan_addr)) @@ -3742,13 +3741,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) goto handle_mac_exit; } - result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr); - if (result) { - dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d\n, error %d\n", - mac_addr, vf->vf_id, result); - goto handle_mac_exit; - } - ice_vfhw_mac_add(vf, &al->list[i]); vf->num_mac++; break; @@ -3955,6 +3947,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event, ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: + clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); ops->reset_vf(vf); break; case VIRTCHNL_OP_ADD_ETH_ADDR: diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c index bcda2e0048073..1279c1ffe31c3 100644 --- a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c +++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c @@ -219,7 +219,7 @@ static struct ice_update_recipe_lkup_idx_params ice_dvm_dflt_recipes[] = { .rid = ICE_SW_LKUP_VLAN, .fv_idx = ICE_PKT_FLAGS_0_TO_15_FV_IDX, .ignore_valid = false, - .mask = ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK, + .mask = ICE_PKT_VLAN_MASK, .mask_valid = true, .lkup_idx = ICE_SW_LKUP_VLAN_PKT_FLAGS_LKUP_IDX, }, diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index d1e489da7363f..a7fe2b4ce6552 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -90,7 +90,6 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring, { struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - int base = vsi->base_vector; u16 reg; u32 val; @@ -103,11 +102,9 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring, wr32(hw, QINT_RQCTL(reg), val); if (q_vector) { - u16 v_idx = q_vector->v_idx; - wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0); ice_flush(hw); - synchronize_irq(pf->msix_entries[v_idx + base].vector); + synchronize_irq(q_vector->irq.virq); } } diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 205d577bdbbaa..caf91c6f52b4d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -426,7 +426,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value) static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) { u32 hash_value, hash_mask; - u8 bit_shift = 0; + u8 bit_shift = 1; /* Register count multiplied by bits per register */ hash_mask = (hw->mac.mta_reg_count * 32) - 1; @@ -434,7 +434,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) /* For a mc_filter_type of 0, bit_shift is the number of left-shifts * where 0xFF would still fall within the hash mask. 
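/* Editor's note: a worked example, not part of the patch, of the bounded
 * shift computed above. With igb's typical 128 MTA registers,
 * mta_reg_count = 128, so hash_mask = 128 * 32 - 1 = 0xFFF and the loop
 * settles on bit_shift = 4 (0xFFF >> 4 == 0xFF). Starting at 1 and capping
 * the shift at 4 keeps the loop from running forever when hash_mask can
 * never shift down to exactly 0xFF.
 */
static u8 example_mc_bit_shift(u32 mta_reg_count)
{
	u32 hash_mask = (mta_reg_count * 32) - 1;
	u8 bit_shift = 1;

	while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
		bit_shift++;

	return bit_shift;	/* 4 for mta_reg_count == 128 */
}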
*/ - while (hash_mask >> bit_shift != 0xFF) + while (hash_mask >> bit_shift != 0xFF && bit_shift < 4) bit_shift++; /* The portion of the address that is used for the hash table diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 58872a4c25401..c5cdb880774d4 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -183,11 +183,13 @@ static int igb_resume(struct device *); static int igb_runtime_suspend(struct device *dev); static int igb_runtime_resume(struct device *dev); static int igb_runtime_idle(struct device *dev); +#ifdef CONFIG_PM static const struct dev_pm_ops igb_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, igb_runtime_idle) }; +#endif static void igb_shutdown(struct pci_dev *); static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); #ifdef CONFIG_IGB_DCA diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 38d113b48111a..c5ef1edcf548a 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -2411,6 +2411,8 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) nq = txring_txq(ring); __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); res = igc_xdp_init_tx_descriptor(ring, xdpf); __netif_tx_unlock(nq); return res; @@ -2829,6 +2831,9 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring) __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + budget = igc_desc_unused(ring); while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { @@ -6354,6 +6359,9 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames, __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + drops = 0; for (i = 0; i < num_frames; i++) { int err; diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig index 993ac180a5db8..a32d85d6f599f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/Kconfig +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -32,6 +32,7 @@ config OCTEONTX2_PF tristate "Marvell OcteonTX2 NIC Physical Function driver" select OCTEONTX2_MBOX select NET_DEVLINK + select PAGE_POOL depends on (64BIT && COMPILE_TEST) || ARM64 select DIMLIB depends on PCI diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index b59532cf53cef..6e2fb24be8c1e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -426,13 +426,16 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf, struct mcs_secy_plcy_write_req *req; struct mbox *mbox = &pfvf->mbox; struct macsec_tx_sc *sw_tx_sc; - /* Insert SecTag after 12 bytes (DA+SA)*/ - u8 tag_offset = 12; u8 sectag_tci = 0; + u8 tag_offset; u64 policy; u8 cipher; int ret; + /* Insert SecTag after 12 bytes (DA+SA) or 16 bytes + * if VLAN tag needs to be sent in clear text. + */ + tag_offset = txsc->vlan_dev ? 
16 : 12; sw_tx_sc = &secy->tx_sc; mutex_lock(&mbox->lock); @@ -1163,6 +1166,7 @@ static int cn10k_mdo_add_secy(struct macsec_context *ctx) txsc->encoding_sa = secy->tx_sc.encoding_sa; txsc->last_validate_frames = secy->validate_frames; txsc->last_replay_protect = secy->replay_protect; + txsc->vlan_dev = is_vlan_dev(ctx->netdev); list_add(&txsc->entry, &cfg->txsc_list); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index f9286648e45cd..a79cb680bb23a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -518,11 +518,32 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx) (pfvf->hw.cq_ecount_wait - 1)); } +static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool, + dma_addr_t *dma) +{ + unsigned int offset = 0; + struct page *page; + size_t sz; + + sz = SKB_DATA_ALIGN(pool->rbsize); + sz = ALIGN(sz, OTX2_ALIGN); + + page = page_pool_alloc_frag(pool->page_pool, &offset, sz, GFP_ATOMIC); + if (unlikely(!page)) + return -ENOMEM; + + *dma = page_pool_get_dma_addr(page) + offset; + return 0; +} + static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, dma_addr_t *dma) { u8 *buf; + if (pool->page_pool) + return otx2_alloc_pool_buf(pfvf, pool, dma); + buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN); if (unlikely(!buf)) return -ENOMEM; @@ -1205,10 +1226,31 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf) } } +void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool, + u64 iova, int size) +{ + struct page *page; + u64 pa; + + pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); + page = virt_to_head_page(phys_to_virt(pa)); + + if (pool->page_pool) { + page_pool_put_full_page(pool->page_pool, page, true); + } else { + dma_unmap_page_attrs(pfvf->dev, iova, size, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + + put_page(page); + } +} + void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type) { int pool_id, pool_start = 0, pool_end = 0, size = 0; - u64 iova, pa; + struct otx2_pool *pool; + u64 iova; if (type == AURA_NIX_SQ) { pool_start = otx2_get_pool_idx(pfvf, type, 0); @@ -1224,15 +1266,13 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type) /* Free SQB and RQB pointers from the aura pool */ for (pool_id = pool_start; pool_id < pool_end; pool_id++) { iova = otx2_aura_allocptr(pfvf, pool_id); + pool = &pfvf->qset.pool[pool_id]; while (iova) { if (type == AURA_NIX_RQ) iova -= OTX2_HEAD_ROOM; - pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); - dma_unmap_page_attrs(pfvf->dev, iova, size, - DMA_FROM_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); - put_page(virt_to_page(phys_to_virt(pa))); + otx2_free_bufs(pfvf, pool, iova, size); + iova = otx2_aura_allocptr(pfvf, pool_id); } } @@ -1250,6 +1290,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf) pool = &pfvf->qset.pool[pool_id]; qmem_free(pfvf->dev, pool->stack); qmem_free(pfvf->dev, pool->fc_addr); + page_pool_destroy(pool->page_pool); + pool->page_pool = NULL; } devm_kfree(pfvf->dev, pfvf->qset.pool); pfvf->qset.pool = NULL; @@ -1333,8 +1375,9 @@ int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, } int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, - int stack_pages, int numptrs, int buf_size) + int stack_pages, int numptrs, int buf_size, int type) { + struct page_pool_params pp_params = { 0 }; struct npa_aq_enq_req *aq; struct otx2_pool *pool; int err; @@ -1378,6 +1421,22 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 
pool_id, aq->ctype = NPA_AQ_CTYPE_POOL; aq->op = NPA_AQ_INSTOP_INIT; + if (type != AURA_NIX_RQ) { + pool->page_pool = NULL; + return 0; + } + + pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP; + pp_params.pool_size = numptrs; + pp_params.nid = NUMA_NO_NODE; + pp_params.dev = pfvf->dev; + pp_params.dma_dir = DMA_FROM_DEVICE; + pool->page_pool = page_pool_create(&pp_params); + if (IS_ERR(pool->page_pool)) { + netdev_err(pfvf->netdev, "Creation of page pool failed\n"); + return PTR_ERR(pool->page_pool); + } + return 0; } @@ -1412,7 +1471,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) /* Initialize pool context */ err = otx2_pool_init(pfvf, pool_id, stack_pages, - num_sqbs, hw->sqb_size); + num_sqbs, hw->sqb_size, AURA_NIX_SQ); if (err) goto fail; } @@ -1475,7 +1534,7 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf) } for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { err = otx2_pool_init(pfvf, pool_id, stack_pages, - num_ptrs, pfvf->rbsize); + num_ptrs, pfvf->rbsize, AURA_NIX_RQ); if (err) goto fail; } @@ -1659,7 +1718,6 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) req->bpid_per_chan = 0; #endif - return otx2_sync_mbox_msg(&pfvf->mbox); } EXPORT_SYMBOL(otx2_nix_config_bp); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 0f2b2a9012251..a9ed15d1793a7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -419,6 +419,7 @@ struct cn10k_mcs_txsc { u8 encoding_sa; u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN]; ssci_t ssci[CN10K_MCS_SA_PER_SC]; + bool vlan_dev; /* macsec running on VLAN ? */ }; struct cn10k_mcs_rxsc { @@ -975,7 +976,7 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable); void otx2_ctx_disable(struct mbox *mbox, int type, bool npa); int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable); -void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); +void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx); void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura); int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); @@ -983,7 +984,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, dma_addr_t *dma); int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, - int stack_pages, int numptrs, int buf_size); + int stack_pages, int numptrs, int buf_size, int type); int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, int pool_id, int numptrs); @@ -1053,6 +1054,8 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf); int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features); int otx2_smq_flush(struct otx2_nic *pfvf, int smq); +void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool, + u64 iova, int size); /* tc support */ int otx2_init_tc(struct otx2_nic *nic); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index e1883c3edda3b..db3fcab1c8cd8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1555,7 +1555,9 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) struct nix_lf_free_req *free_req; struct mbox *mbox = 
&pf->mbox; struct otx2_cq_queue *cq; + struct otx2_pool *pool; struct msg_req *req; + int pool_id; int qidx; /* Ensure all SQE are processed */ @@ -1584,7 +1586,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) for (qidx = 0; qidx < qset->cq_cnt; qidx++) { cq = &qset->cq[qidx]; if (cq->cq_type == CQ_RX) - otx2_cleanup_rx_cqes(pf, cq); + otx2_cleanup_rx_cqes(pf, cq, qidx); else otx2_cleanup_tx_cqes(pf, cq); } @@ -1594,6 +1596,13 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) /* Free RQ buffer pointers*/ otx2_free_aura_ptr(pf, AURA_NIX_RQ); + for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) { + pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx); + pool = &pf->qset.pool[pool_id]; + page_pool_destroy(pool->page_pool); + pool->page_pool = NULL; + } + otx2_free_cq_res(pf); /* Free all ingress bandwidth profiles allocated */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index e288f46b23a8d..e369baf115301 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -217,9 +217,6 @@ static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, va - page_address(page) + off, len - off, pfvf->rbsize); - - otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM, - pfvf->rbsize, DMA_FROM_DEVICE); return true; } @@ -382,6 +379,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, if (pfvf->netdev->features & NETIF_F_RXCSUM) skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_mark_for_recycle(skb); + napi_gro_frags(napi); } @@ -657,9 +656,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, htons(ext->lso_sb - skb_network_offset(skb)); } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { ext->lso_format = pfvf->hw.lso_tsov6_idx; - - ipv6_hdr(skb)->payload_len = - htons(ext->lso_sb - skb_network_offset(skb)); + ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb)); } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { __be16 l3_proto = vlan_get_protocol(skb); struct udphdr *udph = udp_hdr(skb); @@ -1186,11 +1183,13 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, } EXPORT_SYMBOL(otx2_sq_append_skb); -void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) +void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx) { struct nix_cqe_rx_s *cqe; + struct otx2_pool *pool; int processed_cqe = 0; - u64 iova, pa; + u16 pool_id; + u64 iova; if (pfvf->xdp_prog) xdp_rxq_info_unreg(&cq->xdp_rxq); @@ -1198,6 +1197,9 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) return; + pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); + pool = &pfvf->qset.pool[pool_id]; + while (cq->pend_cqe) { cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq); processed_cqe++; @@ -1210,9 +1212,8 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) continue; } iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM; - pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); - otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE); - put_page(virt_to_page(phys_to_virt(pa))); + + otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize); } /* Free CQEs to HW */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 7ab6db9a986fa..b5d689eeff80b 100644 
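/* Editor's note: a condensed sketch, not part of the patch, of the page_pool
 * lifecycle the octeontx2 hunks above introduce for receive buffers: create a
 * DMA-mapping fragment pool at init, carve aligned fragments from it for the
 * RQ, return pages with page_pool_put_full_page() (or let
 * skb_mark_for_recycle() handle that on the skb path), and destroy the pool
 * on teardown. The pool_size and buffer size below are illustrative, not the
 * driver's actual values.
 */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>

static int example_rx_pool_roundtrip(struct device *dev)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP,
		.pool_size	= 1024,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};
	unsigned int offset = 0;
	struct page_pool *pp;
	struct page *page;
	dma_addr_t dma;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp))
		return PTR_ERR(pp);

	/* one RX buffer: an aligned fragment that is already DMA mapped */
	page = page_pool_alloc_frag(pp, &offset, SKB_DATA_ALIGN(2048),
				    GFP_ATOMIC);
	if (page) {
		dma = page_pool_get_dma_addr(page) + offset;
		(void)dma;	/* would be handed to the RQ/aura here */
		page_pool_put_full_page(pp, page, true);
	}

	page_pool_destroy(pp);
	return 0;
}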
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -118,6 +118,7 @@ struct otx2_cq_poll { struct otx2_pool { struct qmem *stack; struct qmem *fc_addr; + struct page_pool *page_pool; u16 rbsize; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c index d96ed29c1567f..9d887bfc31089 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c @@ -63,7 +63,7 @@ static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx) /* Initialize pool context */ err = otx2_pool_init(pfvf, pool_id, stack_pages, - num_sqbs, hw->sqb_size); + num_sqbs, hw->sqb_size, AURA_NIX_SQ); if (err) goto aura_free; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index a75fd072082c6..834c644b67db5 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -3269,18 +3269,14 @@ static int mtk_open(struct net_device *dev) eth->dsa_meta[i] = md_dst; } } else { - /* Hardware special tag parsing needs to be disabled if at least - * one MAC does not use DSA. + /* Hardware DSA untagging and VLAN RX offloading need to be + * disabled if at least one MAC does not use DSA. */ u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); val &= ~MTK_CDMP_STAG_EN; mtk_w32(eth, val, MTK_CDMP_IG_CTRL); - val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); - val &= ~MTK_CDMQ_STAG_EN; - mtk_w32(eth, val, MTK_CDMQ_IG_CTRL); - mtk_w32(eth, 0, MTK_CDMP_EG_CTRL); } diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 277738c50c56e..28c435ce98d8c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1374,16 +1374,13 @@ static int mlx4_mf_bond(struct mlx4_dev *dev) int nvfs; struct mlx4_slaves_pport slaves_port1; struct mlx4_slaves_pport slaves_port2; - DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX); slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1); slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2); - bitmap_and(slaves_port_1_2, - slaves_port1.slaves, slaves_port2.slaves, - dev->persist->num_vfs + 1); /* only single port vfs are allowed */ - if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) { + if (bitmap_weight_and(slaves_port1.slaves, slaves_port2.slaves, + dev->persist->num_vfs + 1) > 1) { mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index d53de39539a8f..d532883b42d7c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1920,9 +1920,10 @@ static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, u32 syndrome, int err) { + const char *namep = mlx5_command_str(opcode); struct mlx5_cmd_stats *stats; - if (!err) + if (!err || !(strcmp(namep, "unknown command opcode"))) return; stats = &dev->cmd.stats[opcode]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 4b607785d694c..bfaec67abf0db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -162,9 +162,8 @@ static int mlx5_devlink_reload_down(struct devlink 
*devlink, bool netns_change, return -EOPNOTSUPP; } - if (pci_num_vf(pdev)) { + if (mlx5_core_is_pf(dev) && pci_num_vf(pdev)) NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable"); - } switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: @@ -464,27 +463,6 @@ static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id, ctx->val.vbool = mlx5_lag_is_mpesw(dev); return 0; } - -static int mlx5_devlink_esw_multiport_validate(struct devlink *devlink, u32 id, - union devlink_param_value val, - struct netlink_ext_ack *extack) -{ - struct mlx5_core_dev *dev = devlink_priv(devlink); - - if (!MLX5_ESWITCH_MANAGER(dev)) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch is unsupported"); - return -EOPNOTSUPP; - } - - if (mlx5_eswitch_mode(dev) != MLX5_ESWITCH_OFFLOADS) { - NL_SET_ERR_MSG_MOD(extack, - "E-Switch must be in switchdev mode"); - return -EBUSY; - } - - return 0; -} - #endif static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id, @@ -563,7 +541,7 @@ static const struct devlink_param mlx5_devlink_params[] = { BIT(DEVLINK_PARAM_CMODE_RUNTIME), mlx5_devlink_esw_multiport_get, mlx5_devlink_esw_multiport_set, - mlx5_devlink_esw_multiport_validate), + NULL), #endif DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL, mlx5_devlink_eq_depth_validate), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index eb5abd0e55d90..3cbebfba582bd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -175,6 +175,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget) /* ensure cq space is freed before enabling more cqes */ wmb(); + mlx5e_txqsq_wake(&ptpsq->txqsq); + return work_done == budget; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 20c2d2ecaf935..6a052c6cfc158 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -1369,11 +1369,13 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow; list_for_each_entry(flow, encap_flows, tmp_list) { - struct mlx5_flow_attr *attr = flow->attr; struct mlx5_esw_flow_attr *esw_attr; + struct mlx5_flow_attr *attr; if (!mlx5e_is_offloaded_flow(flow)) continue; + + attr = mlx5e_tc_get_encap_attr(flow); esw_attr = attr->esw_attr; if (flow_flag_test(flow, SLOW)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 47381e949f1f5..879d698b61193 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -193,6 +193,8 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size) return pi; } +void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq); + static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 33bfe4d7338be..934b0d5ce1b3f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -283,7 +283,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs, if (IS_ERR(*rule_p)) { err = PTR_ERR(*rule_p); 
*rule_p = NULL; - fs_err(fs, "%s: add rule failed\n", __func__); + fs_err(fs, "add rule failed\n"); } return err; @@ -395,8 +395,7 @@ int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num if (IS_ERR(rule)) { err = PTR_ERR(rule); fs->vlan->trap_rule = NULL; - fs_err(fs, "%s: add VLAN trap rule failed, err %d\n", - __func__, err); + fs_err(fs, "add VLAN trap rule failed, err %d\n", err); return err; } fs->vlan->trap_rule = rule; @@ -421,8 +420,7 @@ int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num) if (IS_ERR(rule)) { err = PTR_ERR(rule); fs->l2.trap_rule = NULL; - fs_err(fs, "%s: add MAC trap rule failed, err %d\n", - __func__, err); + fs_err(fs, "add MAC trap rule failed, err %d\n", err); return err; } fs->l2.trap_rule = rule; @@ -763,7 +761,7 @@ static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs) if (IS_ERR(*rule_p)) { err = PTR_ERR(*rule_p); *rule_p = NULL; - fs_err(fs, "%s: add promiscuous rule failed\n", __func__); + fs_err(fs, "add promiscuous rule failed\n"); } kvfree(spec); return err; @@ -995,7 +993,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs, ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(ai->rule)) { - fs_err(fs, "%s: add l2 rule(mac:%pM) failed\n", __func__, mv_dmac); + fs_err(fs, "add l2 rule(mac:%pM) failed\n", mv_dmac); err = PTR_ERR(ai->rule); ai->rule = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 728b82ce4031a..e95414ef1f040 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1665,11 +1665,9 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport) { struct mlx5e_priv *out_priv, *route_priv; - struct mlx5_devcom *devcom = NULL; struct mlx5_core_dev *route_mdev; struct mlx5_eswitch *esw; u16 vhca_id; - int err; out_priv = netdev_priv(out_dev); esw = out_priv->mdev->priv.eswitch; @@ -1678,6 +1676,9 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id); if (mlx5_lag_is_active(out_priv->mdev)) { + struct mlx5_devcom *devcom; + int err; + /* In lag case we may get devices from different eswitch instances. * If we failed to get vport num, it means, mostly, that we on the wrong * eswitch. @@ -1686,16 +1687,16 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro if (err != -ENOENT) return err; + rcu_read_lock(); devcom = out_priv->mdev->priv.devcom; - esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); - if (!esw) - return -ENODEV; + esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + err = esw ? 
mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV; + rcu_read_unlock(); + + return err; } - err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport); - if (devcom) - mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); - return err; + return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport); } static int @@ -5301,6 +5302,8 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) goto err_action_counter; } + mlx5_esw_offloads_devcom_init(esw); + return 0; err_action_counter: @@ -5329,7 +5332,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv) priv = netdev_priv(rpriv->netdev); esw = priv->mdev->priv.eswitch; - mlx5e_tc_clean_fdb_peer_flows(esw); + mlx5_esw_offloads_devcom_cleanup(esw); mlx5e_tc_tun_cleanup(uplink_priv->encap); @@ -5643,22 +5646,43 @@ bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb) 0, NULL); } +static struct mapping_ctx * +mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv) +{ + struct mlx5e_tc_table *tc; + struct mlx5_eswitch *esw; + struct mapping_ctx *ctx; + + if (is_mdev_switchdev_mode(priv->mdev)) { + esw = priv->mdev->priv.eswitch; + ctx = esw->offloads.reg_c0_obj_pool; + } else { + tc = mlx5e_fs_get_tc(priv->fs); + ctx = tc->mapping; + } + + return ctx; +} + int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr, u64 act_miss_cookie, u32 *act_miss_mapping) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_mapped_obj mapped_obj = {}; + struct mlx5_eswitch *esw; struct mapping_ctx *ctx; int err; - ctx = esw->offloads.reg_c0_obj_pool; - + ctx = mlx5e_get_priv_obj_mapping(priv); mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS; mapped_obj.act_miss_cookie = act_miss_cookie; err = mapping_add(ctx, &mapped_obj, act_miss_mapping); if (err) return err; + if (!is_mdev_switchdev_mode(priv->mdev)) + return 0; + + esw = priv->mdev->priv.eswitch; attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping); if (IS_ERR(attr->act_id_restore_rule)) goto err_rule; @@ -5673,10 +5697,9 @@ int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_a void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr, u32 act_miss_mapping) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mapping_ctx *ctx; + struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv); - ctx = esw->offloads.reg_c0_obj_pool; - mlx5_del_flow_rules(attr->act_id_restore_rule); + if (is_mdev_switchdev_mode(priv->mdev)) + mlx5_del_flow_rules(attr->act_id_restore_rule); mapping_remove(ctx, act_miss_mapping); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index df5e780e8e6a6..c7eb6b238c2ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -762,6 +762,17 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t } } +void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq) +{ + if (netif_tx_queue_stopped(sq->txq) && + mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) && + mlx5e_ptpsq_fifo_has_room(sq) && + !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { + netif_tx_wake_queue(sq->txq); + sq->stats->wake++; + } +} + bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { struct mlx5e_sq_stats *stats; @@ -861,13 +872,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) netdev_tx_completed_queue(sq->txq, npkts, nbytes); - if 
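
The devcom changes later in this patch (lib/devcom.c) turn the per-component peer pointer into an RCU-protected one, and the TC code above is its lockless user: mlx5e_tc_query_route_vport() now looks up the peer eswitch with mlx5_devcom_get_peer_data_rcu() inside rcu_read_lock()/rcu_read_unlock() instead of holding the component semaphore. A generic sketch of that publish/read/retire pattern, with hypothetical names rather than the mlx5 helpers:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_peer {
	int id;
};

static struct demo_peer __rcu *demo_peer_ptr;

/* Publish: pairs with rcu_dereference() on the reader side */
static void demo_peer_publish(struct demo_peer *peer)
{
	rcu_assign_pointer(demo_peer_ptr, peer);
}

/* Read: the pointer is only guaranteed valid inside the read section */
static int demo_peer_get_id(void)
{
	struct demo_peer *peer;
	int id = -ENODEV;

	rcu_read_lock();
	peer = rcu_dereference(demo_peer_ptr);
	if (peer)
		id = peer->id;
	rcu_read_unlock();

	return id;
}

/* Retire: clear the pointer, wait out readers, then free */
static void demo_peer_retire(void)
{
	struct demo_peer *peer = rcu_dereference_protected(demo_peer_ptr, true);

	RCU_INIT_POINTER(demo_peer_ptr, NULL);
	synchronize_rcu();
	kfree(peer);
}
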
(netif_tx_queue_stopped(sq->txq) && - mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) && - mlx5e_ptpsq_fifo_has_room(sq) && - !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { - netif_tx_wake_queue(sq->txq); - stats->wake++; - } + mlx5e_txqsq_wake(sq); return (i == MLX5E_TX_CQ_POLL_BUDGET); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index a50bfda18e96d..fbb2d963fb7e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -161,20 +161,22 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) } } + /* budget=0 means we may be in IRQ context, do as little as possible */ + if (unlikely(!budget)) + goto out; + busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq); if (c->xdp) busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq); - if (likely(budget)) { /* budget=0 means: don't poll rx rings */ - if (xsk_open) - work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget); + if (xsk_open) + work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget); - if (likely(budget - work_done)) - work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done); + if (likely(budget - work_done)) + work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done); - busy |= work_done == budget; - } + busy |= work_done == budget; mlx5e_poll_ico_cq(&c->icosq.cq); if (mlx5e_poll_ico_cq(&c->async_icosq.cq)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 1c35d721a31d7..fe698c79616c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -1104,7 +1104,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) struct mlx5_eq_table *table = dev->priv.eq_table; mutex_lock(&table->lock); /* sync with create/destroy_async_eq */ - mlx5_irq_table_destroy(dev); + mlx5_irq_table_free_irqs(dev); mutex_unlock(&table->lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c index 45b839116212d..d599e50af346b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c @@ -35,7 +35,8 @@ esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, } ft_attr.max_fte = size; - ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT; + if (vport_num || mlx5_core_is_ecpf(esw->dev)) + ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT; acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport_num); if (IS_ERR(acl)) { err = PTR_ERR(acl); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 901c53751b0aa..31956cd9d1bb9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -92,7 +92,7 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num) { struct mlx5_vport *vport; - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) + if (!esw) return ERR_PTR(-EPERM); vport = xa_load(&esw->vports, vport_num); @@ -113,7 +113,8 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); - MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); + if (vport || mlx5_core_is_ecpf(dev)) + 
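
Two related TX-path cleanups sit next to each other here: the queue-wake condition is factored out into mlx5e_txqsq_wake() so the PTP timestamp CQ poll can reuse it, and mlx5e_napi_poll() now bails out early when budget is 0, which netpoll may pass from hard-IRQ context. A stripped-down sketch of that budget==0 convention in a NAPI poll handler (hypothetical driver, not the mlx5e code):

#include <linux/netdevice.h>

struct demo_channel {
	struct napi_struct napi;
	/* ... TX and RX completion queues ... */
};

static void demo_clean_tx(struct demo_channel *ch) { }
static int demo_clean_rx(struct demo_channel *ch, int budget) { return 0; }

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
	struct demo_channel *ch = container_of(napi, struct demo_channel, napi);
	int work_done;

	/* TX completions are always safe to reap */
	demo_clean_tx(ch);

	/* budget == 0 means netpoll, possibly in IRQ context:
	 * no RX processing and no napi_complete_done()
	 */
	if (unlikely(!budget))
		return 0;

	work_done = demo_clean_rx(ch, budget);
	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}
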
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context); @@ -309,11 +310,12 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) fdb_add: /* SRIOV is enabled: Forward UC MAC to vport */ - if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY) + if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY) { vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); - esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n", - vport, mac, vaddr->flow_rule); + esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n", + vport, mac, vaddr->flow_rule); + } return 0; } @@ -710,6 +712,9 @@ void esw_vport_change_handle_locked(struct mlx5_vport *vport) struct mlx5_eswitch *esw = dev->priv.eswitch; u8 mac[ETH_ALEN]; + if (!MLX5_CAP_GEN(dev, log_max_l2_table)) + return; + mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac); esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n", vport->vport, mac); @@ -946,7 +951,8 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) vport->enabled = false; /* Disable events from this vport */ - arm_vport_context_events_cmd(esw->dev, vport->vport, 0); + if (MLX5_CAP_GEN(esw->dev, log_max_l2_table)) + arm_vport_context_events_cmd(esw->dev, vport->vport, 0); if (!mlx5_esw_is_manager_vport(esw, vport->vport) && MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) @@ -1616,7 +1622,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) struct mlx5_eswitch *esw; int err; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_VPORT_MANAGER(dev) && !MLX5_ESWITCH_MANAGER(dev)) return 0; esw = kzalloc(sizeof(*esw), GFP_KERNEL); @@ -1686,7 +1692,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) { - if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) + if (!esw) return; esw_info(esw->dev, "cleanup\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 1a042c9817131..280dc71b032c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -342,6 +342,7 @@ struct mlx5_eswitch { u32 large_group_num; } params; struct blocking_notifier_head n_head; + bool paired[MLX5_MAX_PORTS]; }; void esw_offloads_disable(struct mlx5_eswitch *esw); @@ -369,6 +370,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs); void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf); void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw); void mlx5_eswitch_disable(struct mlx5_eswitch *esw); +void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw); +void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw); int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, u16 vport, const u8 *mac); int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, @@ -683,6 +686,14 @@ mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr struct mlx5_flow_handle * esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag); +void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw, + u32 *flow_group_in, + int match_params); + +void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw, + u16 vport, + struct mlx5_flow_spec *spec); + int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num); void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num); @@ -767,6 +778,8 @@ static inline void 
mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; } static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {} static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {} +static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {} +static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {} static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } static inline int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 69215ffb99995..7a65dcf01dba0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -838,6 +838,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, struct mlx5_flow_handle *flow_rule; struct mlx5_flow_spec *spec; void *misc; + u16 vport; spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { @@ -847,20 +848,43 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn); - /* source vport is the esw manager */ - MLX5_SET(fte_match_set_misc, misc, source_port, from_esw->manager_vport); - if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch)) - MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, - MLX5_CAP_GEN(from_esw->dev, vhca_id)); misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn); - MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); - if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch)) - MLX5_SET_TO_ONES(fte_match_set_misc, misc, - source_eswitch_owner_vhca_id); spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + + /* source vport is the esw manager */ + vport = from_esw->manager_vport; + + if (mlx5_eswitch_vport_match_metadata_enabled(on_esw)) { + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, + mlx5_eswitch_get_vport_metadata_for_match(from_esw, vport)); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, + mlx5_eswitch_get_vport_metadata_mask()); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; + } else { + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, vport); + + if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch)) + MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, + MLX5_CAP_GEN(from_esw->dev, vhca_id)); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + + if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch)) + MLX5_SET_TO_ONES(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; + } + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.vport.num = rep->vport; dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id); @@ -1269,8 +1293,10 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) #define MAX_PF_SQ 256 #define MAX_SQ_NVPORTS 32 
-static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw, - u32 *flow_group_in) +void +mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw, + u32 *flow_group_in, + int match_params) { void *match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, @@ -1279,7 +1305,7 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw, if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, - MLX5_MATCH_MISC_PARAMETERS_2); + MLX5_MATCH_MISC_PARAMETERS_2 | match_params); MLX5_SET(fte_match_param, match_criteria, misc_parameters_2.metadata_reg_c_0, @@ -1287,7 +1313,7 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw, } else { MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, - MLX5_MATCH_MISC_PARAMETERS); + MLX5_MATCH_MISC_PARAMETERS | match_params); MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); @@ -1463,14 +1489,13 @@ esw_create_send_to_vport_group(struct mlx5_eswitch *esw, memset(flow_group_in, 0, inlen); - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, - MLX5_MATCH_MISC_PARAMETERS); + mlx5_esw_set_flow_group_source_port(esw, flow_group_in, MLX5_MATCH_MISC_PARAMETERS); match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); - if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { + + if (!mlx5_eswitch_vport_match_metadata_enabled(esw) && + MLX5_CAP_ESW(esw->dev, merged_eswitch)) { MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_eswitch_owner_vhca_id); MLX5_SET(create_flow_group_in, flow_group_in, @@ -1558,7 +1583,7 @@ esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw, memset(flow_group_in, 0, inlen); - esw_set_flow_group_source_port(esw, flow_group_in); + mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0); if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) { match_criteria = MLX5_ADDR_OF(create_flow_group_in, @@ -1845,7 +1870,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) return -ENOMEM; /* create vport rx group */ - esw_set_flow_group_source_port(esw, flow_group_in); + mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1); @@ -1915,21 +1940,13 @@ static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw) mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group); } -struct mlx5_flow_handle * -mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, - struct mlx5_flow_destination *dest) +void +mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw, + u16 vport, + struct mlx5_flow_spec *spec) { - struct mlx5_flow_act flow_act = {0}; - struct mlx5_flow_handle *flow_rule; - struct mlx5_flow_spec *spec; void *misc; - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) { - flow_rule = ERR_PTR(-ENOMEM); - goto out; - } - if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, @@ -1949,6 +1966,23 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; } +} + +struct mlx5_flow_handle * 
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_act flow_act = {0}; + struct mlx5_flow_handle *flow_rule; + struct mlx5_flow_spec *spec; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) { + flow_rule = ERR_PTR(-ENOMEM); + goto out; + } + + mlx5_esw_set_spec_source_port(esw, vport, spec); flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec, @@ -2742,6 +2776,9 @@ static int mlx5_esw_offloads_devcom_event(int event, mlx5_eswitch_vport_match_metadata_enabled(peer_esw)) break; + if (esw->paired[mlx5_get_dev_index(peer_esw->dev)]) + break; + err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true); if (err) goto err_out; @@ -2753,14 +2790,18 @@ static int mlx5_esw_offloads_devcom_event(int event, if (err) goto err_pair; + esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true; + peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true; mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true); break; case ESW_OFFLOADS_DEVCOM_UNPAIR: - if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) + if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)]) break; mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false); + esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false; + peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false; mlx5_esw_offloads_unpair(peer_esw); mlx5_esw_offloads_unpair(esw); mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false); @@ -2779,7 +2820,7 @@ static int mlx5_esw_offloads_devcom_event(int event, return err; } -static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) +void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) { struct mlx5_devcom *devcom = esw->dev->priv.devcom; @@ -2802,7 +2843,7 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) ESW_OFFLOADS_DEVCOM_PAIR, esw); } -static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) +void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) { struct mlx5_devcom *devcom = esw->dev->priv.devcom; @@ -2827,9 +2868,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw) MLX5_FDB_TO_VPORT_REG_C_0)) return false; - if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) - return false; - return true; } @@ -3250,8 +3288,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_vports; - esw_offloads_devcom_init(esw); - return 0; err_vports: @@ -3280,7 +3316,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, /* If changing from switchdev to legacy mode without sriov enabled, * no need to create legacy fdb. 
*/ - if (!mlx5_sriov_is_enabled(esw->dev)) + if (!mlx5_core_is_pf(esw->dev) || !mlx5_sriov_is_enabled(esw->dev)) return 0; err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS); @@ -3292,7 +3328,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, void esw_offloads_disable(struct mlx5_eswitch *esw) { - esw_offloads_devcom_cleanup(esw); mlx5_eswitch_disable_pf_vf_vports(esw); esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); esw_set_passing_vport_metadata(esw, false); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 7bb7be01225a8..fb2035a5ec999 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -196,14 +196,11 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } - if (MLX5_CAP_GEN(dev, vport_group_manager) && - MLX5_ESWITCH_MANAGER(dev)) { + if (MLX5_ESWITCH_MANAGER(dev)) { err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE); if (err) return err; - } - if (MLX5_ESWITCH_MANAGER(dev)) { err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c index adefde3ea9410..b7d779d08d837 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c @@ -3,6 +3,7 @@ #include #include "lib/devcom.h" +#include "mlx5_core.h" static LIST_HEAD(devcom_list); @@ -13,7 +14,7 @@ static LIST_HEAD(devcom_list); struct mlx5_devcom_component { struct { - void *data; + void __rcu *data; } device[MLX5_DEVCOM_PORTS_SUPPORTED]; mlx5_devcom_event_handler_t handler; @@ -77,6 +78,7 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED) return NULL; + mlx5_dev_list_lock(); sguid0 = mlx5_query_nic_system_image_guid(dev); list_for_each_entry(iter, &devcom_list, list) { struct mlx5_core_dev *tmp_dev = NULL; @@ -102,8 +104,10 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev) if (!priv) { priv = mlx5_devcom_list_alloc(); - if (!priv) - return ERR_PTR(-ENOMEM); + if (!priv) { + devcom = ERR_PTR(-ENOMEM); + goto out; + } idx = 0; new_priv = true; @@ -112,13 +116,16 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev) priv->devs[idx] = dev; devcom = mlx5_devcom_alloc(priv, idx); if (!devcom) { - kfree(priv); - return ERR_PTR(-ENOMEM); + if (new_priv) + kfree(priv); + devcom = ERR_PTR(-ENOMEM); + goto out; } if (new_priv) list_add(&priv->list, &devcom_list); - +out: + mlx5_dev_list_unlock(); return devcom; } @@ -131,6 +138,7 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom) if (IS_ERR_OR_NULL(devcom)) return; + mlx5_dev_list_lock(); priv = devcom->priv; priv->devs[devcom->idx] = NULL; @@ -141,10 +149,12 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom) break; if (i != MLX5_DEVCOM_PORTS_SUPPORTED) - return; + goto out; list_del(&priv->list); kfree(priv); +out: + mlx5_dev_list_unlock(); } void mlx5_devcom_register_component(struct mlx5_devcom *devcom, @@ -162,7 +172,7 @@ void mlx5_devcom_register_component(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; down_write(&comp->sem); comp->handler = handler; - comp->device[devcom->idx].data = data; + rcu_assign_pointer(comp->device[devcom->idx].data, data); up_write(&comp->sem); } @@ -176,8 +186,9 @@ void mlx5_devcom_unregister_component(struct 
mlx5_devcom *devcom, comp = &devcom->priv->components[id]; down_write(&comp->sem); - comp->device[devcom->idx].data = NULL; + RCU_INIT_POINTER(comp->device[devcom->idx].data, NULL); up_write(&comp->sem); + synchronize_rcu(); } int mlx5_devcom_send_event(struct mlx5_devcom *devcom, @@ -193,12 +204,15 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; down_write(&comp->sem); - for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) - if (i != devcom->idx && comp->device[i].data) { - err = comp->handler(event, comp->device[i].data, - event_data); + for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) { + void *data = rcu_dereference_protected(comp->device[i].data, + lockdep_is_held(&comp->sem)); + + if (i != devcom->idx && data) { + err = comp->handler(event, data, event_data); break; } + } up_write(&comp->sem); return err; @@ -213,7 +227,7 @@ void mlx5_devcom_set_paired(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; WARN_ON(!rwsem_is_locked(&comp->sem)); - comp->paired = paired; + WRITE_ONCE(comp->paired, paired); } bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom, @@ -222,7 +236,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom, if (IS_ERR_OR_NULL(devcom)) return false; - return devcom->priv->components[id].paired; + return READ_ONCE(devcom->priv->components[id].paired); } void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, @@ -236,7 +250,7 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; down_read(&comp->sem); - if (!comp->paired) { + if (!READ_ONCE(comp->paired)) { up_read(&comp->sem); return NULL; } @@ -245,7 +259,29 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, if (i != devcom->idx) break; - return comp->device[i].data; + return rcu_dereference_protected(comp->device[i].data, lockdep_is_held(&comp->sem)); +} + +void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id) +{ + struct mlx5_devcom_component *comp; + int i; + + if (IS_ERR_OR_NULL(devcom)) + return NULL; + + for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) + if (i != devcom->idx) + break; + + comp = &devcom->priv->components[id]; + /* This can change concurrently, however 'data' pointer will remain + * valid for the duration of RCU read section. 
+ */ + if (!READ_ONCE(comp->paired)) + return NULL; + + return rcu_dereference(comp->device[i].data); } void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h index 94313c18bb647..9a496f4722dad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h @@ -41,6 +41,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom, void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, enum mlx5_devcom_components id); +void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id); void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom, enum mlx5_devcom_components id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 8ff16318e32dc..4450091e181a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -99,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev) int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); struct mlx5_mpfs *mpfs; - if (!MLX5_ESWITCH_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev) || l2table_size == 1) return 0; mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 995eb2d5ace0b..a7eb65cd0bddd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1049,7 +1049,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) dev->dm = mlx5_dm_create(dev); if (IS_ERR(dev->dm)) - mlx5_core_warn(dev, "Failed to init device memory%d\n", err); + mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm)); dev->tracer = mlx5_fw_tracer_create(dev); dev->hv_vhca = mlx5_hv_vhca_create(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h index efd0c299c5c73..aa403a5ea34ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h @@ -15,6 +15,7 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev); void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev); int mlx5_irq_table_create(struct mlx5_core_dev *dev); void mlx5_irq_table_destroy(struct mlx5_core_dev *dev); +void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev); int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table); int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table); struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 2245d3b2f393b..db5687d9fec97 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -32,6 +32,7 @@ struct mlx5_irq { struct mlx5_irq_pool *pool; int refcount; struct msi_map map; + u32 pool_index; }; struct mlx5_irq_table { @@ -132,7 +133,7 @@ static void irq_release(struct mlx5_irq *irq) struct cpu_rmap *rmap; #endif - xa_erase(&pool->irqs, irq->map.index); + xa_erase(&pool->irqs, irq->pool_index); /* free_irq requires that affinity_hint and rmap will be cleared before * calling it. 
To satisfy this requirement, we call * irq_cpu_rmap_remove() to remove the notifier @@ -276,11 +277,11 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, } irq->pool = pool; irq->refcount = 1; - irq->map.index = i; - err = xa_err(xa_store(&pool->irqs, irq->map.index, irq, GFP_KERNEL)); + irq->pool_index = i; + err = xa_err(xa_store(&pool->irqs, irq->pool_index, irq, GFP_KERNEL)); if (err) { mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n", - irq->map.index, err); + irq->pool_index, err); goto err_xa; } return irq; @@ -567,7 +568,7 @@ int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs, struct mlx5_irq *irq; int i; - af_desc.is_managed = 1; + af_desc.is_managed = false; for (i = 0; i < nirqs; i++) { cpumask_set_cpu(cpus[i], &af_desc.mask); irq = mlx5_irq_request(dev, i + 1, &af_desc, rmap); @@ -691,6 +692,24 @@ static void irq_pools_destroy(struct mlx5_irq_table *table) irq_pool_free(table->pcif_pool); } +static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool) +{ + struct mlx5_irq *irq; + unsigned long index; + + xa_for_each(&pool->irqs, index, irq) + free_irq(irq->map.virq, &irq->nh); +} + +static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table) +{ + if (table->sf_ctrl_pool) { + mlx5_irq_pool_free_irqs(table->sf_comp_pool); + mlx5_irq_pool_free_irqs(table->sf_ctrl_pool); + } + mlx5_irq_pool_free_irqs(table->pcif_pool); +} + /* irq_table API */ int mlx5_irq_table_init(struct mlx5_core_dev *dev) @@ -774,6 +793,17 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev) pci_free_irq_vectors(dev->pdev); } +void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev) +{ + struct mlx5_irq_table *table = dev->priv.irq_table; + + if (mlx5_core_is_sf(dev)) + return; + + mlx5_irq_pools_free_irqs(table); + pci_free_irq_vectors(dev->pdev); +} + int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table) { if (table->sf_comp_pool) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c index 540cf05f63739..a42f6cd99b744 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c @@ -30,9 +30,8 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev) struct mlx5_flow_spec *spec; struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; - void *match_criteria; + struct mlx5_eswitch *esw; u32 *flow_group_in; - void *misc; int err; if (!(MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) && @@ -63,12 +62,8 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev) goto free; } - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, - MLX5_MATCH_MISC_PARAMETERS); - match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, - match_criteria); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, - misc_parameters.source_port); + esw = dev->priv.eswitch; + mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0); fg = mlx5_create_flow_group(ft, flow_group_in); if (IS_ERR(fg)) { @@ -77,14 +72,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev) goto destroy_flow_table; } - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; - misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, - misc_parameters); - MLX5_SET(fte_match_set_misc, misc, source_port, - dev->priv.eswitch->manager_vport); - misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, - misc_parameters); - MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + 
mlx5_esw_set_spec_source_port(esw, esw->manager_vport, spec); flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0); @@ -115,7 +103,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev) static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev) { - mlx5_core_roce_gid_set(dev, 0, 0, 0, + mlx5_core_roce_gid_set(dev, 0, MLX5_ROCE_VERSION_2, 0, NULL, NULL, false, 0, 1); } @@ -135,7 +123,7 @@ static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev) mlx5_rdma_make_default_gid(dev, &gid); return mlx5_core_roce_gid_set(dev, 0, - MLX5_ROCE_VERSION_1, + MLX5_ROCE_VERSION_2, 0, gid.raw, mac, false, 0, 1); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index 7d955a4d9f145..c7d4691cb65ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -282,8 +282,7 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port, static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, const struct devlink_port_new_attrs *new_attr, - struct netlink_ext_ack *extack, - unsigned int *new_port_index) + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5_sf *sf; @@ -297,7 +296,6 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, new_attr->controller, new_attr->sfnum); if (err) goto esw_err; - *new_port_index = sf->port_index; trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum); return 0; @@ -338,8 +336,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_ int mlx5_devlink_sf_port_new(struct devlink *devlink, const struct devlink_port_new_attrs *new_attr, - struct netlink_ext_ack *extack, - unsigned int *new_port_index) + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_sf_table *table; @@ -355,7 +352,7 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink, "Port add is only supported in eswitch switchdev mode or SF ports are disabled."); return -EOPNOTSUPP; } - err = mlx5_sf_add(dev, table, new_attr, extack, new_port_index); + err = mlx5_sf_add(dev, table, new_attr, extack); mlx5_sf_table_put(table); return err; } @@ -379,7 +376,8 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf) } } -int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index, +int mlx5_devlink_sf_port_del(struct devlink *devlink, + struct devlink_port *dl_port, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); @@ -394,7 +392,7 @@ int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index, "Port del is only supported in eswitch switchdev mode or SF ports are disabled."); return -EOPNOTSUPP; } - sf = mlx5_sf_lookup_by_index(table, port_index); + sf = mlx5_sf_lookup_by_index(table, dl_port->index); if (!sf) { err = -ENODEV; goto sf_err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h index 3a480e06ecc01..c5430b8dcdf62 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h @@ -20,9 +20,9 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev); int mlx5_devlink_sf_port_new(struct devlink *devlink, const struct devlink_port_new_attrs *add_attr, - struct netlink_ext_ack *extack, - unsigned 
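
One detail worth spelling out in the dr_ste_crc32_calc() change above: htonl() only swaps bytes on little-endian hosts, so the old result depended on host endianness. The open-coded shifts reverse the four CRC bytes unconditionally and are equivalent to a plain 32-bit byte swap, e.g.:

#include <linux/swab.h>
#include <linux/types.h>

/* Same result as the open-coded shifts in dr_ste_crc32_calc():
 * reverse the CRC bytes regardless of host endianness.
 */
static u32 demo_crc_fixed_byte_order(u32 crc)
{
	return swab32(crc);
}
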
int *new_port_index); -int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index, + struct netlink_ext_ack *extack); +int mlx5_devlink_sf_port_del(struct devlink *devlink, + struct devlink_port *dl_port, struct netlink_ext_ack *extack); int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port, enum devlink_port_fn_state *state, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 20d7662c10fb6..f07d009291621 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -74,9 +74,6 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) struct mlx5_core_sriov *sriov = &dev->priv.sriov; int err, vf, num_msix_count; - if (!MLX5_ESWITCH_MANAGER(dev)) - goto enable_vfs_hca; - err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs); if (err) { mlx5_core_warn(dev, @@ -84,7 +81,6 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) return err; } -enable_vfs_hca: num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs); for (vf = 0; vf < num_vfs; vf++) { /* Notify the VF before its enablement to let it set diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 3835ba3f4ddac..1aa525e509f10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -117,6 +117,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id); caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols); caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version); + caps->roce_caps.fl_rc_qp_when_roce_disabled = + MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled); if (MLX5_CAP_GEN(mdev, roce)) { err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en); @@ -124,7 +126,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, return err; caps->roce_caps.roce_en = roce_en; - caps->roce_caps.fl_rc_qp_when_roce_disabled = + caps->roce_caps.fl_rc_qp_when_roce_disabled |= MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled); caps->roce_caps.fl_rc_qp_when_roce_enabled = MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 9413aaf51251b..e94fbb015efad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -15,7 +15,8 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length) { u32 crc = crc32(0, input_data, length); - return (__force u32)htonl(crc); + return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) | + ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000); } bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index ba7e3df224139..bc66b078a8a1c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -288,7 +288,8 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type); MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); - MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); + if 
(vport || mlx5_core_is_ecpf(dev)) + MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h index b001e52580911..47f6cc0401c39 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h @@ -44,7 +44,7 @@ MLXFW_MFA2_TLV(multi, struct mlxfw_mfa2_tlv_multi, MLXFW_MFA2_TLV_MULTI_PART); struct mlxfw_mfa2_tlv_psid { - u8 psid[0]; + DECLARE_FLEX_ARRAY(u8, psid); } __packed; MLXFW_MFA2_TLV_VARSIZE(psid, struct mlxfw_mfa2_tlv_psid, diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig index 571e6d4da1e9d..f9ebffc04eb85 100644 --- a/drivers/net/ethernet/microchip/lan966x/Kconfig +++ b/drivers/net/ethernet/microchip/lan966x/Kconfig @@ -10,3 +10,14 @@ config LAN966X_SWITCH select VCAP help This driver supports the Lan966x network switch device. + +config LAN966X_DCB + bool "Data Center Bridging (DCB) support" + depends on LAN966X_SWITCH && DCB + default y + help + Say Y here if you want to use Data Center Bridging (DCB) in the + driver. This can be used to assign priority to traffic, based on + DSCP and PCP. + + If unsure, set to Y. diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile index 7b0cda4ffa6b5..3b6ac331691d0 100644 --- a/drivers/net/ethernet/microchip/lan966x/Makefile +++ b/drivers/net/ethernet/microchip/lan966x/Makefile @@ -15,6 +15,7 @@ lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \ lan966x_xdp.o lan966x_vcap_impl.o lan966x_vcap_ag_api.o \ lan966x_tc_flower.o lan966x_goto.o +lan966x-switch-$(CONFIG_LAN966X_DCB) += lan966x_dcb.o lan966x-switch-$(CONFIG_DEBUG_FS) += lan966x_vcap_debugfs.o # Provide include files diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c new file mode 100644 index 0000000000000..ed2d96d7908eb --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +enum lan966x_dcb_apptrust_values { + LAN966X_DCB_APPTRUST_EMPTY, + LAN966X_DCB_APPTRUST_DSCP, + LAN966X_DCB_APPTRUST_PCP, + LAN966X_DCB_APPTRUST_DSCP_PCP, + __LAN966X_DCB_APPTRUST_MAX +}; + +static const struct lan966x_dcb_apptrust { + u8 selectors[IEEE_8021QAZ_APP_SEL_MAX + 1]; + int nselectors; +} *lan966x_port_apptrust[NUM_PHYS_PORTS]; + +static const char *lan966x_dcb_apptrust_names[__LAN966X_DCB_APPTRUST_MAX] = { + [LAN966X_DCB_APPTRUST_EMPTY] = "empty", + [LAN966X_DCB_APPTRUST_DSCP] = "dscp", + [LAN966X_DCB_APPTRUST_PCP] = "pcp", + [LAN966X_DCB_APPTRUST_DSCP_PCP] = "dscp pcp" +}; + +/* Lan966x supported apptrust policies */ +static const struct lan966x_dcb_apptrust + lan966x_dcb_apptrust_policies[__LAN966X_DCB_APPTRUST_MAX] = { + /* Empty *must* be first */ + [LAN966X_DCB_APPTRUST_EMPTY] = { { 0 }, 0 }, + [LAN966X_DCB_APPTRUST_DSCP] = { { IEEE_8021QAZ_APP_SEL_DSCP }, 1 }, + [LAN966X_DCB_APPTRUST_PCP] = { { DCB_APP_SEL_PCP }, 1 }, + [LAN966X_DCB_APPTRUST_DSCP_PCP] = { { IEEE_8021QAZ_APP_SEL_DSCP, + DCB_APP_SEL_PCP }, 2 }, +}; + +static bool lan966x_dcb_apptrust_contains(int portno, u8 selector) +{ + const struct lan966x_dcb_apptrust *conf = lan966x_port_apptrust[portno]; + + for (int i = 0; i < conf->nselectors; i++) + if (conf->selectors[i] == 
selector) + return true; + + return false; +} + +static void lan966x_dcb_app_update(struct net_device *dev) +{ + struct dcb_ieee_app_prio_map dscp_rewr_map = {0}; + struct dcb_rewr_prio_pcp_map pcp_rewr_map = {0}; + struct lan966x_port *port = netdev_priv(dev); + struct lan966x_port_qos qos = {0}; + struct dcb_app app_itr; + bool dscp_rewr = false; + bool pcp_rewr = false; + + /* Get pcp ingress mapping */ + for (int i = 0; i < ARRAY_SIZE(qos.pcp.map); i++) { + app_itr.selector = DCB_APP_SEL_PCP; + app_itr.protocol = i; + qos.pcp.map[i] = dcb_getapp(dev, &app_itr); + } + + /* Get dscp ingress mapping */ + for (int i = 0; i < ARRAY_SIZE(qos.dscp.map); i++) { + app_itr.selector = IEEE_8021QAZ_APP_SEL_DSCP; + app_itr.protocol = i; + qos.dscp.map[i] = dcb_getapp(dev, &app_itr); + } + + /* Get default prio */ + qos.default_prio = dcb_ieee_getapp_default_prio_mask(dev); + if (qos.default_prio) + qos.default_prio = fls(qos.default_prio) - 1; + + /* Get pcp rewrite mapping */ + dcb_getrewr_prio_pcp_mask_map(dev, &pcp_rewr_map); + for (int i = 0; i < ARRAY_SIZE(pcp_rewr_map.map); i++) { + if (!pcp_rewr_map.map[i]) + continue; + + pcp_rewr = true; + qos.pcp_rewr.map[i] = fls(pcp_rewr_map.map[i]) - 1; + } + + /* Get dscp rewrite mapping */ + dcb_getrewr_prio_dscp_mask_map(dev, &dscp_rewr_map); + for (int i = 0; i < ARRAY_SIZE(dscp_rewr_map.map); i++) { + if (!dscp_rewr_map.map[i]) + continue; + + dscp_rewr = true; + qos.dscp_rewr.map[i] = fls64(dscp_rewr_map.map[i]) - 1; + } + + /* Enable use of pcp for queue classification */ + if (lan966x_dcb_apptrust_contains(port->chip_port, DCB_APP_SEL_PCP)) { + qos.pcp.enable = true; + + if (pcp_rewr) + qos.pcp_rewr.enable = true; + } + + /* Enable use of dscp for queue classification */ + if (lan966x_dcb_apptrust_contains(port->chip_port, IEEE_8021QAZ_APP_SEL_DSCP)) { + qos.dscp.enable = true; + + if (dscp_rewr) + qos.dscp_rewr.enable = true; + } + + lan966x_port_qos_set(port, &qos); +} + +/* DSCP mapping is global for all ports, so set and delete app entries are + * replicated for each port. 
+ */ +static int lan966x_dcb_ieee_dscp_setdel(struct net_device *dev, + struct dcb_app *app, + int (*setdel)(struct net_device *, + struct dcb_app *)) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + int err; + + for (int i = 0; i < NUM_PHYS_PORTS; i++) { + port = lan966x->ports[i]; + if (!port) + continue; + + err = setdel(port->dev, app); + if (err) + return err; + } + + return 0; +} + +static int lan966x_dcb_app_validate(struct net_device *dev, + const struct dcb_app *app) +{ + int err = 0; + + switch (app->selector) { + /* Default priority checks */ + case IEEE_8021QAZ_APP_SEL_ETHERTYPE: + if (app->protocol) + err = -EINVAL; + else if (app->priority >= NUM_PRIO_QUEUES) + err = -ERANGE; + break; + /* Dscp checks */ + case IEEE_8021QAZ_APP_SEL_DSCP: + if (app->protocol >= LAN966X_PORT_QOS_DSCP_COUNT) + err = -EINVAL; + else if (app->priority >= NUM_PRIO_QUEUES) + err = -ERANGE; + break; + /* Pcp checks */ + case DCB_APP_SEL_PCP: + if (app->protocol >= LAN966X_PORT_QOS_PCP_DEI_COUNT) + err = -EINVAL; + else if (app->priority >= NUM_PRIO_QUEUES) + err = -ERANGE; + break; + default: + err = -EINVAL; + break; + } + + if (err) + netdev_err(dev, "Invalid entry: %d:%d\n", app->protocol, + app->priority); + + return err; +} + +static int lan966x_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app) +{ + int err; + + if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) + err = lan966x_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_delapp); + else + err = dcb_ieee_delapp(dev, app); + + if (err) + return err; + + lan966x_dcb_app_update(dev); + + return 0; +} + +static int lan966x_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app) +{ + struct dcb_app app_itr; + int err; + u8 prio; + + err = lan966x_dcb_app_validate(dev, app); + if (err) + return err; + + /* Delete current mapping, if it exists */ + prio = dcb_getapp(dev, app); + if (prio) { + app_itr = *app; + app_itr.priority = prio; + lan966x_dcb_ieee_delapp(dev, &app_itr); + } + + if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) + err = lan966x_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_setapp); + else + err = dcb_ieee_setapp(dev, app); + + if (err) + return err; + + lan966x_dcb_app_update(dev); + + return 0; +} + +static int lan966x_dcb_apptrust_validate(struct net_device *dev, + u8 *selectors, + int nselectors) +{ + for (int i = 0; i < ARRAY_SIZE(lan966x_dcb_apptrust_policies); i++) { + bool match; + + if (lan966x_dcb_apptrust_policies[i].nselectors != nselectors) + continue; + + match = true; + for (int j = 0; j < nselectors; j++) { + if (lan966x_dcb_apptrust_policies[i].selectors[j] != + *(selectors + j)) { + match = false; + break; + } + } + if (match) + return i; + } + + netdev_err(dev, "Valid apptrust configurations are:\n"); + for (int i = 0; i < ARRAY_SIZE(lan966x_dcb_apptrust_names); i++) + pr_info("order: %s\n", lan966x_dcb_apptrust_names[i]); + + return -EOPNOTSUPP; +} + +static int lan966x_dcb_setapptrust(struct net_device *dev, + u8 *selectors, + int nselectors) +{ + struct lan966x_port *port = netdev_priv(dev); + int idx; + + idx = lan966x_dcb_apptrust_validate(dev, selectors, nselectors); + if (idx < 0) + return idx; + + lan966x_port_apptrust[port->chip_port] = &lan966x_dcb_apptrust_policies[idx]; + lan966x_dcb_app_update(dev); + + return 0; +} + +static int lan966x_dcb_getapptrust(struct net_device *dev, u8 *selectors, + int *nselectors) +{ + struct lan966x_port *port = netdev_priv(dev); + const struct lan966x_dcb_apptrust *trust; + + trust = lan966x_port_apptrust[port->chip_port]; 
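
The lan966x dcbnl handlers above store DSCP/PCP-to-priority APP entries with the core dcb_ieee_setapp()/dcb_getapp() helpers and then fold them into the port QoS registers via lan966x_dcb_app_update(). For illustration, the kind of APP entry involved -- normally installed from user space through the dcbnl netlink interface rather than called like this -- has the following shape (hypothetical function name):

#include <linux/netdevice.h>
#include <net/dcbnl.h>

/* Map DSCP 46 (EF) to priority 7; the driver's ieee_setapp handler
 * stores this kind of entry and re-reads it with dcb_getapp() when
 * rebuilding the port QoS configuration.
 */
static int demo_map_dscp46_to_prio7(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.protocol = 46,	/* DSCP value */
		.priority = 7,	/* classified priority queue */
	};

	return dcb_ieee_setapp(dev, &app);
}
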
+ + memcpy(selectors, trust->selectors, trust->nselectors); + *nselectors = trust->nselectors; + + return 0; +} + +static int lan966x_dcb_delrewr(struct net_device *dev, struct dcb_app *app) +{ + int err; + + if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) + err = lan966x_dcb_ieee_dscp_setdel(dev, app, dcb_delrewr); + else + err = dcb_delrewr(dev, app); + + if (err < 0) + return err; + + lan966x_dcb_app_update(dev); + + return 0; +} + +static int lan966x_dcb_setrewr(struct net_device *dev, struct dcb_app *app) +{ + struct dcb_app app_itr; + u16 proto; + int err; + + err = lan966x_dcb_app_validate(dev, app); + if (err) + goto out; + + /* Delete current mapping, if it exists. */ + proto = dcb_getrewr(dev, app); + if (proto) { + app_itr = *app; + app_itr.protocol = proto; + lan966x_dcb_delrewr(dev, &app_itr); + } + + if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) + err = lan966x_dcb_ieee_dscp_setdel(dev, app, dcb_setrewr); + else + err = dcb_setrewr(dev, app); + + if (err) + goto out; + + lan966x_dcb_app_update(dev); + +out: + return err; +} + +static const struct dcbnl_rtnl_ops lan966x_dcbnl_ops = { + .ieee_setapp = lan966x_dcb_ieee_setapp, + .ieee_delapp = lan966x_dcb_ieee_delapp, + .dcbnl_setapptrust = lan966x_dcb_setapptrust, + .dcbnl_getapptrust = lan966x_dcb_getapptrust, + .dcbnl_setrewr = lan966x_dcb_setrewr, + .dcbnl_delrewr = lan966x_dcb_delrewr, +}; + +void lan966x_dcb_init(struct lan966x *lan966x) +{ + for (int p = 0; p < lan966x->num_phys_ports; ++p) { + struct lan966x_port *port; + + port = lan966x->ports[p]; + if (!port) + continue; + + port->dev->dcbnl_ops = &lan966x_dcbnl_ops; + + lan966x_port_apptrust[port->chip_port] = + &lan966x_dcb_apptrust_policies[LAN966X_DCB_APPTRUST_DSCP_PCP]; + + /* Enable DSCP classification based on classified QoS class and + * DP, for all DSCP values, for all ports. + */ + lan966x_port_qos_dscp_rewr_mode_set(port, + LAN966X_PORT_QOS_REWR_DSCP_ALL); + } +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 2b6e046e1d10b..f6931dfb3e68e 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -1039,6 +1039,16 @@ static int lan966x_reset_switch(struct lan966x *lan966x) reset_control_reset(switch_reset); + /* Don't reinitialize the switch core, if it is already initialized. In + * case it is initialized twice, some pointers inside the queue system + * in HW will get corrupted and then after a while the queue system gets + * full and no traffic is passing through the switch. The issue is seen + * when loading and unloading the driver and sending traffic through the + * switch. 
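+	 * An already initialized core is detected by the core enable bit in
+	 * SYS_RESET_CFG being set, in which case the reset sequence below is
+	 * skipped.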
+ */ + if (lan_rd(lan966x, SYS_RESET_CFG) & SYS_RESET_CFG_CORE_ENA) + return 0; + lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG); lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT); ret = readx_poll_timeout(lan966x_ram_init, lan966x, @@ -1213,6 +1223,8 @@ static int lan966x_probe(struct platform_device *pdev) if (err) goto cleanup_fdma; + lan966x_dcb_init(lan966x); + return 0; cleanup_fdma: diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index 882d5a08e7d51..27f272831ea5c 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -104,6 +104,22 @@ #define LAN966X_VCAP_CID_ES0_L0 VCAP_CID_EGRESS_L0 /* ES0 lookup 0 */ #define LAN966X_VCAP_CID_ES0_MAX (VCAP_CID_EGRESS_L1 - 1) /* ES0 Max */ +#define LAN966X_PORT_QOS_PCP_COUNT 8 +#define LAN966X_PORT_QOS_DEI_COUNT 8 +#define LAN966X_PORT_QOS_PCP_DEI_COUNT \ + (LAN966X_PORT_QOS_PCP_COUNT + LAN966X_PORT_QOS_DEI_COUNT) + +#define LAN966X_PORT_QOS_DSCP_COUNT 64 + +/* Port PCP rewrite mode */ +#define LAN966X_PORT_REW_TAG_CTRL_CLASSIFIED 0 +#define LAN966X_PORT_REW_TAG_CTRL_MAPPED 2 + +/* Port DSCP rewrite mode */ +#define LAN966X_PORT_REW_DSCP_FRAME 0 +#define LAN966X_PORT_REW_DSCP_ANALIZER 1 +#define LAN966X_PORT_QOS_REWR_DSCP_ALL 3 + /* MAC table entry types. * ENTRYTYPE_NORMAL is subject to aging. * ENTRYTYPE_LOCKED is not subject to aging. @@ -392,6 +408,34 @@ struct lan966x_port_tc { struct flow_stats mirror_stat; }; +struct lan966x_port_qos_pcp { + u8 map[LAN966X_PORT_QOS_PCP_DEI_COUNT]; + bool enable; +}; + +struct lan966x_port_qos_dscp { + u8 map[LAN966X_PORT_QOS_DSCP_COUNT]; + bool enable; +}; + +struct lan966x_port_qos_pcp_rewr { + u16 map[NUM_PRIO_QUEUES]; + bool enable; +}; + +struct lan966x_port_qos_dscp_rewr { + u16 map[LAN966X_PORT_QOS_DSCP_COUNT]; + bool enable; +}; + +struct lan966x_port_qos { + struct lan966x_port_qos_pcp pcp; + struct lan966x_port_qos_dscp dscp; + struct lan966x_port_qos_pcp_rewr pcp_rewr; + struct lan966x_port_qos_dscp_rewr dscp_rewr; + u8 default_prio; +}; + struct lan966x_port { struct net_device *dev; struct lan966x *lan966x; @@ -456,6 +500,11 @@ int lan966x_port_pcs_set(struct lan966x_port *port, struct lan966x_port_config *config); void lan966x_port_init(struct lan966x_port *port); +void lan966x_port_qos_set(struct lan966x_port *port, + struct lan966x_port_qos *qos); +void lan966x_port_qos_dscp_rewr_mode_set(struct lan966x_port *port, + int mode); + int lan966x_mac_ip_learn(struct lan966x *lan966x, bool cpu_copy, const unsigned char mac[ETH_ALEN], @@ -680,6 +729,14 @@ int lan966x_goto_port_del(struct lan966x_port *port, unsigned long goto_id, struct netlink_ext_ack *extack); +#ifdef CONFIG_LAN966X_DCB +void lan966x_dcb_init(struct lan966x *lan966x); +#else +static inline void lan966x_dcb_init(struct lan966x *lan966x) +{ +} +#endif + static inline void __iomem *lan_addr(void __iomem *base[], int id, int tinst, int tcnt, int gbase, int ginst, diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c index 0050fcb988b75..92108d354051c 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c @@ -394,6 +394,155 @@ int lan966x_port_pcs_set(struct lan966x_port *port, return 0; } +static void lan966x_port_qos_pcp_set(struct lan966x_port *port, + struct lan966x_port_qos_pcp *qos) +{ + u8 *pcp_itr = qos->map; + u8 
pcp, dp; + + lan_rmw(ANA_QOS_CFG_QOS_PCP_ENA_SET(qos->enable), + ANA_QOS_CFG_QOS_PCP_ENA, + port->lan966x, ANA_QOS_CFG(port->chip_port)); + + /* Map PCP and DEI to priority */ + for (int i = 0; i < ARRAY_SIZE(qos->map); i++) { + pcp = *(pcp_itr + i); + dp = (i < LAN966X_PORT_QOS_PCP_COUNT) ? 0 : 1; + + lan_rmw(ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL_SET(pcp) | + ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL_SET(dp), + ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL | + ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL, + port->lan966x, + ANA_PCP_DEI_CFG(port->chip_port, i)); + } +} + +static void lan966x_port_qos_dscp_set(struct lan966x_port *port, + struct lan966x_port_qos_dscp *qos) +{ + struct lan966x *lan966x = port->lan966x; + + /* Enable/disable dscp for qos classification. */ + lan_rmw(ANA_QOS_CFG_QOS_DSCP_ENA_SET(qos->enable), + ANA_QOS_CFG_QOS_DSCP_ENA, + lan966x, ANA_QOS_CFG(port->chip_port)); + + /* Map each dscp value to priority and dp */ + for (int i = 0; i < ARRAY_SIZE(qos->map); i++) + lan_rmw(ANA_DSCP_CFG_DP_DSCP_VAL_SET(0) | + ANA_DSCP_CFG_QOS_DSCP_VAL_SET(*(qos->map + i)), + ANA_DSCP_CFG_DP_DSCP_VAL | + ANA_DSCP_CFG_QOS_DSCP_VAL, + lan966x, ANA_DSCP_CFG(i)); + + /* Set per-dscp trust */ + for (int i = 0; i < ARRAY_SIZE(qos->map); i++) + lan_rmw(ANA_DSCP_CFG_DSCP_TRUST_ENA_SET(qos->enable), + ANA_DSCP_CFG_DSCP_TRUST_ENA, + lan966x, ANA_DSCP_CFG(i)); +} + +static int lan966x_port_qos_default_set(struct lan966x_port *port, + struct lan966x_port_qos *qos) +{ + /* Set default prio and dp level */ + lan_rmw(ANA_QOS_CFG_DP_DEFAULT_VAL_SET(0) | + ANA_QOS_CFG_QOS_DEFAULT_VAL_SET(qos->default_prio), + ANA_QOS_CFG_DP_DEFAULT_VAL | + ANA_QOS_CFG_QOS_DEFAULT_VAL, + port->lan966x, ANA_QOS_CFG(port->chip_port)); + + /* Set default pcp and dei for untagged frames */ + lan_rmw(ANA_VLAN_CFG_VLAN_DEI_SET(0) | + ANA_VLAN_CFG_VLAN_PCP_SET(0), + ANA_VLAN_CFG_VLAN_DEI | + ANA_VLAN_CFG_VLAN_PCP, + port->lan966x, ANA_VLAN_CFG(port->chip_port)); + + return 0; +} + +static void lan966x_port_qos_pcp_rewr_set(struct lan966x_port *port, + struct lan966x_port_qos_pcp_rewr *qos) +{ + u8 mode = LAN966X_PORT_REW_TAG_CTRL_CLASSIFIED; + u8 pcp, dei; + + if (qos->enable) + mode = LAN966X_PORT_REW_TAG_CTRL_MAPPED; + + /* Map the values only if it is enabled otherwise will be the classified + * value + */ + lan_rmw(REW_TAG_CFG_TAG_PCP_CFG_SET(mode) | + REW_TAG_CFG_TAG_DEI_CFG_SET(mode), + REW_TAG_CFG_TAG_PCP_CFG | + REW_TAG_CFG_TAG_DEI_CFG, + port->lan966x, REW_TAG_CFG(port->chip_port)); + + /* Map each value to pcp and dei */ + for (int i = 0; i < ARRAY_SIZE(qos->map); i++) { + pcp = qos->map[i]; + if (pcp > LAN966X_PORT_QOS_PCP_COUNT) + dei = 1; + else + dei = 0; + + lan_rmw(REW_PCP_DEI_CFG_DEI_QOS_VAL_SET(dei) | + REW_PCP_DEI_CFG_PCP_QOS_VAL_SET(pcp), + REW_PCP_DEI_CFG_DEI_QOS_VAL | + REW_PCP_DEI_CFG_PCP_QOS_VAL, + port->lan966x, + REW_PCP_DEI_CFG(port->chip_port, + i + dei * LAN966X_PORT_QOS_PCP_COUNT)); + } +} + +static void lan966x_port_qos_dscp_rewr_set(struct lan966x_port *port, + struct lan966x_port_qos_dscp_rewr *qos) +{ + u16 dscp; + u8 mode; + + if (qos->enable) + mode = LAN966X_PORT_REW_DSCP_ANALIZER; + else + mode = LAN966X_PORT_REW_DSCP_FRAME; + + /* Enable the rewrite otherwise will use the values from the frame */ + lan_rmw(REW_DSCP_CFG_DSCP_REWR_CFG_SET(mode), + REW_DSCP_CFG_DSCP_REWR_CFG, + port->lan966x, REW_DSCP_CFG(port->chip_port)); + + /* Map each classified Qos class and DP to classified DSCP value */ + for (int i = 0; i < ARRAY_SIZE(qos->map); i++) { + dscp = qos->map[i]; + + lan_rmw(ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL_SET(dscp), + 
ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL, + port->lan966x, ANA_DSCP_REWR_CFG(i)); + } +} + +void lan966x_port_qos_dscp_rewr_mode_set(struct lan966x_port *port, + int mode) +{ + lan_rmw(ANA_QOS_CFG_DSCP_REWR_CFG_SET(mode), + ANA_QOS_CFG_DSCP_REWR_CFG, + port->lan966x, ANA_QOS_CFG(port->chip_port)); +} + +void lan966x_port_qos_set(struct lan966x_port *port, + struct lan966x_port_qos *qos) +{ + lan966x_port_qos_pcp_set(port, &qos->pcp); + lan966x_port_qos_dscp_set(port, &qos->dscp); + lan966x_port_qos_default_set(port, qos); + lan966x_port_qos_pcp_rewr_set(port, &qos->pcp_rewr); + lan966x_port_qos_dscp_rewr_set(port, &qos->dscp_rewr); +} + void lan966x_port_init(struct lan966x_port *port) { struct lan966x_port_config *config = &port->config; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h index 2220391802766..4b553927d2e0e 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h @@ -283,6 +283,18 @@ enum lan966x_target { #define ANA_VLAN_CFG_VLAN_POP_CNT_GET(x)\ FIELD_GET(ANA_VLAN_CFG_VLAN_POP_CNT, x) +#define ANA_VLAN_CFG_VLAN_PCP GENMASK(15, 13) +#define ANA_VLAN_CFG_VLAN_PCP_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_PCP, x) +#define ANA_VLAN_CFG_VLAN_PCP_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_PCP, x) + +#define ANA_VLAN_CFG_VLAN_DEI BIT(12) +#define ANA_VLAN_CFG_VLAN_DEI_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_DEI, x) +#define ANA_VLAN_CFG_VLAN_DEI_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_DEI, x) + #define ANA_VLAN_CFG_VLAN_VID GENMASK(11, 0) #define ANA_VLAN_CFG_VLAN_VID_SET(x)\ FIELD_PREP(ANA_VLAN_CFG_VLAN_VID, x) @@ -316,6 +328,39 @@ enum lan966x_target { #define ANA_DROP_CFG_DROP_MC_SMAC_ENA_GET(x)\ FIELD_GET(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x) +/* ANA:PORT:QOS_CFG */ +#define ANA_QOS_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 8, 0, 1, 4) + +#define ANA_QOS_CFG_DP_DEFAULT_VAL BIT(8) +#define ANA_QOS_CFG_DP_DEFAULT_VAL_SET(x)\ + FIELD_PREP(ANA_QOS_CFG_DP_DEFAULT_VAL, x) +#define ANA_QOS_CFG_DP_DEFAULT_VAL_GET(x)\ + FIELD_GET(ANA_QOS_CFG_DP_DEFAULT_VAL, x) + +#define ANA_QOS_CFG_QOS_DEFAULT_VAL GENMASK(7, 5) +#define ANA_QOS_CFG_QOS_DEFAULT_VAL_SET(x)\ + FIELD_PREP(ANA_QOS_CFG_QOS_DEFAULT_VAL, x) +#define ANA_QOS_CFG_QOS_DEFAULT_VAL_GET(x)\ + FIELD_GET(ANA_QOS_CFG_QOS_DEFAULT_VAL, x) + +#define ANA_QOS_CFG_QOS_DSCP_ENA BIT(4) +#define ANA_QOS_CFG_QOS_DSCP_ENA_SET(x)\ + FIELD_PREP(ANA_QOS_CFG_QOS_DSCP_ENA, x) +#define ANA_QOS_CFG_QOS_DSCP_ENA_GET(x)\ + FIELD_GET(ANA_QOS_CFG_QOS_DSCP_ENA, x) + +#define ANA_QOS_CFG_QOS_PCP_ENA BIT(3) +#define ANA_QOS_CFG_QOS_PCP_ENA_SET(x)\ + FIELD_PREP(ANA_QOS_CFG_QOS_PCP_ENA, x) +#define ANA_QOS_CFG_QOS_PCP_ENA_GET(x)\ + FIELD_GET(ANA_QOS_CFG_QOS_PCP_ENA, x) + +#define ANA_QOS_CFG_DSCP_REWR_CFG GENMASK(1, 0) +#define ANA_QOS_CFG_DSCP_REWR_CFG_SET(x)\ + FIELD_PREP(ANA_QOS_CFG_DSCP_REWR_CFG, x) +#define ANA_QOS_CFG_DSCP_REWR_CFG_GET(x)\ + FIELD_GET(ANA_QOS_CFG_DSCP_REWR_CFG, x) + /* ANA:PORT:VCAP_CFG */ #define ANA_VCAP_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 12, 0, 1, 4) @@ -415,6 +460,21 @@ enum lan966x_target { #define ANA_VCAP_S2_CFG_OAM_DIS_GET(x)\ FIELD_GET(ANA_VCAP_S2_CFG_OAM_DIS, x) +/* ANA:PORT:QOS_PCP_DEI_MAP_CFG */ +#define ANA_PCP_DEI_CFG(g, r) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 32, r, 16, 4) + +#define ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL BIT(3) +#define ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL_SET(x)\ + FIELD_PREP(ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL, x) +#define ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL_GET(x)\ + 
FIELD_GET(ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL, x) + +#define ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL GENMASK(2, 0) +#define ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL_SET(x)\ + FIELD_PREP(ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL, x) +#define ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL_GET(x)\ + FIELD_GET(ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL, x) + /* ANA:PORT:CPU_FWD_CFG */ #define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4) @@ -478,6 +538,15 @@ enum lan966x_target { #define ANA_PORT_CFG_PORTID_VAL_GET(x)\ FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x) +/* ANA:COMMON:DSCP_REWR_CFG */ +#define ANA_DSCP_REWR_CFG(r) __REG(TARGET_ANA, 0, 1, 31232, 0, 1, 552, 332, r, 16, 4) + +#define ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL GENMASK(5, 0) +#define ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL_SET(x)\ + FIELD_PREP(ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL, x) +#define ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL_GET(x)\ + FIELD_GET(ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL, x) + /* ANA:PORT:POL_CFG */ #define ANA_POL_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 116, 0, 1, 4) @@ -547,6 +616,33 @@ enum lan966x_target { #define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_GET(x)\ FIELD_GET(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x) +/* ANA:COMMON:DSCP_CFG */ +#define ANA_DSCP_CFG(r) __REG(TARGET_ANA, 0, 1, 31232, 0, 1, 552, 76, r, 64, 4) + +#define ANA_DSCP_CFG_DP_DSCP_VAL BIT(11) +#define ANA_DSCP_CFG_DP_DSCP_VAL_SET(x)\ + FIELD_PREP(ANA_DSCP_CFG_DP_DSCP_VAL, x) +#define ANA_DSCP_CFG_DP_DSCP_VAL_GET(x)\ + FIELD_GET(ANA_DSCP_CFG_DP_DSCP_VAL, x) + +#define ANA_DSCP_CFG_QOS_DSCP_VAL GENMASK(10, 8) +#define ANA_DSCP_CFG_QOS_DSCP_VAL_SET(x)\ + FIELD_PREP(ANA_DSCP_CFG_QOS_DSCP_VAL, x) +#define ANA_DSCP_CFG_QOS_DSCP_VAL_GET(x)\ + FIELD_GET(ANA_DSCP_CFG_QOS_DSCP_VAL, x) + +#define ANA_DSCP_CFG_DSCP_TRUST_ENA BIT(1) +#define ANA_DSCP_CFG_DSCP_TRUST_ENA_SET(x)\ + FIELD_PREP(ANA_DSCP_CFG_DSCP_TRUST_ENA, x) +#define ANA_DSCP_CFG_DSCP_TRUST_ENA_GET(x)\ + FIELD_GET(ANA_DSCP_CFG_DSCP_TRUST_ENA, x) + +#define ANA_DSCP_CFG_DSCP_REWR_ENA BIT(0) +#define ANA_DSCP_CFG_DSCP_REWR_ENA_SET(x)\ + FIELD_PREP(ANA_DSCP_CFG_DSCP_REWR_ENA, x) +#define ANA_DSCP_CFG_DSCP_REWR_ENA_GET(x)\ + FIELD_GET(ANA_DSCP_CFG_DSCP_REWR_ENA, x) + /* ANA:POL:POL_PIR_CFG */ #define ANA_POL_PIR_CFG(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 0, 0, 1, 4) @@ -1468,6 +1564,18 @@ enum lan966x_target { #define REW_TAG_CFG_TAG_TPID_CFG_GET(x)\ FIELD_GET(REW_TAG_CFG_TAG_TPID_CFG, x) +#define REW_TAG_CFG_TAG_PCP_CFG GENMASK(3, 2) +#define REW_TAG_CFG_TAG_PCP_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CFG_TAG_PCP_CFG, x) +#define REW_TAG_CFG_TAG_PCP_CFG_GET(x)\ + FIELD_GET(REW_TAG_CFG_TAG_PCP_CFG, x) + +#define REW_TAG_CFG_TAG_DEI_CFG GENMASK(1, 0) +#define REW_TAG_CFG_TAG_DEI_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CFG_TAG_DEI_CFG, x) +#define REW_TAG_CFG_TAG_DEI_CFG_GET(x)\ + FIELD_GET(REW_TAG_CFG_TAG_DEI_CFG, x) + /* REW:PORT:PORT_CFG */ #define REW_PORT_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 8, 0, 1, 4) @@ -1483,6 +1591,30 @@ enum lan966x_target { #define REW_PORT_CFG_NO_REWRITE_GET(x)\ FIELD_GET(REW_PORT_CFG_NO_REWRITE, x) +/* REW:PORT:DSCP_CFG */ +#define REW_DSCP_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 12, 0, 1, 4) + +#define REW_DSCP_CFG_DSCP_REWR_CFG GENMASK(1, 0) +#define REW_DSCP_CFG_DSCP_REWR_CFG_SET(x)\ + FIELD_PREP(REW_DSCP_CFG_DSCP_REWR_CFG, x) +#define REW_DSCP_CFG_DSCP_REWR_CFG_GET(x)\ + FIELD_GET(REW_DSCP_CFG_DSCP_REWR_CFG, x) + +/* REW:PORT:PCP_DEI_QOS_MAP_CFG */ +#define REW_PCP_DEI_CFG(g, r) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 16, r, 16, 4) + +#define REW_PCP_DEI_CFG_DEI_QOS_VAL BIT(3) +#define 
REW_PCP_DEI_CFG_DEI_QOS_VAL_SET(x)\ + FIELD_PREP(REW_PCP_DEI_CFG_DEI_QOS_VAL, x) +#define REW_PCP_DEI_CFG_DEI_QOS_VAL_GET(x)\ + FIELD_GET(REW_PCP_DEI_CFG_DEI_QOS_VAL, x) + +#define REW_PCP_DEI_CFG_PCP_QOS_VAL GENMASK(2, 0) +#define REW_PCP_DEI_CFG_PCP_QOS_VAL_SET(x)\ + FIELD_PREP(REW_PCP_DEI_CFG_PCP_QOS_VAL, x) +#define REW_PCP_DEI_CFG_PCP_QOS_VAL_GET(x)\ + FIELD_GET(REW_PCP_DEI_CFG_PCP_QOS_VAL, x) + /* REW:COMMON:STAT_CFG */ #define REW_STAT_CFG __REG(TARGET_REW, 0, 1, 3072, 0, 1, 528, 520, 0, 1, 4) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 62f0bf91d1e1e..b7cce746b5c0a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2418,6 +2418,8 @@ static void nfp_net_rss_init(struct nfp_net *nn) /* Enable IPv4/IPv6 TCP by default */ nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP | NFP_NET_CFG_RSS_IPV6_TCP | + NFP_NET_CFG_RSS_IPV4_UDP | + NFP_NET_CFG_RSS_IPV6_UDP | FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) | NFP_NET_CFG_RSS_MASK; } diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.h b/drivers/net/ethernet/netronome/nfp/nic/main.h index 094374df42b8c..38b8b10b03cd3 100644 --- a/drivers/net/ethernet/netronome/nfp/nic/main.h +++ b/drivers/net/ethernet/netronome/nfp/nic/main.h @@ -8,7 +8,7 @@ #ifdef CONFIG_DCB /* DCB feature definitions */ -#define NFP_NET_MAX_DSCP 4 +#define NFP_NET_MAX_DSCP 64 #define NFP_NET_MAX_TC IEEE_8021QAZ_MAX_TCS #define NFP_NET_MAX_PRIO 8 #define NFP_DCB_CFG_STRIDE 256 diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 0605d1ee490dd..7a549b834e970 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -6138,6 +6138,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) return 0; out_error: + nv_mgmt_release_sema(dev); if (phystate_orig) writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); out_freering: diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index a7e376e7e689c..5e6308d574ba0 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -616,10 +616,10 @@ struct rtl8169_private { struct work_struct work; } wk; - spinlock_t config25_lock; - spinlock_t mac_ocp_lock; + raw_spinlock_t config25_lock; + raw_spinlock_t mac_ocp_lock; - spinlock_t cfg9346_usage_lock; + raw_spinlock_t cfg9346_usage_lock; int cfg9346_usage_count; unsigned supports_gmii:1; @@ -671,20 +671,20 @@ static void rtl_lock_config_regs(struct rtl8169_private *tp) { unsigned long flags; - spin_lock_irqsave(&tp->cfg9346_usage_lock, flags); + raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags); if (!--tp->cfg9346_usage_count) RTL_W8(tp, Cfg9346, Cfg9346_Lock); - spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags); + raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags); } static void rtl_unlock_config_regs(struct rtl8169_private *tp) { unsigned long flags; - spin_lock_irqsave(&tp->cfg9346_usage_lock, flags); + raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags); if (!tp->cfg9346_usage_count++) RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags); + raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags); } static void rtl_pci_commit(struct rtl8169_private *tp) @@ -698,10 +698,10 @@ static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set) unsigned 
long flags; u8 val; - spin_lock_irqsave(&tp->config25_lock, flags); + raw_spin_lock_irqsave(&tp->config25_lock, flags); val = RTL_R8(tp, Config2); RTL_W8(tp, Config2, (val & ~clear) | set); - spin_unlock_irqrestore(&tp->config25_lock, flags); + raw_spin_unlock_irqrestore(&tp->config25_lock, flags); } static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set) @@ -709,10 +709,10 @@ static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set) unsigned long flags; u8 val; - spin_lock_irqsave(&tp->config25_lock, flags); + raw_spin_lock_irqsave(&tp->config25_lock, flags); val = RTL_R8(tp, Config5); RTL_W8(tp, Config5, (val & ~clear) | set); - spin_unlock_irqrestore(&tp->config25_lock, flags); + raw_spin_unlock_irqrestore(&tp->config25_lock, flags); } static bool rtl_is_8125(struct rtl8169_private *tp) @@ -899,9 +899,9 @@ static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) { unsigned long flags; - spin_lock_irqsave(&tp->mac_ocp_lock, flags); + raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags); __r8168_mac_ocp_write(tp, reg, data); - spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); + raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); } static u16 __r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) @@ -919,9 +919,9 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) unsigned long flags; u16 val; - spin_lock_irqsave(&tp->mac_ocp_lock, flags); + raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags); val = __r8168_mac_ocp_read(tp, reg); - spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); + raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); return val; } @@ -932,10 +932,10 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask, unsigned long flags; u16 data; - spin_lock_irqsave(&tp->mac_ocp_lock, flags); + raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags); data = __r8168_mac_ocp_read(tp, reg); __r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); - spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); + raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); } /* Work around a hw issue with RTL8168g PHY, the quirk disables @@ -1420,14 +1420,14 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0); } - spin_lock_irqsave(&tp->config25_lock, flags); + raw_spin_lock_irqsave(&tp->config25_lock, flags); for (i = 0; i < tmp; i++) { options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; if (wolopts & cfg[i].opt) options |= cfg[i].mask; RTL_W8(tp, cfg[i].reg, options); } - spin_unlock_irqrestore(&tp->config25_lock, flags); + raw_spin_unlock_irqrestore(&tp->config25_lock, flags); switch (tp->mac_version) { case RTL_GIGA_MAC_VER_02 ... 
RTL_GIGA_MAC_VER_06: @@ -5164,6 +5164,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) int jumbo_max, region, rc; enum mac_version chipset; struct net_device *dev; + u32 txconfig; u16 xid; dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); @@ -5179,9 +5180,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->eee_adv = -1; tp->ocp_base = OCP_STD_PHY_BASE; - spin_lock_init(&tp->cfg9346_usage_lock); - spin_lock_init(&tp->config25_lock); - spin_lock_init(&tp->mac_ocp_lock); + raw_spin_lock_init(&tp->cfg9346_usage_lock); + raw_spin_lock_init(&tp->config25_lock); + raw_spin_lock_init(&tp->mac_ocp_lock); dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev, struct pcpu_sw_netstats); @@ -5218,7 +5219,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->mmio_addr = pcim_iomap_table(pdev)[region]; - xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf; + txconfig = RTL_R32(tp, TxConfig); + if (txconfig == ~0U) { + dev_err(&pdev->dev, "PCI read failed\n"); + return -EIO; + } + + xid = (txconfig >> 20) & 0xfcf; /* Identify chip attached to board */ chipset = rtl8169_get_mac_version(xid, tp->supports_gmii); diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c index d916877b5a9ad..274f3a2562ad1 100644 --- a/drivers/net/ethernet/sfc/ef100_netdev.c +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -40,19 +40,26 @@ static int ef100_alloc_vis(struct efx_nic *efx, unsigned int *allocated_vis) unsigned int tx_vis = efx->n_tx_channels + efx->n_extra_tx_channels; unsigned int rx_vis = efx->n_rx_channels; unsigned int min_vis, max_vis; + int rc; EFX_WARN_ON_PARANOID(efx->tx_queues_per_channel != 1); tx_vis += efx->n_xdp_channels * efx->xdp_tx_per_channel; max_vis = max(rx_vis, tx_vis); - /* Currently don't handle resource starvation and only accept - * our maximum needs and no less. + /* We require at least a single complete TX channel worth of queues. */ + min_vis = efx->tx_queues_per_channel; + + rc = efx_mcdi_alloc_vis(efx, min_vis, max_vis, + NULL, allocated_vis); + + /* We retry allocating VIs by reallocating channels when we have not + * been able to allocate the maximum VIs. */ - min_vis = max_vis; + if (!rc && *allocated_vis < max_vis) + rc = -EAGAIN; - return efx_mcdi_alloc_vis(efx, min_vis, max_vis, - NULL, allocated_vis); + return rc; } static int ef100_remap_bar(struct efx_nic *efx, int max_vis) @@ -133,9 +140,41 @@ static int ef100_net_open(struct net_device *net_dev) goto fail; rc = ef100_alloc_vis(efx, &allocated_vis); - if (rc) + if (rc && rc != -EAGAIN) goto fail; + /* Try one more time but with the maximum number of channels + * equal to the allocated VIs, which would more likely succeed. + */ + if (rc == -EAGAIN) { + rc = efx_mcdi_free_vis(efx); + if (rc) + goto fail; + + efx_remove_interrupts(efx); + efx->max_channels = allocated_vis; + + rc = efx_probe_interrupts(efx); + if (rc) + goto fail; + + rc = efx_set_channels(efx); + if (rc) + goto fail; + + rc = ef100_alloc_vis(efx, &allocated_vis); + if (rc && rc != -EAGAIN) + goto fail; + + /* It should be very unlikely that we failed here again, but in + * such a case we return ENOSPC. 
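+		 * ENOSPC tells the caller that the MC could not provide enough
+		 * VIs even for the reduced channel configuration.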
+ */ + if (rc == -EAGAIN) { + rc = -ENOSPC; + goto fail; + } + } + rc = efx_probe_channels(efx); if (rc) return rc; @@ -378,7 +417,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data) efx->net_dev = net_dev; SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev); - net_dev->features |= efx->type->offload_features; + /* enable all supported features except rx-fcs and rx-all */ + net_dev->features |= efx->type->offload_features & + ~(NETIF_F_RXFCS | NETIF_F_RXALL); net_dev->hw_features |= efx->type->offload_features; net_dev->hw_enc_features |= efx->type->offload_features; net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG | diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c index 381b805659d39..ef9971cbb695d 100644 --- a/drivers/net/ethernet/sfc/efx_devlink.c +++ b/drivers/net/ethernet/sfc/efx_devlink.c @@ -171,9 +171,14 @@ static int efx_devlink_info_nvram_partition(struct efx_nic *efx, rc = efx_mcdi_nvram_metadata(efx, partition_type, NULL, version, NULL, 0); + + /* If the partition does not exist, that is not an error. */ + if (rc == -ENOENT) + return 0; + if (rc) { - netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed\n", - version_name); + netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed (rc=%d)\n", + version_name, rc); return rc; } @@ -187,36 +192,33 @@ static int efx_devlink_info_nvram_partition(struct efx_nic *efx, static int efx_devlink_info_stored_versions(struct efx_nic *efx, struct devlink_info_req *req) { - int rc; - - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_BUNDLE, - DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID); - if (rc) - return rc; - - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_MC_FIRMWARE, - DEVLINK_INFO_VERSION_GENERIC_FW_MGMT); - if (rc) - return rc; - - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_SUC_FIRMWARE, - EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC); - if (rc) - return rc; - - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_EXPANSION_ROM, - EFX_DEVLINK_INFO_VERSION_FW_EXPROM); - if (rc) - return rc; + int err; - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_EXPANSION_UEFI, - EFX_DEVLINK_INFO_VERSION_FW_UEFI); - return rc; + /* We do not care here about the specific error but just if an error + * happened. The specific error will be reported inside the call + * through system messages, and if any error happened in any call + * below, we report it through extack. + */ + err = efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_BUNDLE, + DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID); + + err |= efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_MC_FIRMWARE, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT); + + err |= efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_SUC_FIRMWARE, + EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC); + + err |= efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_EXPANSION_ROM, + EFX_DEVLINK_INFO_VERSION_FW_EXPROM); + + err |= efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_EXPANSION_UEFI, + EFX_DEVLINK_INFO_VERSION_FW_UEFI); + return err; } #define EFX_VER_FLAG(_f) \ @@ -587,27 +589,20 @@ static int efx_devlink_info_get(struct devlink *devlink, { struct efx_devlink *devlink_private = devlink_priv(devlink); struct efx_nic *efx = devlink_private->efx; - int rc; + int err; - /* Several different MCDI commands are used. We report first error - * through extack returning at that point. 
Specific error - * information via system messages. + /* Several different MCDI commands are used. We report if errors + * happened through extack. Specific error information via system + * messages inside the calls. */ - rc = efx_devlink_info_board_cfg(efx, req); - if (rc) { - NL_SET_ERR_MSG_MOD(extack, "Getting board info failed"); - return rc; - } - rc = efx_devlink_info_stored_versions(efx, req); - if (rc) { - NL_SET_ERR_MSG_MOD(extack, "Getting stored versions failed"); - return rc; - } - rc = efx_devlink_info_running_versions(efx, req); - if (rc) { - NL_SET_ERR_MSG_MOD(extack, "Getting running versions failed"); - return rc; - } + err = efx_devlink_info_board_cfg(efx, req); + + err |= efx_devlink_info_stored_versions(efx, req); + + err |= efx_devlink_info_running_versions(efx, req); + + if (err) + NL_SET_ERR_MSG_MOD(extack, "Errors when getting device info. Check system messages"); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 6807c4c1a0a2f..3db1cb0fd160c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -491,7 +491,6 @@ int stmmac_mdio_reset(struct mii_bus *bus) int stmmac_xpcs_setup(struct mii_bus *bus) { struct net_device *ndev = bus->priv; - struct mdio_device *mdiodev; struct stmmac_priv *priv; struct dw_xpcs *xpcs; int mode, addr; @@ -501,16 +500,10 @@ int stmmac_xpcs_setup(struct mii_bus *bus) /* Try to probe the XPCS by scanning all addresses. */ for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - mdiodev = mdio_device_create(bus, addr); - if (IS_ERR(mdiodev)) + xpcs = xpcs_create_mdiodev(bus, addr, mode); + if (IS_ERR(xpcs)) continue; - xpcs = xpcs_create(mdiodev, mode); - if (IS_ERR_OR_NULL(xpcs)) { - mdio_device_free(mdiodev); - continue; - } - priv->hw->xpcs = xpcs; break; } @@ -669,10 +662,8 @@ int stmmac_mdio_unregister(struct net_device *ndev) if (!priv->mii) return 0; - if (priv->hw->xpcs) { - mdio_device_free(priv->hw->xpcs->mdiodev); + if (priv->hw->xpcs) xpcs_destroy(priv->hw->xpcs); - } mdiobus_unregister(priv->mii); priv->mii->priv = NULL; diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 2d52f54ebb458..b317b94864554 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -5073,6 +5073,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) cas_shutdown(cp); mutex_unlock(&cp->pm_mutex); + vfree(cp->fw_data); + pci_iounmap(pdev, cp->regs); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 32f952d930097..cbe7f184b50ed 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -598,7 +598,7 @@ struct wx_q_vector { char name[IFNAMSIZ + 17]; /* for dynamic allocation of rings associated with this q_vector */ - struct wx_ring ring[0] ____cacheline_internodealigned_in_smp; + struct wx_ring ring[] ____cacheline_internodealigned_in_smp; }; enum wx_isb_idx { diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c index 1e0c206d0f2e6..da2001ea1f993 100644 --- a/drivers/net/mdio/mdio-i2c.c +++ b/drivers/net/mdio/mdio-i2c.c @@ -291,7 +291,8 @@ static int i2c_rollball_mii_cmd(struct mii_bus *bus, int bus_addr, u8 cmd, return i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs)); } -static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg) +static int i2c_mii_read_rollball(struct 
mii_bus *bus, int phy_id, int devad, + int reg) { u8 buf[4], res[6]; int bus_addr, ret; @@ -302,7 +303,7 @@ static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg) return 0xffff; buf[0] = ROLLBALL_DATA_ADDR; - buf[1] = (reg >> 16) & 0x1f; + buf[1] = devad; buf[2] = (reg >> 8) & 0xff; buf[3] = reg & 0xff; @@ -322,8 +323,8 @@ static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg) return val; } -static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg, - u16 val) +static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int devad, + int reg, u16 val) { int bus_addr, ret; u8 buf[6]; @@ -333,7 +334,7 @@ static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg, return 0; buf[0] = ROLLBALL_DATA_ADDR; - buf[1] = (reg >> 16) & 0x1f; + buf[1] = devad; buf[2] = (reg >> 8) & 0xff; buf[3] = reg & 0xff; buf[4] = val >> 8; @@ -405,8 +406,8 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c, return ERR_PTR(ret); } - mii->read = i2c_mii_read_rollball; - mii->write = i2c_mii_write_rollball; + mii->read_c45 = i2c_mii_read_rollball; + mii->write_c45 = i2c_mii_write_rollball; break; default: mii->read = i2c_mii_read_default_c22; diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c index 622c3de3f3a8b..f04dc580ffb83 100644 --- a/drivers/net/pcs/pcs-lynx.c +++ b/drivers/net/pcs/pcs-lynx.c @@ -323,6 +323,7 @@ struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio) if (!lynx) return NULL; + mdio_device_get(mdio); lynx->mdio = mdio; lynx->pcs.ops = &lynx_pcs_phylink_ops; lynx->pcs.poll = true; @@ -331,10 +332,40 @@ struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio) } EXPORT_SYMBOL(lynx_pcs_create); +struct phylink_pcs *lynx_pcs_create_mdiodev(struct mii_bus *bus, int addr) +{ + struct mdio_device *mdio; + struct phylink_pcs *pcs; + + mdio = mdio_device_create(bus, addr); + if (IS_ERR(mdio)) + return ERR_CAST(mdio); + + pcs = lynx_pcs_create(mdio); + + /* Convert failure to create the PCS to an error pointer, so this + * function has a consistent return value strategy. + */ + if (!pcs) + pcs = ERR_PTR(-ENOMEM); + + /* lynx_create() has taken a refcount on the mdiodev if it was + * successful. If lynx_create() fails, this will free the mdio + * device here. In any case, we don't need to hold our reference + * anymore, and putting it here will allow mdio_device_put() in + * lynx_destroy() to automatically free the mdio device. 
+ */ + mdio_device_put(mdio); + + return pcs; +} +EXPORT_SYMBOL(lynx_pcs_create_mdiodev); + void lynx_pcs_destroy(struct phylink_pcs *pcs) { struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs); + mdio_device_put(lynx->mdio); kfree(lynx); } EXPORT_SYMBOL(lynx_pcs_destroy); diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index f19d48c94fe0e..1ba214429e012 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -271,15 +271,12 @@ static int xpcs_soft_reset(struct dw_xpcs *xpcs, }) static int xpcs_read_fault_c73(struct dw_xpcs *xpcs, - struct phylink_link_state *state) + struct phylink_link_state *state, + u16 pcs_stat1) { int ret; - ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT1); - if (ret < 0) - return ret; - - if (ret & MDIO_STAT1_FAULT) { + if (pcs_stat1 & MDIO_STAT1_FAULT) { xpcs_warn(xpcs, state, "Link fault condition detected!\n"); return -EFAULT; } @@ -321,37 +318,6 @@ static int xpcs_read_fault_c73(struct dw_xpcs *xpcs, return 0; } -static int xpcs_read_link_c73(struct dw_xpcs *xpcs) -{ - bool link = true; - int ret; - - ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT1); - if (ret < 0) - return ret; - - if (!(ret & MDIO_STAT1_LSTATUS)) - link = false; - - return link; -} - -static int xpcs_get_max_usxgmii_speed(const unsigned long *supported) -{ - int max = SPEED_UNKNOWN; - - if (phylink_test(supported, 1000baseKX_Full)) - max = SPEED_1000; - if (phylink_test(supported, 2500baseX_Full)) - max = SPEED_2500; - if (phylink_test(supported, 10000baseKX4_Full)) - max = SPEED_10000; - if (phylink_test(supported, 10000baseKR_Full)) - max = SPEED_10000; - - return max; -} - static void xpcs_config_usxgmii(struct dw_xpcs *xpcs, int speed) { int ret, speed_sel; @@ -478,16 +444,12 @@ static int xpcs_config_aneg_c73(struct dw_xpcs *xpcs, static int xpcs_aneg_done_c73(struct dw_xpcs *xpcs, struct phylink_link_state *state, - const struct xpcs_compat *compat) + const struct xpcs_compat *compat, u16 an_stat1) { int ret; - ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1); - if (ret < 0) - return ret; - - if (ret & MDIO_AN_STAT1_COMPLETE) { - ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL1); + if (an_stat1 & MDIO_AN_STAT1_COMPLETE) { + ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_AN_LPA); if (ret < 0) return ret; @@ -504,64 +466,32 @@ static int xpcs_aneg_done_c73(struct dw_xpcs *xpcs, } static int xpcs_read_lpa_c73(struct dw_xpcs *xpcs, - struct phylink_link_state *state) + struct phylink_link_state *state, u16 an_stat1) { - int ret; - - ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1); - if (ret < 0) - return ret; + u16 lpa[3]; + int i, ret; - if (!(ret & MDIO_AN_STAT1_LPABLE)) { + if (!(an_stat1 & MDIO_AN_STAT1_LPABLE)) { phylink_clear(state->lp_advertising, Autoneg); return 0; } phylink_set(state->lp_advertising, Autoneg); - /* Clause 73 outcome */ - ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL3); - if (ret < 0) - return ret; - - if (ret & DW_C73_2500KX) - phylink_set(state->lp_advertising, 2500baseX_Full); - - ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL2); - if (ret < 0) - return ret; - - if (ret & DW_C73_1000KX) - phylink_set(state->lp_advertising, 1000baseKX_Full); - if (ret & DW_C73_10000KX4) - phylink_set(state->lp_advertising, 10000baseKX4_Full); - if (ret & DW_C73_10000KR) - phylink_set(state->lp_advertising, 10000baseKR_Full); + /* Read Clause 73 link partner advertisement */ + for (i = ARRAY_SIZE(lpa); --i >= 0; ) { + ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_AN_LPA + i); + if (ret < 0) + return ret; - ret = xpcs_read(xpcs, MDIO_MMD_AN, 
DW_SR_AN_LP_ABL1); - if (ret < 0) - return ret; + lpa[i] = ret; + } - if (ret & DW_C73_PAUSE) - phylink_set(state->lp_advertising, Pause); - if (ret & DW_C73_ASYM_PAUSE) - phylink_set(state->lp_advertising, Asym_Pause); + mii_c73_mod_linkmode(state->lp_advertising, lpa); - linkmode_and(state->lp_advertising, state->lp_advertising, - state->advertising); return 0; } -static void xpcs_resolve_lpa_c73(struct dw_xpcs *xpcs, - struct phylink_link_state *state) -{ - int max_speed = xpcs_get_max_usxgmii_speed(state->lp_advertising); - - state->pause = MLO_PAUSE_TX | MLO_PAUSE_RX; - state->speed = max_speed; - state->duplex = DUPLEX_FULL; -} - static int xpcs_get_max_xlgmii_speed(struct dw_xpcs *xpcs, struct phylink_link_state *state) { @@ -873,7 +803,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, switch (compat->an_mode) { case DW_AN_C73: - if (phylink_autoneg_inband(mode)) { + if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising)) { ret = xpcs_config_aneg_c73(xpcs, compat); if (ret) return ret; @@ -924,13 +854,25 @@ static int xpcs_get_state_c73(struct dw_xpcs *xpcs, const struct xpcs_compat *compat) { bool an_enabled; + int pcs_stat1; + int an_stat1; int ret; + /* The link status bit is latching-low, so it is important to + * avoid unnecessary re-reads of this register to avoid missing + * a link-down event. + */ + pcs_stat1 = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT1); + if (pcs_stat1 < 0) { + state->link = false; + return pcs_stat1; + } + /* Link needs to be read first ... */ - state->link = xpcs_read_link_c73(xpcs) > 0 ? 1 : 0; + state->link = !!(pcs_stat1 & MDIO_STAT1_LSTATUS); /* ... and then we check the faults. */ - ret = xpcs_read_fault_c73(xpcs, state); + ret = xpcs_read_fault_c73(xpcs, state, pcs_stat1); if (ret) { ret = xpcs_soft_reset(xpcs, compat); if (ret) @@ -941,15 +883,38 @@ static int xpcs_get_state_c73(struct dw_xpcs *xpcs, return xpcs_do_config(xpcs, state->interface, MLO_AN_INBAND, NULL); } + /* There is no point doing anything else if the link is down. */ + if (!state->link) + return 0; + an_enabled = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, state->advertising); - if (an_enabled && xpcs_aneg_done_c73(xpcs, state, compat)) { - state->an_complete = true; - xpcs_read_lpa_c73(xpcs, state); - xpcs_resolve_lpa_c73(xpcs, state); - } else if (an_enabled) { - state->link = 0; - } else if (state->link) { + if (an_enabled) { + /* The link status bit is latching-low, so it is important to + * avoid unnecessary re-reads of this register to avoid missing + * a link-down event. 
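+		 * The register is read once here and its value handed to the
+		 * helpers below instead of letting each of them re-read it.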
+ */ + an_stat1 = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1); + if (an_stat1 < 0) { + state->link = false; + return an_stat1; + } + + state->an_complete = xpcs_aneg_done_c73(xpcs, state, compat, + an_stat1); + if (!state->an_complete) { + state->link = false; + return 0; + } + + ret = xpcs_read_lpa_c73(xpcs, state, an_stat1); + if (ret < 0) { + state->link = false; + return ret; + } + + phylink_resolve_c73(state); + } else { xpcs_resolve_pma(xpcs, state); } @@ -1270,6 +1235,7 @@ struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, if (!xpcs) return ERR_PTR(-ENOMEM); + mdio_device_get(mdiodev); xpcs->mdiodev = mdiodev; xpcs_id = xpcs_get_id(xpcs); @@ -1302,6 +1268,7 @@ struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, ret = -ENODEV; out: + mdio_device_put(mdiodev); kfree(xpcs); return ERR_PTR(ret); @@ -1310,8 +1277,34 @@ EXPORT_SYMBOL_GPL(xpcs_create); void xpcs_destroy(struct dw_xpcs *xpcs) { + if (xpcs) + mdio_device_put(xpcs->mdiodev); kfree(xpcs); } EXPORT_SYMBOL_GPL(xpcs_destroy); +struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr, + phy_interface_t interface) +{ + struct mdio_device *mdiodev; + struct dw_xpcs *xpcs; + + mdiodev = mdio_device_create(bus, addr); + if (IS_ERR(mdiodev)) + return ERR_CAST(mdiodev); + + xpcs = xpcs_create(mdiodev, interface); + + /* xpcs_create() has taken a refcount on the mdiodev if it was + * successful. If xpcs_create() fails, this will free the mdio + * device here. In any case, we don't need to hold our reference + * anymore, and putting it here will allow mdio_device_put() in + * xpcs_destroy() to automatically free the mdio device. + */ + mdio_device_put(mdiodev); + + return xpcs; +} +EXPORT_SYMBOL_GPL(xpcs_create_mdiodev); + MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h index 770df50323a01..68c6b5a620881 100644 --- a/drivers/net/pcs/pcs-xpcs.h +++ b/drivers/net/pcs/pcs-xpcs.h @@ -32,9 +32,6 @@ #define DW_SR_AN_ADV1 0x10 #define DW_SR_AN_ADV2 0x11 #define DW_SR_AN_ADV3 0x12 -#define DW_SR_AN_LP_ABL1 0x13 -#define DW_SR_AN_LP_ABL2 0x14 -#define DW_SR_AN_LP_ABL3 0x15 /* Clause 73 Defines */ /* AN_LP_ABL1 */ diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 93b8efc792273..059bd06a8cce4 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -243,9 +243,10 @@ config MICREL_PHY Supports the KSZ9021, VSC8201, KS8001 PHYs. config MICROCHIP_T1S_PHY - tristate "Microchip 10BASE-T1S Ethernet PHY" + tristate "Microchip 10BASE-T1S Ethernet PHYs" help - Currently supports the LAN8670, LAN8671, LAN8672 + Currently supports the LAN8670/1/2 Rev.B1 and LAN8650/1 Rev.B0 Internal + PHYs. 
config MICROCHIP_PHY tristate "Microchip PHYs" diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index 27c57f6ab2112..5603d0a9ce960 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -1028,6 +1028,12 @@ void bcm_phy_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) } EXPORT_SYMBOL_GPL(bcm_phy_get_wol); +irqreturn_t bcm_phy_wol_isr(int irq, void *dev_id) +{ + return IRQ_HANDLED; +} +EXPORT_SYMBOL_GPL(bcm_phy_wol_isr); + MODULE_DESCRIPTION("Broadcom PHY Library"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Broadcom Corporation"); diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h index c6fed43ec9130..2f30ce0cab0e3 100644 --- a/drivers/net/phy/bcm-phy-lib.h +++ b/drivers/net/phy/bcm-phy-lib.h @@ -8,6 +8,7 @@ #include #include +#include struct ethtool_wolinfo; @@ -115,5 +116,6 @@ static inline void bcm_ptp_stop(struct bcm_ptp_private *priv) int bcm_phy_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); void bcm_phy_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); +irqreturn_t bcm_phy_wol_isr(int irq, void *dev_id); #endif /* _LINUX_BCM_PHY_LIB_H */ diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 418e6bc0e9981..822c8b01dc53c 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -927,7 +927,14 @@ static int bcm54xx_phy_probe(struct phy_device *phydev) if (!IS_ERR(wakeup_gpio)) { priv->wake_irq = gpiod_to_irq(wakeup_gpio); - ret = irq_set_irq_type(priv->wake_irq, IRQ_TYPE_LEVEL_LOW); + + /* Dummy interrupt handler which is not enabled but is provided + * in order for the interrupt descriptor to be fully set-up. + */ + ret = devm_request_irq(&phydev->mdio.dev, priv->wake_irq, + bcm_phy_wol_isr, + IRQF_TRIGGER_LOW | IRQF_NO_AUTOEN, + dev_name(&phydev->mdio.dev), phydev); if (ret) return ret; } diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index d75f526a20a49..76f5a2402fb06 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -44,6 +44,7 @@ #define DP83867_STRAP_STS1 0x006E #define DP83867_STRAP_STS2 0x006f #define DP83867_RGMIIDCTL 0x0086 +#define DP83867_DSP_FFE_CFG 0x012c #define DP83867_RXFCFG 0x0134 #define DP83867_RXFPMD1 0x0136 #define DP83867_RXFPMD2 0x0137 @@ -941,8 +942,27 @@ static int dp83867_phy_reset(struct phy_device *phydev) usleep_range(10, 20); - return phy_modify(phydev, MII_DP83867_PHYCTRL, + err = phy_modify(phydev, MII_DP83867_PHYCTRL, DP83867_PHYCR_FORCE_LINK_GOOD, 0); + if (err < 0) + return err; + + /* Configure the DSP Feedforward Equalizer Configuration register to + * improve short cable (< 1 meter) performance. This will not affect + * long cable performance. 
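+	 * The write below is followed by a software restart so that the new
+	 * FFE setting takes effect.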
+ */ + err = phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_DSP_FFE_CFG, + 0x0e81); + if (err < 0) + return err; + + err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART); + if (err < 0) + return err; + + usleep_range(10, 20); + + return 0; } static void dp83867_link_change_notify(struct phy_device *phydev) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 3f81bb8dac44d..2094d49025a77 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -637,7 +637,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev, { int ret; - if ((phydev->phy_id & MICREL_PHY_ID_MASK) != PHY_ID_KSZ8051) + if (!phy_id_compare(phydev->phy_id, PHY_ID_KSZ8051, MICREL_PHY_ID_MASK)) return 0; ret = phy_read(phydev, MII_BMSR); @@ -1566,7 +1566,7 @@ static int ksz9x31_cable_test_fault_length(struct phy_device *phydev, u16 stat) * * distance to fault = (VCT_DATA - 22) * 4 / cable propagation velocity */ - if ((phydev->phy_id & MICREL_PHY_ID_MASK) == PHY_ID_KSZ9131) + if (phydev_id_compare(phydev, PHY_ID_KSZ9131)) dt = clamp(dt - 22, 0, 255); return (dt * 400) / 10; @@ -1998,7 +1998,7 @@ static __always_inline int ksz886x_cable_test_fault_length(struct phy_device *ph */ dt = FIELD_GET(data_mask, status); - if ((phydev->phy_id & MICREL_PHY_ID_MASK) == PHY_ID_LAN8814) + if (phydev_id_compare(phydev, PHY_ID_LAN8814)) return ((dt - 22) * 800) / 10; else return (dt * 400) / 10; diff --git a/drivers/net/phy/microchip_t1s.c b/drivers/net/phy/microchip_t1s.c index 094967b3c1117..534ca7d1b061d 100644 --- a/drivers/net/phy/microchip_t1s.c +++ b/drivers/net/phy/microchip_t1s.c @@ -1,19 +1,29 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Driver for Microchip 10BASE-T1S LAN867X PHY + * Driver for Microchip 10BASE-T1S PHYs * * Support: Microchip Phys: - * lan8670, lan8671, lan8672 + * lan8670/1/2 Rev.B1 + * lan8650/1 Rev.B0 Internal PHYs */ #include #include #include -#define PHY_ID_LAN867X 0x0007C160 +#define PHY_ID_LAN867X_REVB1 0x0007C162 +#define PHY_ID_LAN865X_REVB0 0x0007C1B3 -#define LAN867X_REG_IRQ_1_CTL 0x001C -#define LAN867X_REG_IRQ_2_CTL 0x001D +#define LAN867X_REG_STS2 0x0019 + +#define LAN867x_RESET_COMPLETE_STS BIT(11) + +#define LAN865X_REG_CFGPARAM_ADDR 0x00D8 +#define LAN865X_REG_CFGPARAM_DATA 0x00D9 +#define LAN865X_REG_CFGPARAM_CTRL 0x00DA +#define LAN865X_REG_STS2 0x0019 + +#define LAN865X_CFGPARAM_READ_ENABLE BIT(1) /* The arrays below are pulled from the following table from AN1699 * Access MMD Address Value Mask @@ -31,72 +41,219 @@ * W 0x1F 0x0099 0x7F80 ------ */ -static const int lan867x_fixup_registers[12] = { +static const u32 lan867x_revb1_fixup_registers[12] = { 0x00D0, 0x00D1, 0x0084, 0x0085, 0x008A, 0x0087, 0x0088, 0x008B, 0x0080, 0x00F1, 0x0096, 0x0099, }; -static const int lan867x_fixup_values[12] = { +static const u16 lan867x_revb1_fixup_values[12] = { 0x0002, 0x0000, 0x3380, 0x0006, 0xC000, 0x801C, 0x033F, 0x0404, 0x0600, 0x2400, 0x2000, 0x7F80, }; -static const int lan867x_fixup_masks[12] = { +static const u16 lan867x_revb1_fixup_masks[12] = { 0x0E03, 0x0300, 0xFFC0, 0x000F, 0xF800, 0x801C, 0x1FFF, 0xFFFF, 0x0600, 0x7F00, 0x2000, 0xFFFF, }; -static int lan867x_config_init(struct phy_device *phydev) +/* LAN865x Rev.B0 configuration parameters from AN1760 */ +static const u32 lan865x_revb0_fixup_registers[28] = { + 0x0091, 0x0081, 0x0043, 0x0044, + 0x0045, 0x0053, 0x0054, 0x0055, + 0x0040, 0x0050, 0x00D0, 0x00E9, + 0x00F5, 0x00F4, 0x00F8, 0x00F9, + 0x00B0, 0x00B1, 0x00B2, 0x00B3, + 0x00B4, 0x00B5, 0x00B6, 0x00B7, + 0x00B8, 0x00B9, 0x00BA, 
0x00BB, +}; + +static const u16 lan865x_revb0_fixup_values[28] = { + 0x9660, 0x00C0, 0x00FF, 0xFFFF, + 0x0000, 0x00FF, 0xFFFF, 0x0000, + 0x0002, 0x0002, 0x5F21, 0x9E50, + 0x1CF8, 0xC020, 0x9B00, 0x4E53, + 0x0103, 0x0910, 0x1D26, 0x002A, + 0x0103, 0x070D, 0x1720, 0x0027, + 0x0509, 0x0E13, 0x1C25, 0x002B, +}; + +static const u16 lan865x_revb0_fixup_cfg_regs[5] = { + 0x0084, 0x008A, 0x00AD, 0x00AE, 0x00AF +}; + +/* Pulled from AN1760 describing 'indirect read' + * + * write_register(0x4, 0x00D8, addr) + * write_register(0x4, 0x00DA, 0x2) + * return (int8)(read_register(0x4, 0x00D9)) + * + * 0x4 refers to memory map selector 4, which maps to MDIO_MMD_VEND2 + */ +static int lan865x_revb0_indirect_read(struct phy_device *phydev, u16 addr) +{ + int ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, LAN865X_REG_CFGPARAM_ADDR, + addr); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, LAN865X_REG_CFGPARAM_CTRL, + LAN865X_CFGPARAM_READ_ENABLE); + if (ret) + return ret; + + return phy_read_mmd(phydev, MDIO_MMD_VEND2, LAN865X_REG_CFGPARAM_DATA); +} + +/* This is pulled straight from AN1760 from 'calculation of offset 1' & + * 'calculation of offset 2' + */ +static int lan865x_generate_cfg_offsets(struct phy_device *phydev, s8 offsets[2]) +{ + const u16 fixup_regs[2] = {0x0004, 0x0008}; + int ret; + + for (int i = 0; i < ARRAY_SIZE(fixup_regs); i++) { + ret = lan865x_revb0_indirect_read(phydev, fixup_regs[i]); + if (ret < 0) + return ret; + if (ret & BIT(4)) + offsets[i] = ret | 0xE0; + else + offsets[i] = ret; + } + + return 0; +} + +static int lan865x_read_cfg_params(struct phy_device *phydev, u16 cfg_params[]) +{ + int ret; + + for (int i = 0; i < ARRAY_SIZE(lan865x_revb0_fixup_cfg_regs); i++) { + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, + lan865x_revb0_fixup_cfg_regs[i]); + if (ret < 0) + return ret; + cfg_params[i] = (u16)ret; + } + + return 0; +} + +static int lan865x_write_cfg_params(struct phy_device *phydev, u16 cfg_params[]) { - /* HW quirk: Microchip states in the application note (AN1699) for the phy - * that a set of read-modify-write (rmw) operations has to be performed - * on a set of seemingly magic registers. - * The result of these operations is just described as 'optimal performance' - * Microchip gives no explanation as to what these mmd regs do, - * in fact they are marked as reserved in the datasheet. - * It is unclear if phy_modify_mmd would be safe to use or if a write - * really has to happen to each register. - * In order to exactly conform to what is stated in the AN phy_write_mmd is - * used, which might then write the same value back as read + modified. 
+ int ret; + + for (int i = 0; i < ARRAY_SIZE(lan865x_revb0_fixup_cfg_regs); i++) { + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + lan865x_revb0_fixup_cfg_regs[i], + cfg_params[i]); + if (ret) + return ret; + } + + return 0; +} + +static int lan865x_setup_cfgparam(struct phy_device *phydev) +{ + u16 cfg_params[ARRAY_SIZE(lan865x_revb0_fixup_cfg_regs)]; + u16 cfg_results[5]; + s8 offsets[2]; + int ret; + + ret = lan865x_generate_cfg_offsets(phydev, offsets); + if (ret) + return ret; + + ret = lan865x_read_cfg_params(phydev, cfg_params); + if (ret) + return ret; + + cfg_results[0] = (cfg_params[0] & 0x000F) | + FIELD_PREP(GENMASK(15, 10), 9 + offsets[0]) | + FIELD_PREP(GENMASK(15, 4), 14 + offsets[0]); + cfg_results[1] = (cfg_params[1] & 0x03FF) | + FIELD_PREP(GENMASK(15, 10), 40 + offsets[1]); + cfg_results[2] = (cfg_params[2] & 0xC0C0) | + FIELD_PREP(GENMASK(15, 8), 5 + offsets[0]) | + (9 + offsets[0]); + cfg_results[3] = (cfg_params[3] & 0xC0C0) | + FIELD_PREP(GENMASK(15, 8), 9 + offsets[0]) | + (14 + offsets[0]); + cfg_results[4] = (cfg_params[4] & 0xC0C0) | + FIELD_PREP(GENMASK(15, 8), 17 + offsets[0]) | + (22 + offsets[0]); + + return lan865x_write_cfg_params(phydev, cfg_results); +} + +static int lan865x_revb0_config_init(struct phy_device *phydev) +{ + int ret; + + /* Reference to AN1760 + * https://ww1.microchip.com/downloads/aemDocuments/documents/AIS/ProductDocuments/SupportingCollateral/AN-LAN8650-1-Configuration-60001760.pdf + */ + for (int i = 0; i < ARRAY_SIZE(lan865x_revb0_fixup_registers); i++) { + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + lan865x_revb0_fixup_registers[i], + lan865x_revb0_fixup_values[i]); + if (ret) + return ret; + } + /* Function to calculate and write the configuration parameters in the + * 0x0084, 0x008A, 0x00AD, 0x00AE and 0x00AF registers (from AN1760) */ + return lan865x_setup_cfgparam(phydev); +} - int reg_value; +static int lan867x_revb1_config_init(struct phy_device *phydev) +{ int err; - int reg; - /* Read-Modified Write Pseudocode (from AN1699) - * current_val = read_register(mmd, addr) // Read current register value - * new_val = current_val AND (NOT mask) // Clear bit fields to be written - * new_val = new_val OR value // Set bits - * write_register(mmd, addr, new_val) // Write back updated register value + /* The chip completes a reset in 3us, we might get here earlier than + * that, as an added margin we'll conditionally sleep 5us. */ - for (int i = 0; i < ARRAY_SIZE(lan867x_fixup_registers); i++) { - reg = lan867x_fixup_registers[i]; - reg_value = phy_read_mmd(phydev, MDIO_MMD_VEND2, reg); - reg_value &= ~lan867x_fixup_masks[i]; - reg_value |= lan867x_fixup_values[i]; - err = phy_write_mmd(phydev, MDIO_MMD_VEND2, reg, reg_value); - if (err != 0) + err = phy_read_mmd(phydev, MDIO_MMD_VEND2, LAN867X_REG_STS2); + if (err < 0) + return err; + + if (!(err & LAN867x_RESET_COMPLETE_STS)) { + udelay(5); + err = phy_read_mmd(phydev, MDIO_MMD_VEND2, LAN867X_REG_STS2); + if (err < 0) return err; + if (!(err & LAN867x_RESET_COMPLETE_STS)) { + phydev_err(phydev, "PHY reset failed\n"); + return -ENODEV; + } } - /* None of the interrupts in the lan867x phy seem relevant. - * Other phys inspect the link status and call phy_trigger_machine - * in the interrupt handler. - * This phy does not support link status, and thus has no interrupt - * for it either. - * So we'll just disable all interrupts on the chip. 
+ /* Reference to AN1699 + * https://ww1.microchip.com/downloads/aemDocuments/documents/AIS/ProductDocuments/SupportingCollateral/AN-LAN8670-1-2-config-60001699.pdf + * AN1699 says Read, Modify, Write, but the Write is not required if the + * register already has the required value. So it is safe to use + * phy_modify_mmd here. */ - err = phy_write_mmd(phydev, MDIO_MMD_VEND2, LAN867X_REG_IRQ_1_CTL, 0xFFFF); - if (err != 0) - return err; - return phy_write_mmd(phydev, MDIO_MMD_VEND2, LAN867X_REG_IRQ_2_CTL, 0xFFFF); + for (int i = 0; i < ARRAY_SIZE(lan867x_revb1_fixup_registers); i++) { + err = phy_modify_mmd(phydev, MDIO_MMD_VEND2, + lan867x_revb1_fixup_registers[i], + lan867x_revb1_fixup_masks[i], + lan867x_revb1_fixup_values[i]); + if (err) + return err; + } + + return 0; } -static int lan867x_read_status(struct phy_device *phydev) +static int lan86xx_read_status(struct phy_device *phydev) { /* The phy has some limitations, namely: * - always reports link up @@ -111,28 +268,39 @@ static int lan867x_read_status(struct phy_device *phydev) return 0; } -static struct phy_driver lan867x_driver[] = { +static struct phy_driver microchip_t1s_driver[] = { { - PHY_ID_MATCH_MODEL(PHY_ID_LAN867X), - .name = "LAN867X", + PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVB1), + .name = "LAN867X Rev.B1", .features = PHY_BASIC_T1S_P2MP_FEATURES, - .config_init = lan867x_config_init, - .read_status = lan867x_read_status, + .config_init = lan867x_revb1_config_init, + .read_status = lan86xx_read_status, .get_plca_cfg = genphy_c45_plca_get_cfg, .set_plca_cfg = genphy_c45_plca_set_cfg, .get_plca_status = genphy_c45_plca_get_status, - } + }, + { + PHY_ID_MATCH_EXACT(PHY_ID_LAN865X_REVB0), + .name = "LAN865X Rev.B0 Internal Phy", + .features = PHY_BASIC_T1S_P2MP_FEATURES, + .config_init = lan865x_revb0_config_init, + .read_status = lan86xx_read_status, + .get_plca_cfg = genphy_c45_plca_get_cfg, + .set_plca_cfg = genphy_c45_plca_set_cfg, + .get_plca_status = genphy_c45_plca_get_status, + }, }; -module_phy_driver(lan867x_driver); +module_phy_driver(microchip_t1s_driver); static struct mdio_device_id __maybe_unused tbl[] = { - { PHY_ID_MATCH_MODEL(PHY_ID_LAN867X) }, + { PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVB1) }, + { PHY_ID_MATCH_EXACT(PHY_ID_LAN865X_REVB0) }, { } }; MODULE_DEVICE_TABLE(mdio, tbl); -MODULE_DESCRIPTION("Microchip 10BASE-T1S lan867x Phy driver"); +MODULE_DESCRIPTION("Microchip 10BASE-T1S PHYs driver"); MODULE_AUTHOR("Ramón Nordin Rodriguez"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h index a50235fdf7d99..defe5cc6d4fce 100644 --- a/drivers/net/phy/mscc/mscc.h +++ b/drivers/net/phy/mscc/mscc.h @@ -179,6 +179,7 @@ enum rgmii_clock_delay { #define VSC8502_RGMII_CNTL 20 #define VSC8502_RGMII_RX_DELAY_MASK 0x0070 #define VSC8502_RGMII_TX_DELAY_MASK 0x0007 +#define VSC8502_RGMII_RX_CLK_DISABLE 0x0800 #define MSCC_PHY_WOL_LOWER_MAC_ADDR 21 #define MSCC_PHY_WOL_MID_MAC_ADDR 22 @@ -276,6 +277,7 @@ enum rgmii_clock_delay { /* Microsemi PHY ID's * Code assumes lowest nibble is 0 */ +#define PHY_ID_VSC8501 0x00070530 #define PHY_ID_VSC8502 0x00070630 #define PHY_ID_VSC8504 0x000704c0 #define PHY_ID_VSC8514 0x00070670 diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 62bf99e45af16..28df8a2e4230f 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -519,16 +519,27 @@ static int vsc85xx_mac_if_set(struct phy_device *phydev, * * 2.0 ns (which causes the data to be sampled at exactly half way between * clock 
transitions at 1000 Mbps) if delays should be enabled */ -static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl, - u16 rgmii_rx_delay_mask, - u16 rgmii_tx_delay_mask) +static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl, + u16 rgmii_rx_delay_mask, + u16 rgmii_tx_delay_mask) { u16 rgmii_rx_delay_pos = ffs(rgmii_rx_delay_mask) - 1; u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1; u16 reg_val = 0; - int rc; + u16 mask = 0; + int rc = 0; - mutex_lock(&phydev->lock); + /* For traffic to pass, the VSC8502 family needs the RX_CLK disable bit + * to be unset for all PHY modes, so do that as part of the paged + * register modification. + * For some family members (like VSC8530/31/40/41) this bit is reserved + * and read-only, and the RX clock is enabled by default. + */ + if (rgmii_cntl == VSC8502_RGMII_CNTL) + mask |= VSC8502_RGMII_RX_CLK_DISABLE; + + if (phy_interface_is_rgmii(phydev)) + mask |= rgmii_rx_delay_mask | rgmii_tx_delay_mask; if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID || phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) @@ -537,31 +548,20 @@ static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl, phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) reg_val |= RGMII_CLK_DELAY_2_0_NS << rgmii_tx_delay_pos; - rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2, - rgmii_cntl, - rgmii_rx_delay_mask | rgmii_tx_delay_mask, - reg_val); - - mutex_unlock(&phydev->lock); + if (mask) + rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2, + rgmii_cntl, mask, reg_val); return rc; } static int vsc85xx_default_config(struct phy_device *phydev) { - int rc; - phydev->mdix_ctrl = ETH_TP_MDI_AUTO; - if (phy_interface_mode_is_rgmii(phydev->interface)) { - rc = vsc85xx_rgmii_set_skews(phydev, VSC8502_RGMII_CNTL, - VSC8502_RGMII_RX_DELAY_MASK, - VSC8502_RGMII_TX_DELAY_MASK); - if (rc) - return rc; - } - - return 0; + return vsc85xx_update_rgmii_cntl(phydev, VSC8502_RGMII_CNTL, + VSC8502_RGMII_RX_DELAY_MASK, + VSC8502_RGMII_TX_DELAY_MASK); } static int vsc85xx_get_tunable(struct phy_device *phydev, @@ -1758,13 +1758,11 @@ static int vsc8584_config_init(struct phy_device *phydev) if (ret) return ret; - if (phy_interface_is_rgmii(phydev)) { - ret = vsc85xx_rgmii_set_skews(phydev, VSC8572_RGMII_CNTL, - VSC8572_RGMII_RX_DELAY_MASK, - VSC8572_RGMII_TX_DELAY_MASK); - if (ret) - return ret; - } + ret = vsc85xx_update_rgmii_cntl(phydev, VSC8572_RGMII_CNTL, + VSC8572_RGMII_RX_DELAY_MASK, + VSC8572_RGMII_TX_DELAY_MASK); + if (ret) + return ret; ret = genphy_soft_reset(phydev); if (ret) @@ -2316,6 +2314,30 @@ static int vsc85xx_probe(struct phy_device *phydev) /* Microsemi VSC85xx PHYs */ static struct phy_driver vsc85xx_driver[] = { +{ + .phy_id = PHY_ID_VSC8501, + .name = "Microsemi GE VSC8501 SyncE", + .phy_id_mask = 0xfffffff0, + /* PHY_BASIC_FEATURES */ + .soft_reset = &genphy_soft_reset, + .config_init = &vsc85xx_config_init, + .config_aneg = &vsc85xx_config_aneg, + .read_status = &vsc85xx_read_status, + .handle_interrupt = vsc85xx_handle_interrupt, + .config_intr = &vsc85xx_config_intr, + .suspend = &genphy_suspend, + .resume = &genphy_resume, + .probe = &vsc85xx_probe, + .set_wol = &vsc85xx_wol_set, + .get_wol = &vsc85xx_wol_get, + .get_tunable = &vsc85xx_get_tunable, + .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, +}, { .phy_id = 
PHY_ID_VSC8502, .name = "Microsemi GE VSC8502 SyncE", @@ -2656,6 +2678,8 @@ static struct phy_driver vsc85xx_driver[] = { module_phy_driver(vsc85xx_driver); static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = { + { PHY_ID_VSC8501, 0xfffffff0, }, + { PHY_ID_VSC8502, 0xfffffff0, }, { PHY_ID_VSC8504, 0xfffffff0, }, { PHY_ID_VSC8514, 0xfffffff0, }, { PHY_ID_VSC8530, 0xfffffff0, }, diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 0c0df38cd1ab8..bdf00b2b2c1dc 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -52,6 +52,7 @@ static const char *phy_state_to_str(enum phy_state st) PHY_STATE_STR(NOLINK) PHY_STATE_STR(CABLETEST) PHY_STATE_STR(HALTED) + PHY_STATE_STR(ERROR) } return NULL; @@ -1184,7 +1185,7 @@ void phy_stop_machine(struct phy_device *phydev) static void phy_process_error(struct phy_device *phydev) { mutex_lock(&phydev->lock); - phydev->state = PHY_HALTED; + phydev->state = PHY_ERROR; mutex_unlock(&phydev->lock); phy_trigger_machine(phydev); @@ -1198,10 +1199,10 @@ static void phy_error_precise(struct phy_device *phydev, } /** - * phy_error - enter HALTED state for this PHY device + * phy_error - enter ERROR state for this PHY device * @phydev: target phy_device struct * - * Moves the PHY to the HALTED state in response to a read + * Moves the PHY to the ERROR state in response to a read * or write error, and tells the controller the link is down. * Must not be called from interrupt context, or while the * phydev->lock is held. @@ -1326,7 +1327,8 @@ void phy_stop(struct phy_device *phydev) struct net_device *dev = phydev->attached_dev; enum phy_state old_state; - if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) { + if (!phy_is_started(phydev) && phydev->state != PHY_DOWN && + phydev->state != PHY_ERROR) { WARN(1, "called from state %s\n", phy_state_to_str(phydev->state)); return; @@ -1443,6 +1445,7 @@ void phy_state_machine(struct work_struct *work) } break; case PHY_HALTED: + case PHY_ERROR: if (phydev->link) { phydev->link = 0; phy_link_down(phydev); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8852b0c531146..2cad9cc3f6b80 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -454,8 +454,7 @@ int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask) fixup = list_entry(pos, struct phy_fixup, list); if ((!strcmp(fixup->bus_id, bus_id)) && - ((fixup->phy_uid & phy_uid_mask) == - (phy_uid & phy_uid_mask))) { + phy_id_compare(fixup->phy_uid, phy_uid, phy_uid_mask)) { list_del(&fixup->list); kfree(fixup); ret = 0; @@ -491,8 +490,8 @@ static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup) if (strcmp(fixup->bus_id, PHY_ANY_ID) != 0) return 0; - if ((fixup->phy_uid & fixup->phy_uid_mask) != - (phydev->phy_id & fixup->phy_uid_mask)) + if (!phy_id_compare(phydev->phy_id, fixup->phy_uid, + fixup->phy_uid_mask)) if (fixup->phy_uid != PHY_ANY_UID) return 0; @@ -539,15 +538,14 @@ static int phy_bus_match(struct device *dev, struct device_driver *drv) if (phydev->c45_ids.device_ids[i] == 0xffffffff) continue; - if ((phydrv->phy_id & phydrv->phy_id_mask) == - (phydev->c45_ids.device_ids[i] & - phydrv->phy_id_mask)) + if (phy_id_compare(phydev->c45_ids.device_ids[i], + phydrv->phy_id, phydrv->phy_id_mask)) return 1; } return 0; } else { - return (phydrv->phy_id & phydrv->phy_id_mask) == - (phydev->phy_id & phydrv->phy_id_mask); + return phy_id_compare(phydev->phy_id, phydrv->phy_id, + phydrv->phy_id_mask); } } diff --git a/drivers/net/phy/phylink.c 
b/drivers/net/phy/phylink.c index cf53096047e61..508434fd4da8a 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -156,6 +156,23 @@ static const char *phylink_an_mode_str(unsigned int mode) return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown"; } +static unsigned int phylink_interface_signal_rate(phy_interface_t interface) +{ + switch (interface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: /* 1.25Mbd */ + return 1250; + case PHY_INTERFACE_MODE_2500BASEX: /* 3.125Mbd */ + return 3125; + case PHY_INTERFACE_MODE_5GBASER: /* 5.15625Mbd */ + return 5156; + case PHY_INTERFACE_MODE_10GBASER: /* 10.3125Mbd */ + return 10313; + default: + return 0; + } +} + /** * phylink_interface_max_speed() - get the maximum speed of a phy interface * @interface: phy interface mode defined by &typedef phy_interface_t @@ -695,14 +712,11 @@ static int phylink_validate(struct phylink *pl, unsigned long *supported, { const unsigned long *interfaces = pl->config->supported_interfaces; - if (!phy_interface_empty(interfaces)) { - if (state->interface == PHY_INTERFACE_MODE_NA) - return phylink_validate_mask(pl, supported, state, - interfaces); + if (state->interface == PHY_INTERFACE_MODE_NA) + return phylink_validate_mask(pl, supported, state, interfaces); - if (!test_bit(state->interface, interfaces)) - return -EINVAL; - } + if (!test_bit(state->interface, interfaces)) + return -EINVAL; return phylink_validate_mac_and_pcs(pl, supported, state); } @@ -963,11 +977,10 @@ static void phylink_apply_manual_flow(struct phylink *pl, state->pause = pl->link_config.pause; } -static void phylink_resolve_flow(struct phylink_link_state *state) +static void phylink_resolve_an_pause(struct phylink_link_state *state) { bool tx_pause, rx_pause; - state->pause = MLO_PAUSE_NONE; if (state->duplex == DUPLEX_FULL) { linkmode_resolve_pause(state->advertising, state->lp_advertising, @@ -979,6 +992,25 @@ static void phylink_resolve_flow(struct phylink_link_state *state) } } +static int phylink_pcs_config(struct phylink_pcs *pcs, unsigned int mode, + const struct phylink_link_state *state, + bool permit_pause_to_mac) +{ + if (!pcs) + return 0; + + return pcs->ops->pcs_config(pcs, mode, state->interface, + state->advertising, permit_pause_to_mac); +} + +static void phylink_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, int speed, + int duplex) +{ + if (pcs && pcs->ops->pcs_link_up) + pcs->ops->pcs_link_up(pcs, mode, interface, speed, duplex); +} + static void phylink_pcs_poll_stop(struct phylink *pl) { if (pl->cfg_link_an_mode == MLO_AN_INBAND) @@ -1025,6 +1057,7 @@ static void phylink_major_config(struct phylink *pl, bool restart, { struct phylink_pcs *pcs = NULL; bool pcs_changed = false; + unsigned int rate_kbd; int err; phylink_dbg(pl, "major config %s\n", phy_modes(state->interface)); @@ -1061,18 +1094,15 @@ static void phylink_major_config(struct phylink *pl, bool restart, phylink_mac_config(pl, state); - if (pl->pcs) { - err = pl->pcs->ops->pcs_config(pl->pcs, pl->cur_link_an_mode, - state->interface, - state->advertising, - !!(pl->link_config.pause & - MLO_PAUSE_AN)); - if (err < 0) - phylink_err(pl, "pcs_config failed: %pe\n", - ERR_PTR(err)); - if (err > 0) - restart = true; - } + err = phylink_pcs_config(pl->pcs, pl->cur_link_an_mode, state, + !!(pl->link_config.pause & + MLO_PAUSE_AN)); + if (err < 0) + phylink_err(pl, "pcs_config failed: %pe\n", + ERR_PTR(err)); + else if (err > 0) + restart = true; + if (restart) 
phylink_mac_pcs_an_restart(pl); @@ -1084,6 +1114,12 @@ static void phylink_major_config(struct phylink *pl, bool restart, ERR_PTR(err)); } + if (pl->sfp_bus) { + rate_kbd = phylink_interface_signal_rate(state->interface); + if (rate_kbd) + sfp_upstream_set_signal_rate(pl->sfp_bus, rate_kbd); + } + phylink_pcs_poll_start(pl); } @@ -1117,11 +1153,9 @@ static int phylink_change_inband_advert(struct phylink *pl) * restart negotiation if the pcs_config() helper indicates that * the programmed advertisement has changed. */ - ret = pl->pcs->ops->pcs_config(pl->pcs, pl->cur_link_an_mode, - pl->link_config.interface, - pl->link_config.advertising, - !!(pl->link_config.pause & - MLO_PAUSE_AN)); + ret = phylink_pcs_config(pl->pcs, pl->cur_link_an_mode, + &pl->link_config, + !!(pl->link_config.pause & MLO_PAUSE_AN)); if (ret < 0) return ret; @@ -1172,7 +1206,8 @@ static void phylink_get_fixed_state(struct phylink *pl, else if (pl->link_gpio) state->link = !!gpiod_get_value_cansleep(pl->link_gpio); - phylink_resolve_flow(state); + state->pause = MLO_PAUSE_NONE; + phylink_resolve_an_pause(state); } static void phylink_mac_initial_config(struct phylink *pl, bool force_restart) @@ -1252,9 +1287,8 @@ static void phylink_link_up(struct phylink *pl, pl->cur_interface = link_state.interface; - if (pl->pcs && pl->pcs->ops->pcs_link_up) - pl->pcs->ops->pcs_link_up(pl->pcs, pl->cur_link_an_mode, - pl->cur_interface, speed, duplex); + phylink_pcs_link_up(pl->pcs, pl->cur_link_an_mode, pl->cur_interface, + speed, duplex); pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->cur_link_an_mode, pl->cur_interface, speed, duplex, @@ -1489,19 +1523,18 @@ struct phylink *phylink_create(struct phylink_config *config, struct phylink *pl; int ret; - if (mac_ops->mac_select_pcs && - mac_ops->mac_select_pcs(config, PHY_INTERFACE_MODE_NA) != - ERR_PTR(-EOPNOTSUPP)) - using_mac_select_pcs = true; - /* Validate the supplied configuration */ - if (using_mac_select_pcs && - phy_interface_empty(config->supported_interfaces)) { + if (phy_interface_empty(config->supported_interfaces)) { dev_err(config->dev, - "phylink: error: empty supported_interfaces but mac_select_pcs() method present\n"); + "phylink: error: empty supported_interfaces\n"); return ERR_PTR(-EINVAL); } + if (mac_ops->mac_select_pcs && + mac_ops->mac_select_pcs(config, PHY_INTERFACE_MODE_NA) != + ERR_PTR(-EOPNOTSUPP)) + using_mac_select_pcs = true; + pl = kzalloc(sizeof(*pl), GFP_KERNEL); if (!pl) return ERR_PTR(-ENOMEM); @@ -2226,6 +2259,10 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, ASSERT_RTNL(); + /* Mask out unsupported advertisements */ + linkmode_and(config.advertising, kset->link_modes.advertising, + pl->supported); + if (pl->phydev) { /* We can rely on phylib for this update; we also do not need * to update the pl->link_config settings: @@ -2250,10 +2287,6 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, config = pl->link_config; - /* Mask out unsupported advertisements */ - linkmode_and(config.advertising, kset->link_modes.advertising, - pl->supported); - /* FIXME: should we reject autoneg if phy/mac does not support it? 
*/ switch (kset->base.autoneg) { case AUTONEG_DISABLE: @@ -3127,8 +3160,8 @@ static void phylink_sfp_link_up(void *upstream) */ static bool phylink_phy_no_inband(struct phy_device *phy) { - return phy->is_c45 && - (phy->c45_ids.device_ids[1] & 0xfffffff0) == 0xae025150; + return phy->is_c45 && phy_id_compare(phy->c45_ids.device_ids[1], + 0xae025150, 0xfffffff0); } static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy) @@ -3192,10 +3225,48 @@ static const struct sfp_upstream_ops sfp_phylink_ops = { /* Helpers for MAC drivers */ +static struct { + int bit; + int speed; +} phylink_c73_priority_resolution[] = { + { ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, SPEED_100000 }, + { ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, SPEED_100000 }, + /* 100GBASE-KP4 and 100GBASE-CR10 not supported */ + { ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, SPEED_40000 }, + { ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, SPEED_40000 }, + { ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, SPEED_10000 }, + { ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, SPEED_10000 }, + /* 5GBASE-KR not supported */ + { ETHTOOL_LINK_MODE_2500baseX_Full_BIT, SPEED_2500 }, + { ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, SPEED_1000 }, +}; + +void phylink_resolve_c73(struct phylink_link_state *state) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(phylink_c73_priority_resolution); i++) { + int bit = phylink_c73_priority_resolution[i].bit; + if (linkmode_test_bit(bit, state->advertising) && + linkmode_test_bit(bit, state->lp_advertising)) + break; + } + + if (i < ARRAY_SIZE(phylink_c73_priority_resolution)) { + state->speed = phylink_c73_priority_resolution[i].speed; + state->duplex = DUPLEX_FULL; + } else { + /* negotiation failure */ + state->link = false; + } + + phylink_resolve_an_pause(state); +} +EXPORT_SYMBOL_GPL(phylink_resolve_c73); + static void phylink_decode_c37_word(struct phylink_link_state *state, uint16_t config_reg, int speed) { - bool tx_pause, rx_pause; int fd_bit; if (speed == SPEED_2500) @@ -3214,13 +3285,7 @@ static void phylink_decode_c37_word(struct phylink_link_state *state, state->link = false; } - linkmode_resolve_pause(state->advertising, state->lp_advertising, - &tx_pause, &rx_pause); - - if (tx_pause) - state->pause |= MLO_PAUSE_TX; - if (rx_pause) - state->pause |= MLO_PAUSE_RX; + phylink_resolve_an_pause(state); } static void phylink_decode_sgmii_word(struct phylink_link_state *state, diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 9372e5a4cadcf..e8dd47bffe435 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -575,6 +575,26 @@ static void sfp_upstream_clear(struct sfp_bus *bus) bus->upstream = NULL; } +/** + * sfp_upstream_set_signal_rate() - set data signalling rate + * @bus: a pointer to the &struct sfp_bus structure for the sfp module + * @rate_kbd: signalling rate in units of 1000 baud + * + * Configure the rate select settings on the SFP module for the signalling + * rate (not the same as the data rate). 
+ * + * Locks that may be held: + * Phylink's state_mutex + * rtnl lock + * SFP's sm_mutex + */ +void sfp_upstream_set_signal_rate(struct sfp_bus *bus, unsigned int rate_kbd) +{ + if (bus->registered) + bus->socket_ops->set_signal_rate(bus->sfp, rate_kbd); +} +EXPORT_SYMBOL_GPL(sfp_upstream_set_signal_rate); + /** * sfp_bus_find_fwnode() - parse and locate the SFP bus from fwnode * @fwnode: firmware node for the parent device (MAC or PHY) diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 89636dc71e483..d855a18308d78 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -24,14 +24,18 @@ enum { GPIO_LOS, GPIO_TX_FAULT, GPIO_TX_DISABLE, - GPIO_RATE_SELECT, + GPIO_RS0, + GPIO_RS1, GPIO_MAX, SFP_F_PRESENT = BIT(GPIO_MODDEF0), SFP_F_LOS = BIT(GPIO_LOS), SFP_F_TX_FAULT = BIT(GPIO_TX_FAULT), SFP_F_TX_DISABLE = BIT(GPIO_TX_DISABLE), - SFP_F_RATE_SELECT = BIT(GPIO_RATE_SELECT), + SFP_F_RS0 = BIT(GPIO_RS0), + SFP_F_RS1 = BIT(GPIO_RS1), + + SFP_F_OUTPUTS = SFP_F_TX_DISABLE | SFP_F_RS0 | SFP_F_RS1, SFP_E_INSERT = 0, SFP_E_REMOVE, @@ -148,6 +152,7 @@ static const char *gpio_names[] = { "tx-fault", "tx-disable", "rate-select0", + "rate-select1", }; static const enum gpiod_flags gpio_flags[] = { @@ -156,6 +161,7 @@ static const enum gpiod_flags gpio_flags[] = { GPIOD_IN, GPIOD_ASIS, GPIOD_ASIS, + GPIOD_ASIS, }; /* t_start_up (SFF-8431) or t_init (SFF-8472) is the time required for a @@ -164,7 +170,6 @@ static const enum gpiod_flags gpio_flags[] = { * on board (for a copper SFP) time to initialise. */ #define T_WAIT msecs_to_jiffies(50) -#define T_WAIT_ROLLBALL msecs_to_jiffies(25000) #define T_START_UP msecs_to_jiffies(300) #define T_START_UP_BAD_GPON msecs_to_jiffies(60000) @@ -242,10 +247,18 @@ struct sfp { bool need_poll; + /* Access rules: + * state_hw_drive: st_mutex held + * state_hw_mask: st_mutex held + * state_soft_mask: st_mutex held + * state: st_mutex held unless reading input bits + */ struct mutex st_mutex; /* Protects state */ + unsigned int state_hw_drive; unsigned int state_hw_mask; unsigned int state_soft_mask; unsigned int state; + struct delayed_work poll; struct delayed_work timeout; struct mutex sm_mutex; /* Protects state machine */ @@ -262,6 +275,10 @@ struct sfp { unsigned int module_t_start_up; unsigned int module_t_wait; + unsigned int rate_kbd; + unsigned int rs_threshold_kbd; + unsigned int rs_state_mask; + bool have_a2; bool tx_fault_ignore; @@ -312,7 +329,7 @@ static bool sfp_module_supported(const struct sfp_eeprom_id *id) static const struct sff_data sfp_data = { .gpios = SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT | - SFP_F_TX_DISABLE | SFP_F_RATE_SELECT, + SFP_F_TX_DISABLE | SFP_F_RS0 | SFP_F_RS1, .module_supported = sfp_module_supported, }; @@ -333,6 +350,27 @@ static void sfp_fixup_ignore_tx_fault(struct sfp *sfp) sfp->tx_fault_ignore = true; } +// For 10GBASE-T short-reach modules +static void sfp_fixup_10gbaset_30m(struct sfp *sfp) +{ + sfp->id.base.connector = SFF8024_CONNECTOR_RJ45; + sfp->id.base.extended_cc = SFF8024_ECC_10GBASE_T_SR; +} + +static void sfp_fixup_rollball_proto(struct sfp *sfp, unsigned int secs) +{ + sfp->mdio_protocol = MDIO_I2C_ROLLBALL; + sfp->module_t_wait = msecs_to_jiffies(secs * 1000); +} + +static void sfp_fixup_fs_10gt(struct sfp *sfp) +{ + sfp_fixup_10gbaset_30m(sfp); + + // These SFPs need 4 seconds before the PHY can be accessed + sfp_fixup_rollball_proto(sfp, 4); +} + static void sfp_fixup_halny_gsfp(struct sfp *sfp) { /* Ignore the TX_FAULT and LOS signals on this module. 
@@ -344,8 +382,8 @@ static void sfp_fixup_halny_gsfp(struct sfp *sfp) static void sfp_fixup_rollball(struct sfp *sfp) { - sfp->mdio_protocol = MDIO_I2C_ROLLBALL; - sfp->module_t_wait = T_WAIT_ROLLBALL; + // Rollball SFPs need 25 seconds before the PHY can be accessed + sfp_fixup_rollball_proto(sfp, 25); } static void sfp_fixup_rollball_cc(struct sfp *sfp) @@ -410,6 +448,10 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex, sfp_fixup_long_startup), + // Fiberstore SFP-10G-T doesn't identify as copper, and uses the + // Rollball protocol to talk to the PHY. + SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt), + SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp), // HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports @@ -427,6 +469,11 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK_M("UBNT", "UF-INSTANT", sfp_quirk_ubnt_uf_instant), + // Walsun HXSX-ATR[CI]-1 don't identify as copper, and use the + // Rollball protocol to talk to the PHY. + SFP_QUIRK_F("Walsun", "HXSX-ATRC-1", sfp_fixup_fs_10gt), + SFP_QUIRK_F("Walsun", "HXSX-ATRI-1", sfp_fixup_fs_10gt), + SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc), SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g), SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc), @@ -500,20 +547,37 @@ static unsigned int sff_gpio_get_state(struct sfp *sfp) static void sfp_gpio_set_state(struct sfp *sfp, unsigned int state) { - if (state & SFP_F_PRESENT) { - /* If the module is present, drive the signals */ - if (sfp->gpio[GPIO_TX_DISABLE]) + unsigned int drive; + + if (state & SFP_F_PRESENT) + /* If the module is present, drive the requested signals */ + drive = sfp->state_hw_drive; + else + /* Otherwise, let them float to the pull-ups */ + drive = 0; + + if (sfp->gpio[GPIO_TX_DISABLE]) { + if (drive & SFP_F_TX_DISABLE) gpiod_direction_output(sfp->gpio[GPIO_TX_DISABLE], state & SFP_F_TX_DISABLE); - if (state & SFP_F_RATE_SELECT) - gpiod_direction_output(sfp->gpio[GPIO_RATE_SELECT], - state & SFP_F_RATE_SELECT); - } else { - /* Otherwise, let them float to the pull-ups */ - if (sfp->gpio[GPIO_TX_DISABLE]) + else gpiod_direction_input(sfp->gpio[GPIO_TX_DISABLE]); - if (state & SFP_F_RATE_SELECT) - gpiod_direction_input(sfp->gpio[GPIO_RATE_SELECT]); + } + + if (sfp->gpio[GPIO_RS0]) { + if (drive & SFP_F_RS0) + gpiod_direction_output(sfp->gpio[GPIO_RS0], + state & SFP_F_RS0); + else + gpiod_direction_input(sfp->gpio[GPIO_RS0]); + } + + if (sfp->gpio[GPIO_RS1]) { + if (drive & SFP_F_RS1) + gpiod_direction_output(sfp->gpio[GPIO_RS1], + state & SFP_F_RS1); + else + gpiod_direction_input(sfp->gpio[GPIO_RS1]); } } @@ -675,16 +739,33 @@ static unsigned int sfp_soft_get_state(struct sfp *sfp) return state & sfp->state_soft_mask; } -static void sfp_soft_set_state(struct sfp *sfp, unsigned int state) +static void sfp_soft_set_state(struct sfp *sfp, unsigned int state, + unsigned int soft) { - u8 mask = SFP_STATUS_TX_DISABLE_FORCE; + u8 mask = 0; u8 val = 0; + if (soft & SFP_F_TX_DISABLE) + mask |= SFP_STATUS_TX_DISABLE_FORCE; if (state & SFP_F_TX_DISABLE) val |= SFP_STATUS_TX_DISABLE_FORCE; + if (soft & SFP_F_RS0) + mask |= SFP_STATUS_RS0_SELECT; + if (state & SFP_F_RS0) + val |= SFP_STATUS_RS0_SELECT; + + if (mask) + sfp_modify_u8(sfp, true, SFP_STATUS, mask, val); - sfp_modify_u8(sfp, true, SFP_STATUS, mask, val); + val = mask = 0; + if (soft & SFP_F_RS1) + mask |= SFP_EXT_STATUS_RS1_SELECT; + if (state & SFP_F_RS1) + val |= SFP_EXT_STATUS_RS1_SELECT; + + if (mask) + 
sfp_modify_u8(sfp, true, SFP_EXT_STATUS, mask, val); } static void sfp_soft_start_poll(struct sfp *sfp) @@ -692,27 +773,35 @@ static void sfp_soft_start_poll(struct sfp *sfp) const struct sfp_eeprom_id *id = &sfp->id; unsigned int mask = 0; - sfp->state_soft_mask = 0; if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_DISABLE) mask |= SFP_F_TX_DISABLE; if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_FAULT) mask |= SFP_F_TX_FAULT; if (id->ext.enhopts & SFP_ENHOPTS_SOFT_RX_LOS) mask |= SFP_F_LOS; + if (id->ext.enhopts & SFP_ENHOPTS_SOFT_RATE_SELECT) + mask |= sfp->rs_state_mask; + mutex_lock(&sfp->st_mutex); // Poll the soft state for hardware pins we want to ignore sfp->state_soft_mask = ~sfp->state_hw_mask & mask; if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) && !sfp->need_poll) mod_delayed_work(system_wq, &sfp->poll, poll_jiffies); + mutex_unlock(&sfp->st_mutex); } static void sfp_soft_stop_poll(struct sfp *sfp) { + mutex_lock(&sfp->st_mutex); sfp->state_soft_mask = 0; + mutex_unlock(&sfp->st_mutex); } +/* sfp_get_state() - must be called with st_mutex held, or in the + * initialisation path. + */ static unsigned int sfp_get_state(struct sfp *sfp) { unsigned int soft = sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT); @@ -725,13 +814,26 @@ static unsigned int sfp_get_state(struct sfp *sfp) return state; } +/* sfp_set_state() - must be called with st_mutex held, or in the + * initialisation path. + */ static void sfp_set_state(struct sfp *sfp, unsigned int state) { + unsigned int soft; + sfp->set_state(sfp, state); - if (state & SFP_F_PRESENT && - sfp->state_soft_mask & SFP_F_TX_DISABLE) - sfp_soft_set_state(sfp, state); + soft = sfp->state_soft_mask & SFP_F_OUTPUTS; + if (state & SFP_F_PRESENT && soft) + sfp_soft_set_state(sfp, state, soft); +} + +static void sfp_mod_state(struct sfp *sfp, unsigned int mask, unsigned int set) +{ + mutex_lock(&sfp->st_mutex); + sfp->state = (sfp->state & ~mask) | set; + sfp_set_state(sfp, sfp->state); + mutex_unlock(&sfp->st_mutex); } static unsigned int sfp_check(void *buf, size_t len) @@ -1537,16 +1639,14 @@ static void sfp_module_tx_disable(struct sfp *sfp) { dev_dbg(sfp->dev, "tx disable %u -> %u\n", sfp->state & SFP_F_TX_DISABLE ? 1 : 0, 1); - sfp->state |= SFP_F_TX_DISABLE; - sfp_set_state(sfp, sfp->state); + sfp_mod_state(sfp, SFP_F_TX_DISABLE, SFP_F_TX_DISABLE); } static void sfp_module_tx_enable(struct sfp *sfp) { dev_dbg(sfp->dev, "tx disable %u -> %u\n", sfp->state & SFP_F_TX_DISABLE ? 
1 : 0, 0); - sfp->state &= ~SFP_F_TX_DISABLE; - sfp_set_state(sfp, sfp->state); + sfp_mod_state(sfp, SFP_F_TX_DISABLE, 0); } #if IS_ENABLED(CONFIG_DEBUG_FS) @@ -1567,10 +1667,15 @@ static int sfp_debug_state_show(struct seq_file *s, void *data) sfp->sm_fault_retries); seq_printf(s, "PHY probe remaining retries: %d\n", sfp->sm_phy_retries); + seq_printf(s, "Signalling rate: %u kBd\n", sfp->rate_kbd); + seq_printf(s, "Rate select threshold: %u kBd\n", + sfp->rs_threshold_kbd); seq_printf(s, "moddef0: %d\n", !!(sfp->state & SFP_F_PRESENT)); seq_printf(s, "rx_los: %d\n", !!(sfp->state & SFP_F_LOS)); seq_printf(s, "tx_fault: %d\n", !!(sfp->state & SFP_F_TX_FAULT)); seq_printf(s, "tx_disable: %d\n", !!(sfp->state & SFP_F_TX_DISABLE)); + seq_printf(s, "rs0: %d\n", !!(sfp->state & SFP_F_RS0)); + seq_printf(s, "rs1: %d\n", !!(sfp->state & SFP_F_RS1)); return 0; } DEFINE_SHOW_ATTRIBUTE(sfp_debug_state); @@ -1599,16 +1704,18 @@ static void sfp_debugfs_exit(struct sfp *sfp) static void sfp_module_tx_fault_reset(struct sfp *sfp) { - unsigned int state = sfp->state; - - if (state & SFP_F_TX_DISABLE) - return; + unsigned int state; - sfp_set_state(sfp, state | SFP_F_TX_DISABLE); + mutex_lock(&sfp->st_mutex); + state = sfp->state; + if (!(state & SFP_F_TX_DISABLE)) { + sfp_set_state(sfp, state | SFP_F_TX_DISABLE); - udelay(T_RESET_US); + udelay(T_RESET_US); - sfp_set_state(sfp, state); + sfp_set_state(sfp, state); + } + mutex_unlock(&sfp->st_mutex); } /* SFP state machine */ @@ -1874,6 +1981,95 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable) return 0; } +static void sfp_module_parse_rate_select(struct sfp *sfp) +{ + u8 rate_id; + + sfp->rs_threshold_kbd = 0; + sfp->rs_state_mask = 0; + + if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_RATE_SELECT))) + /* No support for RateSelect */ + return; + + /* Default to INF-8074 RateSelect operation. The signalling threshold + * rate is not well specified, so always select "Full Bandwidth", but + * SFF-8079 reveals that it is understood that RS0 will be low for + * 1.0625Gb/s and high for 2.125Gb/s. Choose a value half-way between. + * This method exists prior to SFF-8472. + */ + sfp->rs_state_mask = SFP_F_RS0; + sfp->rs_threshold_kbd = 1594; + + /* Parse the rate identifier, which is complicated due to history: + * SFF-8472 rev 9.5 marks this field as reserved. + * SFF-8079 references SFF-8472 rev 9.5 and defines bit 0. SFF-8472 + * compliance is not required. + * SFF-8472 rev 10.2 defines this field using values 0..4 + * SFF-8472 rev 11.0 redefines this field with bit 0 for SFF-8079 + * and even values. + */ + rate_id = sfp->id.base.rate_id; + if (rate_id == 0) + /* Unspecified */ + return; + + /* SFF-8472 rev 10.0..10.4 did not account for SFF-8079 using bit 0, + * and allocated value 3 to SFF-8431 independent tx/rx rate select. + * Convert this to a SFF-8472 rev 11.0 rate identifier. + */ + if (sfp->id.ext.sff8472_compliance >= SFP_SFF8472_COMPLIANCE_REV10_2 && + sfp->id.ext.sff8472_compliance < SFP_SFF8472_COMPLIANCE_REV11_0 && + rate_id == 3) + rate_id = SFF_RID_8431; + + if (rate_id & SFF_RID_8079) { + /* SFF-8079 RateSelect / Application Select in conjunction with + * SFF-8472 rev 9.5. SFF-8079 defines rate_id as a bitfield + * with only bit 0 used, which takes precedence over SFF-8472. + */ + if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_APP_SELECT_SFF8079)) { + /* SFF-8079 Part 1 - rate selection between Fibre + * Channel 1.0625/2.125/4.25 Gbd modes. Note that RS0 + * is high for 2125, so we have to subtract 1 to + * include it. 
+ */ + sfp->rs_threshold_kbd = 2125 - 1; + sfp->rs_state_mask = SFP_F_RS0; + } + return; + } + + /* SFF-8472 rev 9.5 does not define the rate identifier */ + if (sfp->id.ext.sff8472_compliance <= SFP_SFF8472_COMPLIANCE_REV9_5) + return; + + /* SFF-8472 rev 11.0 defines rate_id as a numerical value which will + * always have bit 0 clear due to SFF-8079's bitfield usage of rate_id. + */ + switch (rate_id) { + case SFF_RID_8431_RX_ONLY: + sfp->rs_threshold_kbd = 4250; + sfp->rs_state_mask = SFP_F_RS0; + break; + + case SFF_RID_8431_TX_ONLY: + sfp->rs_threshold_kbd = 4250; + sfp->rs_state_mask = SFP_F_RS1; + break; + + case SFF_RID_8431: + sfp->rs_threshold_kbd = 4250; + sfp->rs_state_mask = SFP_F_RS0 | SFP_F_RS1; + break; + + case SFF_RID_10G8G: + sfp->rs_threshold_kbd = 9000; + sfp->rs_state_mask = SFP_F_RS0 | SFP_F_RS1; + break; + } +} + /* GPON modules based on Realtek RTL8672 and RTL9601C chips (e.g. V-SOL * V2801F, CarlitoxxPro CPGOS03-0490, Ubiquiti U-Fiber Instant, ...) do * not support multibyte reads from the EEPROM. Each multi-byte read @@ -1953,6 +2149,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) /* SFP module inserted - read I2C data */ struct sfp_eeprom_id id; bool cotsworks_sfbg; + unsigned int mask; bool cotsworks; u8 check; int ret; @@ -2092,14 +2289,19 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) if (ret < 0) return ret; - /* Initialise state bits to use from hardware */ - sfp->state_hw_mask = SFP_F_PRESENT; + sfp_module_parse_rate_select(sfp); + + mask = SFP_F_PRESENT; if (sfp->gpio[GPIO_TX_DISABLE]) - sfp->state_hw_mask |= SFP_F_TX_DISABLE; + mask |= SFP_F_TX_DISABLE; if (sfp->gpio[GPIO_TX_FAULT]) - sfp->state_hw_mask |= SFP_F_TX_FAULT; + mask |= SFP_F_TX_FAULT; if (sfp->gpio[GPIO_LOS]) - sfp->state_hw_mask |= SFP_F_LOS; + mask |= SFP_F_LOS; + if (sfp->gpio[GPIO_RS0]) + mask |= SFP_F_RS0; + if (sfp->gpio[GPIO_RS1]) + mask |= SFP_F_RS1; sfp->module_t_start_up = T_START_UP; sfp->module_t_wait = T_WAIT; @@ -2117,8 +2319,17 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) sfp->mdio_protocol = MDIO_I2C_NONE; sfp->quirk = sfp_lookup_quirk(&id); + + mutex_lock(&sfp->st_mutex); + /* Initialise state bits to use from hardware */ + sfp->state_hw_mask = mask; + + /* We want to drive the rate select pins that the module is using */ + sfp->state_hw_drive |= sfp->rs_state_mask; + if (sfp->quirk && sfp->quirk->fixup) sfp->quirk->fixup(sfp); + mutex_unlock(&sfp->st_mutex); return 0; } @@ -2132,6 +2343,7 @@ static void sfp_sm_mod_remove(struct sfp *sfp) memset(&sfp->id, 0, sizeof(sfp->id)); sfp->module_power_mW = 0; + sfp->state_hw_drive = SFP_F_TX_DISABLE; sfp->have_a2 = false; dev_info(sfp->dev, "module removed\n"); @@ -2452,10 +2664,8 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event) } } -static void sfp_sm_event(struct sfp *sfp, unsigned int event) +static void __sfp_sm_event(struct sfp *sfp, unsigned int event) { - mutex_lock(&sfp->sm_mutex); - dev_dbg(sfp->dev, "SM: enter %s:%s:%s event %s\n", mod_state_to_str(sfp->sm_mod_state), dev_state_to_str(sfp->sm_dev_state), @@ -2470,7 +2680,12 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) mod_state_to_str(sfp->sm_mod_state), dev_state_to_str(sfp->sm_dev_state), sm_state_to_str(sfp->sm_state)); +} +static void sfp_sm_event(struct sfp *sfp, unsigned int event) +{ + mutex_lock(&sfp->sm_mutex); + __sfp_sm_event(sfp, event); mutex_unlock(&sfp->sm_mutex); } @@ -2494,6 +2709,20 @@ static void sfp_stop(struct sfp *sfp) sfp_sm_event(sfp, SFP_E_DEV_DOWN); } +static void 
sfp_set_signal_rate(struct sfp *sfp, unsigned int rate_kbd) +{ + unsigned int set; + + sfp->rate_kbd = rate_kbd; + + if (rate_kbd > sfp->rs_threshold_kbd) + set = sfp->rs_state_mask; + else + set = 0; + + sfp_mod_state(sfp, SFP_F_RS0 | SFP_F_RS1, set); +} + static int sfp_module_info(struct sfp *sfp, struct ethtool_modinfo *modinfo) { /* locking... and check module is present */ @@ -2578,6 +2807,7 @@ static const struct sfp_socket_ops sfp_module_ops = { .detach = sfp_detach, .start = sfp_start, .stop = sfp_stop, + .set_signal_rate = sfp_set_signal_rate, .module_info = sfp_module_info, .module_eeprom = sfp_module_eeprom, .module_eeprom_by_page = sfp_module_eeprom_by_page, @@ -2596,6 +2826,7 @@ static void sfp_check_state(struct sfp *sfp) { unsigned int state, i, changed; + rtnl_lock(); mutex_lock(&sfp->st_mutex); state = sfp_get_state(sfp); changed = state ^ sfp->state; @@ -2609,23 +2840,24 @@ static void sfp_check_state(struct sfp *sfp) dev_dbg(sfp->dev, "%s %u -> %u\n", gpio_names[i], !!(sfp->state & BIT(i)), !!(state & BIT(i))); - state |= sfp->state & (SFP_F_TX_DISABLE | SFP_F_RATE_SELECT); + state |= sfp->state & SFP_F_OUTPUTS; sfp->state = state; + mutex_unlock(&sfp->st_mutex); - rtnl_lock(); + mutex_lock(&sfp->sm_mutex); if (changed & SFP_F_PRESENT) - sfp_sm_event(sfp, state & SFP_F_PRESENT ? - SFP_E_INSERT : SFP_E_REMOVE); + __sfp_sm_event(sfp, state & SFP_F_PRESENT ? + SFP_E_INSERT : SFP_E_REMOVE); if (changed & SFP_F_TX_FAULT) - sfp_sm_event(sfp, state & SFP_F_TX_FAULT ? - SFP_E_TX_FAULT : SFP_E_TX_CLEAR); + __sfp_sm_event(sfp, state & SFP_F_TX_FAULT ? + SFP_E_TX_FAULT : SFP_E_TX_CLEAR); if (changed & SFP_F_LOS) - sfp_sm_event(sfp, state & SFP_F_LOS ? - SFP_E_LOS_HIGH : SFP_E_LOS_LOW); + __sfp_sm_event(sfp, state & SFP_F_LOS ? + SFP_E_LOS_HIGH : SFP_E_LOS_LOW); + mutex_unlock(&sfp->sm_mutex); rtnl_unlock(); - mutex_unlock(&sfp->st_mutex); } static irqreturn_t sfp_irq(int irq, void *data) @@ -2643,6 +2875,8 @@ static void sfp_poll(struct work_struct *work) sfp_check_state(sfp); + // st_mutex doesn't need to be held here for state_soft_mask, + // it's unimportant if we race while reading this. 
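sfp_set_signal_rate() above drives the module's rate-select pins high whenever the signalling rate handed down by phylink exceeds the threshold parsed from the module EEPROM. A minimal sketch of that decision, assuming an SFF-8431 style module with the 4250 kBd threshold and both RS0 and RS1 implemented:

/* Return the RS0/RS1 bits to drive high for a given signalling rate,
 * mirroring the comparison in sfp_set_signal_rate() above.  For example,
 * 10GBASE-R (10313 kBd) selects the high-rate setting while 1000BASE-X
 * (1250 kBd) leaves both pins low.
 */
static unsigned int sfp_rs_bits_sketch(unsigned int rate_kbd)
{
	const unsigned int rs_threshold_kbd = 4250;	/* SFF_RID_8431 */
	const unsigned int rs_state_mask = SFP_F_RS0 | SFP_F_RS1;

	return rate_kbd > rs_threshold_kbd ? rs_state_mask : 0;
}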
if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) || sfp->need_poll) mod_delayed_work(system_wq, &sfp->poll, poll_jiffies); @@ -2748,6 +2982,7 @@ static int sfp_probe(struct platform_device *pdev) } sfp->state_hw_mask = SFP_F_PRESENT; + sfp->state_hw_drive = SFP_F_TX_DISABLE; sfp->get_state = sfp_gpio_get_state; sfp->set_state = sfp_gpio_set_state; @@ -2773,9 +3008,9 @@ */ sfp->state = sfp_get_state(sfp) | SFP_F_TX_DISABLE; - if (sfp->gpio[GPIO_RATE_SELECT] && - gpiod_get_value_cansleep(sfp->gpio[GPIO_RATE_SELECT])) - sfp->state |= SFP_F_RATE_SELECT; + if (sfp->gpio[GPIO_RS0] && + gpiod_get_value_cansleep(sfp->gpio[GPIO_RS0])) + sfp->state |= SFP_F_RS0; sfp_set_state(sfp, sfp->state); sfp_module_tx_disable(sfp); if (sfp->state & SFP_F_PRESENT) { diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h index 6cf1643214d3e..c7cb50d100999 100644 --- a/drivers/net/phy/sfp.h +++ b/drivers/net/phy/sfp.h @@ -19,6 +19,7 @@ struct sfp_socket_ops { void (*detach)(struct sfp *sfp); void (*start)(struct sfp *sfp); void (*stop)(struct sfp *sfp); + void (*set_signal_rate)(struct sfp *sfp, unsigned int rate_kbd); int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); int (*module_eeprom)(struct sfp *sfp, struct ethtool_eeprom *ee, u8 *data); diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig index ac4d162d9455d..2fbcae31fc023 100644 --- a/drivers/net/ppp/Kconfig +++ b/drivers/net/ppp/Kconfig @@ -129,6 +129,40 @@ config PPPOE which contains instruction on how to use this driver (under the heading "Kernel mode PPPoE"). +choice + prompt "Number of PPPoE hash bits" + default PPPOE_HASH_BITS_4 + depends on PPPOE + help + Select the number of bits used for hashing PPPoE interfaces. + + Larger sizes reduce the risk of hash collisions at the cost + of slightly increased memory usage. + + This hash table is kept per outer Ethernet interface.
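As a worked example of the default choice: 4 hash bits give PPPOE_HASH_SIZE = (1 << 4) = 16 buckets and PPPOE_HASH_MASK = 0xF, per the definitions in drivers/net/ppp/pppoe.c below, while the 8-bit option grows each per-device table to 256 buckets.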
+ +config PPPOE_HASH_BITS_1 + bool "1 bit (2 buckets)" + +config PPPOE_HASH_BITS_2 + bool "2 bits (4 buckets)" + +config PPPOE_HASH_BITS_4 + bool "4 bits (16 buckets)" + +config PPPOE_HASH_BITS_8 + bool "8 bits (256 buckets)" + +endchoice + +config PPPOE_HASH_BITS + int + default 1 if PPPOE_HASH_BITS_1 + default 2 if PPPOE_HASH_BITS_2 + default 4 if PPPOE_HASH_BITS_4 + default 8 if PPPOE_HASH_BITS_8 + default 4 + config PPTP tristate "PPP over IPv4 (PPTP)" depends on PPP && NET_IPGRE_DEMUX diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index ce2cbb5903d7b..3b79c603b9363 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -80,7 +80,7 @@ #include -#define PPPOE_HASH_BITS 4 +#define PPPOE_HASH_BITS CONFIG_PPPOE_HASH_BITS #define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS) #define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index d10606f257c43..555b0b1e9a789 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1629,6 +1629,7 @@ static int team_init(struct net_device *dev) team->dev = dev; team_set_no_mode(team); + team->notifier_ctx = false; team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats); if (!team->pcpu_stats) @@ -3022,7 +3023,11 @@ static int team_device_event(struct notifier_block *unused, team_del_slave(port->team->dev, dev); break; case NETDEV_FEAT_CHANGE: - team_compute_features(port->team); + if (!port->team->notifier_ctx) { + port->team->notifier_ctx = true; + team_compute_features(port->team); + port->team->notifier_ctx = false; + } break; case NETDEV_PRECHANGEMTU: /* Forbid to change mtu of underlaying device */ diff --git a/drivers/net/tun.c b/drivers/net/tun.c index d4d0a41a905a7..d75456adc62ac 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1977,6 +1977,14 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, int queue_len; spin_lock_bh(&queue->lock); + + if (unlikely(tfile->detached)) { + spin_unlock_bh(&queue->lock); + rcu_read_unlock(); + err = -EBUSY; + goto free_skb; + } + __skb_queue_tail(queue, skb); queue_len = skb_queue_len(queue); spin_unlock(&queue->lock); @@ -2512,6 +2520,13 @@ static int tun_xdp_one(struct tun_struct *tun, if (tfile->napi_enabled) { queue = &tfile->sk.sk_write_queue; spin_lock(&queue->lock); + + if (unlikely(tfile->detached)) { + spin_unlock(&queue->lock); + kfree_skb(skb); + return -EBUSY; + } + __skb_queue_tail(queue, skb); spin_unlock(&queue->lock); ret = 1; diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 6ce8f4f0c70e8..db05622f1f703 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -181,9 +181,12 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx) else min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32); - max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)); - if (max == 0) + if (le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) == 0) max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */ + else + max = clamp_t(u32, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize), + USB_CDC_NCM_NTB_MIN_OUT_SIZE, + CDC_NCM_NTB_MAX_SIZE_TX); /* some devices set dwNtbOutMaxSize too low for the above default */ min = min(min, max); @@ -1244,6 +1247,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) * further. */ if (skb_out == NULL) { + /* If even the smallest allocation fails, abort.
*/ + if (ctx->tx_curr_size == USB_CDC_NCM_NTB_MIN_OUT_SIZE) + goto alloc_failed; ctx->tx_low_mem_max_cnt = min(ctx->tx_low_mem_max_cnt + 1, (unsigned)CDC_NCM_LOW_MEM_MAX_CNT); ctx->tx_low_mem_val = ctx->tx_low_mem_max_cnt; @@ -1262,13 +1268,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) skb_out = alloc_skb(ctx->tx_curr_size, GFP_ATOMIC); /* No allocation possible so we will abort */ - if (skb_out == NULL) { - if (skb != NULL) { - dev_kfree_skb_any(skb); - dev->net->stats.tx_dropped++; - } - goto exit_no_skb; - } + if (!skb_out) + goto alloc_failed; ctx->tx_low_mem_val--; } if (ctx->is_ndp16) { @@ -1461,6 +1462,11 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) return skb_out; +alloc_failed: + if (skb) { + dev_kfree_skb_any(skb); + dev->net->stats.tx_dropped++; + } exit_no_skb: /* Start timer, if there is a remaining non-empty skb */ if (ctx->tx_curr_skb != NULL && n > 0) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 29eccc8ff41fd..5a7f7a76b920a 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1977,6 +1977,38 @@ static int virtnet_poll(struct napi_struct *napi, int budget) return received; } +static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index) +{ + virtnet_napi_tx_disable(&vi->sq[qp_index].napi); + napi_disable(&vi->rq[qp_index].napi); + xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); +} + +static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) +{ + struct net_device *dev = vi->dev; + int err; + + err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, + vi->rq[qp_index].napi.napi_id); + if (err < 0) + return err; + + err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err < 0) + goto err_xdp_reg_mem_model; + + virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); + virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); + + return 0; + +err_xdp_reg_mem_model: + xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); + return err; +} + static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); @@ -1990,22 +2022,20 @@ static int virtnet_open(struct net_device *dev) if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); - err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id); + err = virtnet_enable_queue_pair(vi, i); if (err < 0) - return err; - - err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq, - MEM_TYPE_PAGE_SHARED, NULL); - if (err < 0) { - xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); - return err; - } - - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); - virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); + goto err_enable_qp; } return 0; + +err_enable_qp: + disable_delayed_refill(vi); + cancel_delayed_work_sync(&vi->refill); + + for (i--; i >= 0; i--) + virtnet_disable_queue_pair(vi, i); + return err; } static int virtnet_poll_tx(struct napi_struct *napi, int budget) @@ -2414,11 +2444,8 @@ static int virtnet_close(struct net_device *dev) /* Make sure refill_work doesn't re-enable napi! 
*/ cancel_delayed_work_sync(&vi->refill); - for (i = 0; i < vi->max_queue_pairs; i++) { - virtnet_napi_tx_disable(&vi->sq[i].napi); - napi_disable(&vi->rq[i].napi); - xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); - } + for (i = 0; i < vi->max_queue_pairs; i++) + virtnet_disable_queue_pair(vi, i); return 0; } diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h index 9fc7c088a539e..67b4bac048e58 100644 --- a/drivers/net/wireless/broadcom/b43/b43.h +++ b/drivers/net/wireless/broadcom/b43/b43.h @@ -651,7 +651,7 @@ struct b43_iv { union { __be16 d16; __be32 d32; - } data __packed; + } __packed data; } __packed; diff --git a/drivers/net/wireless/broadcom/b43legacy/b43legacy.h b/drivers/net/wireless/broadcom/b43legacy/b43legacy.h index 6b0cec467938f..f49365d14619f 100644 --- a/drivers/net/wireless/broadcom/b43legacy/b43legacy.h +++ b/drivers/net/wireless/broadcom/b43legacy/b43legacy.h @@ -379,7 +379,7 @@ struct b43legacy_iv { union { __be16 d16; __be32 d32; - } data __packed; + } __packed data; } __packed; #define B43legacy_PHYMODE(phytype) (1 << (phytype)) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index ff710b0b5071a..00679a990e3da 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -1039,6 +1039,11 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, struct brcmf_sdio_dev *sdiodev; struct brcmf_bus *bus_if; + if (!id) { + dev_err(&func->dev, "Error no sdio_device_id passed for %x:%x\n", func->vendor, func->device); + return -ENODEV; + } + brcmf_dbg(SDIO, "Enter\n"); brcmf_dbg(SDIO, "Class=%x\n", func->class); brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 59f3e9c5e1390..80220685f5e45 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -2394,6 +2394,9 @@ static void brcmf_pcie_debugfs_create(struct device *dev) } #endif +/* Forward declaration for pci_match_id() call */ +static const struct pci_device_id brcmf_pcie_devid_table[]; + static int brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -2404,6 +2407,14 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct brcmf_core *core; struct brcmf_bus *bus; + if (!id) { + id = pci_match_id(brcmf_pcie_devid_table, pdev); + if (!id) { + pci_err(pdev, "Error could not find pci_device_id for %x:%x\n", pdev->vendor, pdev->device); + return -ENODEV; + } + } + brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device); ret = -ENOMEM; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 246843aeb6964..2178675ae1a44 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1331,6 +1331,9 @@ brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo) brcmf_usb_detach(devinfo); } +/* Forward declaration for usb_match_id() call */ +static const struct usb_device_id brcmf_usb_devid_table[]; + static int brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -1342,6 +1345,14 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) u32 num_of_eps; u8 endpoint_num, ep; + if (!id) 
{ + id = usb_match_id(intf, brcmf_usb_devid_table); + if (!id) { + dev_err(&intf->dev, "Error could not find matching usb_device_id\n"); + return -ENODEV; + } + } + brcmf_dbg(USB, "Enter 0x%04x:0x%04x\n", id->idVendor, id->idProduct); devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 5f4a51310add0..cb9181f055011 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -38,7 +38,7 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = { }, { .ident = "ASUS", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."), + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), }, }, {} diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index d9faaae01abd2..55219974b92bf 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1664,14 +1664,10 @@ static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id, } static void * -iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, - struct iwl_dump_ini_region_data *reg_data, +iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id, struct iwl_fw_ini_monitor_dump *data, const struct iwl_fw_mon_regs *addrs) { - struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; - u32 alloc_id = le32_to_cpu(reg->dram_alloc_id); - if (!iwl_trans_grab_nic_access(fwrt->trans)) { IWL_ERR(fwrt, "Failed to get monitor header\n"); return NULL; @@ -1702,8 +1698,10 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + u32 alloc_id = le32_to_cpu(reg->dram_alloc_id); - return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, + return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump, &fwrt->trans->cfg->mon_dram_regs); } @@ -1713,8 +1711,10 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id); - return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, + return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump, &fwrt->trans->cfg->mon_smem_regs); } @@ -1725,7 +1725,10 @@ iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt, { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; - return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, + return iwl_dump_ini_mon_fill_header(fwrt, + /* no offset calculation later */ + IWL_FW_INI_ALLOCATION_ID_DBGC1, + mon_dump, &fwrt->trans->cfg->mon_dbgi_regs); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index 3963a0d4ed042..652a603c4500e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -526,6 +526,11 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + return PTR_ERR_OR_ZERO(sta); + } + if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based)) 
FTM_PUT_FLAG(PMF); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index b35c96cf7ad24..205c09bc98634 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1091,7 +1091,7 @@ static const struct dmi_system_id dmi_tas_approved_list[] = { }, { .ident = "LENOVO", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), }, }, { .ident = "DELL", @@ -1727,8 +1727,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) iwl_mvm_tas_init(mvm); iwl_mvm_leds_sync(mvm); - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) { + if (iwl_rfi_supported(mvm)) { if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE) iwl_rfi_send_config_cmd(mvm, NULL); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c index eb828de40a3c6..3814915cb1a67 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c @@ -123,11 +123,13 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (mvmvif->link[i]->phy_ctxt) count++; - /* FIXME: IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM should be - * defined per HW - */ - if (count >= IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM) - return -EINVAL; + if (vif->type == NL80211_IFTYPE_AP) { + if (count > mvm->fw->ucode_capa.num_beacons) + return -EOPNOTSUPP; + /* this should be per HW or such */ + } else if (count >= IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM) { + return -EOPNOTSUPP; + } } /* Catch early if driver tries to activate or deactivate a link diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 0f01b62357c6f..17f788a5ff6ba 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -3607,7 +3607,8 @@ static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - unsigned int i; + struct ieee80211_link_sta *link_sta; + unsigned int link_id; /* Beacon interval check - firmware will crash if the beacon * interval is less than 16. We can't avoid connecting at all, @@ -3616,14 +3617,11 @@ static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm, * wpa_s will blocklist the AP... 
*/ - for_each_set_bit(i, (unsigned long *)&sta->valid_links, - IEEE80211_MLD_MAX_NUM_LINKS) { - struct ieee80211_link_sta *link_sta = - link_sta_dereference_protected(sta, i); + for_each_sta_active_link(vif, sta, link_sta, link_id) { struct ieee80211_bss_conf *link_conf = - link_conf_dereference_protected(vif, i); + link_conf_dereference_protected(vif, link_id); - if (!link_conf || !link_sta) + if (!link_conf) continue; if (link_conf->beacon_int < IWL_MVM_MIN_BEACON_INTERVAL_TU) { @@ -3645,24 +3643,23 @@ static void iwl_mvm_vif_set_he_support(struct ieee80211_hw *hw, bool is_sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - unsigned int i; + struct ieee80211_link_sta *link_sta; + unsigned int link_id; - for_each_set_bit(i, (unsigned long *)&sta->valid_links, - IEEE80211_MLD_MAX_NUM_LINKS) { - struct ieee80211_link_sta *link_sta = - link_sta_dereference_protected(sta, i); + for_each_sta_active_link(vif, sta, link_sta, link_id) { struct ieee80211_bss_conf *link_conf = - link_conf_dereference_protected(vif, i); + link_conf_dereference_protected(vif, link_id); - if (!link_conf || !link_sta || !mvmvif->link[i]) + if (!link_conf || !mvmvif->link[link_id]) continue; link_conf->he_support = link_sta->he_cap.has_he; if (is_sta) { - mvmvif->link[i]->he_ru_2mhz_block = false; + mvmvif->link[link_id]->he_ru_2mhz_block = false; if (link_sta->he_cap.has_he) - iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif, i, + iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif, + link_id, link_conf); } } @@ -3675,6 +3672,7 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm, struct iwl_mvm_sta_state_ops *callbacks) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_link_sta *link_sta; unsigned int i; int ret; @@ -3699,15 +3697,9 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm, NL80211_TDLS_SETUP); } - for (i = 0; i < ARRAY_SIZE(sta->link); i++) { - struct ieee80211_link_sta *link_sta; - - link_sta = link_sta_dereference_protected(sta, i); - if (!link_sta) - continue; - + for_each_sta_active_link(vif, sta, link_sta, i) link_sta->agg.max_rc_amsdu_len = 1; - } + ieee80211_sta_recalc_aggregates(sta); if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) @@ -3725,7 +3717,8 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - unsigned int i; + struct ieee80211_link_sta *link_sta; + unsigned int link_id; lockdep_assert_held(&mvm->mutex); @@ -3751,14 +3744,13 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw, if (!mvm->mld_api_is_used) goto out; - for_each_set_bit(i, (unsigned long *)&sta->valid_links, - IEEE80211_MLD_MAX_NUM_LINKS) { + for_each_sta_active_link(vif, sta, link_sta, link_id) { struct ieee80211_bss_conf *link_conf = - link_conf_dereference_protected(vif, i); + link_conf_dereference_protected(vif, link_id); if (WARN_ON(!link_conf)) return -EINVAL; - if (!mvmvif->link[i]) + if (!mvmvif->link[link_id]) continue; iwl_mvm_link_changed(mvm, vif, link_conf, @@ -3889,6 +3881,9 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw, * from the AP now. 
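The mac80211.c hunks above replace open-coded walks of sta->valid_links with for_each_sta_active_link(), which only visits links that are currently active and never yields a NULL link_sta. A hedged sketch of the resulting iteration shape (the function itself is illustrative, not part of the patch):

	#include <net/mac80211.h>

	static void example_walk_active_links(struct ieee80211_vif *vif,
					      struct ieee80211_sta *sta)
	{
		struct ieee80211_link_sta *link_sta;
		unsigned int link_id;

		for_each_sta_active_link(vif, sta, link_sta, link_id) {
			struct ieee80211_bss_conf *link_conf =
				link_conf_dereference_protected(vif, link_id);

			/* link_sta is guaranteed non-NULL here; only link_conf needs a check */
			if (!link_conf)
				continue;

			/* per-link work goes here, e.g. inspecting link_conf->beacon_int */
		}
	}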
*/ iwl_mvm_reset_cca_40mhz_workaround(mvm, vif); + + /* Also free dup data just in case any assertions below fail */ + kfree(mvm_sta->dup_data); } mutex_lock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index fbc2d5ed10069..7fb66c5709596 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -906,11 +906,12 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw, n_active++; } - if (vif->type == NL80211_IFTYPE_AP && - n_active > mvm->fw->ucode_capa.num_beacons) - return -EOPNOTSUPP; - else if (n_active > 1) + if (vif->type == NL80211_IFTYPE_AP) { + if (n_active > mvm->fw->ucode_capa.num_beacons) + return -EOPNOTSUPP; + } else if (n_active > 1) { return -EOPNOTSUPP; + } } for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index 0bfdf44627550..85a4ce8449ade 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -667,15 +667,15 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ret = iwl_mvm_mld_alloc_sta_links(mvm, vif, sta); if (ret) return ret; - } - spin_lock_init(&mvm_sta->lock); + spin_lock_init(&mvm_sta->lock); - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta); - else ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_MVM_INVALID_STA, STATION_TYPE_PEER); + } else { + ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta); + } + if (ret) goto err; @@ -728,7 +728,7 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_link_sta *link_sta; unsigned int link_id; - int ret = 0; + int ret = -EINVAL; lockdep_assert_held(&mvm->mutex); @@ -791,8 +791,6 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, lockdep_assert_held(&mvm->mutex); - kfree(mvm_sta->dup_data); - /* flush its queues here since we are freeing mvm_sta */ for_each_sta_active_link(vif, sta, link_sta, link_id) { struct iwl_mvm_link_sta *mvm_link_sta = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 6e7470d3a826d..9e5008e0e47f5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -2347,6 +2347,7 @@ int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm, u32 old_sta_mask, u32 new_sta_mask); +bool iwl_rfi_supported(struct iwl_mvm *mvm); int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table); struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 6d18a1fd649b9..fdf60afb0f3f2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -445,6 +445,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data; n_channels = __le32_to_cpu(mcc_resp->n_channels); + if (iwl_rx_packet_payload_len(pkt) != + struct_size(mcc_resp, channels, n_channels)) { + resp_cp = ERR_PTR(-EINVAL); + goto exit; + } resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels * sizeof(__le32); resp_cp = kmemdup(mcc_resp, resp_len, 
GFP_KERNEL); @@ -456,6 +461,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data; n_channels = __le32_to_cpu(mcc_resp_v3->n_channels); + if (iwl_rx_packet_payload_len(pkt) != + struct_size(mcc_resp_v3, channels, n_channels)) { + resp_cp = ERR_PTR(-EINVAL); + goto exit; + } resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels * sizeof(__le32); resp_cp = kzalloc(resp_len, GFP_KERNEL); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c index bb77bc9aa8218..2ecd32bed752f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2020 - 2021 Intel Corporation + * Copyright (C) 2020 - 2022 Intel Corporation */ #include "mvm.h" @@ -70,6 +70,16 @@ static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = { PHY_BAND_6, PHY_BAND_6,}}, }; +bool iwl_rfi_supported(struct iwl_mvm *mvm) +{ + /* The feature depends on a platform bugfix, so for now + * it's always disabled. + * When the platform support detection is implemented we should + * check FW TLV and platform support instead. + */ + return false; +} + int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table) { int ret; @@ -81,7 +91,7 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t .len[0] = sizeof(cmd), }; - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) + if (!iwl_rfi_supported(mvm)) return -EOPNOTSUPP; lockdep_assert_held(&mvm->mutex); @@ -113,7 +123,7 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm) .flags = CMD_WANT_SKB, }; - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) + if (!iwl_rfi_supported(mvm)) return ERR_PTR(-EOPNOTSUPP); mutex_lock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index a4c1e3bf4ff1d..23266d0c9ce48 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -2691,6 +2691,8 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, return; lq_sta = mvm_sta; + + spin_lock(&lq_sta->pers.lock); iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags, info->band, &info->control.rates[0]); info->control.rates[0].count = 1; @@ -2705,6 +2707,7 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band, &txrc->reported_rate); } + spin_unlock(&lq_sta->pers.lock); } static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index e1d02c260e69d..6226e4e54a51d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -691,6 +691,11 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t) rcu_read_lock(); sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + goto out; + } + mvmsta = iwl_mvm_sta_from_mac80211(sta); /* SN is set to the last expired frame + 1 */ @@ -712,6 +717,8 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t) entries[index].e.reorder_time + 1 + RX_REORDER_BUF_TIMEOUT_MQ); } + +out: spin_unlock(&buf->lock); } @@ -2512,7 +2519,7 @@ void 
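Both nvm.c hunks reject a firmware MCC response whose payload length does not match the flexible-array layout implied by n_channels, closing an out-of-bounds read. A generic sketch of that check with struct_size() (the struct and field names here are illustrative, not the driver's):

	#include <linux/overflow.h>
	#include <linux/types.h>
	#include <linux/errno.h>

	struct example_resp {
		__le32 n_channels;
		__le32 channels[];	/* n_channels entries follow the header */
	};

	static int example_validate_resp(const struct example_resp *resp,
					 size_t payload_len)
	{
		u32 n_channels = le32_to_cpu(resp->n_channels);

		/* struct_size() = sizeof(header) + n_channels * sizeof(element), overflow-checked */
		if (payload_len != struct_size(resp, channels, n_channels))
			return -EINVAL;

		return 0;
	}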
iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); /* Unblock BCAST / MCAST station */ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); - cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); + cancel_delayed_work(&mvm->cs_tx_unblock_dwork); } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 5469d634e2899..05a54a69c1357 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -281,7 +281,7 @@ static void iwl_mvm_rx_agg_session_expired(struct timer_list *t) * A-MDPU and hence the timer continues to run. Then, the * timer expires and sta is NULL. */ - if (!sta) + if (IS_ERR_OR_NULL(sta)) goto unlock; mvm_sta = iwl_mvm_sta_from_mac80211(sta); @@ -2089,9 +2089,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - if (iwl_mvm_has_new_rx_api(mvm)) - kfree(mvm_sta->dup_data); - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); if (ret) return ret; @@ -3785,6 +3782,9 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, u8 sta_id = mvmvif->deflink.ap_sta_id; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) + return NULL; + return sta->addr; } @@ -3822,6 +3822,11 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) { addr = iwl_mvm_get_mac_addr(mvm, vif, sta); + if (!addr) { + IWL_ERR(mvm, "Failed to find mac address\n"); + return -EINVAL; + } + /* get phase 1 key from mac80211 */ ieee80211_get_key_rx_seq(keyconf, 0, &seq); ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 10d7178f10718..00719e1304386 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1875,7 +1875,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - if (WARN_ON_ONCE(!sta || !sta->wme)) { + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) { rcu_read_unlock(); return; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h index a5ec0f6313850..fabf637bdf7f9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h @@ -173,7 +173,7 @@ enum { #define MT_TXS5_MPDU_TX_CNT GENMASK(31, 23) #define MT_TXS6_MPDU_FAIL_CNT GENMASK(31, 23) - +#define MT_TXS7_MPDU_RETRY_BYTE GENMASK(22, 0) #define MT_TXS7_MPDU_RETRY_CNT GENMASK(31, 23) /* RXD DW0 */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c index ee0fbfcd07d64..d39a3cc5e381f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c @@ -608,7 +608,8 @@ bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid, /* PPDU based reporting */ if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) { stats->tx_bytes += - le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE); + le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE) - + le32_get_bits(txs_data[7], MT_TXS7_MPDU_RETRY_BYTE); stats->tx_packets += le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_CNT); stats->tx_failed += diff --git 
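The rxmq.c, sta.c and tx.c hunks above all switch from a plain NULL test to IS_ERR_OR_NULL() when looking up fw_id_to_mac_id[], because those RCU-protected slots can transiently hold an ERR_PTR as well as NULL. The lookup pattern, sketched (function name and return code are placeholders; the surrounding driver types come from mvm.h):

	static int example_with_sta(struct iwl_mvm *mvm, u8 sta_id)
	{
		struct ieee80211_sta *sta;

		rcu_read_lock();
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
		if (IS_ERR_OR_NULL(sta)) {
			/* slot may be NULL or an ERR_PTR while the station is being added/removed */
			rcu_read_unlock();
			return -ENOENT;
		}

		/* sta may only be used while the RCU read lock is held */
		rcu_read_unlock();
		return 0;
	}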
a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index 130eb7b4fd914..39a4a73ef8e6a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -1088,7 +1088,7 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi, else if (beacon && mvif->beacon_rates_idx) idx = mvif->beacon_rates_idx; - txwi[6] |= FIELD_PREP(MT_TXD6_TX_RATE, idx); + txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx)); txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); } } diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 2c756640f59d3..376b4b72eb801 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1808,6 +1808,7 @@ struct rtl8xxxu_priv { u32 rege9c; u32 regeb4; u32 regebc; + u32 regrcr; int next_mbox; int nr_out_eps; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 37df47c27a69f..1eb0d56426239 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -4213,6 +4213,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL | RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC; rtl8xxxu_write32(priv, REG_RCR, val32); + priv->regrcr = val32; if (fops->init_reg_rxfltmap) { /* Accept all data frames */ @@ -6685,7 +6686,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw, unsigned int *total_flags, u64 multicast) { struct rtl8xxxu_priv *priv = hw->priv; - u32 rcr = rtl8xxxu_read32(priv, REG_RCR); + u32 rcr = priv->regrcr; dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n", __func__, changed_flags, *total_flags); @@ -6731,6 +6732,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw, */ rtl8xxxu_write32(priv, REG_RCR, rcr); + priv->regrcr = rcr; *total_flags &= (FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL | FIF_OTHER_BSS | FIF_PSPOLL | diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index 02cd19ee6e4c4..e82e40cac60a1 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -920,7 +920,7 @@ static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw, struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; if (changed & IEEE80211_RC_BW_CHANGED) - rtw_update_sta_info(rtwdev, si, true); + ieee80211_queue_work(rtwdev->hw, &si->rc_work); } const struct ieee80211_ops rtw_ops = { diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 5bf6b45815578..d30a191c9291d 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -319,6 +319,17 @@ static u8 rtw_acquire_macid(struct rtw_dev *rtwdev) return mac_id; } +static void rtw_sta_rc_work(struct work_struct *work) +{ + struct rtw_sta_info *si = container_of(work, struct rtw_sta_info, + rc_work); + struct rtw_dev *rtwdev = si->rtwdev; + + mutex_lock(&rtwdev->mutex); + rtw_update_sta_info(rtwdev, si, true); + mutex_unlock(&rtwdev->mutex); +} + int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, struct ieee80211_vif *vif) { @@ -329,12 +340,14 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, if (si->mac_id >= 
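The mt7996 one-liner above fixes an endianness bug: the txwi[] descriptor words are __le32, so the CPU-order value built by FIELD_PREP() has to be converted with cpu_to_le32() before being OR-ed in. A small sketch with an illustrative field mask (not the real descriptor layout):

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define EXAMPLE_TXD_TX_RATE	GENMASK(21, 16)	/* illustrative mask */

	static void example_set_tx_rate(__le32 *txwi, u32 idx)
	{
		/* FIELD_PREP() returns a CPU-order value; convert before writing a __le32 word */
		txwi[6] |= cpu_to_le32(FIELD_PREP(EXAMPLE_TXD_TX_RATE, idx));
	}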
RTW_MAX_MAC_ID_NUM) return -ENOSPC; + si->rtwdev = rtwdev; si->sta = sta; si->vif = vif; si->init_ra_lv = 1; ewma_rssi_init(&si->avg_rssi); for (i = 0; i < ARRAY_SIZE(sta->txq); i++) rtw_txq_init(rtwdev, sta->txq[i]); + INIT_WORK(&si->rc_work, rtw_sta_rc_work); rtw_update_sta_info(rtwdev, si, true); rtw_fw_media_status_report(rtwdev, si->mac_id, true); @@ -353,6 +366,8 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; int i; + cancel_work_sync(&si->rc_work); + rtw_release_macid(rtwdev, si->mac_id); if (fw_exist) rtw_fw_media_status_report(rtwdev, si->mac_id, false); diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index a563285e90ede..9e841f6991a9a 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -743,6 +743,7 @@ struct rtw_txq { DECLARE_EWMA(rssi, 10, 16); struct rtw_sta_info { + struct rtw_dev *rtwdev; struct ieee80211_sta *sta; struct ieee80211_vif *vif; @@ -767,6 +768,8 @@ struct rtw_sta_info { bool use_cfg_mask; struct cfg80211_bitrate_mask *mask; + + struct work_struct rc_work; }; enum rtw_bfee_role { diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c index af0459a79899f..06fce7c3addaa 100644 --- a/drivers/net/wireless/realtek/rtw88/sdio.c +++ b/drivers/net/wireless/realtek/rtw88/sdio.c @@ -87,11 +87,6 @@ static void rtw_sdio_writew(struct rtw_dev *rtwdev, u16 val, u32 addr, u8 buf[2]; int i; - if (rtw_sdio_use_memcpy_io(rtwdev, addr, 2)) { - sdio_writew(rtwsdio->sdio_func, val, addr, err_ret); - return; - } - *(__le16 *)buf = cpu_to_le16(val); for (i = 0; i < 2; i++) { @@ -125,9 +120,6 @@ static u16 rtw_sdio_readw(struct rtw_dev *rtwdev, u32 addr, int *err_ret) u8 buf[2]; int i; - if (rtw_sdio_use_memcpy_io(rtwdev, addr, 2)) - return sdio_readw(rtwsdio->sdio_func, addr, err_ret); - for (i = 0; i < 2; i++) { buf[i] = sdio_readb(rtwsdio->sdio_func, addr + i, err_ret); if (*err_ret) diff --git a/drivers/net/wireless/realtek/rtw88/usb.h b/drivers/net/wireless/realtek/rtw88/usb.h index 30647f0dd61c6..ad1d7955c6a51 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.h +++ b/drivers/net/wireless/realtek/rtw88/usb.h @@ -78,7 +78,7 @@ struct rtw_usb { u8 pipe_interrupt; u8 pipe_in; u8 out_ep[RTW_USB_EP_MAX]; - u8 qsel_to_ep[TX_DESC_QSEL_MAX]; + int qsel_to_ep[TX_DESC_QSEL_MAX]; u8 usb_txagg_num; struct workqueue_struct *txwq, *rxwq; diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index acba53cf7cc31..0833a9eb88f37 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1467,6 +1467,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = { .wde_size4 = {RTW89_WDE_PG_64, 0, 4096,}, /* PCIE 64 */ .wde_size6 = {RTW89_WDE_PG_64, 512, 0,}, + /* 8852B PCIE SCC */ + .wde_size7 = {RTW89_WDE_PG_64, 510, 2,}, /* DLFW */ .wde_size9 = {RTW89_WDE_PG_64, 0, 1024,}, /* 8852C DLFW */ @@ -1491,6 +1493,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = { .wde_qt4 = {0, 0, 0, 0,}, /* PCIE 64 */ .wde_qt6 = {448, 48, 0, 16,}, + /* 8852B PCIE SCC */ + .wde_qt7 = {446, 48, 0, 16,}, /* 8852C DLFW */ .wde_qt17 = {0, 0, 0, 0,}, /* 8852C PCIE SCC */ diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index e340720bbb889..0f380b6fb97ba 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ 
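The rtw88 hunks defer rtw_update_sta_info() from the atomic sta_rc_update callback to a per-station work item that can take the driver mutex. Consolidated from the pieces above (a summary sketch, not additional code in the patch):

	/* struct rtw_sta_info gains: struct rtw_dev *rtwdev; struct work_struct rc_work; */

	static void example_rc_work(struct work_struct *work)
	{
		struct rtw_sta_info *si = container_of(work, struct rtw_sta_info, rc_work);
		struct rtw_dev *rtwdev = si->rtwdev;

		mutex_lock(&rtwdev->mutex);	/* sleeping lock: fine here, not in the rc_update callback */
		rtw_update_sta_info(rtwdev, si, true);
		mutex_unlock(&rtwdev->mutex);
	}

	/*
	 * sta_add:        INIT_WORK(&si->rc_work, example_rc_work);
	 * sta_rc_update:  ieee80211_queue_work(hw, &si->rc_work);   (atomic context)
	 * sta_remove:     cancel_work_sync(&si->rc_work);           (before the station is freed)
	 */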
-794,6 +794,7 @@ struct rtw89_mac_size_set { const struct rtw89_dle_size wde_size0; const struct rtw89_dle_size wde_size4; const struct rtw89_dle_size wde_size6; + const struct rtw89_dle_size wde_size7; const struct rtw89_dle_size wde_size9; const struct rtw89_dle_size wde_size18; const struct rtw89_dle_size wde_size19; @@ -806,6 +807,7 @@ struct rtw89_mac_size_set { const struct rtw89_wde_quota wde_qt0; const struct rtw89_wde_quota wde_qt4; const struct rtw89_wde_quota wde_qt6; + const struct rtw89_wde_quota wde_qt7; const struct rtw89_wde_quota wde_qt17; const struct rtw89_wde_quota wde_qt18; const struct rtw89_ple_quota ple_qt4; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c index b1a6b985842be..9ed4ade08813e 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c @@ -18,25 +18,25 @@ RTW8852B_FW_BASENAME "-" __stringify(RTW8852B_FW_FORMAT_MAX) ".bin" static const struct rtw89_hfc_ch_cfg rtw8852b_hfc_chcfg_pcie[] = { - {5, 343, grp_0}, /* ACH 0 */ - {5, 343, grp_0}, /* ACH 1 */ - {5, 343, grp_0}, /* ACH 2 */ - {5, 343, grp_0}, /* ACH 3 */ + {5, 341, grp_0}, /* ACH 0 */ + {5, 341, grp_0}, /* ACH 1 */ + {4, 342, grp_0}, /* ACH 2 */ + {4, 342, grp_0}, /* ACH 3 */ {0, 0, grp_0}, /* ACH 4 */ {0, 0, grp_0}, /* ACH 5 */ {0, 0, grp_0}, /* ACH 6 */ {0, 0, grp_0}, /* ACH 7 */ - {4, 344, grp_0}, /* B0MGQ */ - {4, 344, grp_0}, /* B0HIQ */ + {4, 342, grp_0}, /* B0MGQ */ + {4, 342, grp_0}, /* B0HIQ */ {0, 0, grp_0}, /* B1MGQ */ {0, 0, grp_0}, /* B1HIQ */ {40, 0, 0} /* FWCMDQ */ }; static const struct rtw89_hfc_pub_cfg rtw8852b_hfc_pubcfg_pcie = { - 448, /* Group 0 */ + 446, /* Group 0 */ 0, /* Group 1 */ - 448, /* Public Max */ + 446, /* Public Max */ 0 /* WP threshold */ }; @@ -49,13 +49,13 @@ static const struct rtw89_hfc_param_ini rtw8852b_hfc_param_ini_pcie[] = { }; static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = { - [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size6, - &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6, - &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18, + [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size7, + &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt7, + &rtw89_mac_size.wde_qt7, &rtw89_mac_size.ple_qt18, &rtw89_mac_size.ple_qt58}, - [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size6, - &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6, - &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18, + [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size7, + &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt7, + &rtw89_mac_size.wde_qt7, &rtw89_mac_size.ple_qt18, &rtw89_mac_size.ple_qt_52b_wow}, [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size9, &rtw89_mac_size.ple_size8, &rtw89_mac_size.wde_qt4, diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index 9a8faaf4c6b64..89c7a1420381d 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -5964,10 +5964,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) ret = -ENOMEM; goto out_free; } + param.pmsr_capa = pmsr_capa; + ret = parse_pmsr_capa(info->attrs[HWSIM_ATTR_PMSR_SUPPORT], pmsr_capa, info); if (ret) goto out_free; - param.pmsr_capa = pmsr_capa; } ret = mac80211_hwsim_new_radio(info, &param); diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c index c066b0040a3fe..829515a601b37 100644 ---
a/drivers/net/wwan/iosm/iosm_ipc_imem.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c @@ -565,24 +565,32 @@ static void ipc_imem_run_state_worker(struct work_struct *instance) struct ipc_mux_config mux_cfg; struct iosm_imem *ipc_imem; u8 ctrl_chl_idx = 0; + int ret; ipc_imem = container_of(instance, struct iosm_imem, run_state_worker); if (ipc_imem->phase != IPC_P_RUN) { dev_err(ipc_imem->dev, "Modem link down. Exit run state worker."); - return; + goto err_out; } if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag)) ipc_devlink_deinit(ipc_imem->ipc_devlink); - if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg)) - ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem); + ret = ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg); + if (ret < 0) + goto err_out; + + ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem); + if (!ipc_imem->mux) + goto err_out; + + ret = ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol); + if (ret < 0) + goto err_ipc_mux_deinit; - ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol); - if (ipc_imem->mux) - ipc_imem->mux->wwan = ipc_imem->wwan; + ipc_imem->mux->wwan = ipc_imem->wwan; while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) { if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) { @@ -622,6 +630,13 @@ static void ipc_imem_run_state_worker(struct work_struct *instance) /* Complete all memory stores after setting bit */ smp_mb__after_atomic(); + + return; + +err_ipc_mux_deinit: + ipc_mux_deinit(ipc_imem->mux); +err_out: + ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN); } static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq) diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c index 66b90cc4c3460..109cf89304888 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c @@ -77,8 +77,8 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, } /* Initialize wwan channel */ -void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, - enum ipc_mux_protocol mux_type) +int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, + enum ipc_mux_protocol mux_type) { struct ipc_chnl_cfg chnl_cfg = { 0 }; @@ -87,7 +87,7 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, /* If modem version is invalid (0xffffffff), do not initialize WWAN. */ if (ipc_imem->cp_version == -1) { dev_err(ipc_imem->dev, "invalid CP version"); - return; + return -EIO; } ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels); @@ -104,9 +104,13 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, /* WWAN registration. */ ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev); - if (!ipc_imem->wwan) + if (!ipc_imem->wwan) { dev_err(ipc_imem->dev, "failed to register the ipc_wwan interfaces"); + return -ENOMEM; + } + + return 0; } /* Map SKB to DMA for transfer */ diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h index f8afb217d9e2f..026c5bd0f9992 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h @@ -91,9 +91,11 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, int if_id, * MUX. * @ipc_imem: Pointer to iosm_imem struct. * @mux_type: Type of mux protocol. 
+ * + * Return: 0 on success and failure value on error */ -void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, - enum ipc_mux_protocol mux_type); +int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, + enum ipc_mux_protocol mux_type); /** * ipc_imem_sys_devlink_open - Open a Flash/CD Channel link to CP diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c index 226fc1703e90f..91256e005b846 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.c +++ b/drivers/net/wwan/t7xx/t7xx_pci.c @@ -45,6 +45,7 @@ #define T7XX_PCI_IREG_BASE 0 #define T7XX_PCI_EREG_BASE 2 +#define T7XX_INIT_TIMEOUT 20 #define PM_SLEEP_DIS_TIMEOUT_MS 20 #define PM_ACK_TIMEOUT_MS 1500 #define PM_AUTOSUSPEND_MS 20000 @@ -96,6 +97,7 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev) spin_lock_init(&t7xx_dev->md_pm_lock); init_completion(&t7xx_dev->sleep_lock_acquire); init_completion(&t7xx_dev->pm_sr_ack); + init_completion(&t7xx_dev->init_done); atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); device_init_wakeup(&pdev->dev, true); @@ -124,6 +126,7 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev) pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev); pm_runtime_allow(&t7xx_dev->pdev->dev); pm_runtime_put_noidle(&t7xx_dev->pdev->dev); + complete_all(&t7xx_dev->init_done); } static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev) @@ -529,6 +532,20 @@ static void t7xx_pci_shutdown(struct pci_dev *pdev) __t7xx_pci_pm_suspend(pdev); } +static int t7xx_pci_pm_prepare(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct t7xx_pci_dev *t7xx_dev; + + t7xx_dev = pci_get_drvdata(pdev); + if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) { + dev_warn(dev, "Not ready for system sleep.\n"); + return -ETIMEDOUT; + } + + return 0; +} + static int t7xx_pci_pm_suspend(struct device *dev) { return __t7xx_pci_pm_suspend(to_pci_dev(dev)); @@ -555,6 +572,7 @@ static int t7xx_pci_pm_runtime_resume(struct device *dev) } static const struct dev_pm_ops t7xx_pci_pm_ops = { + .prepare = t7xx_pci_pm_prepare, .suspend = t7xx_pci_pm_suspend, .resume = t7xx_pci_pm_resume, .resume_noirq = t7xx_pci_pm_resume_noirq, diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h index 112efa534eace..f08f1ab744691 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.h +++ b/drivers/net/wwan/t7xx/t7xx_pci.h @@ -69,6 +69,7 @@ struct t7xx_pci_dev { struct t7xx_modem *md; struct t7xx_ccmni_ctrl *ccmni_ctlb; bool rgu_pci_irq_en; + struct completion init_done; /* Low Power Items */ struct list_head md_pm_entities; diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index 1e0f2297f9c66..c1896a1d978cd 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -359,7 +359,7 @@ static struct i2c_driver fdp_nci_i2c_driver = { .name = FDP_I2C_DRIVER_NAME, .acpi_match_table = fdp_nci_i2c_acpi_match, }, - .probe_new = fdp_nci_i2c_probe, + .probe = fdp_nci_i2c_probe, .remove = fdp_nci_i2c_remove, }; module_i2c_driver(fdp_nci_i2c_driver); diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c index e72b358a2a125..642df4e0ce24a 100644 --- a/drivers/nfc/microread/i2c.c +++ b/drivers/nfc/microread/i2c.c @@ -286,7 +286,7 @@ static struct i2c_driver microread_i2c_driver = { .driver = { .name = MICROREAD_I2C_DRIVER_NAME, }, - .probe_new = microread_i2c_probe, + .probe = microread_i2c_probe, .remove = microread_i2c_remove, .id_table = microread_i2c_id, }; diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c index 
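The t7xx change above blocks system suspend until late PM init has run, by completing init_done in t7xx_pci_pm_init_late() and waiting for it (with a 20 s timeout) in a new .prepare callback. The shape of that gate, sketched with generic names:

	#include <linux/completion.h>
	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/pm.h>

	#define EXAMPLE_INIT_TIMEOUT	20	/* seconds, mirrors T7XX_INIT_TIMEOUT */

	struct example_dev {
		struct completion init_done;
		/* ... */
	};

	/* probe path: init_completion(&edev->init_done);
	 * late init:  complete_all(&edev->init_done);
	 */

	static int example_pm_prepare(struct device *dev)
	{
		struct example_dev *edev = dev_get_drvdata(dev);

		if (!wait_for_completion_timeout(&edev->init_done,
						 EXAMPLE_INIT_TIMEOUT * HZ)) {
			dev_warn(dev, "not ready for system sleep\n");
			return -ETIMEDOUT;	/* aborts the suspend attempt */
		}
		return 0;
	}

	static const struct dev_pm_ops example_pm_ops = {
		.prepare = example_pm_prepare,
	};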
164e2ab859fd9..74553134c1b18 100644 --- a/drivers/nfc/nfcmrvl/i2c.c +++ b/drivers/nfc/nfcmrvl/i2c.c @@ -258,7 +258,7 @@ static const struct i2c_device_id nfcmrvl_i2c_id_table[] = { MODULE_DEVICE_TABLE(i2c, nfcmrvl_i2c_id_table); static struct i2c_driver nfcmrvl_i2c_driver = { - .probe_new = nfcmrvl_i2c_probe, + .probe = nfcmrvl_i2c_probe, .id_table = nfcmrvl_i2c_id_table, .remove = nfcmrvl_i2c_remove, .driver = { diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index d4c299be79499..baddaf242d185 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -348,7 +348,7 @@ static struct i2c_driver nxp_nci_i2c_driver = { .acpi_match_table = ACPI_PTR(acpi_id), .of_match_table = of_nxp_nci_i2c_match, }, - .probe_new = nxp_nci_i2c_probe, + .probe = nxp_nci_i2c_probe, .id_table = nxp_nci_i2c_id_table, .remove = nxp_nci_i2c_remove, }; diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c index 1503a98f0405a..438ab9553f7a1 100644 --- a/drivers/nfc/pn533/i2c.c +++ b/drivers/nfc/pn533/i2c.c @@ -259,7 +259,7 @@ static struct i2c_driver pn533_i2c_driver = { .name = PN533_I2C_DRIVER_NAME, .of_match_table = of_match_ptr(of_pn533_i2c_match), }, - .probe_new = pn533_i2c_probe, + .probe = pn533_i2c_probe, .id_table = pn533_i2c_id_table, .remove = pn533_i2c_remove, }; diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c index 8b0d910bee06c..3f6d74832bac3 100644 --- a/drivers/nfc/pn544/i2c.c +++ b/drivers/nfc/pn544/i2c.c @@ -953,7 +953,7 @@ static struct i2c_driver pn544_hci_i2c_driver = { .of_match_table = of_match_ptr(of_pn544_i2c_match), .acpi_match_table = ACPI_PTR(pn544_hci_i2c_acpi_match), }, - .probe_new = pn544_hci_i2c_probe, + .probe = pn544_hci_i2c_probe, .id_table = pn544_hci_i2c_id_table, .remove = pn544_hci_i2c_remove, }; diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c index 2517ae71f9a4a..720d4a72493c8 100644 --- a/drivers/nfc/s3fwrn5/i2c.c +++ b/drivers/nfc/s3fwrn5/i2c.c @@ -261,7 +261,7 @@ static struct i2c_driver s3fwrn5_i2c_driver = { .name = S3FWRN5_I2C_DRIVER_NAME, .of_match_table = of_match_ptr(of_s3fwrn5_i2c_match), }, - .probe_new = s3fwrn5_i2c_probe, + .probe = s3fwrn5_i2c_probe, .remove = s3fwrn5_i2c_remove, .id_table = s3fwrn5_i2c_id_table, }; diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c index 6b5eed8a1fbe1..d20a337e90b45 100644 --- a/drivers/nfc/st-nci/i2c.c +++ b/drivers/nfc/st-nci/i2c.c @@ -283,7 +283,7 @@ static struct i2c_driver st_nci_i2c_driver = { .of_match_table = of_match_ptr(of_st_nci_i2c_match), .acpi_match_table = ACPI_PTR(st_nci_i2c_acpi_match), }, - .probe_new = st_nci_i2c_probe, + .probe = st_nci_i2c_probe, .id_table = st_nci_i2c_id_table, .remove = st_nci_i2c_remove, }; diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index 55f7a2391bb1a..064a63db288bd 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -597,7 +597,7 @@ static struct i2c_driver st21nfca_hci_i2c_driver = { .of_match_table = of_match_ptr(of_st21nfca_i2c_match), .acpi_match_table = ACPI_PTR(st21nfca_hci_i2c_acpi_match), }, - .probe_new = st21nfca_hci_i2c_probe, + .probe = st21nfca_hci_i2c_probe, .id_table = st21nfca_hci_i2c_id_table, .remove = st21nfca_hci_i2c_remove, }; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ccb6eb1282f82..1f0cbb77b2493 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3585,6 +3585,9 @@ static ssize_t nvme_sysfs_delete(struct device *dev, { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + if 
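The NFC hunks above are part of the tree-wide i2c conversion: struct i2c_driver::probe now uses the id-less prototype that .probe_new used to carry, so drivers simply rename the member. The resulting driver skeleton, with hypothetical names:

	#include <linux/i2c.h>
	#include <linux/module.h>

	static int example_i2c_probe(struct i2c_client *client)
	{
		/* new-style probe: no struct i2c_device_id parameter */
		return 0;
	}

	static void example_i2c_remove(struct i2c_client *client)
	{
	}

	static struct i2c_driver example_i2c_driver = {
		.driver = {
			.name = "example-i2c",
		},
		.probe = example_i2c_probe,	/* was .probe_new before the rename */
		.remove = example_i2c_remove,
	};
	module_i2c_driver(example_i2c_driver);

	MODULE_LICENSE("GPL");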
(!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags)) + return -EBUSY; + if (device_remove_file_self(dev, attr)) nvme_delete_ctrl_sync(ctrl); return count; @@ -5045,7 +5048,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) * that were missed. We identify persistent discovery controllers by * checking that they started once before, hence are reconnecting back. */ - if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && + if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && nvme_discovery_ctrl(ctrl)) nvme_change_uevent(ctrl, "NVME_EVENT=rediscover"); @@ -5056,6 +5059,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) } nvme_change_uevent(ctrl, "NVME_EVENT=connected"); + set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags); } EXPORT_SYMBOL_GPL(nvme_start_ctrl); diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 9e6e56c20ec99..316f3e4ca7cc6 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -163,7 +163,9 @@ static umode_t nvme_hwmon_is_visible(const void *_data, case hwmon_temp_max: case hwmon_temp_min: if ((!channel && data->ctrl->wctemp) || - (channel && data->log->temp_sensor[channel - 1])) { + (channel && data->log->temp_sensor[channel - 1] && + !(data->ctrl->quirks & + NVME_QUIRK_NO_SECONDARY_TEMP_THRESH))) { if (data->ctrl->quirks & NVME_QUIRK_NO_TEMP_THRESH_CHANGE) return 0444; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 9171452e2f6d4..2bc159a318ff0 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -884,7 +884,6 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) { if (!head->disk) return; - blk_mark_disk_dead(head->disk); /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index bf46f122e9e1e..a2d4f59e0535a 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -149,6 +149,11 @@ enum nvme_quirks { * Reports garbage in the namespace identifiers (eui64, nguid, uuid). */ NVME_QUIRK_BOGUS_NID = (1 << 18), + + /* + * No temperature thresholds for channels other than 0 (Composite). + */ + NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = (1 << 19), }; /* diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 7f25c0fe3a0bf..32244582fdb08 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2956,7 +2956,7 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, * over a single page. 
*/ dev->ctrl.max_hw_sectors = min_t(u32, - NVME_MAX_KB_SZ << 1, dma_max_mapping_size(&pdev->dev) >> 9); + NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9); dev->ctrl.max_segments = NVME_MAX_SEGS; /* @@ -3402,6 +3402,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x2646, 0x5013), /* Kingston KC3000, Kingston FURY Renegade */ + .driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, }, { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */ @@ -3441,6 +3443,10 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c index c2c9b0d3244cb..be967d797c28e 100644 --- a/drivers/platform/mellanox/mlxbf-pmc.c +++ b/drivers/platform/mellanox/mlxbf-pmc.c @@ -1348,9 +1348,8 @@ static int mlxbf_pmc_map_counters(struct device *dev) for (i = 0; i < pmc->total_blocks; ++i) { if (strstr(pmc->block_name[i], "tile")) { - ret = sscanf(pmc->block_name[i], "tile%d", &tile_num); - if (ret < 0) - return ret; + if (sscanf(pmc->block_name[i], "tile%d", &tile_num) != 1) + return -EINVAL; if (tile_num >= pmc->tile_count) continue; diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c index d5bb775dadcf2..ee5f124f78b6d 100644 --- a/drivers/platform/x86/amd/pmf/core.c +++ b/drivers/platform/x86/amd/pmf/core.c @@ -245,24 +245,29 @@ static const struct pci_device_id pmf_pci_ids[] = { { } }; -int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev) +static void amd_pmf_set_dram_addr(struct amd_pmf_dev *dev) { u64 phys_addr; u32 hi, low; - INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics); + phys_addr = virt_to_phys(dev->buf); + hi = phys_addr >> 32; + low = phys_addr & GENMASK(31, 0); + + amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL); + amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL); +} +int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev) +{ /* Get Metrics Table Address */ dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL); if (!dev->buf) return -ENOMEM; - phys_addr = virt_to_phys(dev->buf); - hi = phys_addr >> 32; - low = phys_addr & GENMASK(31, 0); + INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics); - amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL); - amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL); + amd_pmf_set_dram_addr(dev); /* * Start collecting the metrics data after a small delay @@ -273,6 +278,18 @@ int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev) return 0; } +static int amd_pmf_resume_handler(struct device *dev) +{ + struct amd_pmf_dev *pdev = dev_get_drvdata(dev); + + if (pdev->buf) + amd_pmf_set_dram_addr(pdev); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, NULL, amd_pmf_resume_handler); + static void 
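The amd/pmf hunks above split the DRAM-address programming into a helper so a resume handler can re-send it after suspend, registered through DEFINE_SIMPLE_DEV_PM_OPS(). Minimal wiring, with placeholder names:

	#include <linux/platform_device.h>
	#include <linux/pm.h>

	struct example_pmf { void *buf; };

	static void example_set_dram_addr(struct example_pmf *p)
	{
		/* resend the physical address of p->buf to the firmware */
	}

	static int example_resume_handler(struct device *dev)
	{
		struct example_pmf *p = dev_get_drvdata(dev);

		if (p->buf)
			example_set_dram_addr(p);
		return 0;
	}

	static DEFINE_SIMPLE_DEV_PM_OPS(example_pm, NULL, example_resume_handler);

	static struct platform_driver example_driver = {
		.driver = {
			.name = "example-pmf",
			.pm = pm_sleep_ptr(&example_pm),	/* dropped when CONFIG_PM_SLEEP=n */
		},
	};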
amd_pmf_init_features(struct amd_pmf_dev *dev) { int ret; @@ -413,6 +430,7 @@ static struct platform_driver amd_pmf_driver = { .name = "amd-pmf", .acpi_match_table = amd_pmf_acpi_ids, .dev_groups = amd_pmf_driver_groups, + .pm = pm_sleep_ptr(&amd_pmf_pm), }, .probe = amd_pmf_probe, .remove_new = amd_pmf_remove, diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index e2c9a68d12df9..fdf7da06af306 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -555,6 +555,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */ { KE_IGNORE, 0x79, }, /* Charger type dectection notification */ { KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */ + { KE_IGNORE, 0x7B, }, /* Charger connect/disconnect notification */ { KE_KEY, 0x7c, { KEY_MICMUTE } }, { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */ { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */ @@ -584,6 +585,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0xAE, { KEY_FN_F5 } }, /* Fn+F5 fan mode on 2020+ */ { KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */ { KE_KEY, 0xB5, { KEY_CALC } }, + { KE_IGNORE, 0xC0, }, /* External display connect/disconnect notification */ { KE_KEY, 0xC4, { KEY_KBDILLUMUP } }, { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } }, { KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */ diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 61dffb4c8a1dc..e6ae8265f3a37 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -208,7 +208,7 @@ static int scan_chunks_sanity_check(struct device *dev) continue; reinit_completion(&ifs_done); local_work.dev = dev; - INIT_WORK(&local_work.w, copy_hashes_authenticate_chunks); + INIT_WORK_ONSTACK(&local_work.w, copy_hashes_authenticate_chunks); schedule_work_on(cpu, &local_work.w); wait_for_completion(&ifs_done); if (ifsd->loading_error) { diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index e0572a29212e8..02fe360a59c7c 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -304,14 +304,13 @@ struct isst_if_pkg_info { static struct isst_if_cpu_info *isst_cpu_info; static struct isst_if_pkg_info *isst_pkg_info; -#define ISST_MAX_PCI_DOMAINS 8 - static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn) { struct pci_dev *matched_pci_dev = NULL; struct pci_dev *pci_dev = NULL; + struct pci_dev *_pci_dev = NULL; int no_matches = 0, pkg_id; - int i, bus_number; + int bus_number; if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 || cpu >= nr_cpu_ids || cpu >= num_possible_cpus()) @@ -323,12 +322,11 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn if (bus_number < 0) return NULL; - for (i = 0; i < ISST_MAX_PCI_DOMAINS; ++i) { - struct pci_dev *_pci_dev; + for_each_pci_dev(_pci_dev) { int node; - _pci_dev = pci_get_domain_bus_and_slot(i, bus_number, PCI_DEVFN(dev, fn)); - if (!_pci_dev) + if (_pci_dev->bus->number != bus_number || + _pci_dev->devfn != PCI_DEVFN(dev, fn)) continue; ++no_matches; diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c index 307ee6f71042e..6f83e99d2eb72 100644 --- a/drivers/power/supply/ab8500_btemp.c +++ 
b/drivers/power/supply/ab8500_btemp.c @@ -624,10 +624,8 @@ static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data) */ static void ab8500_btemp_external_power_changed(struct power_supply *psy) { - struct ab8500_btemp *di = power_supply_get_drvdata(psy); - - class_for_each_device(power_supply_class, NULL, - di->btemp_psy, ab8500_btemp_get_ext_psy_data); + class_for_each_device(power_supply_class, NULL, psy, + ab8500_btemp_get_ext_psy_data); } /* ab8500 btemp driver interrupts and their respective isr */ diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index 41a7bff9ac376..53560fbb6dcd3 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -2407,10 +2407,8 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di) */ static void ab8500_fg_external_power_changed(struct power_supply *psy) { - struct ab8500_fg *di = power_supply_get_drvdata(psy); - - class_for_each_device(power_supply_class, NULL, - di->fg_psy, ab8500_fg_get_ext_psy_data); + class_for_each_device(power_supply_class, NULL, psy, + ab8500_fg_get_ext_psy_data); } /** diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c index 05f4131784629..3be6f3b10ea42 100644 --- a/drivers/power/supply/axp288_fuel_gauge.c +++ b/drivers/power/supply/axp288_fuel_gauge.c @@ -507,7 +507,7 @@ static void fuel_gauge_external_power_changed(struct power_supply *psy) mutex_lock(&info->lock); info->valid = 0; /* Force updating of the cached registers */ mutex_unlock(&info->lock); - power_supply_changed(info->bat); + power_supply_changed(psy); } static struct power_supply_desc fuel_gauge_desc = { diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index de67b985f0a91..dc33f00fcc068 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -1262,6 +1262,7 @@ static void bq24190_input_current_limit_work(struct work_struct *work) bq24190_charger_set_property(bdi->charger, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, &val); + power_supply_changed(bdi->charger); } /* Sync the input-current-limit with our parent supply (if we have one) */ diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 22cde35eb144c..f8636cf86505a 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -750,7 +750,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy) if (bq->chip_version != BQ25892) return; - ret = power_supply_get_property_from_supplier(bq->charger, + ret = power_supply_get_property_from_supplier(psy, POWER_SUPPLY_PROP_USB_TYPE, &val); if (ret) @@ -775,6 +775,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy) } bq25890_field_write(bq, F_IINLIM, input_current_limit); + power_supply_changed(psy); } static int bq25890_get_chip_state(struct bq25890_device *bq, @@ -1106,6 +1107,8 @@ static void bq25890_pump_express_work(struct work_struct *data) dev_info(bq->dev, "Hi-voltage charging requested, input voltage is %d mV\n", voltage); + power_supply_changed(bq->charger); + return; error_print: bq25890_field_write(bq, F_PUMPX_EN, 0); diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 5ff6f44fd47b2..4296600e8912a 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1083,10 +1083,8 @@ static int poll_interval_param_set(const char *val, const 
struct kernel_param *k return ret; mutex_lock(&bq27xxx_list_lock); - list_for_each_entry(di, &bq27xxx_battery_devices, list) { - cancel_delayed_work_sync(&di->work); - schedule_delayed_work(&di->work, 0); - } + list_for_each_entry(di, &bq27xxx_battery_devices, list) + mod_delayed_work(system_wq, &di->work, 0); mutex_unlock(&bq27xxx_list_lock); return ret; @@ -1761,60 +1759,6 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di) return POWER_SUPPLY_HEALTH_GOOD; } -void bq27xxx_battery_update(struct bq27xxx_device_info *di) -{ - struct bq27xxx_reg_cache cache = {0, }; - bool has_singe_flag = di->opts & BQ27XXX_O_ZERO; - - cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag); - if ((cache.flags & 0xff) == 0xff) - cache.flags = -1; /* read error */ - if (cache.flags >= 0) { - cache.temperature = bq27xxx_battery_read_temperature(di); - if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR) - cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE); - if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR) - cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP); - if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR) - cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF); - - cache.charge_full = bq27xxx_battery_read_fcc(di); - cache.capacity = bq27xxx_battery_read_soc(di); - if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR) - cache.energy = bq27xxx_battery_read_energy(di); - di->cache.flags = cache.flags; - cache.health = bq27xxx_battery_read_health(di); - if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR) - cache.cycle_count = bq27xxx_battery_read_cyct(di); - - /* We only have to read charge design full once */ - if (di->charge_design_full <= 0) - di->charge_design_full = bq27xxx_battery_read_dcap(di); - } - - if ((di->cache.capacity != cache.capacity) || - (di->cache.flags != cache.flags)) - power_supply_changed(di->bat); - - if (memcmp(&di->cache, &cache, sizeof(cache)) != 0) - di->cache = cache; - - di->last_update = jiffies; -} -EXPORT_SYMBOL_GPL(bq27xxx_battery_update); - -static void bq27xxx_battery_poll(struct work_struct *work) -{ - struct bq27xxx_device_info *di = - container_of(work, struct bq27xxx_device_info, - work.work); - - bq27xxx_battery_update(di); - - if (poll_interval > 0) - schedule_delayed_work(&di->work, poll_interval * HZ); -} - static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags) { if (di->opts & BQ27XXX_O_ZERO) @@ -1833,7 +1777,8 @@ static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags) static int bq27xxx_battery_current_and_status( struct bq27xxx_device_info *di, union power_supply_propval *val_curr, - union power_supply_propval *val_status) + union power_supply_propval *val_status, + struct bq27xxx_reg_cache *cache) { bool single_flags = (di->opts & BQ27XXX_O_ZERO); int curr; @@ -1845,10 +1790,14 @@ static int bq27xxx_battery_current_and_status( return curr; } - flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, single_flags); - if (flags < 0) { - dev_err(di->dev, "error reading flags\n"); - return flags; + if (cache) { + flags = cache->flags; + } else { + flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, single_flags); + if (flags < 0) { + dev_err(di->dev, "error reading flags\n"); + return flags; + } } if (di->opts & BQ27XXX_O_ZERO) { @@ -1883,6 +1832,78 @@ static int bq27xxx_battery_current_and_status( return 0; } +static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di) +{ + union power_supply_propval status = di->last_status; + struct 
bq27xxx_reg_cache cache = {0, }; + bool has_singe_flag = di->opts & BQ27XXX_O_ZERO; + + cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag); + if ((cache.flags & 0xff) == 0xff) + cache.flags = -1; /* read error */ + if (cache.flags >= 0) { + cache.temperature = bq27xxx_battery_read_temperature(di); + if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR) + cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE); + if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR) + cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP); + if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR) + cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF); + + cache.charge_full = bq27xxx_battery_read_fcc(di); + cache.capacity = bq27xxx_battery_read_soc(di); + if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR) + cache.energy = bq27xxx_battery_read_energy(di); + di->cache.flags = cache.flags; + cache.health = bq27xxx_battery_read_health(di); + if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR) + cache.cycle_count = bq27xxx_battery_read_cyct(di); + + /* + * On gauges with signed current reporting the current must be + * checked to detect charging <-> discharging status changes. + */ + if (!(di->opts & BQ27XXX_O_ZERO)) + bq27xxx_battery_current_and_status(di, NULL, &status, &cache); + + /* We only have to read charge design full once */ + if (di->charge_design_full <= 0) + di->charge_design_full = bq27xxx_battery_read_dcap(di); + } + + if ((di->cache.capacity != cache.capacity) || + (di->cache.flags != cache.flags) || + (di->last_status.intval != status.intval)) { + di->last_status.intval = status.intval; + power_supply_changed(di->bat); + } + + if (memcmp(&di->cache, &cache, sizeof(cache)) != 0) + di->cache = cache; + + di->last_update = jiffies; + + if (!di->removed && poll_interval > 0) + mod_delayed_work(system_wq, &di->work, poll_interval * HZ); +} + +void bq27xxx_battery_update(struct bq27xxx_device_info *di) +{ + mutex_lock(&di->lock); + bq27xxx_battery_update_unlocked(di); + mutex_unlock(&di->lock); +} +EXPORT_SYMBOL_GPL(bq27xxx_battery_update); + +static void bq27xxx_battery_poll(struct work_struct *work) +{ + struct bq27xxx_device_info *di = + container_of(work, struct bq27xxx_device_info, + work.work); + + bq27xxx_battery_update(di); +} + /* * Get the average power in µW * Return < 0 if something fails. @@ -1985,10 +2006,8 @@ static int bq27xxx_battery_get_property(struct power_supply *psy, struct bq27xxx_device_info *di = power_supply_get_drvdata(psy); mutex_lock(&di->lock); - if (time_is_before_jiffies(di->last_update + 5 * HZ)) { - cancel_delayed_work_sync(&di->work); - bq27xxx_battery_poll(&di->work.work); - } + if (time_is_before_jiffies(di->last_update + 5 * HZ)) + bq27xxx_battery_update_unlocked(di); mutex_unlock(&di->lock); if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0) @@ -1996,7 +2015,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy, switch (psp) { case POWER_SUPPLY_PROP_STATUS: - ret = bq27xxx_battery_current_and_status(di, NULL, val); + ret = bq27xxx_battery_current_and_status(di, NULL, val, NULL); break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: ret = bq27xxx_battery_voltage(di, val); @@ -2005,7 +2024,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy, val->intval = di->cache.flags < 0 ? 
0 : 1; break; case POWER_SUPPLY_PROP_CURRENT_NOW: - ret = bq27xxx_battery_current_and_status(di, val, NULL); + ret = bq27xxx_battery_current_and_status(di, val, NULL, NULL); break; case POWER_SUPPLY_PROP_CAPACITY: ret = bq27xxx_simple_value(di->cache.capacity, val); @@ -2078,8 +2097,8 @@ static void bq27xxx_external_power_changed(struct power_supply *psy) { struct bq27xxx_device_info *di = power_supply_get_drvdata(psy); - cancel_delayed_work_sync(&di->work); - schedule_delayed_work(&di->work, 0); + /* After charger plug in/out wait 0.5s for things to stabilize */ + mod_delayed_work(system_wq, &di->work, HZ / 2); } int bq27xxx_battery_setup(struct bq27xxx_device_info *di) @@ -2127,22 +2146,18 @@ EXPORT_SYMBOL_GPL(bq27xxx_battery_setup); void bq27xxx_battery_teardown(struct bq27xxx_device_info *di) { - /* - * power_supply_unregister call bq27xxx_battery_get_property which - * call bq27xxx_battery_poll. - * Make sure that bq27xxx_battery_poll will not call - * schedule_delayed_work again after unregister (which cause OOPS). - */ - poll_interval = 0; - - cancel_delayed_work_sync(&di->work); - - power_supply_unregister(di->bat); - mutex_lock(&bq27xxx_list_lock); list_del(&di->list); mutex_unlock(&bq27xxx_list_lock); + /* Set removed to avoid bq27xxx_battery_update() re-queuing the work */ + mutex_lock(&di->lock); + di->removed = true; + mutex_unlock(&di->lock); + + cancel_delayed_work_sync(&di->work); + + power_supply_unregister(di->bat); mutex_destroy(&di->lock); } EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown); diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c index f8768997333bc..6d3c748763390 100644 --- a/drivers/power/supply/bq27xxx_battery_i2c.c +++ b/drivers/power/supply/bq27xxx_battery_i2c.c @@ -179,7 +179,7 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client) i2c_set_clientdata(client, di); if (client->irq) { - ret = devm_request_threaded_irq(&client->dev, client->irq, + ret = request_threaded_irq(client->irq, NULL, bq27xxx_battery_irq_handler_thread, IRQF_ONESHOT, di->name, di); @@ -209,6 +209,7 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client) { struct bq27xxx_device_info *di = i2c_get_clientdata(client); + free_irq(client->irq, di); bq27xxx_battery_teardown(di); mutex_lock(&battery_mutex); diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c index 92e48e3a48536..1305cba61edd4 100644 --- a/drivers/power/supply/mt6360_charger.c +++ b/drivers/power/supply/mt6360_charger.c @@ -796,7 +796,9 @@ static int mt6360_charger_probe(struct platform_device *pdev) mci->vinovp = 6500000; mutex_init(&mci->chgdet_lock); platform_set_drvdata(pdev, mci); - devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work); + ret = devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to set delayed work\n"); ret = device_property_read_u32(&pdev->dev, "richtek,vinovp-microvolt", &mci->vinovp); if (ret) diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index ab986dbace16a..3791aec69ddc6 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -348,6 +348,10 @@ static int __power_supply_is_system_supplied(struct device *dev, void *data) struct power_supply *psy = dev_get_drvdata(dev); unsigned int *count = data; + if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_SCOPE, &ret)) + if (ret.intval == 
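A recurring theme in the bq27xxx rework above is replacing cancel_delayed_work_sync() plus schedule_delayed_work() with a single mod_delayed_work() call, which re-arms a pending work item (or queues it) without blocking on an in-flight run. The external-power-changed hook, in sketch form with a placeholder driver struct:

	#include <linux/power_supply.h>
	#include <linux/workqueue.h>

	struct example_battery { struct delayed_work work; };

	static void example_external_power_changed(struct power_supply *psy)
	{
		struct example_battery *di = power_supply_get_drvdata(psy);

		/*
		 * Re-arm the poll ~0.5 s out so readings are taken after the
		 * charger state has settled; no waiting on a running poll.
		 */
		mod_delayed_work(system_wq, &di->work, HZ / 2);
	}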
POWER_SUPPLY_SCOPE_DEVICE) + return 0; + (*count)++; if (psy->desc->type != POWER_SUPPLY_TYPE_BATTERY) if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_ONLINE, @@ -366,8 +370,8 @@ int power_supply_is_system_supplied(void) __power_supply_is_system_supplied); /* - * If no power class device was found at all, most probably we are - * running on a desktop system, so assume we are on mains power. + * If no system scope power class device was found at all, most probably we + * are running on a desktop system, so assume we are on mains power. */ if (count == 0) return 1; @@ -573,7 +577,7 @@ int power_supply_get_battery_info(struct power_supply *psy, struct power_supply_battery_info *info; struct device_node *battery_np = NULL; struct fwnode_reference_args args; - struct fwnode_handle *fwnode; + struct fwnode_handle *fwnode = NULL; const char *value; int err, len, index; const __be32 *list; @@ -585,7 +589,7 @@ int power_supply_get_battery_info(struct power_supply *psy, return -ENODEV; fwnode = fwnode_handle_get(of_fwnode_handle(battery_np)); - } else { + } else if (psy->dev.parent) { err = fwnode_property_get_reference_args( dev_fwnode(psy->dev.parent), "monitored-battery", NULL, 0, 0, &args); @@ -595,6 +599,9 @@ int power_supply_get_battery_info(struct power_supply *psy, fwnode = args.fwnode; } + if (!fwnode) + return -ENOENT; + err = fwnode_property_read_string(fwnode, "compatible", &value); if (err) goto out_put_node; diff --git a/drivers/power/supply/power_supply_leds.c b/drivers/power/supply/power_supply_leds.c index 702bf83f6e6d2..0674483279d77 100644 --- a/drivers/power/supply/power_supply_leds.c +++ b/drivers/power/supply/power_supply_leds.c @@ -35,8 +35,9 @@ static void power_supply_update_bat_leds(struct power_supply *psy) led_trigger_event(psy->charging_full_trig, LED_FULL); led_trigger_event(psy->charging_trig, LED_OFF); led_trigger_event(psy->full_trig, LED_FULL); - led_trigger_event(psy->charging_blink_full_solid_trig, - LED_FULL); + /* Going from blink to LED on requires a LED_OFF event to stop blink */ + led_trigger_event(psy->charging_blink_full_solid_trig, LED_OFF); + led_trigger_event(psy->charging_blink_full_solid_trig, LED_FULL); break; case POWER_SUPPLY_STATUS_CHARGING: led_trigger_event(psy->charging_full_trig, LED_FULL); diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index ba3b125cd66e4..06e5b6b0e255c 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -286,7 +286,8 @@ static ssize_t power_supply_show_property(struct device *dev, if (ret < 0) { if (ret == -ENODATA) - dev_dbg(dev, "driver has no data for `%s' property\n", + dev_dbg_ratelimited(dev, + "driver has no data for `%s' property\n", attr->attr.name); else if (ret != -ENODEV && ret != -EAGAIN) dev_err_ratelimited(dev, diff --git a/drivers/power/supply/rt9467-charger.c b/drivers/power/supply/rt9467-charger.c index 73f744a3155d4..ea33693b69779 100644 --- a/drivers/power/supply/rt9467-charger.c +++ b/drivers/power/supply/rt9467-charger.c @@ -1023,7 +1023,7 @@ static int rt9467_request_interrupt(struct rt9467_chg_data *data) for (i = 0; i < num_chg_irqs; i++) { virq = regmap_irq_get_virq(data->irq_chip_data, chg_irqs[i].hwirq); if (virq <= 0) - return dev_err_probe(dev, virq, "Failed to get (%s) irq\n", + return dev_err_probe(dev, -EINVAL, "Failed to get (%s) irq\n", chg_irqs[i].name); ret = devm_request_threaded_irq(dev, virq, NULL, chg_irqs[i].handler, diff --git a/drivers/power/supply/sbs-charger.c 
b/drivers/power/supply/sbs-charger.c index 75ebcbf0a7883..a14e89ac0369c 100644 --- a/drivers/power/supply/sbs-charger.c +++ b/drivers/power/supply/sbs-charger.c @@ -24,7 +24,7 @@ #define SBS_CHARGER_REG_STATUS 0x13 #define SBS_CHARGER_REG_ALARM_WARNING 0x16 -#define SBS_CHARGER_STATUS_CHARGE_INHIBITED BIT(1) +#define SBS_CHARGER_STATUS_CHARGE_INHIBITED BIT(0) #define SBS_CHARGER_STATUS_RES_COLD BIT(9) #define SBS_CHARGER_STATUS_RES_HOT BIT(10) #define SBS_CHARGER_STATUS_BATTERY_PRESENT BIT(14) diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c index 632977f84b954..bd23c4d9fed43 100644 --- a/drivers/power/supply/sc27xx_fuel_gauge.c +++ b/drivers/power/supply/sc27xx_fuel_gauge.c @@ -733,13 +733,6 @@ static int sc27xx_fgu_set_property(struct power_supply *psy, return ret; } -static void sc27xx_fgu_external_power_changed(struct power_supply *psy) -{ - struct sc27xx_fgu_data *data = power_supply_get_drvdata(psy); - - power_supply_changed(data->battery); -} - static int sc27xx_fgu_property_is_writeable(struct power_supply *psy, enum power_supply_property psp) { @@ -774,7 +767,7 @@ static const struct power_supply_desc sc27xx_fgu_desc = { .num_properties = ARRAY_SIZE(sc27xx_fgu_props), .get_property = sc27xx_fgu_get_property, .set_property = sc27xx_fgu_set_property, - .external_power_changed = sc27xx_fgu_external_power_changed, + .external_power_changed = power_supply_changed, .property_is_writeable = sc27xx_fgu_property_is_writeable, .no_thermal = true, }; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index dc741ac156c32..698ab7f5004bf 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -5256,7 +5256,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev) } rdev->debugfs = debugfs_create_dir(rname, debugfs_root); - if (!rdev->debugfs) { + if (IS_ERR(rdev->debugfs)) { rdev_warn(rdev, "Failed to create debugfs directory\n"); return; } @@ -6178,7 +6178,7 @@ static int __init regulator_init(void) ret = class_register(®ulator_class); debugfs_root = debugfs_create_dir("regulator", NULL); - if (!debugfs_root) + if (IS_ERR(debugfs_root)) pr_warn("regulator: Failed to create debugfs directory\n"); #ifdef CONFIG_DEBUG_FS diff --git a/drivers/regulator/mt6359-regulator.c b/drivers/regulator/mt6359-regulator.c index 1849566784ab5..3eb86ec21d088 100644 --- a/drivers/regulator/mt6359-regulator.c +++ b/drivers/regulator/mt6359-regulator.c @@ -951,9 +951,12 @@ static int mt6359_regulator_probe(struct platform_device *pdev) struct regulator_config config = {}; struct regulator_dev *rdev; struct mt6359_regulator_info *mt6359_info; - int i, hw_ver; + int i, hw_ver, ret; + + ret = regmap_read(mt6397->regmap, MT6359P_HWCID, &hw_ver); + if (ret) + return ret; - regmap_read(mt6397->regmap, MT6359P_HWCID, &hw_ver); if (hw_ver >= MT6359P_CHIP_VER) mt6359_info = mt6359p_regulators; else diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c index 87a746dcb5169..e75dd92f86ca7 100644 --- a/drivers/regulator/pca9450-regulator.c +++ b/drivers/regulator/pca9450-regulator.c @@ -264,7 +264,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = { .vsel_reg = PCA9450_REG_BUCK2OUT_DVS0, .vsel_mask = BUCK2OUT_DVS0_MASK, .enable_reg = PCA9450_REG_BUCK2CTRL, - .enable_mask = BUCK1_ENMODE_MASK, + .enable_mask = BUCK2_ENMODE_MASK, .ramp_reg = PCA9450_REG_BUCK2CTRL, .ramp_mask = BUCK2_RAMP_MASK, .ramp_delay_table = pca9450_dvs_buck_ramp_table, @@ -502,7 +502,7 @@ static const struct 
pca9450_regulator_desc pca9450bc_regulators[] = { .vsel_reg = PCA9450_REG_BUCK2OUT_DVS0, .vsel_mask = BUCK2OUT_DVS0_MASK, .enable_reg = PCA9450_REG_BUCK2CTRL, - .enable_mask = BUCK1_ENMODE_MASK, + .enable_mask = BUCK2_ENMODE_MASK, .ramp_reg = PCA9450_REG_BUCK2CTRL, .ramp_mask = BUCK2_RAMP_MASK, .ramp_delay_table = pca9450_dvs_buck_ramp_table, diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index ade1369fe5ed3..113c509bf6d05 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -127,6 +127,8 @@ static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int, struct dasd_device *, struct dasd_device *, unsigned int, int, unsigned int, unsigned int, unsigned int, unsigned int); +static int dasd_eckd_query_pprc_status(struct dasd_device *, + struct dasd_pprc_data_sc4 *); /* initial attempt at a probe function. this can be simplified once * the other detection code is gone */ @@ -3733,6 +3735,26 @@ static int count_exts(unsigned int from, unsigned int to, int trks_per_ext) return count; } +static int dasd_in_copy_relation(struct dasd_device *device) +{ + struct dasd_pprc_data_sc4 *temp; + int rc; + + if (!dasd_eckd_pprc_enabled(device)) + return 0; + + temp = kzalloc(sizeof(*temp), GFP_KERNEL); + if (!temp) + return -ENOMEM; + + rc = dasd_eckd_query_pprc_status(device, temp); + if (!rc) + rc = temp->dev_info[0].state; + + kfree(temp); + return rc; +} + /* * Release allocated space for a given range or an entire volume. */ @@ -3749,6 +3771,7 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block, int cur_to_trk, cur_from_trk; struct dasd_ccw_req *cqr; u32 beg_cyl, end_cyl; + int copy_relation; struct ccw1 *ccw; int trks_per_ext; size_t ras_size; @@ -3760,6 +3783,10 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block, if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk)) return ERR_PTR(-EINVAL); + copy_relation = dasd_in_copy_relation(device); + if (copy_relation < 0) + return ERR_PTR(copy_relation); + rq = req ? blk_mq_rq_to_pdu(req) : NULL; features = &private->features; @@ -3788,9 +3815,11 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block, /* * This bit guarantees initialisation of tracks within an extent that is * not fully specified, but is only supported with a certain feature - * subset. + * subset and for devices not in a copy relation. 
*/ - ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01); + if (features->feature[56] & 0x01 && !copy_relation) + ras_data->op_flags.guarantee_init = 1; + ras_data->lss = private->conf.ned->ID; ras_data->dev_addr = private->conf.ned->unit_addr; ras_data->nr_exts = nr_exts; diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 8eb089b99cde9..d5c43e9b51289 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1111,6 +1111,8 @@ static void io_subchannel_verify(struct subchannel *sch) cdev = sch_get_cdev(sch); if (cdev) dev_fsm_event(cdev, DEV_EVENT_VERIFY); + else + css_schedule_eval(sch->schid); } static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 5ea6249d81803..641f0dbb65a90 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -95,7 +95,7 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue, " lgr 1,%[token]\n" " .insn rsy,0xeb000000008a,%[qs],%[ccq],0(%[state])" : [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart) - : [state] "d" ((unsigned long)state), [token] "d" (token) + : [state] "a" ((unsigned long)state), [token] "d" (token) : "memory", "cc", "1"); *count = _ccq & 0xff; *start = _queuestart & 0xff; diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 5a05d1cdfec20..a8def50c149bd 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -1293,6 +1293,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, return PTR_ERR(kkey); rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey); DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc); + memzero_explicit(kkey, ktp.keylen); kfree(kkey); if (rc) break; @@ -1426,6 +1427,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, kkey, ktp.keylen, &ktp.protkey); DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc); kfree(apqns); + memzero_explicit(kkey, ktp.keylen); kfree(kkey); if (rc) break; @@ -1552,6 +1554,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, protkey, &protkeylen); DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc); kfree(apqns); + memzero_explicit(kkey, ktp.keylen); kfree(kkey); if (rc) { kfree(protkey); diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index 8acb9eba691b5..1399b5dc646c7 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -660,7 +660,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_disable; - ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (ret) goto err_resource; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b7c569a42aa47..0226c9279cef6 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1463,6 +1463,8 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) struct Scsi_Host *host = cmd->device->host; int rtn = 0; + atomic_inc(&cmd->device->iorequest_cnt); + /* check if the device is still usable */ if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { /* in SDEV_DEL we error all commands. 
DID_NO_CONNECT @@ -1483,6 +1485,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) */ SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, "queuecommand : device blocked\n")); + atomic_dec(&cmd->device->iorequest_cnt); return SCSI_MLQUEUE_DEVICE_BUSY; } @@ -1515,6 +1518,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) trace_scsi_dispatch_cmd_start(cmd); rtn = host->hostt->queuecommand(host, cmd); if (rtn) { + atomic_dec(&cmd->device->iorequest_cnt); trace_scsi_dispatch_cmd_error(cmd, rtn); if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && rtn != SCSI_MLQUEUE_TARGET_BUSY) @@ -1761,7 +1765,6 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, goto out_dec_host_busy; } - atomic_inc(&cmd->device->iorequest_cnt); return BLK_STS_OK; out_dec_host_busy: diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index d9ce379c4d2e8..e6bc622954cfa 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1780,7 +1780,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) length = scsi_bufflen(scmnd); payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb; - payload_sz = sizeof(cmd_request->mpb); + payload_sz = 0; if (scsi_sg_count(scmnd)) { unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset); @@ -1789,10 +1789,10 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) unsigned long hvpfn, hvpfns_to_add; int j, i = 0, sg_count; - if (hvpg_count > MAX_PAGE_BUFFER_COUNT) { + payload_sz = (hvpg_count * sizeof(u64) + + sizeof(struct vmbus_packet_mpb_array)); - payload_sz = (hvpg_count * sizeof(u64) + - sizeof(struct vmbus_packet_mpb_array)); + if (hvpg_count > MAX_PAGE_BUFFER_COUNT) { payload = kzalloc(payload_sz, GFP_ATOMIC); if (!payload) return SCSI_MLQUEUE_DEVICE_BUSY; diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index ac85d55622127..26e6633693196 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -301,49 +302,43 @@ static int cdns_spi_setup_transfer(struct spi_device *spi, } /** - * cdns_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible + * cdns_spi_process_fifo - Fills the TX FIFO, and drain the RX FIFO * @xspi: Pointer to the cdns_spi structure + * @ntx: Number of bytes to pack into the TX FIFO + * @nrx: Number of bytes to drain from the RX FIFO */ -static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi) +static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx) { - unsigned long trans_cnt = 0; + ntx = clamp(ntx, 0, xspi->tx_bytes); + nrx = clamp(nrx, 0, xspi->rx_bytes); - while ((trans_cnt < xspi->tx_fifo_depth) && - (xspi->tx_bytes > 0)) { + xspi->tx_bytes -= ntx; + xspi->rx_bytes -= nrx; + while (ntx || nrx) { /* When xspi in busy condition, bytes may send failed, * then spi control did't work thoroughly, add one byte delay */ - if (cdns_spi_read(xspi, CDNS_SPI_ISR) & - CDNS_SPI_IXR_TXFULL) + if (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_TXFULL) udelay(10); - if (xspi->txbuf) - cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); - else - cdns_spi_write(xspi, CDNS_SPI_TXD, 0); + if (ntx) { + if (xspi->txbuf) + cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); + else + cdns_spi_write(xspi, CDNS_SPI_TXD, 0); - xspi->tx_bytes--; - trans_cnt++; - } -} + ntx--; + } -/** - * cdns_spi_read_rx_fifo - Reads the RX FIFO with as many bytes as possible - * @xspi: Pointer to the cdns_spi structure - * @count: Read byte count - 
*/ -static void cdns_spi_read_rx_fifo(struct cdns_spi *xspi, unsigned long count) -{ - u8 data; - - /* Read out the data from the RX FIFO */ - while (count > 0) { - data = cdns_spi_read(xspi, CDNS_SPI_RXD); - if (xspi->rxbuf) - *xspi->rxbuf++ = data; - xspi->rx_bytes--; - count--; + if (nrx) { + u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD); + + if (xspi->rxbuf) + *xspi->rxbuf++ = data; + + nrx--; + } } } @@ -381,33 +376,22 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id) spi_finalize_current_transfer(ctlr); status = IRQ_HANDLED; } else if (intr_status & CDNS_SPI_IXR_TXOW) { - int trans_cnt = cdns_spi_read(xspi, CDNS_SPI_THLD); + int threshold = cdns_spi_read(xspi, CDNS_SPI_THLD); + int trans_cnt = xspi->rx_bytes - xspi->tx_bytes; + + if (threshold > 1) + trans_cnt -= threshold; + /* Set threshold to one if number of pending are * less than half fifo */ if (xspi->tx_bytes < xspi->tx_fifo_depth >> 1) cdns_spi_write(xspi, CDNS_SPI_THLD, 1); - while (trans_cnt) { - cdns_spi_read_rx_fifo(xspi, 1); - - if (xspi->tx_bytes) { - if (xspi->txbuf) - cdns_spi_write(xspi, CDNS_SPI_TXD, - *xspi->txbuf++); - else - cdns_spi_write(xspi, CDNS_SPI_TXD, 0); - xspi->tx_bytes--; - } - trans_cnt--; - } - if (!xspi->tx_bytes) { - /* Fixed delay due to controller limitation with - * RX_NEMPTY incorrect status - * Xilinx AR:65885 contains more details - */ - udelay(10); - cdns_spi_read_rx_fifo(xspi, xspi->rx_bytes); + if (xspi->tx_bytes) { + cdns_spi_process_fifo(xspi, trans_cnt, trans_cnt); + } else { + cdns_spi_process_fifo(xspi, 0, trans_cnt); cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_DEFAULT); spi_finalize_current_transfer(ctlr); @@ -450,16 +434,17 @@ static int cdns_transfer_one(struct spi_controller *ctlr, xspi->tx_bytes = transfer->len; xspi->rx_bytes = transfer->len; - if (!spi_controller_is_slave(ctlr)) + if (!spi_controller_is_slave(ctlr)) { cdns_spi_setup_transfer(spi, transfer); + } else { + /* Set TX empty threshold to half of FIFO depth + * only if TX bytes are more than half FIFO depth. + */ + if (xspi->tx_bytes > xspi->tx_fifo_depth) + cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1); + } - /* Set TX empty threshold to half of FIFO depth - * only if TX bytes are more than half FIFO depth. - */ - if (xspi->tx_bytes > (xspi->tx_fifo_depth >> 1)) - cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1); - - cdns_spi_fill_tx_fifo(xspi); + cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0); spi_transfer_delay_exec(transfer); cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT); diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index 5e6faa98aa852..5f2aee69c1c13 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -264,17 +264,17 @@ static void dw_spi_elba_set_cs(struct spi_device *spi, bool enable) struct regmap *syscon = dwsmmio->priv; u8 cs; - cs = spi->chip_select; + cs = spi_get_chipselect(spi, 0); if (cs < 2) - dw_spi_elba_override_cs(syscon, spi->chip_select, enable); + dw_spi_elba_override_cs(syscon, spi_get_chipselect(spi, 0), enable); /* * The DW SPI controller needs a native CS bit selected to start * the serial engine. 
 */
- spi->chip_select = 0;
+ spi_set_chipselect(spi, 0, 0);
 dw_spi_set_cs(spi, enable);
- spi->chip_select = cs;
+ spi_get_chipselect(spi, cs);
 }
 static int dw_spi_elba_init(struct platform_device *pdev,
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index ba7be505ec4ef..a98b781b103ab 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -294,6 +294,8 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
 mas->cs_flag = set_flag;
 /* set xfer_mode to FIFO to complete cs_done in isr */
 mas->cur_xfer_mode = GENI_SE_FIFO;
+ geni_se_select_mode(se, mas->cur_xfer_mode);
+
 reinit_completion(&mas->cs_done);
 if (set_flag)
 geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index d76e923fbc6a0..c0aee5dc5237f 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -54,6 +54,21 @@ static int ring_interrupt_index(const struct tb_ring *ring)
 return bit;
 }
+static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
+{
+ if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+ return;
+ iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
+}
+
+static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
+{
+ if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+ ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring);
+ else
+ iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring);
+}
+
 /*
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
@@ -61,8 +76,8 @@ static int ring_interrupt_index(const struct tb_ring *ring)
 */
 static void ring_interrupt_active(struct tb_ring *ring, bool active)
 {
- int reg = REG_RING_INTERRUPT_BASE +
- ring_interrupt_index(ring) / 32 * 4;
+ int index = ring_interrupt_index(ring) / 32 * 4;
+ int reg = REG_RING_INTERRUPT_BASE + index;
 int interrupt_bit = ring_interrupt_index(ring) & 31;
 int mask = 1 << interrupt_bit;
 u32 old, new;
@@ -123,7 +138,11 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
 "interrupt for %s %d is already %s\n",
 RING_TYPE(ring), ring->hop, active ?
"enabled" : "disabled"); - iowrite32(new, ring->nhi->iobase + reg); + + if (active) + iowrite32(new, ring->nhi->iobase + reg); + else + nhi_mask_interrupt(ring->nhi, mask, index); } /* @@ -136,11 +155,11 @@ static void nhi_disable_interrupts(struct tb_nhi *nhi) int i = 0; /* disable interrupts */ for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++) - iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i); + nhi_mask_interrupt(nhi, ~0, 4 * i); /* clear interrupt status bits */ for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++) - ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i); + nhi_clear_interrupt(nhi, 4 * i); } /* ring helper methods */ diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h index faef165a919cc..6ba2958154770 100644 --- a/drivers/thunderbolt/nhi_regs.h +++ b/drivers/thunderbolt/nhi_regs.h @@ -93,6 +93,8 @@ struct ring_desc { #define REG_RING_INTERRUPT_BASE 0x38200 #define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32) +#define REG_RING_INTERRUPT_MASK_CLEAR_BASE 0x38208 + #define REG_INT_THROTTLING_RATE 0x38c00 /* Interrupt Vector Allocation */ diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c index f801b1f5b46c0..af0e1c0701879 100644 --- a/drivers/tty/serial/8250/8250_bcm7271.c +++ b/drivers/tty/serial/8250/8250_bcm7271.c @@ -1012,7 +1012,7 @@ static int brcmuart_probe(struct platform_device *pdev) of_property_read_u32(np, "clock-frequency", &clk_rate); /* See if a Baud clock has been specified */ - baud_mux_clk = of_clk_get_by_name(np, "sw_baud"); + baud_mux_clk = devm_clk_get(dev, "sw_baud"); if (IS_ERR(baud_mux_clk)) { if (PTR_ERR(baud_mux_clk) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; @@ -1032,7 +1032,7 @@ static int brcmuart_probe(struct platform_device *pdev) if (clk_rate == 0) { dev_err(dev, "clock-frequency or clk not defined\n"); ret = -EINVAL; - goto release_dma; + goto err_clk_disable; } dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? 
"" : "not "); @@ -1119,6 +1119,8 @@ static int brcmuart_probe(struct platform_device *pdev) serial8250_unregister_port(priv->line); err: brcmuart_free_bufs(dev, priv); +err_clk_disable: + clk_disable_unprepare(baud_mux_clk); release_dma: if (priv->dma_enabled) brcmuart_arbitration(priv, 0); @@ -1133,6 +1135,7 @@ static int brcmuart_remove(struct platform_device *pdev) hrtimer_cancel(&priv->hrt); serial8250_unregister_port(priv->line); brcmuart_free_bufs(&pdev->dev, priv); + clk_disable_unprepare(priv->baud_mux_clk); if (priv->dma_enabled) brcmuart_arbitration(priv, 0); return 0; diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index 64770c62bbec5..b406cba10b0eb 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -40,9 +40,13 @@ #define PCI_DEVICE_ID_COMMTECH_4224PCIE 0x0020 #define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021 #define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022 + #define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358 #define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358 +#define PCI_SUBDEVICE_ID_USR_2980 0x0128 +#define PCI_SUBDEVICE_ID_USR_2981 0x0129 + #define PCI_DEVICE_ID_SEALEVEL_710xC 0x1001 #define PCI_DEVICE_ID_SEALEVEL_720xC 0x1002 #define PCI_DEVICE_ID_SEALEVEL_740xC 0x1004 @@ -829,6 +833,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = { (kernel_ulong_t)&bd \ } +#define USR_DEVICE(devid, sdevid, bd) { \ + PCI_DEVICE_SUB( \ + PCI_VENDOR_ID_USR, \ + PCI_DEVICE_ID_EXAR_##devid, \ + PCI_VENDOR_ID_EXAR, \ + PCI_SUBDEVICE_ID_USR_##sdevid), 0, 0, \ + (kernel_ulong_t)&bd \ + } + static const struct pci_device_id exar_pci_tbl[] = { EXAR_DEVICE(ACCESSIO, COM_2S, pbn_exar_XR17C15x), EXAR_DEVICE(ACCESSIO, COM_4S, pbn_exar_XR17C15x), @@ -853,6 +866,10 @@ static const struct pci_device_id exar_pci_tbl[] = { IBM_DEVICE(XR17C152, SATURN_SERIAL_ONE_PORT, pbn_exar_ibm_saturn), + /* USRobotics USR298x-OEM PCI Modems */ + USR_DEVICE(XR17C152, 2980, pbn_exar_XR17C15x), + USR_DEVICE(XR17C152, 2981, pbn_exar_XR17C15x), + /* Exar Corp. 
XR17C15[248] Dual/Quad/Octal UART */ EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x), EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x), diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index c55be6fda0cab..e80c4f6551a1c 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1920,6 +1920,8 @@ pci_moxa_setup(struct serial_private *priv, #define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530 #define PCI_VENDOR_ID_ADVANTECH 0x13fe #define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66 +#define PCI_DEVICE_ID_ADVANTECH_PCI1600 0x1600 +#define PCI_DEVICE_ID_ADVANTECH_PCI1600_1611 0x1611 #define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620 #define PCI_DEVICE_ID_ADVANTECH_PCI3618 0x3618 #define PCI_DEVICE_ID_ADVANTECH_PCIf618 0xf618 @@ -4085,6 +4087,9 @@ static SIMPLE_DEV_PM_OPS(pciserial_pm_ops, pciserial_suspend_one, pciserial_resume_one); static const struct pci_device_id serial_pci_tbl[] = { + { PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI1600, + PCI_DEVICE_ID_ADVANTECH_PCI1600_1611, PCI_ANY_ID, 0, 0, + pbn_b0_4_921600 }, /* Advantech use PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */ { PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620, PCI_DEVICE_ID_ADVANTECH_PCI3620, 0x0001, 0, 0, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index fe8d79c4ae95e..c153ba3a018a2 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -669,6 +669,7 @@ EXPORT_SYMBOL_GPL(serial8250_em485_supported); /** * serial8250_em485_config() - generic ->rs485_config() callback * @port: uart port + * @termios: termios structure * @rs485: rs485 settings * * Generic callback usable by 8250 uart drivers to activate rs485 settings diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c index 59e25f2b66322..4b2512eef577b 100644 --- a/drivers/tty/serial/arc_uart.c +++ b/drivers/tty/serial/arc_uart.c @@ -606,10 +606,11 @@ static int arc_serial_probe(struct platform_device *pdev) } uart->baud = val; - port->membase = of_iomap(np, 0); - if (!port->membase) + port->membase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(port->membase)) { /* No point of dev_err since UART itself is hosed here */ - return -ENXIO; + return PTR_ERR(port->membase); + } port->irq = irq_of_parse_and_map(np, 0); diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 08dc3e2a729c7..8582479f0211a 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1664,19 +1664,18 @@ static int qcom_geni_serial_probe(struct platform_device *pdev) uport->private_data = &port->private_data; platform_set_drvdata(pdev, port); - ret = uart_add_one_port(drv, uport); - if (ret) - return ret; - irq_set_status_flags(uport->irq, IRQ_NOAUTOEN); ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr, IRQF_TRIGGER_HIGH, port->name, uport); if (ret) { dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret); - uart_remove_one_port(drv, uport); return ret; } + ret = uart_add_one_port(drv, uport); + if (ret) + return ret; + /* * Set pm_runtime status as ACTIVE so that wakeup_irq gets * enabled/disabled from dev_pm_arm_wake_irq during system diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index 498ba9c0ee937..829c4be66f3b0 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -656,10 +656,17 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t 
*ppos) } } - /* The vcs_size might have changed while we slept to grab - * the user buffer, so recheck. + /* The vc might have been freed or vcs_size might have changed + * while we slept to grab the user buffer, so recheck. * Return data written up to now on failure. */ + vc = vcs_vc(inode, &viewed); + if (!vc) { + if (written) + break; + ret = -ENXIO; + goto unlock_out; + } size = vcs_size(vc, attr, false); if (size < 0) { if (written) diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 202ff71e1b582..51b3c6ae781df 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -150,7 +150,8 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba) u32 hba_maxq, rem, tot_queues; struct Scsi_Host *host = hba->host; - hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities); + /* maxq is 0 based value */ + hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1; tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues + rw_queues; @@ -265,7 +266,7 @@ static int ufshcd_mcq_get_tag(struct ufs_hba *hba, addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) - hba->ucdl_dma_addr; - return div_u64(addr, sizeof(struct utp_transfer_cmd_desc)); + return div_u64(addr, ufshcd_get_ucd_size(hba)); } static void ufshcd_mcq_process_cqe(struct ufs_hba *hba, diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 17d7bb875fee8..e7e79f515e141 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -2849,10 +2849,10 @@ static void ufshcd_map_queues(struct Scsi_Host *shost) static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) { struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr + - i * sizeof_utp_transfer_cmd_desc(hba); + i * ufshcd_get_ucd_size(hba); struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + - i * sizeof_utp_transfer_cmd_desc(hba); + i * ufshcd_get_ucd_size(hba); u16 response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu); u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); @@ -3761,7 +3761,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) size_t utmrdl_size, utrdl_size, ucdl_size; /* Allocate memory for UTP command descriptors */ - ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs; + ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs; hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, ucdl_size, &hba->ucdl_dma_addr, @@ -3861,7 +3861,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba) prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); - cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba); + cmd_desc_size = ufshcd_get_ucd_size(hba); cmd_desc_dma_addr = hba->ucdl_dma_addr; for (i = 0; i < hba->nutrs; i++) { @@ -8452,7 +8452,7 @@ static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs) { size_t ucdl_size, utrdl_size; - ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs; + ucdl_size = ufshcd_get_ucd_size(hba) * nutrs; dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr, hba->ucdl_dma_addr); @@ -9459,8 +9459,16 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) * that performance might be impacted. */ ret = ufshcd_urgent_bkops(hba); - if (ret) + if (ret) { + /* + * If return err in suspend flow, IO will hang. + * Trigger error handler and break suspend for + * error recovery. 
+ */ + ufshcd_force_error_recovery(hba); + ret = -EBUSY; goto enable_scaling; + } } else { /* make sure that auto bkops is disabled */ ufshcd_disable_auto_bkops(hba); diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 4bb6d304eb4b2..311007b1d9046 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -1928,6 +1928,8 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, if (request.req.wLength > USBTMC_BUFSIZE) return -EMSGSIZE; + if (request.req.wLength == 0) /* Length-0 requests are never IN */ + request.req.bRequestType &= ~USB_DIR_IN; is_in = request.req.bRequestType & USB_DIR_IN; diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 0beaab932e7db..7b2ce013cc5ba 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1137,7 +1137,7 @@ static int dwc3_core_init(struct dwc3 *dwc) dwc3_set_incr_burst_type(dwc); - dwc3_phy_power_on(dwc); + ret = dwc3_phy_power_on(dwc); if (ret) goto err_exit_phy; diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index d56457c02996b..1f043c31a0969 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1116,6 +1116,7 @@ struct dwc3_scratchpad_array { * @dis_metastability_quirk: set to disable metastability quirk. * @dis_split_quirk: set to disable split boundary. * @wakeup_configured: set if the device is configured for remote wakeup. + * @suspended: set to track suspend event due to U3/L2. * @imod_interval: set the interrupt moderation interval in 250ns * increments or 0 to disable. * @max_cfg_eps: current max number of IN eps used across all USB configs. @@ -1332,6 +1333,7 @@ struct dwc3 { unsigned dis_split_quirk:1; unsigned async_callbacks:1; unsigned wakeup_configured:1; + unsigned suspended:1; u16 imod_interval; diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c index e4a2560b9dc0c..ebf03468fac4a 100644 --- a/drivers/usb/dwc3/debugfs.c +++ b/drivers/usb/dwc3/debugfs.c @@ -332,6 +332,11 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused) unsigned int current_mode; unsigned long flags; u32 reg; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_GSTS); @@ -350,6 +355,8 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused) } spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -395,6 +402,11 @@ static int dwc3_mode_show(struct seq_file *s, void *unused) struct dwc3 *dwc = s->private; unsigned long flags; u32 reg; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_GCTL); @@ -414,6 +426,8 @@ static int dwc3_mode_show(struct seq_file *s, void *unused) seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg)); } + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -463,6 +477,11 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused) struct dwc3 *dwc = s->private; unsigned long flags; u32 reg; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_DCTL); @@ -493,6 +512,8 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused) seq_printf(s, "UNKNOWN %d\n", reg); } + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -509,6 +530,7 @@ static ssize_t dwc3_testmode_write(struct file *file, unsigned long flags; u32 testmode = 0; char buf[32]; + int ret; 
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; @@ -526,10 +548,16 @@ static ssize_t dwc3_testmode_write(struct file *file, else testmode = 0; + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; + spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_set_test_mode(dwc, testmode); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return count; } @@ -548,12 +576,18 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused) enum dwc3_link_state state; u32 reg; u8 speed; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_GSTS); if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) { seq_puts(s, "Not available\n"); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); return 0; } @@ -566,6 +600,8 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused) dwc3_gadget_hs_link_string(state)); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -584,6 +620,7 @@ static ssize_t dwc3_link_state_write(struct file *file, char buf[32]; u32 reg; u8 speed; + int ret; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; @@ -603,10 +640,15 @@ static ssize_t dwc3_link_state_write(struct file *file, else return -EINVAL; + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; + spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_GSTS); if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) { spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); return -EINVAL; } @@ -616,12 +658,15 @@ static ssize_t dwc3_link_state_write(struct file *file, if (speed < DWC3_DSTS_SUPERSPEED && state != DWC3_LINK_STATE_RECOV) { spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); return -EINVAL; } dwc3_gadget_set_link_state(dwc, state); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return count; } @@ -645,6 +690,11 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused) unsigned long flags; u32 mdwidth; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_TXFIFO); @@ -657,6 +707,8 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused) seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -667,6 +719,11 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused) unsigned long flags; u32 mdwidth; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_RXFIFO); @@ -679,6 +736,8 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused) seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -688,12 +747,19 @@ static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_TXREQQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + 
return 0; } @@ -703,12 +769,19 @@ static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_RXREQQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -718,12 +791,19 @@ static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_RXINFOQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -733,12 +813,19 @@ static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -748,12 +835,19 @@ static int dwc3_event_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_EVENTQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -798,6 +892,11 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; int i; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); if (dep->number <= 1) { @@ -827,6 +926,8 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused) out: spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -839,6 +940,11 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused) u32 lower_32_bits; u32 upper_32_bits; u32 reg; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = DWC3_GDBGLSPMUX_EPSELECT(dep->number); @@ -851,6 +957,8 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused) seq_printf(s, "0x%016llx\n", ep_info); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -910,6 +1018,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc) dwc->regset->regs = dwc3_regs; dwc->regset->nregs = ARRAY_SIZE(dwc3_regs); dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START; + dwc->regset->dev = dwc->dev; root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root); dwc->debug_root = root; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index c0ca4d12f95d9..d831f5acf7b56 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2440,6 +2440,7 @@ static int dwc3_gadget_func_wakeup(struct usb_gadget *g, int intf_id) return -EINVAL; } dwc3_resume_gadget(dwc); + dwc->suspended = false; dwc->link_state = DWC3_LINK_STATE_U0; } @@ -2699,6 +2700,21 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc) 
return ret; } +static int dwc3_gadget_soft_connect(struct dwc3 *dwc) +{ + /* + * In the Synopsys DWC_usb31 1.90a programming guide section + * 4.1.9, it specifies that for a reconnect after a + * device-initiated disconnect requires a core soft reset + * (DCTL.CSftRst) before enabling the run/stop bit. + */ + dwc3_core_soft_reset(dwc); + + dwc3_event_buffers_setup(dwc); + __dwc3_gadget_start(dwc); + return dwc3_gadget_run_stop(dwc, true); +} + static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) { struct dwc3 *dwc = gadget_to_dwc(g); @@ -2737,21 +2753,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) synchronize_irq(dwc->irq_gadget); - if (!is_on) { + if (!is_on) ret = dwc3_gadget_soft_disconnect(dwc); - } else { - /* - * In the Synopsys DWC_usb31 1.90a programming guide section - * 4.1.9, it specifies that for a reconnect after a - * device-initiated disconnect requires a core soft reset - * (DCTL.CSftRst) before enabling the run/stop bit. - */ - dwc3_core_soft_reset(dwc); - - dwc3_event_buffers_setup(dwc); - __dwc3_gadget_start(dwc); - ret = dwc3_gadget_run_stop(dwc, true); - } + else + ret = dwc3_gadget_soft_connect(dwc); pm_runtime_put(dwc->dev); @@ -3938,6 +3943,8 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) { int reg; + dwc->suspended = false; + dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET); reg = dwc3_readl(dwc->regs, DWC3_DCTL); @@ -3962,6 +3969,8 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) { u32 reg; + dwc->suspended = false; + /* * Ideally, dwc3_reset_gadget() would trigger the function * drivers to stop any active transfers through ep disable. @@ -4180,6 +4189,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc, unsigned int evtinfo) { + dwc->suspended = false; + /* * TODO take core out of low power mode when that's * implemented. @@ -4277,6 +4288,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, if (dwc->gadget->wakeup_armed) { dwc3_gadget_enable_linksts_evts(dwc, false); dwc3_resume_gadget(dwc); + dwc->suspended = false; } break; case DWC3_LINK_STATE_U1: @@ -4303,8 +4315,10 @@ static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc, { enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; - if (dwc->link_state != next && next == DWC3_LINK_STATE_U3) + if (!dwc->suspended && next == DWC3_LINK_STATE_U3) { + dwc->suspended = true; dwc3_suspend_gadget(dwc); + } dwc->link_state = next; } @@ -4655,42 +4669,39 @@ void dwc3_gadget_exit(struct dwc3 *dwc) int dwc3_gadget_suspend(struct dwc3 *dwc) { unsigned long flags; + int ret; if (!dwc->gadget_driver) return 0; - dwc3_gadget_run_stop(dwc, false); + ret = dwc3_gadget_soft_disconnect(dwc); + if (ret) + goto err; spin_lock_irqsave(&dwc->lock, flags); dwc3_disconnect_gadget(dwc); - __dwc3_gadget_stop(dwc); spin_unlock_irqrestore(&dwc->lock, flags); return 0; + +err: + /* + * Attempt to reset the controller's state. Likely no + * communication can be established until the host + * performs a port reset. 
+ */ + if (dwc->softconnect) + dwc3_gadget_soft_connect(dwc); + + return ret; } int dwc3_gadget_resume(struct dwc3 *dwc) { - int ret; - if (!dwc->gadget_driver || !dwc->softconnect) return 0; - ret = __dwc3_gadget_start(dwc); - if (ret < 0) - goto err0; - - ret = dwc3_gadget_run_stop(dwc, true); - if (ret < 0) - goto err1; - - return 0; - -err1: - __dwc3_gadget_stop(dwc); - -err0: - return ret; + return dwc3_gadget_soft_connect(dwc); } void dwc3_gadget_process_pending_events(struct dwc3 *dwc) diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 6956ad8ba8dd8..a366abb456236 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "u_ether.h" @@ -965,6 +966,8 @@ int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len) dev = netdev_priv(net); snprintf(host_addr, len, "%pm", dev->host_mac); + string_upper(host_addr, host_addr); + return strlen(host_addr); } EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc); diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 4641153e9706b..52e6d2e84e352 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -37,10 +37,6 @@ static const struct bus_type gadget_bus_type; * @vbus: for udcs who care about vbus status, this value is real vbus status; * for udcs who do not care about vbus status, this value is always true * @started: the UDC's started state. True if the UDC had started. - * @connect_lock: protects udc->vbus, udc->started, gadget->connect, gadget->deactivate related - * functions. usb_gadget_connect_locked, usb_gadget_disconnect_locked, - * usb_udc_connect_control_locked, usb_gadget_udc_start_locked, usb_gadget_udc_stop_locked are - * called with this lock held. * * This represents the internal data structure which is used by the UDC-class * to hold information about udc driver and gadget together. @@ -52,7 +48,6 @@ struct usb_udc { struct list_head list; bool vbus; bool started; - struct mutex connect_lock; }; static struct class *udc_class; @@ -692,9 +687,17 @@ int usb_gadget_vbus_disconnect(struct usb_gadget *gadget) } EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect); -/* Internal version of usb_gadget_connect needs to be called with connect_lock held. */ -static int usb_gadget_connect_locked(struct usb_gadget *gadget) - __must_hold(&gadget->udc->connect_lock) +/** + * usb_gadget_connect - software-controlled connect to USB host + * @gadget:the peripheral being connected + * + * Enables the D+ (or potentially D-) pullup. The host will start + * enumerating this gadget when the pullup is active and a VBUS session + * is active (the link is powered). + * + * Returns zero on success, else negative errno. + */ +int usb_gadget_connect(struct usb_gadget *gadget) { int ret = 0; @@ -703,15 +706,10 @@ static int usb_gadget_connect_locked(struct usb_gadget *gadget) goto out; } - if (gadget->connected) - goto out; - - if (gadget->deactivated || !gadget->udc->started) { + if (gadget->deactivated) { /* * If gadget is deactivated we only save new state. * Gadget will be connected automatically after activation. - * - * udc first needs to be started before gadget can be pulled up. 
*/ gadget->connected = true; goto out; @@ -726,32 +724,22 @@ static int usb_gadget_connect_locked(struct usb_gadget *gadget) return ret; } +EXPORT_SYMBOL_GPL(usb_gadget_connect); /** - * usb_gadget_connect - software-controlled connect to USB host - * @gadget:the peripheral being connected + * usb_gadget_disconnect - software-controlled disconnect from USB host + * @gadget:the peripheral being disconnected * - * Enables the D+ (or potentially D-) pullup. The host will start - * enumerating this gadget when the pullup is active and a VBUS session - * is active (the link is powered). + * Disables the D+ (or potentially D-) pullup, which the host may see + * as a disconnect (when a VBUS session is active). Not all systems + * support software pullup controls. + * + * Following a successful disconnect, invoke the ->disconnect() callback + * for the current gadget driver so that UDC drivers don't need to. * * Returns zero on success, else negative errno. */ -int usb_gadget_connect(struct usb_gadget *gadget) -{ - int ret; - - mutex_lock(&gadget->udc->connect_lock); - ret = usb_gadget_connect_locked(gadget); - mutex_unlock(&gadget->udc->connect_lock); - - return ret; -} -EXPORT_SYMBOL_GPL(usb_gadget_connect); - -/* Internal version of usb_gadget_disconnect needs to be called with connect_lock held. */ -static int usb_gadget_disconnect_locked(struct usb_gadget *gadget) - __must_hold(&gadget->udc->connect_lock) +int usb_gadget_disconnect(struct usb_gadget *gadget) { int ret = 0; @@ -763,12 +751,10 @@ static int usb_gadget_disconnect_locked(struct usb_gadget *gadget) if (!gadget->connected) goto out; - if (gadget->deactivated || !gadget->udc->started) { + if (gadget->deactivated) { /* * If gadget is deactivated we only save new state. * Gadget will stay disconnected after activation. - * - * udc should have been started before gadget being pulled down. */ gadget->connected = false; goto out; @@ -788,30 +774,6 @@ static int usb_gadget_disconnect_locked(struct usb_gadget *gadget) return ret; } - -/** - * usb_gadget_disconnect - software-controlled disconnect from USB host - * @gadget:the peripheral being disconnected - * - * Disables the D+ (or potentially D-) pullup, which the host may see - * as a disconnect (when a VBUS session is active). Not all systems - * support software pullup controls. - * - * Following a successful disconnect, invoke the ->disconnect() callback - * for the current gadget driver so that UDC drivers don't need to. - * - * Returns zero on success, else negative errno. 
- */ -int usb_gadget_disconnect(struct usb_gadget *gadget) -{ - int ret; - - mutex_lock(&gadget->udc->connect_lock); - ret = usb_gadget_disconnect_locked(gadget); - mutex_unlock(&gadget->udc->connect_lock); - - return ret; -} EXPORT_SYMBOL_GPL(usb_gadget_disconnect); /** @@ -832,11 +794,10 @@ int usb_gadget_deactivate(struct usb_gadget *gadget) if (gadget->deactivated) goto out; - mutex_lock(&gadget->udc->connect_lock); if (gadget->connected) { - ret = usb_gadget_disconnect_locked(gadget); + ret = usb_gadget_disconnect(gadget); if (ret) - goto unlock; + goto out; /* * If gadget was being connected before deactivation, we want @@ -846,8 +807,6 @@ int usb_gadget_deactivate(struct usb_gadget *gadget) } gadget->deactivated = true; -unlock: - mutex_unlock(&gadget->udc->connect_lock); out: trace_usb_gadget_deactivate(gadget, ret); @@ -871,7 +830,6 @@ int usb_gadget_activate(struct usb_gadget *gadget) if (!gadget->deactivated) goto out; - mutex_lock(&gadget->udc->connect_lock); gadget->deactivated = false; /* @@ -879,8 +837,7 @@ int usb_gadget_activate(struct usb_gadget *gadget) * while it was being deactivated, we call usb_gadget_connect(). */ if (gadget->connected) - ret = usb_gadget_connect_locked(gadget); - mutex_unlock(&gadget->udc->connect_lock); + ret = usb_gadget_connect(gadget); out: trace_usb_gadget_activate(gadget, ret); @@ -1121,13 +1078,12 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state); /* ------------------------------------------------------------------------- */ -/* Acquire connect_lock before calling this function. */ -static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock) +static void usb_udc_connect_control(struct usb_udc *udc) { - if (udc->vbus && udc->started) - usb_gadget_connect_locked(udc->gadget); + if (udc->vbus) + usb_gadget_connect(udc->gadget); else - usb_gadget_disconnect_locked(udc->gadget); + usb_gadget_disconnect(udc->gadget); } /** @@ -1143,12 +1099,10 @@ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status) { struct usb_udc *udc = gadget->udc; - mutex_lock(&udc->connect_lock); if (udc) { udc->vbus = status; - usb_udc_connect_control_locked(udc); + usb_udc_connect_control(udc); } - mutex_unlock(&udc->connect_lock); } EXPORT_SYMBOL_GPL(usb_udc_vbus_handler); @@ -1170,7 +1124,7 @@ void usb_gadget_udc_reset(struct usb_gadget *gadget, EXPORT_SYMBOL_GPL(usb_gadget_udc_reset); /** - * usb_gadget_udc_start_locked - tells usb device controller to start up + * usb_gadget_udc_start - tells usb device controller to start up * @udc: The UDC to be started * * This call is issued by the UDC Class driver when it's about @@ -1181,11 +1135,8 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset); * necessary to have it powered on. * * Returns zero on success, else negative errno. - * - * Caller should acquire connect_lock before invoking this function. 
*/ -static inline int usb_gadget_udc_start_locked(struct usb_udc *udc) - __must_hold(&udc->connect_lock) +static inline int usb_gadget_udc_start(struct usb_udc *udc) { int ret; @@ -1202,7 +1153,7 @@ static inline int usb_gadget_udc_start_locked(struct usb_udc *udc) } /** - * usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore + * usb_gadget_udc_stop - tells usb device controller we don't need it anymore * @udc: The UDC to be stopped * * This call is issued by the UDC Class driver after calling @@ -1211,11 +1162,8 @@ static inline int usb_gadget_udc_start_locked(struct usb_udc *udc) * The details are implementation specific, but it can go as * far as powering off UDC completely and disable its data * line pullups. - * - * Caller should acquire connect lock before invoking this function. */ -static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc) - __must_hold(&udc->connect_lock) +static inline void usb_gadget_udc_stop(struct usb_udc *udc) { if (!udc->started) { dev_err(&udc->dev, "UDC had already stopped\n"); @@ -1374,7 +1322,6 @@ int usb_add_gadget(struct usb_gadget *gadget) udc->gadget = gadget; gadget->udc = udc; - mutex_init(&udc->connect_lock); udc->started = false; @@ -1576,15 +1523,11 @@ static int gadget_bind_driver(struct device *dev) if (ret) goto err_bind; - mutex_lock(&udc->connect_lock); - ret = usb_gadget_udc_start_locked(udc); - if (ret) { - mutex_unlock(&udc->connect_lock); + ret = usb_gadget_udc_start(udc); + if (ret) goto err_start; - } usb_gadget_enable_async_callbacks(udc); - usb_udc_connect_control_locked(udc); - mutex_unlock(&udc->connect_lock); + usb_udc_connect_control(udc); kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); return 0; @@ -1615,14 +1558,12 @@ static void gadget_unbind_driver(struct device *dev) kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); - mutex_lock(&udc->connect_lock); - usb_gadget_disconnect_locked(gadget); + usb_gadget_disconnect(gadget); usb_gadget_disable_async_callbacks(udc); if (gadget->irq) synchronize_irq(gadget->irq); udc->driver->unbind(gadget); - usb_gadget_udc_stop_locked(udc); - mutex_unlock(&udc->connect_lock); + usb_gadget_udc_stop(udc); mutex_lock(&udc_lock); driver->is_bound = false; @@ -1708,15 +1649,11 @@ static ssize_t soft_connect_store(struct device *dev, } if (sysfs_streq(buf, "connect")) { - mutex_lock(&udc->connect_lock); - usb_gadget_udc_start_locked(udc); - usb_gadget_connect_locked(udc->gadget); - mutex_unlock(&udc->connect_lock); + usb_gadget_udc_start(udc); + usb_gadget_connect(udc->gadget); } else if (sysfs_streq(buf, "disconnect")) { - mutex_lock(&udc->connect_lock); - usb_gadget_disconnect_locked(udc->gadget); - usb_gadget_udc_stop_locked(udc); - mutex_unlock(&udc->connect_lock); + usb_gadget_disconnect(udc->gadget); + usb_gadget_udc_stop(udc); } else { dev_err(dev, "unsupported command '%s'\n", buf); ret = -EINVAL; diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c index 3592f757fe05d..7bd2fddde770a 100644 --- a/drivers/usb/host/uhci-pci.c +++ b/drivers/usb/host/uhci-pci.c @@ -119,11 +119,13 @@ static int uhci_pci_init(struct usb_hcd *hcd) uhci->rh_numports = uhci_count_ports(hcd); - /* Intel controllers report the OverCurrent bit active on. - * VIA controllers report it active off, so we'll adjust the - * bit value. (It's not standardized in the UHCI spec.) + /* + * Intel controllers report the OverCurrent bit active on. VIA + * and ZHAOXIN controllers report it active off, so we'll adjust + * the bit value. (It's not standardized in the UHCI spec.) 
*/ - if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA) + if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA || + to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_ZHAOXIN) uhci->oc_low = 1; /* HP's server management chip requires a longer port reset delay. */ diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index ddb79f23fb3b7..79b3691f373f3 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "xhci.h" #include "xhci-trace.h" @@ -387,7 +388,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == PCI_DEVICE_ID_AMD_RENOIR_XHCI) - xhci->quirks |= XHCI_BROKEN_D3COLD; + xhci->quirks |= XHCI_BROKEN_D3COLD_S2I; if (pdev->vendor == PCI_VENDOR_ID_INTEL) { xhci->quirks |= XHCI_LPM_SUPPORT; @@ -801,9 +802,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) * Systems with the TI redriver that loses port status change events * need to have the registers polled during D3, so avoid D3cold. */ - if (xhci->quirks & (XHCI_COMP_MODE_QUIRK | XHCI_BROKEN_D3COLD)) + if (xhci->quirks & XHCI_COMP_MODE_QUIRK) pci_d3cold_disable(pdev); +#ifdef CONFIG_SUSPEND + /* d3cold is broken, but only when s2idle is used */ + if (pm_suspend_target_state == PM_SUSPEND_TO_IDLE && + xhci->quirks & (XHCI_BROKEN_D3COLD_S2I)) + pci_d3cold_disable(pdev); +#endif + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) xhci_pme_quirk(hcd); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 1ad12d5a48572..2bc82b3a2f984 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -276,6 +276,26 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, trace_xhci_inc_enq(ring); } +static int xhci_num_trbs_to(struct xhci_segment *start_seg, union xhci_trb *start, + struct xhci_segment *end_seg, union xhci_trb *end, + unsigned int num_segs) +{ + union xhci_trb *last_on_seg; + int num = 0; + int i = 0; + + do { + if (start_seg == end_seg && end >= start) + return num + (end - start); + last_on_seg = &start_seg->trbs[TRBS_PER_SEGMENT - 1]; + num += last_on_seg - start; + start_seg = start_seg->next; + start = start_seg->trbs; + } while (i++ <= num_segs); + + return -EINVAL; +} + /* * Check to see if there's room to enqueue num_trbs on the ring and make sure * enqueue pointer will not advance into dequeue segment. See rules above. 
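The xhci-ring.c hunk above introduces xhci_num_trbs_to(), which walks segment by segment from the ring's dequeue position to the TD's last TRB instead of trusting the fixed "td->num_trbs - 1" accounting removed further down. Below is a minimal, self-contained sketch of that segment walk; the names (seg_demo, count_to, SLOTS) are invented for illustration only and merely mirror the arithmetic of the new helper, they are not part of the kernel patch.

#include <stdio.h>

#define SLOTS 4	/* stand-in for TRBS_PER_SEGMENT */

struct seg_demo {
	struct seg_demo *next;
};

/* Walk from (seg, idx) toward (end_seg, end_idx), summing slots per segment. */
static int count_to(struct seg_demo *seg, int idx,
		    struct seg_demo *end_seg, int end_idx, int num_segs)
{
	int num = 0, i = 0;

	do {
		if (seg == end_seg && end_idx >= idx)
			return num + (end_idx - idx);
		num += (SLOTS - 1) - idx;	/* distance to the last slot of this segment */
		seg = seg->next;
		idx = 0;
	} while (i++ <= num_segs);

	return -1;	/* end point not reached within num_segs segments */
}

int main(void)
{
	struct seg_demo b = { .next = NULL }, a = { .next = &b };

	b.next = &a;	/* two segments linked into a ring */
	/* one slot left on 'a' (2 -> 3) plus one consumed on 'b' (0 -> 1) */
	printf("freed = %d\n", count_to(&a, 2, &b, 1, 2));
	return 0;
}

As in the kernel helper, the walk returns a negative value when the end point cannot be reached within the expected number of segments; the finish_td() caller in the next hunk only logs that case and skips the num_trbs_free update rather than treating it as fatal.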
@@ -2140,6 +2160,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, u32 trb_comp_code) { struct xhci_ep_ctx *ep_ctx; + int trbs_freed; ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); @@ -2209,9 +2230,15 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, } /* Update ring dequeue pointer */ + trbs_freed = xhci_num_trbs_to(ep_ring->deq_seg, ep_ring->dequeue, + td->last_trb_seg, td->last_trb, + ep_ring->num_segs); + if (trbs_freed < 0) + xhci_dbg(xhci, "Failed to count freed trbs at TD finish\n"); + else + ep_ring->num_trbs_free += trbs_freed; ep_ring->dequeue = td->last_trb; ep_ring->deq_seg = td->last_trb_seg; - ep_ring->num_trbs_free += td->num_trbs - 1; inc_deq(xhci, ep_ring); return xhci_td_cleanup(xhci, td, ep_ring, td->status); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 08d721921b7bb..6b690ec91ff3a 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1901,7 +1901,7 @@ struct xhci_hcd { #define XHCI_DISABLE_SPARSE BIT_ULL(38) #define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39) #define XHCI_NO_SOFT_RETRY BIT_ULL(40) -#define XHCI_BROKEN_D3COLD BIT_ULL(41) +#define XHCI_BROKEN_D3COLD_S2I BIT_ULL(41) #define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42) #define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43) #define XHCI_RESET_TO_DEFAULT BIT_ULL(44) diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 8931df5a85fd9..c54e9805da536 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -406,22 +406,25 @@ static DEF_SCSI_QCMD(queuecommand) ***********************************************************************/ /* Command timeout and abort */ -static int command_abort(struct scsi_cmnd *srb) +static int command_abort_matching(struct us_data *us, struct scsi_cmnd *srb_match) { - struct us_data *us = host_to_us(srb->device->host); - - usb_stor_dbg(us, "%s called\n", __func__); - /* * us->srb together with the TIMED_OUT, RESETTING, and ABORTING * bits are protected by the host lock. */ scsi_lock(us_to_host(us)); - /* Is this command still active? */ - if (us->srb != srb) { + /* is there any active pending command to abort ? */ + if (!us->srb) { scsi_unlock(us_to_host(us)); usb_stor_dbg(us, "-- nothing to abort\n"); + return SUCCESS; + } + + /* Does the command match the passed srb if any ? 
*/ + if (srb_match && us->srb != srb_match) { + scsi_unlock(us_to_host(us)); + usb_stor_dbg(us, "-- pending command mismatch\n"); return FAILED; } @@ -444,6 +447,14 @@ static int command_abort(struct scsi_cmnd *srb) return SUCCESS; } +static int command_abort(struct scsi_cmnd *srb) +{ + struct us_data *us = host_to_us(srb->device->host); + + usb_stor_dbg(us, "%s called\n", __func__); + return command_abort_matching(us, srb); +} + /* * This invokes the transport reset mechanism to reset the state of the * device @@ -455,6 +466,9 @@ static int device_reset(struct scsi_cmnd *srb) usb_stor_dbg(us, "%s called\n", __func__); + /* abort any pending command before reset */ + command_abort_matching(us, NULL); + /* lock the device pointers and do the reset */ mutex_lock(&(us->dev_mutex)); result = us->transport_reset(us); diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index 8f3e884222ade..66de880b28d01 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -516,6 +516,10 @@ static ssize_t pin_assignment_show(struct device *dev, mutex_unlock(&dp->lock); + /* get_current_pin_assignments can return 0 when no matching pin assignments are found */ + if (len == 0) + len++; + buf[len - 1] = '\n'; return len; } diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index 8b075ca82ef6d..438cc40660a1b 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -886,6 +886,9 @@ static void tps6598x_remove(struct i2c_client *client) { struct tps6598x *tps = i2c_get_clientdata(client); + if (!client->irq) + cancel_delayed_work_sync(&tps->wq_poll); + tps6598x_disconnect(tps, 0); typec_unregister_port(tps->port); usb_role_switch_put(tps->role_sw); diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c index 3ccf46f8ffd01..07d6e8dc686bb 100644 --- a/drivers/video/fbdev/68328fb.c +++ b/drivers/video/fbdev/68328fb.c @@ -124,7 +124,7 @@ static u_long get_line_length(int xres_virtual, int bpp) * First part, xxxfb_check_var, must not write anything * to hardware, it should only verify and adjust var. * This means it doesn't alter par but it does use hardware - * data from it to check this var. + * data from it to check this var. */ static int mc68x328fb_check_var(struct fb_var_screeninfo *var, @@ -182,7 +182,7 @@ static int mc68x328fb_check_var(struct fb_var_screeninfo *var, /* * Now that we checked it we alter var. The reason being is that the video - * mode passed in might not work but slight changes to it might make it + * mode passed in might not work but slight changes to it might make it * work. This way we let the user know what is acceptable. */ switch (var->bits_per_pixel) { @@ -257,8 +257,8 @@ static int mc68x328fb_check_var(struct fb_var_screeninfo *var, } /* This routine actually sets the video mode. It's in here where we - * the hardware state info->par and fix which can be affected by the - * change in par. For this driver it doesn't do much. + * the hardware state info->par and fix which can be affected by the + * change in par. For this driver it doesn't do much. 
*/ static int mc68x328fb_set_par(struct fb_info *info) { @@ -295,7 +295,7 @@ static int mc68x328fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, * {hardwarespecific} contains width of RAMDAC * cmap[X] is programmed to (X << red.offset) | (X << green.offset) | (X << blue.offset) * RAMDAC[X] is programmed to (red, green, blue) - * + * * Pseudocolor: * uses offset = 0 && length = RAMDAC register width. * var->{color}.offset is 0 @@ -384,7 +384,7 @@ static int mc68x328fb_pan_display(struct fb_var_screeninfo *var, } /* - * Most drivers don't need their own mmap function + * Most drivers don't need their own mmap function */ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 96e91570cdd32..0fdf5f46802c7 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -124,7 +124,7 @@ config FB_PROVIDE_GET_FB_UNMAPPED_AREA depends on FB help Allow generic frame-buffer to provide get_fb_unmapped_area - function. + function to provide shareable character device support on nommu. menuconfig FB_FOREIGN_ENDIAN bool "Framebuffer foreign endianness support" diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c index 45e64016db328..024d0ee4f04f9 100644 --- a/drivers/video/fbdev/arcfb.c +++ b/drivers/video/fbdev/arcfb.c @@ -523,7 +523,7 @@ static int arcfb_probe(struct platform_device *dev) info = framebuffer_alloc(sizeof(struct arcfb_par), &dev->dev); if (!info) - goto err; + goto err_fb_alloc; info->screen_base = (char __iomem *)videomemory; info->fbops = &arcfb_ops; @@ -535,7 +535,7 @@ static int arcfb_probe(struct platform_device *dev) if (!dio_addr || !cio_addr || !c2io_addr) { printk(KERN_WARNING "no IO addresses supplied\n"); - goto err1; + goto err_addr; } par->dio_addr = dio_addr; par->cio_addr = cio_addr; @@ -551,12 +551,12 @@ static int arcfb_probe(struct platform_device *dev) printk(KERN_INFO "arcfb: Failed req IRQ %d\n", par->irq); retval = -EBUSY; - goto err1; + goto err_addr; } } retval = register_framebuffer(info); if (retval < 0) - goto err1; + goto err_register_fb; platform_set_drvdata(dev, info); fb_info(info, "Arc frame buffer device, using %dK of video memory\n", videomemorysize >> 10); @@ -580,9 +580,12 @@ static int arcfb_probe(struct platform_device *dev) } return 0; -err1: + +err_register_fb: + free_irq(par->irq, info); +err_addr: framebuffer_release(info); -err: +err_fb_alloc: vfree(videomemory); return retval; } diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 8187a7c4f9106..987c5f5f02414 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -317,7 +317,7 @@ static inline void atmel_lcdfb_free_video_memory(struct atmel_lcdfb_info *sinfo) /** * atmel_lcdfb_alloc_video_memory - Allocate framebuffer memory * @sinfo: the frame buffer to allocate memory for - * + * * This function is called only from the atmel_lcdfb_probe() * so no locking by fb_info->mm_lock around smem_len setting is needed. 
*/ diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index b02e4e645035c..cba2b113b28b0 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -3498,11 +3498,6 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info, if (ret) goto atyfb_setup_generic_fail; #endif - if (!(aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_EXT_DISP_EN)) - par->clk_wr_offset = (inb(R_GENMO) & 0x0CU) >> 2; - else - par->clk_wr_offset = aty_ld_8(CLOCK_CNTL, par) & 0x03U; - /* according to ATI, we should use clock 3 for acelerated mode */ par->clk_wr_offset = 3; diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c index a028ede39c122..832a82f45c809 100644 --- a/drivers/video/fbdev/cg14.c +++ b/drivers/video/fbdev/cg14.c @@ -512,7 +512,7 @@ static int cg14_probe(struct platform_device *op) is_8mb = (resource_size(&op->resource[1]) == (8 * 1024 * 1024)); BUILD_BUG_ON(sizeof(par->mmap_map) != sizeof(__cg14_mmap_map)); - + memcpy(&par->mmap_map, &__cg14_mmap_map, sizeof(par->mmap_map)); for (i = 0; i < CG14_MMAP_ENTRIES; i++) { diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c index 77dbf94aae5f4..82eeb139c4ebb 100644 --- a/drivers/video/fbdev/controlfb.c +++ b/drivers/video/fbdev/controlfb.c @@ -113,14 +113,14 @@ struct fb_info_control { struct fb_info info; struct fb_par_control par; u32 pseudo_palette[16]; - + struct cmap_regs __iomem *cmap_regs; unsigned long cmap_regs_phys; - + struct control_regs __iomem *control_regs; unsigned long control_regs_phys; unsigned long control_regs_size; - + __u8 __iomem *frame_buffer; unsigned long frame_buffer_phys; unsigned long fb_orig_base; @@ -196,7 +196,7 @@ static void set_control_clock(unsigned char *params) while (!req.complete) cuda_poll(); } -#endif +#endif } /* @@ -233,19 +233,19 @@ static void control_set_hardware(struct fb_info_control *p, struct fb_par_contro if (p->par.xoffset != par->xoffset || p->par.yoffset != par->yoffset) set_screen_start(par->xoffset, par->yoffset, p); - + return; } - + p->par = *par; cmode = p->par.cmode; r = &par->regvals; - + /* Turn off display */ out_le32(CNTRL_REG(p,ctrl), 0x400 | par->ctrl); - + set_control_clock(r->clock_params); - + RADACAL_WRITE(0x20, r->radacal_ctrl); RADACAL_WRITE(0x21, p->control_use_bank2 ? 
0 : 1); RADACAL_WRITE(0x10, 0); @@ -254,7 +254,7 @@ static void control_set_hardware(struct fb_info_control *p, struct fb_par_contro rp = &p->control_regs->vswin; for (i = 0; i < 16; ++i, ++rp) out_le32(&rp->r, r->regs[i]); - + out_le32(CNTRL_REG(p,pitch), par->pitch); out_le32(CNTRL_REG(p,mode), r->mode); out_le32(CNTRL_REG(p,vram_attr), p->vram_attr); @@ -366,7 +366,7 @@ static int read_control_sense(struct fb_info_control *p) sense |= (in_le32(CNTRL_REG(p,mon_sense)) & 0x180) >> 7; out_le32(CNTRL_REG(p,mon_sense), 077); /* turn off drivers */ - + return sense; } @@ -558,9 +558,9 @@ static int control_var_to_par(struct fb_var_screeninfo *var, static void control_par_to_var(struct fb_par_control *par, struct fb_var_screeninfo *var) { struct control_regints *rv; - + rv = (struct control_regints *) par->regvals.regs; - + memset(var, 0, sizeof(*var)); var->xres = par->xres; var->yres = par->yres; @@ -568,7 +568,7 @@ static void control_par_to_var(struct fb_par_control *par, struct fb_var_screeni var->yres_virtual = par->vyres; var->xoffset = par->xoffset; var->yoffset = par->yoffset; - + switch(par->cmode) { default: case CMODE_8: @@ -634,7 +634,7 @@ static int controlfb_check_var (struct fb_var_screeninfo *var, struct fb_info *i err = control_var_to_par(var, &par, info); if (err) - return err; + return err; control_par_to_var(&par, var); return 0; @@ -655,7 +655,7 @@ static int controlfb_set_par (struct fb_info *info) " control_var_to_par: %d.\n", err); return err; } - + control_set_hardware(p, &par); info->fix.visual = (p->par.cmode == CMODE_8) ? @@ -840,7 +840,7 @@ static int __init init_control(struct fb_info_control *p) int full, sense, vmode, cmode, vyres; struct fb_var_screeninfo var; int rc; - + printk(KERN_INFO "controlfb: "); full = p->total_vram == 0x400000; diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index e808dc86001cc..28739f1cb5e7a 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1468,7 +1468,7 @@ __releases(&info->lock) } #if defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && !defined(CONFIG_MMU) -unsigned long get_fb_unmapped_area(struct file *filp, +static unsigned long get_fb_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c index 23cf8eba785d4..f7e019dded0fb 100644 --- a/drivers/video/fbdev/core/modedb.c +++ b/drivers/video/fbdev/core/modedb.c @@ -257,6 +257,11 @@ static const struct fb_videomode modedb[] = { { NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, 0, FB_VMODE_DOUBLE }, + /* 1920x1080 @ 60 Hz, 67.3 kHz hsync */ + { NULL, 60, 1920, 1080, 6734, 148, 88, 36, 4, 44, 5, 0, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 1920x1200 @ 60 Hz, 74.5 Khz hsync */ { NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, diff --git a/drivers/video/fbdev/g364fb.c b/drivers/video/fbdev/g364fb.c index 05837a3b985c7..c5b7673ddc6ca 100644 --- a/drivers/video/fbdev/g364fb.c +++ b/drivers/video/fbdev/g364fb.c @@ -6,7 +6,7 @@ * * This driver is based on tgafb.c * - * Copyright (C) 1997 Geert Uytterhoeven + * Copyright (C) 1997 Geert Uytterhoeven * Copyright (C) 1995 Jay Estabrook * * This file is subject to the terms and conditions of the GNU General Public @@ -28,7 +28,7 @@ #include #include -/* +/* * Various defines for the G364 */ #define G364_MEM_BASE 0xe4400000 @@ -125,7 +125,7 
@@ static const struct fb_ops g364fb_ops = { * * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag */ -static int g364fb_pan_display(struct fb_var_screeninfo *var, +static int g364fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { if (var->xoffset || diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c index 20bdab738ab7a..0af58018441d3 100644 --- a/drivers/video/fbdev/hgafb.c +++ b/drivers/video/fbdev/hgafb.c @@ -1,6 +1,6 @@ /* * linux/drivers/video/hgafb.c -- Hercules graphics adaptor frame buffer device - * + * * Created 25 Nov 1999 by Ferenc Bakonyi (fero@drama.obuda.kando.hu) * Based on skeletonfb.c by Geert Uytterhoeven and * mdacon.c by Andrew Apted @@ -8,14 +8,14 @@ * History: * * - Revision 0.1.8 (23 Oct 2002): Ported to new framebuffer api. - * - * - Revision 0.1.7 (23 Jan 2001): fix crash resulting from MDA only cards + * + * - Revision 0.1.7 (23 Jan 2001): fix crash resulting from MDA only cards * being detected as Hercules. (Paul G.) * - Revision 0.1.6 (17 Aug 2000): new style structs * documentation * - Revision 0.1.5 (13 Mar 2000): spinlocks instead of saveflags();cli();etc * minor fixes - * - Revision 0.1.4 (24 Jan 2000): fixed a bug in hga_card_detect() for + * - Revision 0.1.4 (24 Jan 2000): fixed a bug in hga_card_detect() for * HGA-only systems * - Revision 0.1.3 (22 Jan 2000): modified for the new fb_info structure * screen is cleared after rmmod @@ -143,7 +143,7 @@ static bool nologo = 0; static void write_hga_b(unsigned int val, unsigned char reg) { - outb_p(reg, HGA_INDEX_PORT); + outb_p(reg, HGA_INDEX_PORT); outb_p(val, HGA_VALUE_PORT); } @@ -155,7 +155,7 @@ static void write_hga_w(unsigned int val, unsigned char reg) static int test_hga_b(unsigned char val, unsigned char reg) { - outb_p(reg, HGA_INDEX_PORT); + outb_p(reg, HGA_INDEX_PORT); outb (val, HGA_VALUE_PORT); udelay(20); val = (inb_p(HGA_VALUE_PORT) == val); return val; @@ -244,7 +244,7 @@ static void hga_show_logo(struct fb_info *info) void __iomem *dest = hga_vram; char *logo = linux_logo_bw; int x, y; - + for (y = 134; y < 134 + 80 ; y++) * this needs some cleanup * for (x = 0; x < 10 ; x++) writeb(~*(logo++),(dest + HGA_ROWADDR(y) + x + 40)); @@ -255,7 +255,7 @@ static void hga_pan(unsigned int xoffset, unsigned int yoffset) { unsigned int base; unsigned long flags; - + base = (yoffset / 8) * 90 + xoffset; spin_lock_irqsave(&hga_reg_lock, flags); write_hga_w(base, 0x0c); /* start address */ @@ -310,7 +310,7 @@ static int hga_card_detect(void) /* Ok, there is definitely a card registering at the correct * memory location, so now we do an I/O port test. */ - + if (!test_hga_b(0x66, 0x0f)) /* cursor low register */ goto error; @@ -321,7 +321,7 @@ static int hga_card_detect(void) * bit of the status register is changing. This test lasts for * approximately 1/10th of a second. */ - + p_save = q_save = inb_p(HGA_STATUS_PORT) & HGA_STATUS_VSYNC; for (count=0; count < 50000 && p_save == q_save; count++) { @@ -329,7 +329,7 @@ static int hga_card_detect(void) udelay(2); } - if (p_save == q_save) + if (p_save == q_save) goto error; switch (inb_p(HGA_STATUS_PORT) & 0x70) { @@ -415,7 +415,7 @@ static int hgafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, * @info:pointer to fb_info object containing info for current hga board * * This function looks only at xoffset, yoffset and the %FB_VMODE_YWRAP - * flag in @var. If input parameters are correct it calls hga_pan() to + * flag in @var. 
If input parameters are correct it calls hga_pan() to * program the hardware. @info->var is updated to the new values. * A zero is returned on success and %-EINVAL for failure. */ @@ -442,9 +442,9 @@ static int hgafb_pan_display(struct fb_var_screeninfo *var, * hgafb_blank - (un)blank the screen * @blank_mode:blanking method to use * @info:unused - * - * Blank the screen if blank_mode != 0, else unblank. - * Implements VESA suspend and powerdown modes on hardware that supports + * + * Blank the screen if blank_mode != 0, else unblank. + * Implements VESA suspend and powerdown modes on hardware that supports * disabling hsync/vsync: * @blank_mode == 2 means suspend vsync, * @blank_mode == 3 means suspend hsync, @@ -539,15 +539,15 @@ static const struct fb_ops hgafb_ops = { .fb_copyarea = hgafb_copyarea, .fb_imageblit = hgafb_imageblit, }; - + /* ------------------------------------------------------------------------- * * * Functions in fb_info - * + * * ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ - + /* * Initialization */ diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c index cdd44e5deafe4..77fbff47b1a8e 100644 --- a/drivers/video/fbdev/hpfb.c +++ b/drivers/video/fbdev/hpfb.c @@ -92,7 +92,7 @@ static int hpfb_setcolreg(unsigned regno, unsigned red, unsigned green, if (regno >= info->cmap.len) return 1; - + while (in_be16(fb_regs + 0x6002) & 0x4) udelay(1); out_be16(fb_regs + 0x60ba, 0xff); @@ -143,7 +143,7 @@ static void topcat_blit(int x0, int y0, int x1, int y1, int w, int h, int rr) out_8(fb_regs + WMOVE, fb_bitmask); } -static void hpfb_copyarea(struct fb_info *info, const struct fb_copyarea *area) +static void hpfb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { topcat_blit(area->sx, area->sy, area->dx, area->dy, area->width, area->height, RR_COPY); } @@ -315,7 +315,7 @@ static int hpfb_init_one(unsigned long phys_base, unsigned long virt_base) return ret; } -/* +/* * Check that the secondary ID indicates that we have some hope of working with this * framebuffer. The catseye boards are pretty much like topcats and we can muddle through. 
*/ @@ -323,7 +323,7 @@ static int hpfb_init_one(unsigned long phys_base, unsigned long virt_base) #define topcat_sid_ok(x) (((x) == DIO_ID2_LRCATSEYE) || ((x) == DIO_ID2_HRCCATSEYE) \ || ((x) == DIO_ID2_HRMCATSEYE) || ((x) == DIO_ID2_TOPCAT)) -/* +/* * Initialise the framebuffer */ static int hpfb_dio_probe(struct dio_dev *d, const struct dio_device_id *ent) diff --git a/drivers/video/fbdev/i810/i810_dvt.c b/drivers/video/fbdev/i810/i810_dvt.c index b4b3670667ab8..2082b5c92e8f3 100644 --- a/drivers/video/fbdev/i810/i810_dvt.c +++ b/drivers/video/fbdev/i810/i810_dvt.c @@ -14,6 +14,7 @@ #include "i810_regs.h" #include "i810.h" +#include "i810_main.h" struct mode_registers std_modes[] = { /* 640x480 @ 60Hz */ @@ -276,7 +277,7 @@ void i810fb_fill_var_timings(struct fb_var_screeninfo *var) var->upper_margin = total - (yres + var->lower_margin + var->vsync_len); } -u32 i810_get_watermark(struct fb_var_screeninfo *var, +u32 i810_get_watermark(const struct fb_var_screeninfo *var, struct i810fb_par *par) { struct mode_registers *params = &par->regs; diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c index bea45647184e1..975dd682fae4b 100644 --- a/drivers/video/fbdev/imsttfb.c +++ b/drivers/video/fbdev/imsttfb.c @@ -1347,7 +1347,7 @@ static const struct fb_ops imsttfb_ops = { .fb_ioctl = imsttfb_ioctl, }; -static void init_imstt(struct fb_info *info) +static int init_imstt(struct fb_info *info) { struct imstt_par *par = info->par; __u32 i, tmp, *ip, *end; @@ -1420,7 +1420,7 @@ static void init_imstt(struct fb_info *info) || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) { printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel); framebuffer_release(info); - return; + return -ENODEV; } sprintf(info->fix.id, "IMS TT (%s)", par->ramdac == IBM ? "IBM" : "TVP"); @@ -1456,12 +1456,13 @@ static void init_imstt(struct fb_info *info) if (register_framebuffer(info) < 0) { framebuffer_release(info); - return; + return -ENODEV; } tmp = (read_reg_le32(par->dc_regs, SSTATUS) & 0x0f00) >> 8; fb_info(info, "%s frame buffer; %uMB vram; chip version %u\n", info->fix.id, info->fix.smem_len >> 20, tmp); + return 0; } static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -1529,10 +1530,10 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!par->cmap_regs) goto error; info->pseudo_palette = par->palette; - init_imstt(info); - - pci_set_drvdata(pdev, info); - return 0; + ret = init_imstt(info); + if (!ret) + pci_set_drvdata(pdev, info); + return ret; error: if (par->dc_regs) diff --git a/drivers/video/fbdev/macfb.c b/drivers/video/fbdev/macfb.c index 312e35c9aa6c5..44ff860a3f378 100644 --- a/drivers/video/fbdev/macfb.c +++ b/drivers/video/fbdev/macfb.c @@ -339,7 +339,7 @@ static int civic_setpalette(unsigned int regno, unsigned int red, { unsigned long flags; int clut_status; - + local_irq_save(flags); /* Set the register address */ @@ -439,7 +439,7 @@ static int macfb_setcolreg(unsigned regno, unsigned red, unsigned green, * (according to the entries in the `var' structure). * Return non-zero for invalid regno. 
*/ - + if (regno >= fb_info->cmap.len) return 1; @@ -548,7 +548,7 @@ static int __init macfb_init(void) return -ENODEV; macfb_setup(option); - if (!MACH_IS_MAC) + if (!MACH_IS_MAC) return -ENODEV; if (mac_bi_data.id == MAC_MODEL_Q630 || @@ -644,7 +644,7 @@ static int __init macfb_init(void) err = -EINVAL; goto fail_unmap; } - + /* * We take a wild guess that if the video physical address is * in nubus slot space, that the nubus card is driving video. @@ -774,7 +774,7 @@ static int __init macfb_init(void) civic_cmap_regs = ioremap(CIVIC_BASE, 0x1000); break; - + /* * Assorted weirdos * We think this may be like the LC II diff --git a/drivers/video/fbdev/maxinefb.c b/drivers/video/fbdev/maxinefb.c index ae1a42bcb0ead..4e6b05232ae22 100644 --- a/drivers/video/fbdev/maxinefb.c +++ b/drivers/video/fbdev/maxinefb.c @@ -138,7 +138,7 @@ int __init maxinefb_init(void) *(volatile unsigned char *)fboff = 0x0; maxinefb_fix.smem_start = fb_start; - + /* erase hardware cursor */ for (i = 0; i < 512; i++) { maxinefb_ims332_write_register(IMS332_REG_CURSOR_RAM + i, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c index 1eaa35c278359..477789cff8e08 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c @@ -491,7 +491,8 @@ static int tpo_td043_probe(struct spi_device *spi) ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc"); if (IS_ERR(ddata->vcc_reg)) { - r = dev_err_probe(&spi->dev, r, "failed to get LCD VCC regulator\n"); + r = dev_err_probe(&spi->dev, PTR_ERR(ddata->vcc_reg), + "failed to get LCD VCC regulator\n"); goto err_regulator; } diff --git a/drivers/video/fbdev/p9100.c b/drivers/video/fbdev/p9100.c index 3e44f95163182..0876962c52ebc 100644 --- a/drivers/video/fbdev/p9100.c +++ b/drivers/video/fbdev/p9100.c @@ -65,7 +65,7 @@ static const struct fb_ops p9100_ops = { #define P9100_FB_OFF 0x0UL /* 3 bits: 2=8bpp 3=16bpp 5=32bpp 7=24bpp */ -#define SYS_CONFIG_PIXELSIZE_SHIFT 26 +#define SYS_CONFIG_PIXELSIZE_SHIFT 26 #define SCREENPAINT_TIMECTL1_ENABLE_VIDEO 0x20 /* 0 = off, 1 = on */ @@ -110,7 +110,7 @@ struct p9100_regs { u32 vram_xxx[25]; /* Registers for IBM RGB528 Palette */ - u32 ramdac_cmap_wridx; + u32 ramdac_cmap_wridx; u32 ramdac_palette_data; u32 ramdac_pixel_mask; u32 ramdac_palette_rdaddr; diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c index 82f019f0a0d6c..f8283fcd5edb7 100644 --- a/drivers/video/fbdev/platinumfb.c +++ b/drivers/video/fbdev/platinumfb.c @@ -52,17 +52,17 @@ struct fb_info_platinum { __u8 red, green, blue; } palette[256]; u32 pseudo_palette[16]; - + volatile struct cmap_regs __iomem *cmap_regs; unsigned long cmap_regs_phys; - + volatile struct platinum_regs __iomem *platinum_regs; unsigned long platinum_regs_phys; - + __u8 __iomem *frame_buffer; volatile __u8 __iomem *base_frame_buffer; unsigned long frame_buffer_phys; - + unsigned long total_vram; int clktype; int dactype; @@ -133,7 +133,7 @@ static int platinumfb_set_par (struct fb_info *info) platinum_set_hardware(pinfo); init = platinum_reg_init[pinfo->vmode-1]; - + if ((pinfo->vmode == VMODE_832_624_75) && (pinfo->cmode > CMODE_8)) offset = 0x10; @@ -214,7 +214,7 @@ static int platinumfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, break; } } - + return 0; } @@ -269,7 +269,7 @@ static void platinum_set_hardware(struct fb_info_platinum *pinfo) struct platinum_regvals *init; int i; int 
vmode, cmode; - + vmode = pinfo->vmode; cmode = pinfo->cmode; @@ -436,7 +436,7 @@ static int read_platinum_sense(struct fb_info_platinum *info) * This routine takes a user-supplied var, and picks the best vmode/cmode from it. * It also updates the var structure to the actual mode data obtained */ -static int platinum_var_to_par(struct fb_var_screeninfo *var, +static int platinum_var_to_par(struct fb_var_screeninfo *var, struct fb_info_platinum *pinfo, int check_only) { @@ -478,12 +478,12 @@ static int platinum_var_to_par(struct fb_var_screeninfo *var, pinfo->yoffset = 0; pinfo->vxres = pinfo->xres; pinfo->vyres = pinfo->yres; - + return 0; } -/* +/* * Parse user specified options (`video=platinumfb:') */ static int __init platinumfb_setup(char *options) @@ -624,7 +624,7 @@ static int platinumfb_probe(struct platform_device* odev) break; } dev_set_drvdata(&odev->dev, info); - + rc = platinum_init_fb(info); if (rc != 0) { iounmap(pinfo->frame_buffer); @@ -640,9 +640,9 @@ static void platinumfb_remove(struct platform_device* odev) { struct fb_info *info = dev_get_drvdata(&odev->dev); struct fb_info_platinum *pinfo = info->par; - + unregister_framebuffer (info); - + /* Unmap frame buffer and registers */ iounmap(pinfo->frame_buffer); iounmap(pinfo->platinum_regs); @@ -656,7 +656,7 @@ static void platinumfb_remove(struct platform_device* odev) framebuffer_release(info); } -static struct of_device_id platinumfb_match[] = +static struct of_device_id platinumfb_match[] = { { .name = "platinum", @@ -664,7 +664,7 @@ static struct of_device_id platinumfb_match[] = {}, }; -static struct platform_driver platinum_driver = +static struct platform_driver platinum_driver = { .driver = { .name = "platinumfb", diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c index b1b8ccdbac4a7..a2408bf00ca0f 100644 --- a/drivers/video/fbdev/sa1100fb.c +++ b/drivers/video/fbdev/sa1100fb.c @@ -57,14 +57,14 @@ * - Driver appears to be working for Brutus 320x200x8bpp mode. Other * resolutions are working, but only the 8bpp mode is supported. * Changes need to be made to the palette encode and decode routines - * to support 4 and 16 bpp modes. + * to support 4 and 16 bpp modes. * Driver is not designed to be a module. The FrameBuffer is statically - * allocated since dynamic allocation of a 300k buffer cannot be - * guaranteed. + * allocated since dynamic allocation of a 300k buffer cannot be + * guaranteed. * * 1999/06/17: * - FrameBuffer memory is now allocated at run-time when the - * driver is initialized. + * driver is initialized. * * 2000/04/10: Nicolas Pitre * - Big cleanup for dynamic selection of machine type at run time. @@ -74,8 +74,8 @@ * * 2000/08/07: Tak-Shing Chan * Jeff Sutherland - * - Resolved an issue caused by a change made to the Assabet's PLD - * earlier this year which broke the framebuffer driver for newer + * - Resolved an issue caused by a change made to the Assabet's PLD + * earlier this year which broke the framebuffer driver for newer * Phase 4 Assabets. Some other parameters were changed to optimize * for the Sharp display. * @@ -102,7 +102,7 @@ * 2000/11/23: Eric Peng * - Freebird add * - * 2001/02/07: Jamey Hicks + * 2001/02/07: Jamey Hicks * Cliff Brake * - Added PM callback * @@ -500,7 +500,7 @@ sa1100fb_set_cmap(struct fb_cmap *cmap, int kspc, int con, * the shortest recovery time * Suspend * This refers to a level of power management in which substantial power - * reduction is achieved by the display. 
The display can have a longer + * reduction is achieved by the display. The display can have a longer * recovery time from this state than from the Stand-by state * Off * This indicates that the display is consuming the lowest level of power @@ -522,9 +522,9 @@ sa1100fb_set_cmap(struct fb_cmap *cmap, int kspc, int con, */ /* * sa1100fb_blank(): - * Blank the display by setting all palette values to zero. Note, the + * Blank the display by setting all palette values to zero. Note, the * 12 and 16 bpp modes don't really use the palette, so this will not - * blank the display in all modes. + * blank the display in all modes. */ static int sa1100fb_blank(int blank, struct fb_info *info) { @@ -603,8 +603,8 @@ static inline unsigned int get_pcd(struct sa1100fb_info *fbi, /* * sa1100fb_activate_var(): - * Configures LCD Controller based on entries in var parameter. Settings are - * only written to the controller if changes were made. + * Configures LCD Controller based on entries in var parameter. Settings are + * only written to the controller if changes were made. */ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_info *fbi) { @@ -747,7 +747,7 @@ static void sa1100fb_setup_gpio(struct sa1100fb_info *fbi) * * SA1110 spec update nr. 25 says we can and should * clear LDD15 to 12 for 4 or 8bpp modes with active - * panels. + * panels. */ if ((fbi->reg_lccr0 & LCCR0_CMS) == LCCR0_Color && (fbi->reg_lccr0 & (LCCR0_Dual|LCCR0_Act)) != 0) { @@ -1020,9 +1020,9 @@ static int sa1100fb_resume(struct platform_device *dev) /* * sa1100fb_map_video_memory(): - * Allocates the DRAM memory for the frame buffer. This buffer is - * remapped into a non-cached, non-buffered, memory region to - * allow palette and pixel writes to occur without flushing the + * Allocates the DRAM memory for the frame buffer. This buffer is + * remapped into a non-cached, non-buffered, memory region to + * allow palette and pixel writes to occur without flushing the * cache. Once this area is remapped, all virtual memory * access to the video memory should occur at the new region. */ diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index ef8a4c5fc6875..686a234f3899e 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c @@ -1,11 +1,11 @@ /* - * linux/drivers/video/stifb.c - - * Low level Frame buffer driver for HP workstations with + * linux/drivers/video/stifb.c - + * Low level Frame buffer driver for HP workstations with * STI (standard text interface) video firmware. * * Copyright (C) 2001-2006 Helge Deller * Portions Copyright (C) 2001 Thomas Bogendoerfer - * + * * Based on: * - linux/drivers/video/artistfb.c -- Artist frame buffer driver * Copyright (C) 2000 Philipp Rumpf @@ -14,7 +14,7 @@ * - HP Xhp cfb-based X11 window driver for XFree86 * (c)Copyright 1992 Hewlett-Packard Co. * - * + * * The following graphics display devices (NGLE family) are supported by this driver: * * HPA4070A known as "HCRX", a 1280x1024 color device with 8 planes @@ -30,7 +30,7 @@ * supports 1280x1024 color displays with 8 planes. 
* HP710G same as HP710C, 1280x1024 grayscale only * HP710L same as HP710C, 1024x768 color only - * HP712 internal graphics support on HP9000s712 SPU, supports 640x480, + * HP712 internal graphics support on HP9000s712 SPU, supports 640x480, * 1024x768 or 1280x1024 color displays on 8 planes (Artist) * * This file is subject to the terms and conditions of the GNU General Public @@ -92,7 +92,7 @@ typedef struct { __s32 misc_video_end; } video_setup_t; -typedef struct { +typedef struct { __s16 sizeof_ngle_data; __s16 x_size_visible; /* visible screen dim in pixels */ __s16 y_size_visible; @@ -177,10 +177,10 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS]; #endif /* DEBUG_STIFB_REGS */ -#define ENABLE 1 /* for enabling/disabling screen */ +#define ENABLE 1 /* for enabling/disabling screen */ #define DISABLE 0 -#define NGLE_LOCK(fb_info) do { } while (0) +#define NGLE_LOCK(fb_info) do { } while (0) #define NGLE_UNLOCK(fb_info) do { } while (0) static void @@ -198,9 +198,9 @@ SETUP_HW(struct stifb_info *fb) static void SETUP_FB(struct stifb_info *fb) -{ +{ unsigned int reg10_value = 0; - + SETUP_HW(fb); switch (fb->id) { @@ -210,15 +210,15 @@ SETUP_FB(struct stifb_info *fb) reg10_value = 0x13601000; break; case S9000_ID_A1439A: - if (fb->info.var.bits_per_pixel == 32) + if (fb->info.var.bits_per_pixel == 32) reg10_value = 0xBBA0A000; - else + else reg10_value = 0x13601000; break; case S9000_ID_HCRX: if (fb->info.var.bits_per_pixel == 32) reg10_value = 0xBBA0A000; - else + else reg10_value = 0x13602000; break; case S9000_ID_TIMBER: @@ -243,7 +243,7 @@ START_IMAGE_COLORMAP_ACCESS(struct stifb_info *fb) } static void -WRITE_IMAGE_COLOR(struct stifb_info *fb, int index, int color) +WRITE_IMAGE_COLOR(struct stifb_info *fb, int index, int color) { SETUP_HW(fb); WRITE_WORD(((0x100+index)<<2), fb, REG_3); @@ -251,30 +251,30 @@ WRITE_IMAGE_COLOR(struct stifb_info *fb, int index, int color) } static void -FINISH_IMAGE_COLORMAP_ACCESS(struct stifb_info *fb) -{ +FINISH_IMAGE_COLORMAP_ACCESS(struct stifb_info *fb) +{ WRITE_WORD(0x400, fb, REG_2); if (fb->info.var.bits_per_pixel == 32) { WRITE_WORD(0x83000100, fb, REG_1); } else { if (fb->id == S9000_ID_ARTIST || fb->id == CRT_ID_VISUALIZE_EG) WRITE_WORD(0x80000100, fb, REG_26); - else + else WRITE_WORD(0x80000100, fb, REG_1); } SETUP_FB(fb); } static void -SETUP_RAMDAC(struct stifb_info *fb) +SETUP_RAMDAC(struct stifb_info *fb) { SETUP_HW(fb); WRITE_WORD(0x04000000, fb, 0x1020); WRITE_WORD(0xff000000, fb, 0x1028); } -static void -CRX24_SETUP_RAMDAC(struct stifb_info *fb) +static void +CRX24_SETUP_RAMDAC(struct stifb_info *fb) { SETUP_HW(fb); WRITE_WORD(0x04000000, fb, 0x1000); @@ -286,14 +286,14 @@ CRX24_SETUP_RAMDAC(struct stifb_info *fb) } #if 0 -static void +static void HCRX_SETUP_RAMDAC(struct stifb_info *fb) { WRITE_WORD(0xffffffff, fb, REG_32); } #endif -static void +static void CRX24_SET_OVLY_MASK(struct stifb_info *fb) { SETUP_HW(fb); @@ -314,7 +314,7 @@ ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) WRITE_WORD(value, fb, 0x1038); } -static void +static void CRX24_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) { unsigned int value = enable ? 
0x10000000 : 0x30000000; @@ -325,11 +325,11 @@ CRX24_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) } static void -ARTIST_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) +ARTIST_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) { u32 DregsMiscVideo = REG_21; u32 DregsMiscCtl = REG_27; - + SETUP_HW(fb); if (enable) { WRITE_WORD(READ_WORD(fb, DregsMiscVideo) | 0x0A000000, fb, DregsMiscVideo); @@ -344,7 +344,7 @@ ARTIST_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) (READ_BYTE(fb, REG_16b3) - 1) #define HYPER_CONFIG_PLANES_24 0x00000100 - + #define IS_24_DEVICE(fb) \ (fb->deviceSpecificConfig & HYPER_CONFIG_PLANES_24) @@ -470,15 +470,15 @@ SETUP_ATTR_ACCESS(struct stifb_info *fb, unsigned BufferNumber) } static void -SET_ATTR_SIZE(struct stifb_info *fb, int width, int height) +SET_ATTR_SIZE(struct stifb_info *fb, int width, int height) { - /* REG_6 seems to have special values when run on a + /* REG_6 seems to have special values when run on a RDI precisionbook parisc laptop (INTERNAL_EG_DX1024 or INTERNAL_EG_X1024). The values are: 0x2f0: internal (LCD) & external display enabled 0x2a0: external display only 0x000: zero on standard artist graphic cards - */ + */ WRITE_WORD(0x00000000, fb, REG_6); WRITE_WORD((width<<16) | height, fb, REG_9); WRITE_WORD(0x05000000, fb, REG_6); @@ -486,7 +486,7 @@ SET_ATTR_SIZE(struct stifb_info *fb, int width, int height) } static void -FINISH_ATTR_ACCESS(struct stifb_info *fb) +FINISH_ATTR_ACCESS(struct stifb_info *fb) { SETUP_HW(fb); WRITE_WORD(0x00000000, fb, REG_12); @@ -499,7 +499,7 @@ elkSetupPlanes(struct stifb_info *fb) SETUP_FB(fb); } -static void +static void ngleSetupAttrPlanes(struct stifb_info *fb, int BufferNumber) { SETUP_ATTR_ACCESS(fb, BufferNumber); @@ -519,7 +519,7 @@ rattlerSetupPlanes(struct stifb_info *fb) * read mask register for overlay planes, not image planes). */ CRX24_SETUP_RAMDAC(fb); - + /* change fb->id temporarily to fool SETUP_FB() */ saved_id = fb->id; fb->id = CRX24_OVERLAY_PLANES; @@ -565,7 +565,7 @@ setNgleLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length) lutBltCtl.all = 0x80000000; lutBltCtl.fields.length = length; - switch (fb->id) + switch (fb->id) { case S9000_ID_A1439A: /* CRX24 */ if (fb->var.bits_per_pixel == 8) { @@ -576,12 +576,12 @@ setNgleLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length) lutBltCtl.fields.lutOffset = 0 * 256; } break; - + case S9000_ID_ARTIST: lutBltCtl.fields.lutType = NGLE_CMAP_INDEXED0_TYPE; lutBltCtl.fields.lutOffset = 0 * 256; break; - + default: lutBltCtl.fields.lutType = NGLE_CMAP_INDEXED0_TYPE; lutBltCtl.fields.lutOffset = 0; @@ -596,7 +596,7 @@ setNgleLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length) #endif static NgleLutBltCtl -setHyperLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length) +setHyperLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length) { NgleLutBltCtl lutBltCtl; @@ -633,7 +633,7 @@ static void hyperUndoITE(struct stifb_info *fb) /* Hardware setup for full-depth write to "magic" location */ GET_FIFO_SLOTS(fb, nFreeFifoSlots, 7); - NGLE_QUICK_SET_DST_BM_ACCESS(fb, + NGLE_QUICK_SET_DST_BM_ACCESS(fb, BA(IndexedDcd, Otc04, Ots08, AddrLong, BAJustPoint(0), BINovly, BAIndexBase(0))); NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb, @@ -653,13 +653,13 @@ static void hyperUndoITE(struct stifb_info *fb) NGLE_UNLOCK(fb); } -static void +static void ngleDepth8_ClearImagePlanes(struct stifb_info *fb) { /* FIXME! 
*/ } -static void +static void ngleDepth24_ClearImagePlanes(struct stifb_info *fb) { /* FIXME! */ @@ -675,7 +675,7 @@ ngleResetAttrPlanes(struct stifb_info *fb, unsigned int ctlPlaneReg) NGLE_LOCK(fb); GET_FIFO_SLOTS(fb, nFreeFifoSlots, 4); - NGLE_QUICK_SET_DST_BM_ACCESS(fb, + NGLE_QUICK_SET_DST_BM_ACCESS(fb, BA(IndexedDcd, Otc32, OtsIndirect, AddrLong, BAJustPoint(0), BINattr, BAIndexBase(0))); @@ -713,22 +713,22 @@ ngleResetAttrPlanes(struct stifb_info *fb, unsigned int ctlPlaneReg) /**** Finally, set the Control Plane Register back to zero: ****/ GET_FIFO_SLOTS(fb, nFreeFifoSlots, 1); NGLE_QUICK_SET_CTL_PLN_REG(fb, 0); - + NGLE_UNLOCK(fb); } - + static void ngleClearOverlayPlanes(struct stifb_info *fb, int mask, int data) { int nFreeFifoSlots = 0; u32 packed_dst; u32 packed_len; - + NGLE_LOCK(fb); /* Hardware setup */ GET_FIFO_SLOTS(fb, nFreeFifoSlots, 8); - NGLE_QUICK_SET_DST_BM_ACCESS(fb, + NGLE_QUICK_SET_DST_BM_ACCESS(fb, BA(IndexedDcd, Otc04, Ots08, AddrLong, BAJustPoint(0), BINovly, BAIndexBase(0))); @@ -736,23 +736,23 @@ ngleClearOverlayPlanes(struct stifb_info *fb, int mask, int data) NGLE_REALLY_SET_IMAGE_FG_COLOR(fb, data); NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, mask); - + packed_dst = 0; packed_len = (fb->info.var.xres << 16) | fb->info.var.yres; NGLE_SET_DSTXY(fb, packed_dst); - - /* Write zeroes to overlay planes */ + + /* Write zeroes to overlay planes */ NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb, IBOvals(RopSrc, MaskAddrOffset(0), BitmapExtent08, StaticReg(0), DataDynamic, MaskOtc, BGx(0), FGx(0))); - + SET_LENXY_START_RECFILL(fb, packed_len); NGLE_UNLOCK(fb); } -static void +static void hyperResetPlanes(struct stifb_info *fb, int enable) { unsigned int controlPlaneReg; @@ -783,7 +783,7 @@ hyperResetPlanes(struct stifb_info *fb, int enable) ngleClearOverlayPlanes(fb, 0xff, 255); /************************************************** - ** Also need to counteract ITE settings + ** Also need to counteract ITE settings **************************************************/ hyperUndoITE(fb); break; @@ -803,13 +803,13 @@ hyperResetPlanes(struct stifb_info *fb, int enable) ngleResetAttrPlanes(fb, controlPlaneReg); break; } - + NGLE_UNLOCK(fb); } /* Return pointer to in-memory structure holding ELK device-dependent ROM values. */ -static void +static void ngleGetDeviceRomData(struct stifb_info *fb) { #if 0 @@ -821,7 +821,7 @@ XXX: FIXME: !!! char *pCard8; int i; char *mapOrigin = NULL; - + int romTableIdx; pPackedDevRomData = fb->ngle_rom; @@ -888,7 +888,7 @@ SETUP_HCRX(struct stifb_info *fb) /* Initialize Hyperbowl registers */ GET_FIFO_SLOTS(fb, nFreeFifoSlots, 7); - + if (IS_24_DEVICE(fb)) { hyperbowl = (fb->info.var.bits_per_pixel == 32) ? 
HYPERBOWL_MODE01_8_24_LUT0_TRANSPARENT_LUT1_OPAQUE : @@ -897,9 +897,9 @@ SETUP_HCRX(struct stifb_info *fb) /* First write to Hyperbowl must happen twice (bug) */ WRITE_WORD(hyperbowl, fb, REG_40); WRITE_WORD(hyperbowl, fb, REG_40); - + WRITE_WORD(HYPERBOWL_MODE2_8_24, fb, REG_39); - + WRITE_WORD(0x014c0148, fb, REG_42); /* Set lut 0 to be the direct color */ WRITE_WORD(0x404c4048, fb, REG_43); WRITE_WORD(0x034c0348, fb, REG_44); @@ -990,7 +990,7 @@ stifb_setcolreg(u_int regno, u_int red, u_int green, 0, /* Offset w/i LUT */ 256); /* Load entire LUT */ NGLE_BINC_SET_SRCADDR(fb, - NGLE_LONG_FB_ADDRESS(0, 0x100, 0)); + NGLE_LONG_FB_ADDRESS(0, 0x100, 0)); /* 0x100 is same as used in WRITE_IMAGE_COLOR() */ START_COLORMAPLOAD(fb, lutBltCtl.all); SETUP_FB(fb); @@ -1028,7 +1028,7 @@ stifb_blank(int blank_mode, struct fb_info *info) ENABLE_DISABLE_DISPLAY(fb, enable); break; } - + SETUP_FB(fb); return 0; } @@ -1114,15 +1114,15 @@ stifb_init_display(struct stifb_info *fb) /* HCRX specific initialization */ SETUP_HCRX(fb); - + /* if (id == S9000_ID_HCRX) hyperInitSprite(fb); else ngleInitSprite(fb); */ - - /* Initialize the image planes. */ + + /* Initialize the image planes. */ switch (id) { case S9000_ID_HCRX: hyperResetPlanes(fb, ENABLE); @@ -1194,7 +1194,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) fb = kzalloc(sizeof(*fb), GFP_ATOMIC); if (!fb) return -ENOMEM; - + info = &fb->info; /* set struct to a known state */ @@ -1235,7 +1235,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) dev_name, fb->id); goto out_err0; } - + /* default to 8 bpp on most graphic chips */ bpp = 8; xres = sti_onscreen_x(fb->sti); @@ -1256,7 +1256,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) fb->id = S9000_ID_A1659A; break; case S9000_ID_TIMBER: /* HP9000/710 Any (may be a grayscale device) */ - if (strstr(dev_name, "GRAYSCALE") || + if (strstr(dev_name, "GRAYSCALE") || strstr(dev_name, "Grayscale") || strstr(dev_name, "grayscale")) var->grayscale = 1; @@ -1295,16 +1295,16 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) case CRT_ID_VISUALIZE_EG: case S9000_ID_ARTIST: /* Artist */ break; - default: + default: #ifdef FALLBACK_TO_1BPP - printk(KERN_WARNING + printk(KERN_WARNING "stifb: Unsupported graphics card (id=0x%08x) " "- now trying 1bpp mode instead\n", fb->id); bpp = 1; /* default to 1 bpp */ break; #else - printk(KERN_WARNING + printk(KERN_WARNING "stifb: Unsupported graphics card (id=0x%08x) " "- skipping.\n", fb->id); @@ -1320,11 +1320,11 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) fix->line_length = (fb->sti->glob_cfg->total_x * bpp) / 8; if (!fix->line_length) fix->line_length = 2048; /* default */ - + /* limit fbsize to max visible screen size */ if (fix->smem_len > yres*fix->line_length) fix->smem_len = ALIGN(yres*fix->line_length, 4*1024*1024); - + fix->accel = FB_ACCEL_NONE; switch (bpp) { @@ -1350,7 +1350,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) default: break; } - + var->xres = var->xres_virtual = xres; var->yres = var->yres_virtual = yres; var->bits_per_pixel = bpp; @@ -1379,7 +1379,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) fix->smem_start, fix->smem_start+fix->smem_len); goto out_err2; } - + if (!request_mem_region(fix->mmio_start, fix->mmio_len, "stifb mmio")) { printk(KERN_ERR "stifb: cannot reserve sti mmio region 0x%04lx-0x%04lx\n", fix->mmio_start, fix->mmio_start+fix->mmio_len); @@ -1393,11 
+1393,11 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) fb_info(&fb->info, "%s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n", fix->id, - var->xres, + var->xres, var->yres, var->bits_per_pixel, dev_name, - fb->id, + fb->id, fix->mmio_start); return 0; @@ -1413,6 +1413,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) iounmap(info->screen_base); out_err0: kfree(fb); + sti->info = NULL; return -ENXIO; } @@ -1426,7 +1427,7 @@ static int __init stifb_init(void) struct sti_struct *sti; struct sti_struct *def_sti; int i; - + #ifndef MODULE char *option = NULL; @@ -1438,7 +1439,7 @@ static int __init stifb_init(void) printk(KERN_INFO "stifb: disabled by \"stifb=off\" kernel parameter\n"); return -ENXIO; } - + def_sti = sti_get_rom(0); if (def_sti) { for (i = 1; i <= MAX_STI_ROMS; i++) { @@ -1472,7 +1473,7 @@ stifb_cleanup(void) { struct sti_struct *sti; int i; - + for (i = 1; i <= MAX_STI_ROMS; i++) { sti = sti_get_rom(i); if (!sti) @@ -1495,10 +1496,10 @@ int __init stifb_setup(char *options) { int i; - + if (!options || !*options) return 1; - + if (strncmp(options, "off", 3) == 0) { stifb_disabled = 1; options += 3; diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index 216d49c9d47e5..dabc30a09f969 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -27,6 +27,8 @@ #include