diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 99983e67c13c..da95513571ea 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -162,7 +162,7 @@ Description: Discover CPUs in the same CPU frequency coordination domain What: /sys/devices/system/cpu/cpu*/cache/index3/cache_disable_{0,1} Date: August 2008 KernelVersion: 2.6.27 -Contact: discuss@x86-64.org +Contact: Linux kernel mailing list Description: Disable L3 cache indices These files exist in every CPU's cache/index3 directory. Each diff --git a/Documentation/devicetree/bindings/clock/at91-clock.txt b/Documentation/devicetree/bindings/clock/at91-clock.txt index 7a4d4926f44e..5ba6450693b9 100644 --- a/Documentation/devicetree/bindings/clock/at91-clock.txt +++ b/Documentation/devicetree/bindings/clock/at91-clock.txt @@ -248,7 +248,7 @@ Required properties for peripheral clocks: - #address-cells : shall be 1 (reg is used to encode clk id). - clocks : shall be the master clock phandle. e.g. clocks = <&mck>; -- name: device tree node describing a specific system clock. +- name: device tree node describing a specific peripheral clock. * #clock-cells : from common clock binding; shall be set to 0. * reg: peripheral id. See Atmel's datasheets to get a full list of peripheral ids. diff --git a/Documentation/devicetree/bindings/input/touchscreen/tsc2005.txt b/Documentation/devicetree/bindings/input/touchscreen/tsc2005.txt index 4b641c7bf1c2..09089a6d69ed 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/tsc2005.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/tsc2005.txt @@ -32,8 +32,8 @@ Example: touchscreen-fuzz-x = <4>; touchscreen-fuzz-y = <7>; touchscreen-fuzz-pressure = <2>; - touchscreen-max-x = <4096>; - touchscreen-max-y = <4096>; + touchscreen-size-x = <4096>; + touchscreen-size-y = <4096>; touchscreen-max-pressure = <2048>; ti,x-plate-ohms = <280>; diff --git a/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt b/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt new file mode 100644 index 000000000000..6d7ab4e524d4 --- /dev/null +++ b/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt @@ -0,0 +1,35 @@ +* IPQ806x DWMAC Ethernet controller + +The device inherits all the properties of the dwmac/stmmac devices +described in the file net/stmmac.txt with the following changes. + +Required properties: + +- compatible: should be "qcom,ipq806x-gmac" along with "snps,dwmac" + and any applicable more detailed version number + described in net/stmmac.txt + +- qcom,nss-common: should contain a phandle to a syscon device mapping the + nss-common registers. + +- qcom,qsgmii-csr: should contain a phandle to a syscon device mapping the + qsgmii-csr registers. 
+ +Example: + + gmac: ethernet@37000000 { + device_type = "network"; + compatible = "qcom,ipq806x-gmac"; + reg = <0x37000000 0x200000>; + interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "macirq"; + + qcom,nss-common = <&nss_common>; + qcom,qsgmii-csr = <&qsgmii_csr>; + + clocks = <&gcc GMAC_CORE1_CLK>; + clock-names = "stmmaceth"; + + resets = <&gcc GMAC_CORE1_RESET>; + reset-names = "stmmaceth"; + }; diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt new file mode 100644 index 000000000000..58d935b58598 --- /dev/null +++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt @@ -0,0 +1,25 @@ +* Texas Instruments - dp83867 Gigabit ethernet phy + +Required properties: + - reg - The ID number for the phy, usually a small integer + - ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h + for applicable values + - ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h + for applicable values + - ti,fifo-depth - Transmit FIFO depth - see dt-bindings/net/ti-dp83867.h + for applicable values + +Default child nodes are standard Ethernet PHY device +nodes as described in Documentation/devicetree/bindings/net/phy.txt + +Example: + + ethernet-phy@0 { + reg = <0>; + ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>; + ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_75_NS>; + ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>; + }; + +Datasheet can be found: +http://www.ti.com/product/DP83867IR/datasheet diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt index dc2a18f0b3a1..ddbe304beb21 100644 --- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt +++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt @@ -15,10 +15,8 @@ Optional properties: - phys: phandle + phy specifier pair - phy-names: must be "usb" - dmas: Must contain a list of references to DMA specifiers. - - dma-names : Must contain a list of DMA names: - - tx0 ... tx - - rx0 ... rx - - This means DnFIFO in USBHS module. + - dma-names : named "ch%d", where %d is the channel number ranging from zero + to the number of channels (DnFIFOs) minus one. Example: usbhs: usb@e6590000 { diff --git a/Documentation/hwmon/tmp401 b/Documentation/hwmon/tmp401 index 8eb88e974055..711f75e189eb 100644 --- a/Documentation/hwmon/tmp401 +++ b/Documentation/hwmon/tmp401 @@ -20,7 +20,7 @@ Supported chips: Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp432.html * Texas Instruments TMP435 Prefix: 'tmp435' - Addresses scanned: I2C 0x37, 0x48 - 0x4f + Addresses scanned: I2C 0x48 - 0x4f Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html Authors: diff --git a/Documentation/i2c/slave-interface b/Documentation/i2c/slave-interface index 389bb5d61854..b228ca54bcf4 100644 --- a/Documentation/i2c/slave-interface +++ b/Documentation/i2c/slave-interface @@ -31,10 +31,10 @@ User manual =========== I2C slave backends behave like standard I2C clients. So, you can instantiate -them like described in the document 'instantiating-devices'. A quick example -for instantiating the slave-eeprom driver from userspace: +them as described in the document 'instantiating-devices'. A quick example for +instantiating the slave-eeprom driver from userspace at address 0x64 on bus 1: - # echo 0-0064 > /sys/bus/i2c/drivers/i2c-slave-eeprom/bind + # echo slave-24c02 0x64 > /sys/bus/i2c/devices/i2c-1/new_device Each backend should come with separate documentation to describe its specific behaviour and setup.
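For completeness, the same instantiation can be done programmatically. Below is a minimal C sketch of the echo command shown above; the bus number 1, the address 0x64 and the slave-24c02 backend name are simply the values from that example and should be adjusted for the target system (in-kernel backends themselves register with i2c_slave_register()):

	#include <stdio.h>

	int main(void)
	{
		/* Equivalent of:
		 *   echo slave-24c02 0x64 > /sys/bus/i2c/devices/i2c-1/new_device
		 */
		FILE *f = fopen("/sys/bus/i2c/devices/i2c-1/new_device", "w");

		if (!f) {
			perror("new_device");
			return 1;
		}
		fprintf(f, "slave-24c02 0x64\n");
		fclose(f);

		/* The device can be removed again by writing "0x64" to the
		 * sibling file delete_device. */
		return 0;
	}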
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 61ab1628a057..6726139bd289 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1481,6 +1481,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. By default, super page will be supported if Intel IOMMU has the capability. With this option, super page will not be supported. + ecs_off [Default Off] + By default, extended context tables will be supported if + the hardware advertises support both for the extended + tables themselves and for PASID. With this option set, + extended tables will not be used even on hardware which + claims to support them. intel_idle.max_cstate= [KNL,HW,ACPI,X86] 0 disables intel_idle and falls back on acpi_idle. diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt index 22bbc7225f8e..1700756af057 100644 --- a/Documentation/networking/ieee802154.txt +++ b/Documentation/networking/ieee802154.txt @@ -30,8 +30,8 @@ int sd = socket(PF_IEEE802154, SOCK_DGRAM, 0); The address family, socket addresses etc. are defined in the include/net/af_ieee802154.h header or in the special header -in our userspace package (see either linux-zigbee sourceforge download page -or git tree at git://linux-zigbee.git.sourceforge.net/gitroot/linux-zigbee). +in the userspace package (see either http://wpan.cakelab.org/ or the +git tree at https://github.com/linux-wpan/wpan-tools). One can use SOCK_RAW for passing raw data towards device xmit function. YMMV. @@ -49,15 +49,6 @@ Like with WiFi, there are several types of devices implementing IEEE 802.15.4. Those types of devices require a different approach to be hooked into the Linux kernel. -MLME - MAC Level Management -============================ - -Most of IEEE 802.15.4 MLME interfaces are directly mapped on netlink commands. -See the include/net/nl802154.h header. Our userspace tools package -(see above) provides CLI configuration utility for radio interfaces and simple -coordinator for IEEE 802.15.4 networks as an example users of MLME protocol. - - HardMAC ======= @@ -75,8 +66,6 @@ net_device with a pointer to struct ieee802154_mlme_ops instance. The fields assoc_req, assoc_resp, disassoc_req, start_req, and scan_req are optional. All other fields are required. -We provide an example of simple HardMAC driver at drivers/ieee802154/fakehard.c - SoftMAC ======= @@ -89,7 +78,8 @@ stack interface for network sniffers (e.g. WireShark). This layer is going to be extended soon. -See header include/net/mac802154.h and several drivers in drivers/ieee802154/. +See header include/net/mac802154.h and several drivers in +drivers/net/ieee802154/. Device drivers API ================== @@ -114,18 +104,17 @@ Moreover IEEE 802.15.4 device operations structure should be filled. Fake drivers ============ -In addition there are two drivers available which simulate real devices with -HardMAC (fakehard) and SoftMAC (fakelb - IEEE 802.15.4 loopback driver) -interfaces. This option provides possibility to test and debug stack without -usage of real hardware. +In addition there is a driver available which simulates a real device with +a SoftMAC (fakelb - IEEE 802.15.4 loopback driver) interface. This option +makes it possible to test and debug the stack without using real hardware. -See sources in drivers/ieee802154 folder for more details. +See sources in drivers/net/ieee802154 folder for more details.
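To make the socket interface mentioned at the top of this file concrete, here is a minimal C sketch; it assumes only what the text above states (the PF_IEEE802154/SOCK_DGRAM socket type and the address definitions in af_ieee802154.h), and the numeric fallback 36 is the value of AF_IEEE802154 in linux/socket.h:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	#ifndef PF_IEEE802154
	#define PF_IEEE802154 36	/* AF_IEEE802154 in linux/socket.h */
	#endif

	int main(void)
	{
		/* SOCK_DGRAM carries one MAC payload per datagram;
		 * SOCK_RAW instead passes raw frames towards the device
		 * xmit function. */
		int sd = socket(PF_IEEE802154, SOCK_DGRAM, 0);

		if (sd < 0) {
			perror("socket(PF_IEEE802154)");
			return 1;
		}
		/* bind()/sendto() take the socket addresses defined in
		 * include/net/af_ieee802154.h (userspace copies ship with
		 * the wpan-tools package). */
		close(sd);
		return 0;
	}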
6LoWPAN Linux implementation ============================ -The IEEE 802.15.4 standard specifies an MTU of 128 bytes, yielding about 80 +The IEEE 802.15.4 standard specifies an MTU of 127 bytes, yielding about 80 octets of actual MAC payload once security is turned on, on a wireless link with a link throughput of 250 kbps or less. The 6LoWPAN adaptation format [RFC4944] was specified to carry IPv6 datagrams over such constrained links, @@ -140,7 +129,8 @@ In September 2011 the standard update was published - [RFC6282]. It deprecates HC1 and HC2 compression and defines IPHC encoding format which is used in this Linux implementation. -All the code related to 6lowpan you may find in files: net/ieee802154/6lowpan.* +All the code related to 6lowpan may be found in the files: net/6lowpan/* +and net/ieee802154/6lowpan/* To setup 6lowpan interface you need (busybox release > 1.17.0): 1. Add IEEE802.15.4 interface and initialize PANid; diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index cb083e0d682c..5fae7704daab 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -751,8 +751,10 @@ IP Variables: ip_local_port_range - 2 INTEGERS Defines the local port range that is used by TCP and UDP to choose the local port. The first number is the first, the - second the last local port number. The default values are - 32768 and 61000 respectively. + second the last local port number. + If possible, these numbers should have different parity + (one even and one odd value). + The default values are 32768 and 60999 respectively. ip_local_reserved_ports - list of comma separated ranges Specify the ports which are reserved for known third-party @@ -775,7 +777,7 @@ ip_local_reserved_ports - list of comma separated ranges ip_local_port_range, e.g.: $ cat /proc/sys/net/ipv4/ip_local_port_range - 32000 61000 + 32000 60999 $ cat /proc/sys/net/ipv4/ip_local_reserved_ports 8080,9148 diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index 616f89267d23..da82cd75a4f6 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt @@ -114,11 +114,11 @@ would be sub-port 0 on port 1 on switch 1. Switch ID ^^^^^^^^^ -The switchdev driver must implement the switchdev op switchdev_port_attr_get for -SWITCHDEV_ATTR_PORT_PARENT_ID for each port netdev, returning the same physical ID -for each port of a switch. The ID must be unique between switches on the same -system. The ID does not need to be unique between switches on different -systems. +The switchdev driver must implement the switchdev op switchdev_port_attr_get +for SWITCHDEV_ATTR_PORT_PARENT_ID for each port netdev, returning the same +physical ID for each port of a switch. The ID must be unique between switches +on the same system. The ID does not need to be unique between switches on +different systems. The switch ID is used to locate ports on a switch and to know if aggregated ports belong to the same switch. @@ -142,7 +142,7 @@ The port netdevs representing the physical switch ports can be organized into higher-level switching constructs. The default construct is a standalone router port, used to offload L3 forwarding. Two or more ports can be bonded together to form a LAG. Two or more ports (or LAGs) can be bridged to bridge -to L2 networks. VLANs can be applied to sub-divide L2 networks. L2-over-L3 +L2 networks. VLANs can be applied to sub-divide L2 networks.
L2-over-L3 tunnels can be built on ports. These constructs are built using standard Linux tools such as the bridge driver, the bonding/team drivers, and netlink-based tools such as iproute2. @@ -177,6 +177,10 @@ entries are installed, for example, using iproute2 bridge cmd: bridge fdb add ADDR dev DEV [vlan VID] [self] +The driver should use the helper switchdev_port_fdb_xxx ops for ndo_fdb_xxx +ops, and handle add/delete/dump of SWITCHDEV_OBJ_PORT_FDB object using +switchdev_port_obj_xxx ops. + XXX: what should be done if offloading this rule to hardware fails (for example, due to full capacity in hardware tables) ? @@ -194,11 +198,11 @@ in turn, will notify the bridge driver using the switchdev notifier call: err = call_switchdev_notifiers(val, dev, info); -Where val is SWITCHDEV_FDB_ADD when learning and SWITCHDEV_FDB_DEL when forgetting, and -info points to a struct switchdev_notifier_fdb_info. On SWITCHDEV_FDB_ADD, the bridge -driver will install the FDB entry into the bridge's FDB and mark the entry as -NTF_EXT_LEARNED. The iproute2 bridge command will label these entries -"offload": +Where val is SWITCHDEV_FDB_ADD when learning and SWITCHDEV_FDB_DEL when +forgetting, and info points to a struct switchdev_notifier_fdb_info. On +SWITCHDEV_FDB_ADD, the bridge driver will install the FDB entry into the +bridge's FDB and mark the entry as NTF_EXT_LEARNED. The iproute2 bridge +command will label these entries "offload": $ bridge fdb 52:54:00:12:35:01 dev sw1p1 master br0 permanent @@ -229,18 +233,18 @@ the bridge's FDB. It's possible, but not optimal, to enable learning on the device port and on the bridge port, and disable learning_sync. To support learning and learning_sync port attributes, the driver implements -switchdev op switchdev_port_attr_get/set for SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS. The driver -should initialize the attributes to the hardware defaults. +switchdev op switchdev_port_attr_get/set for SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS. +The driver should initialize the attributes to the hardware defaults. FDB Ageing ^^^^^^^^^^ There are two FDB ageing models supported: 1) ageing by the device, and 2) ageing by the kernel. Ageing by the device is preferred if many FDB entries -are supported. The driver calls call_switchdev_notifiers(SWITCHDEV_FDB_DEL, ...) to -age out the FDB entry. In this model, ageing by the kernel should be turned -off. XXX: how to turn off ageing in kernel on a per-port basis or otherwise -prevent the kernel from ageing out the FDB entry? +are supported. The driver calls call_switchdev_notifiers(SWITCHDEV_FDB_DEL, +...) to age out the FDB entry. In this model, ageing by the kernel should be +turned off. XXX: how to turn off ageing in kernel on a per-port basis or +otherwise prevent the kernel from ageing out the FDB entry? In the kernel ageing model, the standard bridge ageing mechanism is used to age out stale FDB entries. To keep an FDB entry "alive", the driver should refresh @@ -262,8 +266,8 @@ STP State Change on Port Internally or with a third-party STP protocol implementation (e.g. mstpd), the bridge driver maintains the STP state for ports, and will notify the switch -driver of STP state change on a port using the switchdev op switchdev_attr_port_set for -SWITCHDEV_ATTR_PORT_STP_UPDATE. +driver of STP state change on a port using the switchdev op +switchdev_attr_port_set for SWITCHDEV_ATTR_PORT_STP_UPDATE. State is one of BR_STATE_*. The switch driver can use STP state updates to update ingress packet filter list for the port. 
For example, if port is @@ -296,33 +300,38 @@ IGMP Snooping XXX: complete this section -L3 routing ---------- +L3 Routing Offload +------------------ Offloading L3 routing requires that device be programmed with FIB entries from the kernel, with the device doing the FIB lookup and forwarding. The device does a longest prefix match (LPM) on FIB entries matching route prefix and -forwards the packet to the matching FIB entry's nexthop(s) egress ports. To -program the device, the switchdev driver is called with add/delete ops for IPv4 -and IPv6 FIB entries. For IPv4, the driver implements switchdev ops: - - int (*switchdev_fib_ipv4_add)(struct net_device *dev, - __be32 dst, int dst_len, - struct fib_info *fi, - u8 tos, u8 type, - u32 nlflags, u32 tb_id); - - int (*switchdev_fib_ipv4_del)(struct net_device *dev, - __be32 dst, int dst_len, - struct fib_info *fi, - u8 tos, u8 type, - u32 tb_id); - -to add/delete IPv4 dst/dest_len prefix on table tb_id. The *fi structure holds -details on the route and route's nexthops. *dev is one of the port netdevs -mentioned in the routes next hop list. If the output port netdevs referenced -in the route's nexthop list don't all have the same switch ID, the driver is -not called to add/delete the FIB entry. +forwards the packet to the matching FIB entry's nexthop(s) egress ports. + +To program the device, the driver implements support for +SWITCHDEV_OBJ_IPV[4|6]_FIB object using switchdev_port_obj_xxx ops. +switchdev_port_obj_add is used for both adding a new FIB entry to the device, +or modifying an existing entry on the device. + +XXX: Currently, only SWITCHDEV_OBJ_IPV4_FIB objects are supported. + +SWITCHDEV_OBJ_IPV4_FIB object passes: + + struct switchdev_obj_ipv4_fib { /* IPV4_FIB */ + u32 dst; + int dst_len; + struct fib_info *fi; + u8 tos; + u8 type; + u32 nlflags; + u32 tb_id; + } ipv4_fib; + +to add/modify/delete IPv4 dst/dest_len prefix on table tb_id. The *fi +structure holds details on the route and route's nexthops. *dev is one of the +port netdevs mentioned in the route's next hop list. If the output port netdevs +referenced in the route's nexthop list don't all have the same switch ID, the +driver is not called to add/modify/delete the FIB entry.
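To make the object-based FIB interface above concrete, here is a rough driver-side sketch. It is illustrative only: the foo_* names are hypothetical, and the exact switchdev_obj layout and op prototypes must be taken from include/net/switchdev.h:

	static int foo_port_obj_add(struct net_device *dev,
				    struct switchdev_obj *obj)
	{
		const struct switchdev_obj_ipv4_fib *fib = &obj->u.ipv4_fib;

		switch (obj->id) {
		case SWITCHDEV_OBJ_IPV4_FIB:
			/* Insert (or update) the LPM entry for dst/dst_len
			 * in table tb_id; nexthops are described by fib->fi.
			 * Called for both add and modify.  foo_hw_fib4_insert
			 * is the driver's own hardware-programming helper. */
			return foo_hw_fib4_insert(dev, fib->dst, fib->dst_len,
						  fib->fi, fib->tb_id);
		default:
			return -EOPNOTSUPP;
		}
	}

A delete op would follow the same shape, with the nlflags field unused.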
Routes offloaded to the device are labeled with "offload" in the ip route listing: @@ -340,7 +349,7 @@ listing: 12.0.0.4 via 11.0.0.9 dev sw1p2 proto zebra metric 20 offload 192.168.0.0/24 dev eth0 proto kernel scope link src 192.168.0.15 -XXX: add/del IPv6 FIB API +XXX: add/mod/del IPv6 FIB API Nexthop Resolution ^^^^^^^^^^^^^^^^^^ diff --git a/Documentation/networking/udplite.txt b/Documentation/networking/udplite.txt index d727a3829100..53a726855e49 100644 --- a/Documentation/networking/udplite.txt +++ b/Documentation/networking/udplite.txt @@ -20,7 +20,7 @@ files/UDP-Lite-HOWTO.txt o The Wireshark UDP-Lite WiKi (with capture files): - http://wiki.wireshark.org/Lightweight_User_Datagram_Protocol + https://wiki.wireshark.org/Lightweight_User_Datagram_Protocol o The Protocol Spec, RFC 3828, http://www.ietf.org/rfc/rfc3828.txt diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt index 43e94ea6d2ca..263b907517ac 100644 --- a/Documentation/target/tcmu-design.txt +++ b/Documentation/target/tcmu-design.txt @@ -15,8 +15,7 @@ Contents: a) Discovering and configuring TCMU uio devices b) Waiting for events on the device(s) c) Managing the command ring -3) Command filtering and pass_level -4) A final note +3) A final note TCM Userspace Design @@ -324,7 +323,7 @@ int handle_device_events(int fd, void *map) /* Process events from cmd ring until we catch up with cmd_head */ while (ent != (void *)mb + mb->cmdr_off + mb->cmd_head) { - if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) { + if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) { uint8_t *cdb = (void *)mb + ent->req.cdb_off; bool success = true; @@ -339,8 +338,12 @@ int handle_device_events(int fd, void *map) ent->rsp.scsi_status = SCSI_CHECK_CONDITION; } } + else if (tcmu_hdr_get_op(ent->hdr.len_op) != TCMU_OP_PAD) { + /* Tell the kernel we didn't handle unknown opcodes */ + ent->hdr.uflags |= TCMU_UFLAG_UNKNOWN_OP; + } else { - /* Do nothing for PAD entries */ + /* Do nothing for PAD entries except update cmd_tail */ } /* update cmd_tail */ @@ -360,28 +363,6 @@ int handle_device_events(int fd, void *map) } -Command filtering and pass_level --------------------------------- - -TCMU supports a "pass_level" option with valid values of 0 or 1. When -the value is 0 (the default), nearly all SCSI commands received for -the device are passed through to the handler. This allows maximum -flexibility but increases the amount of code required by the handler, -to support all mandatory SCSI commands. If pass_level is set to 1, -then only IO-related commands are presented, and the rest are handled -by LIO's in-kernel command emulation. The commands presented at level -1 include all versions of: - -READ -WRITE -WRITE_VERIFY -XDWRITEREAD -WRITE_SAME -COMPARE_AND_WRITE -SYNCHRONIZE_CACHE -UNMAP - - A final note ------------ diff --git a/MAINTAINERS b/MAINTAINERS index 823ee0993448..e87d0c1467db 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -51,9 +51,9 @@ trivial patch so apply some common sense. or does something very odd once a month document it. PLEASE remember that submissions must be made under the terms - of the OSDL certificate of contribution and should include a - Signed-off-by: line. The current version of this "Developer's - Certificate of Origin" (DCO) is listed in the file + of the Linux Foundation certificate of contribution and should + include a Signed-off-by: line. The current version of this + "Developer's Certificate of Origin" (DCO) is listed in the file Documentation/SubmittingPatches. 6. 
Make sure you have the right to send any changes you make. If you @@ -921,6 +921,13 @@ M: Krzysztof Halasa S: Maintained F: arch/arm/mach-cns3xxx/ +ARM/CAVIUM THUNDER NETWORK DRIVER +M: Sunil Goutham +M: Robert Richter +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Supported +F: drivers/net/ethernet/cavium/ + ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE M: Alexander Shiyan L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -2426,7 +2433,6 @@ L: linux-security-module@vger.kernel.org S: Supported F: include/linux/capability.h F: include/uapi/linux/capability.h -F: security/capability.c F: security/commoncap.c F: kernel/capability.c @@ -3824,10 +3830,11 @@ M: David Woodhouse L: linux-embedded@vger.kernel.org S: Maintained -EMULEX LPFC FC SCSI DRIVER -M: James Smart +EMULEX/AVAGO LPFC FC/FCOE SCSI DRIVER +M: James Smart +M: Dick Kennedy L: linux-scsi@vger.kernel.org -W: http://sourceforge.net/projects/lpfcxxxx +W: http://www.avagotech.com S: Supported F: drivers/scsi/lpfc/ @@ -6374,6 +6381,12 @@ F: include/uapi/linux/meye.h F: include/uapi/linux/ivtv* F: include/uapi/linux/uvcvideo.h +MEDIATEK MT7601U WIRELESS LAN DRIVER +M: Jakub Kicinski +L: linux-wireless@vger.kernel.org +S: Maintained +F: drivers/net/wireless/mediatek/mt7601u/ + MEGARAID SCSI/SAS DRIVERS M: Kashyap Desai M: Sumit Saxena @@ -7583,6 +7596,7 @@ F: drivers/pci/host/pci-exynos.c PCI DRIVER FOR SYNOPSIS DESIGNWARE M: Jingoo Han +M: Pratyush Anand L: linux-pci@vger.kernel.org S: Maintained F: drivers/pci/host/*designware* @@ -7596,8 +7610,9 @@ F: Documentation/devicetree/bindings/pci/host-generic-pci.txt F: drivers/pci/host/pci-host-generic.c PCIE DRIVER FOR ST SPEAR13XX +M: Pratyush Anand L: linux-pci@vger.kernel.org -S: Orphan +S: Maintained F: drivers/pci/host/*spear* PCMCIA SUBSYSTEM @@ -8837,9 +8852,11 @@ F: drivers/misc/phantom.c F: include/uapi/linux/phantom.h SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER -M: Jayamohan Kallickal +M: Jayamohan Kallickal +M: Minh Tran +M: John Soni Jose L: linux-scsi@vger.kernel.org -W: http://www.emulex.com +W: http://www.avagotech.com S: Supported F: drivers/scsi/be2iscsi/ @@ -10593,8 +10610,7 @@ F: drivers/virtio/virtio_input.c F: include/uapi/linux/virtio_input.h VIA RHINE NETWORK DRIVER -M: Roger Luethi -S: Maintained +S: Orphan F: drivers/net/ethernet/via/via-rhine.c VIA SD/MMC CARD CONTROLLER DRIVER diff --git a/Makefile b/Makefile index dc20bcb9b271..f5c8983aeeb7 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 1 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = NAME = Hurr durr I'ma sheep # *DOCUMENTATION* diff --git a/arch/alpha/boot/Makefile b/arch/alpha/boot/Makefile index cd143887380a..8399bd0e68e8 100644 --- a/arch/alpha/boot/Makefile +++ b/arch/alpha/boot/Makefile @@ -14,6 +14,9 @@ targets := vmlinux.gz vmlinux \ tools/bootpzh bootloader bootpheader bootpzheader OBJSTRIP := $(obj)/tools/objstrip +HOSTCFLAGS := -Wall -I$(objtree)/usr/include +BOOTCFLAGS += -I$(obj) -I$(srctree)/$(obj) + # SRM bootable image. Copy to offset 512 of a partition. 
$(obj)/bootimage: $(addprefix $(obj)/tools/,mkbb lxboot bootlx) $(obj)/vmlinux.nh ( cat $(obj)/tools/lxboot $(obj)/tools/bootlx $(obj)/vmlinux.nh ) > $@ @@ -96,13 +99,14 @@ $(obj)/tools/bootph: $(obj)/bootpheader $(OBJSTRIP) FORCE $(obj)/tools/bootpzh: $(obj)/bootpzheader $(OBJSTRIP) FORCE $(call if_changed,objstrip) -LDFLAGS_bootloader := -static -uvsprintf -T #-N -relax -LDFLAGS_bootpheader := -static -uvsprintf -T #-N -relax -LDFLAGS_bootpzheader := -static -uvsprintf -T #-N -relax +LDFLAGS_bootloader := -static -T # -N -relax +LDFLAGS_bootloader := -static -T # -N -relax +LDFLAGS_bootpheader := -static -T # -N -relax +LDFLAGS_bootpzheader := -static -T # -N -relax -OBJ_bootlx := $(obj)/head.o $(obj)/main.o -OBJ_bootph := $(obj)/head.o $(obj)/bootp.o -OBJ_bootpzh := $(obj)/head.o $(obj)/bootpz.o $(obj)/misc.o +OBJ_bootlx := $(obj)/head.o $(obj)/stdio.o $(obj)/main.o +OBJ_bootph := $(obj)/head.o $(obj)/stdio.o $(obj)/bootp.o +OBJ_bootpzh := $(obj)/head.o $(obj)/stdio.o $(obj)/bootpz.o $(obj)/misc.o $(obj)/bootloader: $(obj)/bootloader.lds $(OBJ_bootlx) $(LIBS_Y) FORCE $(call if_changed,ld) diff --git a/arch/alpha/boot/main.c b/arch/alpha/boot/main.c index 3baf2d1e908d..dd6eb4a33582 100644 --- a/arch/alpha/boot/main.c +++ b/arch/alpha/boot/main.c @@ -19,7 +19,6 @@ #include "ksize.h" -extern int vsprintf(char *, const char *, va_list); extern unsigned long switch_to_osf_pal(unsigned long nr, struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa, unsigned long *vptb); diff --git a/arch/alpha/boot/stdio.c b/arch/alpha/boot/stdio.c new file mode 100644 index 000000000000..f844dae8a54a --- /dev/null +++ b/arch/alpha/boot/stdio.c @@ -0,0 +1,306 @@ +/* + * Copyright (C) Paul Mackerras 1997. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include <stdarg.h> +#include <stddef.h> + +size_t strnlen(const char * s, size_t count) +{ + const char *sc; + + for (sc = s; count-- && *sc != '\0'; ++sc) + /* nothing */; + return sc - s; +} + +# define do_div(n, base) ({ \ + unsigned int __base = (base); \ + unsigned int __rem; \ + __rem = ((unsigned long long)(n)) % __base; \ + (n) = ((unsigned long long)(n)) / __base; \ + __rem; \ +}) + + +static int skip_atoi(const char **s) +{ + int i, c; + + for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s) + i = i*10 + c - '0'; + return i; +} + +#define ZEROPAD 1 /* pad with zero */ +#define SIGN 2 /* unsigned/signed long */ +#define PLUS 4 /* show plus */ +#define SPACE 8 /* space if plus */ +#define LEFT 16 /* left justified */ +#define SPECIAL 32 /* 0x */ +#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char * number(char * str, unsigned long long num, int base, int size, int precision, int type) +{ + char c,sign,tmp[66]; + const char *digits="0123456789abcdefghijklmnopqrstuvwxyz"; + int i; + + if (type & LARGE) + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + if (type & LEFT) + type &= ~ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & ZEROPAD) ?
'0' : ' '; + sign = 0; + if (type & SIGN) { + if ((signed long long)num < 0) { + sign = '-'; + num = - (signed long long)num; + size--; + } else if (type & PLUS) { + sign = '+'; + size--; + } else if (type & SPACE) { + sign = ' '; + size--; + } + } + if (type & SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++]='0'; + else while (num != 0) { + tmp[i++] = digits[do_div(num, base)]; + } + if (i > precision) + precision = i; + size -= precision; + if (!(type&(ZEROPAD+LEFT))) + while(size-->0) + *str++ = ' '; + if (sign) + *str++ = sign; + if (type & SPECIAL) { + if (base==8) + *str++ = '0'; + else if (base==16) { + *str++ = '0'; + *str++ = digits[33]; + } + } + if (!(type & LEFT)) + while (size-- > 0) + *str++ = c; + while (i < precision--) + *str++ = '0'; + while (i-- > 0) + *str++ = tmp[i]; + while (size-- > 0) + *str++ = ' '; + return str; +} + +int vsprintf(char *buf, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char * str; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. */ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + + for (str=buf ; *fmt ; ++fmt) { + if (*fmt != '%') { + *str++ = *fmt; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= LEFT; goto repeat; + case '+': flags |= PLUS; goto repeat; + case ' ': flags |= SPACE; goto repeat; + case '#': flags |= SPECIAL; goto repeat; + case '0': flags |= ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if ('0' <= *fmt && *fmt <= '9') + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if ('0' <= *fmt && *fmt <= '9') + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'l' && *(fmt + 1) == 'l') { + qualifier = 'q'; + fmt += 2; + } else if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' + || *fmt == 'Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & LEFT)) + while (--field_width > 0) + *str++ = ' '; + *str++ = (unsigned char) va_arg(args, int); + while (--field_width > 0) + *str++ = ' '; + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & LEFT)) + while (len < field_width--) + *str++ = ' '; + for (i = 0; i < len; ++i) + *str++ = *s++; + while (len < field_width--) + *str++ = ' '; + continue; + + case 'p': + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= ZEROPAD; + } + str = number(str, + (unsigned long) va_arg(args, void *), 16, + field_width, precision, flags); + continue; + + + case 'n': + if (qualifier == 'l') { + long * ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t * ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int * ip = 
va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + *str++ = '%'; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= SIGN; + case 'u': + break; + + default: + *str++ = '%'; + if (*fmt) + *str++ = *fmt; + else + --fmt; + continue; + } + if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & SIGN) + num = (signed long) num; + } else if (qualifier == 'q') { + num = va_arg(args, unsigned long long); + if (flags & SIGN) + num = (signed long long) num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & SIGN) + num = (signed int) num; + } + str = number(str, num, base, field_width, precision, flags); + } + *str = '\0'; + return str-buf; +} + +int sprintf(char * buf, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i=vsprintf(buf,fmt,args); + va_end(args); + return i; +} diff --git a/arch/alpha/boot/tools/objstrip.c b/arch/alpha/boot/tools/objstrip.c index 367d53d031fc..dee82695f48b 100644 --- a/arch/alpha/boot/tools/objstrip.c +++ b/arch/alpha/boot/tools/objstrip.c @@ -27,6 +27,9 @@ #include #ifdef __ELF__ # include +# define elfhdr elf64_hdr +# define elf_phdr elf64_phdr +# define elf_check_arch(x) ((x)->e_machine == EM_ALPHA) #endif /* bootfile size must be multiple of BLOCK_SIZE: */ diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h index f61e1a56c378..4cb4b6d3452c 100644 --- a/arch/alpha/include/asm/types.h +++ b/arch/alpha/include/asm/types.h @@ -2,6 +2,5 @@ #define _ALPHA_TYPES_H #include -#include #endif /* _ALPHA_TYPES_H */ diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index c509d306db45..a56e608db2f9 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h @@ -3,7 +3,7 @@ #include -#define NR_SYSCALLS 511 +#define NR_SYSCALLS 514 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 diff --git a/arch/alpha/include/uapi/asm/unistd.h b/arch/alpha/include/uapi/asm/unistd.h index d214a0358100..aa33bf5aacb6 100644 --- a/arch/alpha/include/uapi/asm/unistd.h +++ b/arch/alpha/include/uapi/asm/unistd.h @@ -472,5 +472,8 @@ #define __NR_sched_setattr 508 #define __NR_sched_getattr 509 #define __NR_renameat2 510 +#define __NR_getrandom 511 +#define __NR_memfd_create 512 +#define __NR_execveat 513 #endif /* _UAPI_ALPHA_UNISTD_H */ diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c index 253cf1a87481..51267ac5729b 100644 --- a/arch/alpha/kernel/err_ev6.c +++ b/arch/alpha/kernel/err_ev6.c @@ -6,7 +6,6 @@ * Error handling code supporting Alpha systems */ -#include #include #include diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 7b2be251c30f..51f2c8654253 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index e51f578636a5..36dc91ace83a 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1019,14 +1019,13 @@ SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv, if (tv) { if (get_tv32((struct timeval *)&kts, tv)) return -EFAULT; + kts.tv_nsec *= 1000; 
} if (tz) { if (copy_from_user(&ktz, tz, sizeof(*tz))) return -EFAULT; } - kts.tv_nsec *= 1000; - return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); } diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 1941a07b5811..84d13263ce46 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -236,12 +236,11 @@ release_thread(struct task_struct *dead_task) } /* - * Copy an alpha thread.. + * Copy architecture-specific thread state */ - int copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long arg, + unsigned long kthread_arg, struct task_struct *p) { extern void ret_from_fork(void); @@ -262,7 +261,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp, sizeof(struct switch_stack) + sizeof(struct pt_regs)); childstack->r26 = (unsigned long) ret_from_kernel_thread; childstack->r9 = usp; /* function */ - childstack->r10 = arg; + childstack->r10 = kthread_arg; childregs->hae = alpha_mv.hae_cache, childti->pcb.usp = 0; return 0; diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 99ac36d5de4e..2f24447fef92 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -63,7 +63,6 @@ static struct { enum ipi_message_type { IPI_RESCHEDULE, IPI_CALL_FUNC, - IPI_CALL_FUNC_SINGLE, IPI_CPU_STOP, }; @@ -506,7 +505,6 @@ setup_profiling_timer(unsigned int multiplier) return -EINVAL; } - static void send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) { @@ -552,10 +550,6 @@ handle_ipi(struct pt_regs *regs) generic_smp_call_function_interrupt(); break; - case IPI_CALL_FUNC_SINGLE: - generic_smp_call_function_single_interrupt(); - break; - case IPI_CPU_STOP: halt(); @@ -606,7 +600,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) void arch_send_call_function_single_ipi(int cpu) { - send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); } static void diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c index 6f01d9ad7b81..72b59511e59a 100644 --- a/arch/alpha/kernel/srmcons.c +++ b/arch/alpha/kernel/srmcons.c @@ -237,8 +237,7 @@ srmcons_init(void) return -ENODEV; } - -module_init(srmcons_init); +device_initcall(srmcons_init); /* diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index f21d61fab678..24e41bd7d3c9 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c @@ -331,7 +331,7 @@ marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin) pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline); irq = intline; - msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI); + msi_loc = dev->msi_cap; msg_ctl = 0; if (msi_loc) pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl); diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index 24789713f1ea..9b62e3fd4f03 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S @@ -529,6 +529,9 @@ sys_call_table: .quad sys_sched_setattr .quad sys_sched_getattr .quad sys_renameat2 /* 510 */ + .quad sys_getrandom + .quad sys_memfd_create + .quad sys_execveat .size sys_call_table, . 
- sys_call_table .type sys_call_table, @object diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 9c4c189eb22f..74aceead06e9 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include diff --git a/arch/alpha/oprofile/op_model_ev4.c b/arch/alpha/oprofile/op_model_ev4.c index 18aa9b4f94f1..086a0d5445c5 100644 --- a/arch/alpha/oprofile/op_model_ev4.c +++ b/arch/alpha/oprofile/op_model_ev4.c @@ -8,7 +8,6 @@ */ #include -#include #include #include diff --git a/arch/alpha/oprofile/op_model_ev5.c b/arch/alpha/oprofile/op_model_ev5.c index c32f8a0ad925..c300f5ef3482 100644 --- a/arch/alpha/oprofile/op_model_ev5.c +++ b/arch/alpha/oprofile/op_model_ev5.c @@ -8,7 +8,6 @@ */ #include -#include #include #include diff --git a/arch/alpha/oprofile/op_model_ev6.c b/arch/alpha/oprofile/op_model_ev6.c index 1c84cc257fc7..02edf5971614 100644 --- a/arch/alpha/oprofile/op_model_ev6.c +++ b/arch/alpha/oprofile/op_model_ev6.c @@ -8,7 +8,6 @@ */ #include -#include #include #include diff --git a/arch/alpha/oprofile/op_model_ev67.c b/arch/alpha/oprofile/op_model_ev67.c index 34a57a126553..adb1744d20f3 100644 --- a/arch/alpha/oprofile/op_model_ev67.c +++ b/arch/alpha/oprofile/op_model_ev67.c @@ -9,7 +9,6 @@ */ #include -#include #include #include diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 86217db2937a..992736b5229b 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -223,7 +223,7 @@ dtb-$(CONFIG_SOC_IMX25) += \ imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dtb \ imx25-karo-tx25.dtb \ imx25-pdk.dtb -dtb-$(CONFIG_SOC_IMX31) += \ +dtb-$(CONFIG_SOC_IMX27) += \ imx27-apf27.dtb \ imx27-apf27dev.dtb \ imx27-eukrea-mbimxsd27-baseboard.dtb \ diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi index c3255e0c90aa..dbb3f4d2bf84 100644 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi @@ -223,6 +223,25 @@ /include/ "tps65217.dtsi" &tps { + /* + * Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only + * mode") at poweroff. Most BeagleBone versions do not support RTC-only + * mode and risk hardware damage if this mode is entered. + * + * For details, see linux-omap mailing list May 2015 thread + * [PATCH] ARM: dts: am335x-bone* enable pmic-shutdown-controller + * In particular, messages: + * http://www.spinics.net/lists/linux-omap/msg118585.html + * http://www.spinics.net/lists/linux-omap/msg118615.html + * + * You can override this later with + * &tps { /delete-property/ ti,pmic-shutdown-controller; } + * if you want to use RTC-only mode and made sure you are not affected + * by the hardware problems. (Tip: double-check by performing a current + * measurement after shutdown: it should be less than 1 mA.) 
+ */ + ti,pmic-shutdown-controller; + regulators { dcdc1_reg: regulator@0 { regulator-name = "vdds_dpr"; diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts index 5c42d259fa68..901739fcb85a 100644 --- a/arch/arm/boot/dts/am335x-boneblack.dts +++ b/arch/arm/boot/dts/am335x-boneblack.dts @@ -80,7 +80,3 @@ status = "okay"; }; }; - -&rtc { - system-power-controller; -}; diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index 87fc7a35e802..156d05efcb70 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts @@ -654,7 +654,7 @@ wlcore: wlcore@2 { compatible = "ti,wl1271"; reg = <2>; - interrupt-parent = <&gpio1>; + interrupt-parent = <&gpio0>; interrupts = <31 IRQ_TYPE_LEVEL_HIGH>; /* gpio 31 */ ref-clock-frequency = <38400000>; }; diff --git a/arch/arm/boot/dts/am35xx-clocks.dtsi b/arch/arm/boot/dts/am35xx-clocks.dtsi index 518b8fde88b0..18cc826e9db5 100644 --- a/arch/arm/boot/dts/am35xx-clocks.dtsi +++ b/arch/arm/boot/dts/am35xx-clocks.dtsi @@ -12,7 +12,7 @@ #clock-cells = <0>; compatible = "ti,am35xx-gate-clock"; clocks = <&ipss_ick>; - reg = <0x059c>; + reg = <0x032c>; ti,bit-shift = <1>; }; @@ -20,7 +20,7 @@ #clock-cells = <0>; compatible = "ti,gate-clock"; clocks = <&rmii_ck>; - reg = <0x059c>; + reg = <0x032c>; ti,bit-shift = <9>; }; @@ -28,7 +28,7 @@ #clock-cells = <0>; compatible = "ti,am35xx-gate-clock"; clocks = <&ipss_ick>; - reg = <0x059c>; + reg = <0x032c>; ti,bit-shift = <2>; }; @@ -36,7 +36,7 @@ #clock-cells = <0>; compatible = "ti,gate-clock"; clocks = <&pclk_ck>; - reg = <0x059c>; + reg = <0x032c>; ti,bit-shift = <10>; }; @@ -44,7 +44,7 @@ #clock-cells = <0>; compatible = "ti,am35xx-gate-clock"; clocks = <&ipss_ick>; - reg = <0x059c>; + reg = <0x032c>; ti,bit-shift = <0>; }; @@ -52,7 +52,7 @@ #clock-cells = <0>; compatible = "ti,gate-clock"; clocks = <&sys_ck>; - reg = <0x059c>; + reg = <0x032c>; ti,bit-shift = <8>; }; @@ -60,7 +60,7 @@ #clock-cells = <0>; compatible = "ti,am35xx-gate-clock"; clocks = <&sys_ck>; - reg = <0x059c>; + reg = <0x032c>; ti,bit-shift = <3>; }; }; diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts index a2cf2154dcdb..fdd187c55aa5 100644 --- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts +++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts @@ -95,6 +95,11 @@ internal-regs { + rtc@10300 { + /* No crystal connected to the internal RTC */ + status = "disabled"; + }; + /* J10: VCC, NC, RX, NC, TX, GND */ serial@12000 { status = "okay"; diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi index de8427be830a..289806adb343 100644 --- a/arch/arm/boot/dts/dm816x.dtsi +++ b/arch/arm/boot/dts/dm816x.dtsi @@ -382,7 +382,7 @@ ti,hwmods = "usb_otg_hs"; usb0: usb@47401000 { - compatible = "ti,musb-am33xx"; + compatible = "ti,musb-dm816"; reg = <0x47401400 0x400 0x47401000 0x200>; reg-names = "mc", "control"; @@ -422,7 +422,7 @@ }; usb1: usb@47401800 { - compatible = "ti,musb-am33xx"; + compatible = "ti,musb-dm816"; reg = <0x47401c00 0x400 0x47401800 0x200>; reg-names = "mc", "control"; diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts index 173ffa479ad3..792394dd0f2a 100644 --- a/arch/arm/boot/dts/exynos4412-trats2.dts +++ b/arch/arm/boot/dts/exynos4412-trats2.dts @@ -736,7 +736,7 @@ display-timings { timing-0 { - clock-frequency = <0>; + clock-frequency = <57153600>; hactive = <720>; vactive = <1280>; hfront-porch = <5>; diff --git 
a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi index 6951b66d1ab7..bc215e4b75fd 100644 --- a/arch/arm/boot/dts/imx27.dtsi +++ b/arch/arm/boot/dts/imx27.dtsi @@ -533,7 +533,7 @@ fec: ethernet@1002b000 { compatible = "fsl,imx27-fec"; - reg = <0x1002b000 0x4000>; + reg = <0x1002b000 0x1000>; interrupts = <50>; clocks = <&clks IMX27_CLK_FEC_IPG_GATE>, <&clks IMX27_CLK_FEC_AHB_GATE>; diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts index 134d3f27a8ec..921de6605f07 100644 --- a/arch/arm/boot/dts/omap3-devkit8000.dts +++ b/arch/arm/boot/dts/omap3-devkit8000.dts @@ -110,6 +110,8 @@ nand@0,0 { reg = <0 0 4>; /* CS0, offset 0, IO size 4 */ nand-bus-width = <16>; + gpmc,device-width = <2>; + ti,nand-ecc-opt = "sw"; gpmc,sync-clk-ps = <0>; gpmc,cs-on-ns = <0>; diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 5c16145920ea..5f5e0f3d5b64 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -832,8 +832,8 @@ touchscreen-fuzz-x = <4>; touchscreen-fuzz-y = <7>; touchscreen-fuzz-pressure = <2>; - touchscreen-max-x = <4096>; - touchscreen-max-y = <4096>; + touchscreen-size-x = <4096>; + touchscreen-size-y = <4096>; touchscreen-max-pressure = <2048>; ti,x-plate-ohms = <280>; diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 0ca4a3eaf65d..fbbb1915c6a9 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -429,7 +429,7 @@ CONFIG_USB_EHCI_EXYNOS=y CONFIG_USB_EHCI_TEGRA=y CONFIG_USB_EHCI_HCD_STI=y CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_ISP1760_HCD=y +CONFIG_USB_ISP1760=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_STI=y CONFIG_USB_OHCI_HCD_PLATFORM=y diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index f8ccc21fa032..4e7f40c577e6 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -33,7 +33,9 @@ ret_fast_syscall: UNWIND(.fnstart ) UNWIND(.cantunwind ) disable_irq @ disable interrupts - ldr r1, [tsk, #TI_FLAGS] + ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing + tst r1, #_TIF_SYSCALL_WORK + bne __sys_trace_return tst r1, #_TIF_WORK_MASK bne fast_work_pending asm_trace_hardirqs_on diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 213919ba326f..3b8c2833c537 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -304,16 +304,17 @@ static int probe_current_pmu(struct arm_pmu *pmu) static int of_pmu_irq_cfg(struct platform_device *pdev) { int i, irq; - int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); - - if (!irqs) - return -ENOMEM; + int *irqs; /* Don't bother with PPIs; they're already affine */ irq = platform_get_irq(pdev, 0); if (irq >= 0 && irq_is_percpu(irq)) return 0; + irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); + if (!irqs) + return -ENOMEM; + for (i = 0; i < pdev->num_resources; ++i) { struct device_node *dn; int cpu; diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index c0b6dccbf7bd..7d23ce04cad5 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -87,8 +87,8 @@ static unsigned int exynos_pmu_spare3; static u32 exynos_irqwake_intmask = 0xffffffff; static const struct exynos_wkup_irq exynos3250_wkup_irq[] = { - { 105, BIT(1) }, /* RTC alarm */ - { 106, BIT(2) }, /* RTC tick */ + { 73, BIT(1) }, /* RTC alarm */ + { 74, BIT(2) }, /* RTC tick */ { /* sentinel */ }, 
}; diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c index 4d60005e9277..6d0893a3828e 100644 --- a/arch/arm/mach-imx/gpc.c +++ b/arch/arm/mach-imx/gpc.c @@ -280,9 +280,15 @@ void __init imx_gpc_check_dt(void) struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc"); - if (WARN_ON(!np || - !of_find_property(np, "interrupt-controller", NULL))) - pr_warn("Outdated DT detected, system is about to crash!!!\n"); + if (WARN_ON(!np)) + return; + + if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) { + pr_warn("Outdated DT detected, suspend/resume will NOT work\n"); + + /* map GPC, so that at least CPUidle and WARs keep working */ + gpc_base = of_iomap(np, 0); + } } #ifdef CONFIG_PM_GENERIC_DOMAINS @@ -443,6 +449,10 @@ static int imx_gpc_probe(struct platform_device *pdev) struct regulator *pu_reg; int ret; + /* bail out if DT too old and doesn't provide the necessary info */ + if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells")) + return 0; + pu_reg = devm_regulator_get_optional(&pdev->dev, "pu"); if (PTR_ERR(pu_reg) == -ENODEV) pu_reg = NULL; diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index d1dedc8195ed..eafd120b53f1 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S @@ -203,23 +203,8 @@ save_context_wfi: */ ldr r1, kernel_flush blx r1 - /* - * The kernel doesn't interwork: v7_flush_dcache_all in particluar will - * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled. - * This sequence switches back to ARM. Note that .align may insert a - * nop: bx pc needs to be word-aligned in order to work. - */ - THUMB( .thumb ) - THUMB( .align ) - THUMB( bx pc ) - THUMB( nop ) - .arm - b omap3_do_wfi - -/* - * Local variables - */ +ENDPROC(omap34xx_cpu_suspend) omap3_do_wfi_sram_addr: .word omap3_do_wfi_sram kernel_flush: @@ -364,10 +349,7 @@ exit_nonoff_modes: * =================================== */ ldmfd sp!, {r4 - r11, pc} @ restore regs and return - -/* - * Local variables - */ +ENDPROC(omap3_do_wfi) sdrc_power: .word SDRC_POWER_V cm_idlest1_core: diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c index f1aeb54fabe3..2385052b0ce1 100644 --- a/arch/arm/mach-pxa/pxa_cplds_irqs.c +++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c @@ -107,7 +107,7 @@ static int cplds_probe(struct platform_device *pdev) struct resource *res; struct cplds *fpga; int ret; - unsigned int base_irq = 0; + int base_irq; unsigned long irqflags = 0; fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4e6ef896c619..7186382672b5 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void) } /* - * Find the first non-section-aligned page, and point + * Find the first non-pmd-aligned page, and point * memblock_limit at it. This relies on rounding the - * limit down to be section-aligned, which happens at - * the end of this function. + * limit down to be pmd-aligned, which happens at the + * end of this function. * * With this algorithm, the start or end of almost any - * bank can be non-section-aligned. The only exception - * is that the start of the bank 0 must be section- + * bank can be non-pmd-aligned. The only exception is + * that the start of the bank 0 must be section- * aligned, since otherwise memory would need to be * allocated when mapping the start of bank 0, which * occurs before any free memory is mapped. 
*/ if (!memblock_limit) { - if (!IS_ALIGNED(block_start, SECTION_SIZE)) + if (!IS_ALIGNED(block_start, PMD_SIZE)) memblock_limit = block_start; - else if (!IS_ALIGNED(block_end, SECTION_SIZE)) + else if (!IS_ALIGNED(block_end, PMD_SIZE)) memblock_limit = arm_lowmem_limit; } @@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void) high_memory = __va(arm_lowmem_limit - 1) + 1; /* - * Round the memblock limit down to a section size. This + * Round the memblock limit down to a pmd size. This * helps to ensure that we will allocate memory from the - * last full section, which should be mapped. + * last full pmd, which should be mapped. */ if (memblock_limit) - memblock_limit = round_down(memblock_limit, SECTION_SIZE); + memblock_limit = round_down(memblock_limit, PMD_SIZE); if (!memblock_limit) memblock_limit = arm_lowmem_limit; diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts index 43d54017b779..d0ab012fa379 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts +++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts @@ -16,7 +16,8 @@ #include "mt8173.dtsi" / { - model = "mediatek,mt8173-evb"; + model = "MediaTek MT8173 evaluation board"; + compatible = "mediatek,mt8173-evb", "mediatek,mt8173"; aliases { serial0 = &uart0; diff --git a/arch/blackfin/include/asm/io.h b/arch/blackfin/include/asm/io.h index 4e8ad0523118..6abebe82d4e9 100644 --- a/arch/blackfin/include/asm/io.h +++ b/arch/blackfin/include/asm/io.h @@ -10,6 +10,7 @@ #include #include #include +#include #define __raw_readb bfin_read8 #define __raw_readw bfin_read16 diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 15051e9c2c6f..b054c5c6e713 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -127,7 +127,7 @@ int smp_num_siblings = 1; volatile int ia64_cpu_to_sapicid[NR_CPUS]; EXPORT_SYMBOL(ia64_cpu_to_sapicid); -static volatile cpumask_t cpu_callin_map; +static cpumask_t cpu_callin_map; struct smp_boot_data smp_boot_data __initdata; @@ -477,6 +477,7 @@ do_boot_cpu (int sapicid, int cpu, struct task_struct *idle) for (timeout = 0; timeout < 100000; timeout++) { if (cpumask_test_cpu(cpu, &cpu_callin_map)) break; /* It has booted */ + barrier(); /* Make sure we re-read cpu_callin_map */ udelay(100); } Dprintk("\n"); diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index d4e162d35b34..7cc3be9fa7c6 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -478,9 +478,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { - struct pci_controller *controller = bridge->bus->sysdata; - - ACPI_COMPANION_SET(&bridge->dev, controller->companion); + /* + * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL + * here, pci_create_root_bus() has been called by someone else and + * sysdata is likely to be different from what we expect. Let it go in + * that case. 
+ */ + if (!bridge->dev.parent) { + struct pci_controller *controller = bridge->bus->sysdata; + ACPI_COMPANION_SET(&bridge->dev, controller->companion); + } return 0; } diff --git a/arch/lib/defconfig b/arch/lib/defconfig index b0ad77cc9c32..70089fcad786 100644 --- a/arch/lib/defconfig +++ b/arch/lib/defconfig @@ -70,9 +70,9 @@ CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y CONFIG_SYN_COOKIES=y # CONFIG_NET_IPVTI is not set -CONFIG_NET_UDP_TUNNEL=m -# CONFIG_NET_FOU is not set -# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_NET_UDP_TUNNEL=y +CONFIG_NET_FOU=y +CONFIG_NET_FOU_IP_TUNNELS=y # CONFIG_GENEVE is not set CONFIG_INET_AH=y CONFIG_INET_ESP=y @@ -363,8 +363,10 @@ CONFIG_ATM_LANE=m CONFIG_ATM_MPOA=m CONFIG_ATM_BR2684=m CONFIG_ATM_BR2684_IPFILTER=y -CONFIG_L2TP=m -# CONFIG_L2TP_V3 is not set +CONFIG_L2TP=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y CONFIG_STP=m CONFIG_GARP=m CONFIG_BRIDGE=m diff --git a/arch/lib/lib-socket.c b/arch/lib/lib-socket.c index d9be5fc2af41..75760c774288 100644 --- a/arch/lib/lib-socket.c +++ b/arch/lib/lib-socket.c @@ -242,46 +242,6 @@ int lib_sock_getsockopt(struct SimSocket *socket, int level, int optname, return err; } -int lib_sock_canrecv(struct SimSocket *socket) -{ - struct socket *sock = (struct socket *)socket; - struct inet_connection_sock *icsk; - - switch (sock->sk->sk_state) { - case TCP_CLOSE: - if (SOCK_STREAM == sock->sk->sk_type) - return 1; - case TCP_ESTABLISHED: - return sock->sk->sk_receive_queue.qlen > 0; - case TCP_SYN_SENT: - case TCP_SYN_RECV: - case TCP_LAST_ACK: - case TCP_CLOSING: - return 0; - case TCP_FIN_WAIT1: - case TCP_FIN_WAIT2: - case TCP_TIME_WAIT: - case TCP_CLOSE_WAIT: - return 1; - case TCP_LISTEN: - { - icsk = inet_csk(sock->sk); - return !reqsk_queue_empty(&icsk->icsk_accept_queue); - } - - default: - break; - } - - return 0; -} -int lib_sock_cansend(struct SimSocket *socket) -{ - struct socket *sock = (struct socket *)socket; - - return sock_writeable(sock->sk); -} - /** * Struct used to pass pool table context between DCE and Kernel and back from * Kernel to DCE diff --git a/arch/lib/time.c b/arch/lib/time.c index b54be754f532..7129660e66b2 100644 --- a/arch/lib/time.c +++ b/arch/lib/time.c @@ -71,21 +71,23 @@ ktime_t ktime_get_with_offset(enum tk_offsets offs) /* * Divide a ktime value by a nanosecond value */ -u64 __ktime_divns(const ktime_t kt, s64 div) +s64 __ktime_divns(const ktime_t kt, s64 div) { - u64 dclc; int sft = 0; + s64 dclc; + u64 tmp; dclc = ktime_to_ns(kt); + tmp = dclc < 0 ? -dclc : dclc; + /* Make sure the divisor is less than 2^32: */ while (div >> 32) { sft++; div >>= 1; } - dclc >>= sft; - do_div(dclc, (unsigned long)div); - - return dclc; + tmp >>= sft; + do_div(tmp, (unsigned long) div); + return dclc < 0 ? 
-tmp : tmp; } #endif /* BITS_PER_LONG >= 64 */ diff --git a/arch/lib/timer.c b/arch/lib/timer.c index 87d2283a4e01..94b3f5cf6d2f 100644 --- a/arch/lib/timer.c +++ b/arch/lib/timer.c @@ -141,8 +141,8 @@ int del_timer(struct timer_list *timer) retval = 0; if (timer->entry.prev != LIST_POISON2) { list_del(&timer->entry); - timer->entry.next = NULL; } + timer->entry.next = NULL; return retval; } diff --git a/arch/mips/ath79/prom.c b/arch/mips/ath79/prom.c index e1fe63051136..597899ad5438 100644 --- a/arch/mips/ath79/prom.c +++ b/arch/mips/ath79/prom.c @@ -1,6 +1,7 @@ /* * Atheros AR71XX/AR724X/AR913X specific prom routines * + * Copyright (C) 2015 Laurent Fasnacht * Copyright (C) 2008-2010 Gabor Juhos * Copyright (C) 2008 Imre Kaloz * @@ -25,12 +26,14 @@ void __init prom_init(void) { fw_init_cmdline(); +#ifdef CONFIG_BLK_DEV_INITRD /* Read the initrd address from the firmware environment */ initrd_start = fw_getenvl("initrd_start"); if (initrd_start) { initrd_start = KSEG0ADDR(initrd_start); initrd_end = initrd_start + fw_getenvl("initrd_size"); } +#endif } void __init prom_free_prom_memory(void) diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index a73c93c3d44a..7fc8397d16f2 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -225,7 +225,7 @@ void __init plat_time_init(void) ddr_clk_rate = ath79_get_sys_clk_rate("ddr"); ref_clk_rate = ath79_get_sys_clk_rate("ref"); - pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz", + pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz\n", cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000, ddr_clk_rate / 1000000, (ddr_clk_rate / 1000) % 1000, ahb_clk_rate / 1000000, (ahb_clk_rate / 1000) % 1000, diff --git a/arch/mips/cobalt/Makefile b/arch/mips/cobalt/Makefile index 558e94977942..68f0c5871adc 100644 --- a/arch/mips/cobalt/Makefile +++ b/arch/mips/cobalt/Makefile @@ -2,7 +2,6 @@ # Makefile for the Cobalt micro systems family specific parts of the kernel # -obj-y := buttons.o irq.o lcd.o led.o reset.o rtc.o serial.o setup.o time.o +obj-y := buttons.o irq.o lcd.o led.o mtd.o reset.o rtc.o serial.o setup.o time.o obj-$(CONFIG_PCI) += pci.o -obj-$(CONFIG_MTD_PHYSMAP) += mtd.o diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig index 002680648dcb..b2a577ebce0b 100644 --- a/arch/mips/configs/fuloong2e_defconfig +++ b/arch/mips/configs/fuloong2e_defconfig @@ -194,7 +194,7 @@ CONFIG_USB_WUSB_CBAF=m CONFIG_USB_C67X00_HCD=m CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_ISP1760_HCD=m +CONFIG_USB_ISP1760=m CONFIG_USB_OHCI_HCD=y CONFIG_USB_UHCI_HCD=m CONFIG_USB_R8A66597_HCD=m diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h index 18ae5ddef118..c28a8499aec7 100644 --- a/arch/mips/include/asm/pgtable-bits.h +++ b/arch/mips/include/asm/pgtable-bits.h @@ -113,7 +113,7 @@ #define _PAGE_PRESENT_SHIFT 0 #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) /* R2 or later cores check for RI/XI support to determine _PAGE_READ */ -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) #define _PAGE_WRITE_SHIFT (_PAGE_PRESENT_SHIFT + 1) #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) #else @@ -135,16 +135,16 @@ #define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT) /* Only R2 or newer cores have the XI bit */ -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) #define _PAGE_NO_EXEC_SHIFT (_PAGE_SPLITTING_SHIFT + 1) #else 
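/* With no XI bit on these cores, _PAGE_GLOBAL can take the slot that _PAGE_NO_EXEC uses in the branch above. */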
#define _PAGE_GLOBAL_SHIFT (_PAGE_SPLITTING_SHIFT + 1) #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) -#endif /* CONFIG_CPU_MIPSR2 */ +#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ #endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */ -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) /* XI - page cannot be executed */ #ifndef _PAGE_NO_EXEC_SHIFT #define _PAGE_NO_EXEC_SHIFT (_PAGE_MODIFIED_SHIFT + 1) @@ -160,10 +160,10 @@ #define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) -#else /* !CONFIG_CPU_MIPSR2 */ +#else /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */ #define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 1) #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) -#endif /* CONFIG_CPU_MIPSR2 */ +#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) @@ -205,7 +205,7 @@ */ static inline uint64_t pte_to_entrylo(unsigned long pte_val) { -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) if (cpu_has_rixi) { int sa; #ifdef CONFIG_32BIT diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index e92d6c4b5ed1..7163cd7fdd69 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -104,7 +104,6 @@ do { \ if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA)) \ __fpsave = FP_SAVE_VECTOR; \ (last) = resume(prev, next, task_thread_info(next), __fpsave); \ - disable_msa(); \ } while (0) #define finish_arch_switch(prev) \ @@ -122,6 +121,7 @@ do { \ if (cpu_has_userlocal) \ write_c0_userlocal(current_thread_info()->tp_value); \ __restore_watch(); \ + disable_msa(); \ } while (0) #endif /* _ASM_SWITCH_TO_H */ diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index e36515dcd3b2..209e5b76c1bc 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -74,13 +74,12 @@ static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c) { unsigned long sr, mask, fcsr, fcsr0, fcsr1; + fcsr = c->fpu_csr31; mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM; sr = read_c0_status(); __enable_fpu(FPU_AS_IS); - fcsr = read_32bit_cp1_register(CP1_STATUS); - fcsr0 = fcsr & mask; write_32bit_cp1_register(CP1_STATUS, fcsr0); fcsr0 = read_32bit_cp1_register(CP1_STATUS); diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index d2bfbc2e8995..3c8a18a00a65 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -29,7 +29,7 @@ int kgdb_early_setup; #endif -static unsigned long irq_map[NR_IRQS / BITS_PER_LONG]; +static DECLARE_BITMAP(irq_map, NR_IRQS); int allocate_irqno(void) { @@ -109,7 +109,7 @@ void __init init_IRQ(void) #endif } -#ifdef DEBUG_STACKOVERFLOW +#ifdef CONFIG_DEBUG_STACKOVERFLOW static inline void check_stack_overflow(void) { unsigned long sp; diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index fd528d7ea278..336708ae5c5b 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -444,7 +444,7 @@ struct plat_smp_ops bmips5000_smp_ops = { static void bmips_wr_vec(unsigned long dst, char *start, char *end) { memcpy((void *)dst, start, end - start); - dma_cache_wback((unsigned long)start, end - start); + dma_cache_wback(dst, end - start); local_flush_icache_range(dst, dst + (end - start)); instruction_hazard(); } diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 4b50c5787e25..d5fa3eaf39a1 100644 
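The DECLARE_BITMAP() conversion in arch/mips/kernel/irq.c above is more than a style cleanup: the open-coded array size truncates on division, so a platform with NR_IRQS smaller than BITS_PER_LONG ends up with a zero-size map. A minimal sketch of why the macro form is safe, assuming the usual kernel definitions of these helpers:

/* BITS_TO_LONGS() rounds up, so even a 1-bit map reserves a full word. */
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits)	unsigned long name[BITS_TO_LONGS(bits)]

/* The old form: zero words whenever NR_IRQS < BITS_PER_LONG. */
/* unsigned long irq_map[NR_IRQS / BITS_PER_LONG]; */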
--- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -2409,7 +2409,7 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, if (vcpu->mmio_needed == 2) *gpr = *(int16_t *) run->mmio.data; else - *gpr = *(int16_t *) run->mmio.data; + *gpr = *(uint16_t *)run->mmio.data; break; case 1: diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S index 7d12c0dded3d..77e64942f004 100644 --- a/arch/mips/lib/strnlen_user.S +++ b/arch/mips/lib/strnlen_user.S @@ -34,7 +34,12 @@ LEAF(__strnlen_\func\()_asm) FEXPORT(__strnlen_\func\()_nocheck_asm) move v0, a0 PTR_ADDU a1, a0 # stop pointer -1: beq v0, a1, 1f # limit reached? +1: +#ifdef CONFIG_CPU_DADDI_WORKAROUNDS + .set noat + li AT, 1 +#endif + beq v0, a1, 1f # limit reached? .ifeqs "\func", "kernel" EX(lb, t0, (v0), .Lfault\@) .else @@ -42,7 +47,13 @@ FEXPORT(__strnlen_\func\()_nocheck_asm) .endif .set noreorder bnez t0, 1b -1: PTR_ADDIU v0, 1 +1: +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS + PTR_ADDIU v0, 1 +#else + PTR_ADDU v0, AT + .set at +#endif .set reorder PTR_SUBU v0, a0 jr ra diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile index e70c33fdb881..f2e8153e44f5 100644 --- a/arch/mips/loongson/common/Makefile +++ b/arch/mips/loongson/common/Makefile @@ -3,15 +3,13 @@ # obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \ - bonito-irq.o mem.o machtype.o platform.o + bonito-irq.o mem.o machtype.o platform.o serial.o obj-$(CONFIG_PCI) += pci.o # # Serial port support # obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -loongson-serial-$(CONFIG_SERIAL_8250) := serial.o -obj-y += $(loongson-serial-m) $(loongson-serial-y) obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o obj-$(CONFIG_LOONGSON_MC146818) += rtc.o diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c index e3c68b5da18d..509877c6e9d9 100644 --- a/arch/mips/loongson/loongson-3/smp.c +++ b/arch/mips/loongson/loongson-3/smp.c @@ -272,7 +272,7 @@ void loongson3_ipi_interrupt(struct pt_regs *regs) if (action & SMP_ASK_C0COUNT) { BUG_ON(cpu != 0); c0count = read_c0_count(); - for (i = 1; i < loongson_sysconf.nr_cpus; i++) + for (i = 1; i < num_possible_cpus(); i++) per_cpu(core0_c0count, i) = c0count; } } diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 0dbb65a51ce5..2e03ab173591 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1372,7 +1372,7 @@ static int probe_scache(void) scache_size = addr; c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22); c->scache.ways = 1; - c->dcache.waybit = 0; /* does not matter */ + c->scache.waybit = 0; /* does not matter */ return 1; } diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 5d6139390bf8..e23fdf2a9c80 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -681,11 +681,7 @@ static unsigned int get_stack_depth(struct jit_ctx *ctx) sp_off += config_enabled(CONFIG_64BIT) ? (ARGS_USED_BY_JIT + 1) * RSIZE : RSIZE; - /* - * Subtract the bytes for the last registers since we only care about - * the location on the stack pointer. 
- */ - return sp_off - RSIZE; + return sp_off; } static void build_prologue(struct jit_ctx *ctx) diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c index e20b02e3ae28..e10d10b9e82a 100644 --- a/arch/mips/ralink/ill_acc.c +++ b/arch/mips/ralink/ill_acc.c @@ -41,7 +41,7 @@ static irqreturn_t ill_acc_irq_handler(int irq, void *_priv) addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M, type & ILL_ACC_LEN_M); - rt_memc_w32(REG_ILL_ACC_TYPE, REG_ILL_ACC_TYPE); + rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE); return IRQ_HANDLED; } diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h index ba8593a515ba..f6498eec9ee1 100644 --- a/arch/s390/net/bpf_jit.h +++ b/arch/s390/net/bpf_jit.h @@ -28,6 +28,9 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; * | old backchain | | * +---------------+ | * | r15 - r6 | | + +---------------+ | + | 4 byte align | | + | tail_call_cnt | | * BFP -> +===============+ | * | | | * | BPF stack | | @@ -46,12 +49,17 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; * R15 -> +---------------+ + low * * We get 160 bytes stack space from calling function, but only use - * 11 * 8 byte (old backchain + r15 - r6) for storing registers. + * 12 * 8 bytes for old backchain, r15..r6, and tail_call_cnt. */ -#define STK_OFF (MAX_BPF_STACK + 8 + 4 + 4 + (160 - 11 * 8)) +#define STK_SPACE (MAX_BPF_STACK + 8 + 4 + 4 + 160) +#define STK_160_UNUSED (160 - 12 * 8) +#define STK_OFF (STK_SPACE - STK_160_UNUSED) #define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ #define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ +#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */ +#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */ + /* Offset to skip condition code check */ #define OFF_OK 4 diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 20c146d1251a..d3766dd67e23 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include "bpf_jit.h" @@ -40,6 +41,8 @@ struct bpf_jit { int base_ip; /* Base address for literal pool */ int ret0_ip; /* Address of return 0 */ int exit_ip; /* Address of exit */ + int tail_call_start; /* Tail call start offset */ + int labels[1]; /* Labels for local jumps */ }; #define BPF_SIZE_MAX 4096 /* Max size for program */ @@ -49,6 +52,7 @@ struct bpf_jit { #define SEEN_RET0 4 /* ret0_ip points to a valid return 0 */ #define SEEN_LITERAL 8 /* code uses literals */ #define SEEN_FUNC 16 /* calls C functions */ +#define SEEN_TAIL_CALL 32 /* code uses tail calls */ #define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB) /* @@ -60,6 +64,7 @@ struct bpf_jit { #define REG_L (__MAX_BPF_REG+3) /* Literal pool register */ #define REG_15 (__MAX_BPF_REG+4) /* Register 15 */ #define REG_0 REG_W0 /* Register 0 */ +#define REG_1 REG_W1 /* Register 1 */ #define REG_2 BPF_REG_1 /* Register 2 */ #define REG_14 BPF_REG_0 /* Register 14 */ @@ -223,6 +228,24 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) REG_SET_SEEN(b3); \ }) +#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask) \ +({ \ + int rel = (jit->labels[label] - jit->prg) >> 1; \ + _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), \ + op2 | mask << 12); \ + REG_SET_SEEN(b1); \ + REG_SET_SEEN(b2); \ +}) + +#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask) \ +({ \ + int rel = (jit->labels[label] - jit->prg) >> 1; \ + _EMIT6(op1 | (reg_high(b1) | mask) << 16 | \ + (rel & 0xffff), op2 | (imm &
0xff) << 8); \ + REG_SET_SEEN(b1); \ + BUILD_BUG_ON(((unsigned long) imm) > 0xff); \ +}) + #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \ ({ \ /* Branch instruction needs 6 bytes */ \ @@ -286,7 +309,7 @@ static void jit_fill_hole(void *area, unsigned int size) */ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re) { - u32 off = 72 + (rs - 6) * 8; + u32 off = STK_OFF_R6 + (rs - 6) * 8; if (rs == re) /* stg %rs,off(%r15) */ @@ -301,7 +324,7 @@ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re) */ static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re) { - u32 off = 72 + (rs - 6) * 8; + u32 off = STK_OFF_R6 + (rs - 6) * 8; if (jit->seen & SEEN_STACK) off += STK_OFF; @@ -374,6 +397,16 @@ static void save_restore_regs(struct bpf_jit *jit, int op) */ static void bpf_jit_prologue(struct bpf_jit *jit) { + if (jit->seen & SEEN_TAIL_CALL) { + /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */ + _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT); + } else { + /* j tail_call_start: NOP if no tail calls are used */ + EMIT4_PCREL(0xa7f40000, 6); + _EMIT2(0); + } + /* Tail calls have to skip above initialization */ + jit->tail_call_start = jit->prg; /* Save registers */ save_restore_regs(jit, REGS_SAVE); /* Setup literal pool */ @@ -384,13 +417,16 @@ static void bpf_jit_prologue(struct bpf_jit *jit) } /* Setup stack and backchain */ if (jit->seen & SEEN_STACK) { - /* lgr %bfp,%r15 (BPF frame pointer) */ - EMIT4(0xb9040000, BPF_REG_FP, REG_15); + if (jit->seen & SEEN_FUNC) + /* lgr %w1,%r15 (backchain) */ + EMIT4(0xb9040000, REG_W1, REG_15); + /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */ + EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED); /* aghi %r15,-STK_OFF */ EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF); if (jit->seen & SEEN_FUNC) - /* stg %bfp,152(%r15) (backchain) */ - EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_FP, REG_0, + /* stg %w1,152(%r15) (backchain) */ + EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, 152); } /* @@ -948,6 +984,75 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT4(0xb9040000, BPF_REG_0, REG_2); break; } + case BPF_JMP | BPF_CALL | BPF_X: + /* + * Implicit input: + * B1: pointer to ctx + * B2: pointer to bpf_array + * B3: index in bpf_array + */ + jit->seen |= SEEN_TAIL_CALL; + + /* + * if (index >= array->map.max_entries) + * goto out; + */ + + /* llgf %w1,map.max_entries(%b2) */ + EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2, + offsetof(struct bpf_array, map.max_entries)); + /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */ + EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3, + REG_W1, 0, 0xa); + + /* + * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT) + * goto out; + */ + + if (jit->seen & SEEN_STACK) + off = STK_OFF_TCCNT + STK_OFF; + else + off = STK_OFF_TCCNT; + /* lhi %w0,1 */ + EMIT4_IMM(0xa7080000, REG_W0, 1); + /* laal %w1,%w0,off(%r15) */ + EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off); + /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */ + EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1, + MAX_TAIL_CALL_CNT, 0, 0x2); + + /* + * prog = array->prog[index]; + * if (prog == NULL) + * goto out; + */ + + /* sllg %r1,%b3,3: %r1 = index * 8 */ + EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3); + /* lg %r1,prog(%b2,%r1) */ + EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2, + REG_1, offsetof(struct bpf_array, prog)); + /* clgij %r1,0,0x8,label0 */ + EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8); + + /* + * Restore registers 
before calling function + */ + save_restore_regs(jit, REGS_RESTORE); + + /* + * goto *(prog->bpf_func + tail_call_start); + */ + + /* lg %r1,bpf_func(%r1) */ + EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0, + offsetof(struct bpf_prog, bpf_func)); + /* bc 0xf,tail_call_start(%r1) */ + _EMIT4(0x47f01000 + jit->tail_call_start); + /* out: */ + jit->labels[0] = jit->prg; + break; case BPF_JMP | BPF_EXIT: /* return b0 */ last = (i == fp->len - 1) ? 1 : 0; if (last && !(jit->seen & SEEN_RET0)) diff --git a/arch/score/lib/string.S b/arch/score/lib/string.S index 00b7d3a2fc60..16efa3ad037f 100644 --- a/arch/score/lib/string.S +++ b/arch/score/lib/string.S @@ -175,10 +175,10 @@ ENTRY(__clear_user) br r3 .section .fixup, "ax" +99: br r3 .previous .section __ex_table, "a" .align 2 -99: .word 0b, 99b .previous diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h index a6e424d185d0..a6cfdabb6054 100644 --- a/arch/sparc/include/asm/cpudata_64.h +++ b/arch/sparc/include/asm/cpudata_64.h @@ -24,7 +24,8 @@ typedef struct { unsigned int icache_line_size; unsigned int ecache_size; unsigned int ecache_line_size; - int core_id; + unsigned short sock_id; + unsigned short core_id; int proc_id; } cpuinfo_sparc; diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index dc165ebdf05a..2a52c91d2c8a 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -308,12 +308,26 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot) " sllx %1, 32, %1\n" " or %0, %1, %0\n" " .previous\n" + " .section .sun_m7_2insn_patch, \"ax\"\n" + " .word 661b\n" + " sethi %%uhi(%4), %1\n" + " sethi %%hi(%4), %0\n" + " .word 662b\n" + " or %1, %%ulo(%4), %1\n" + " or %0, %%lo(%4), %0\n" + " .word 663b\n" + " sllx %1, 32, %1\n" + " or %0, %1, %0\n" + " .previous\n" : "=r" (mask), "=r" (tmp) : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U), "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | + _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V), + "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | + _PAGE_CP_4V | _PAGE_E_4V | _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V)); return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); @@ -342,9 +356,15 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot) " andn %0, %4, %0\n" " or %0, %5, %0\n" " .previous\n" + " .section .sun_m7_2insn_patch, \"ax\"\n" + " .word 661b\n" + " andn %0, %6, %0\n" + " or %0, %5, %0\n" + " .previous\n" : "=r" (val) : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), - "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V)); + "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V), + "i" (_PAGE_CP_4V)); return __pgprot(val); } diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h index ed8f071132e4..d1761df5cca6 100644 --- a/arch/sparc/include/asm/topology_64.h +++ b/arch/sparc/include/asm/topology_64.h @@ -40,11 +40,12 @@ static inline int pcibus_to_node(struct pci_bus *pbus) #ifdef CONFIG_SMP #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) #define topology_core_id(cpu) (cpu_data(cpu).core_id) -#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) +#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu]) #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) #endif /* CONFIG_SMP */ extern cpumask_t 
cpu_core_map[NR_CPUS]; +extern cpumask_t cpu_core_sib_map[NR_CPUS]; static inline const struct cpumask *cpu_coregroup_mask(int cpu) { return &cpu_core_map[cpu]; diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h index 6fd4436d32f0..ec9c04de3664 100644 --- a/arch/sparc/include/asm/trap_block.h +++ b/arch/sparc/include/asm/trap_block.h @@ -79,6 +79,8 @@ struct sun4v_2insn_patch_entry { }; extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, __sun4v_2insn_patch_end; +extern struct sun4v_2insn_patch_entry __sun_m7_2insn_patch, + __sun_m7_2insn_patch_end; #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h index 07cc49e541f4..0f679421b468 100644 --- a/arch/sparc/kernel/entry.h +++ b/arch/sparc/kernel/entry.h @@ -69,6 +69,8 @@ void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, struct sun4v_1insn_patch_entry *); void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *, struct sun4v_2insn_patch_entry *); +void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *, + struct sun4v_2insn_patch_entry *); extern unsigned int dcache_parity_tl1_occurred; extern unsigned int icache_parity_tl1_occurred; diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c index 94e392bdee7d..814fb1729b12 100644 --- a/arch/sparc/kernel/leon_pci_grpci2.c +++ b/arch/sparc/kernel/leon_pci_grpci2.c @@ -723,7 +723,6 @@ static int grpci2_of_probe(struct platform_device *ofdev) err = -ENOMEM; goto err1; } - memset(grpci2priv, 0, sizeof(*grpci2priv)); priv->regs = regs; priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */ priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT; diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 26c80e18d7b1..6f80936e0eea 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -614,45 +614,68 @@ static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp) } } -static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) +static void find_back_node_value(struct mdesc_handle *hp, u64 node, + char *srch_val, + void (*func)(struct mdesc_handle *, u64, int), + u64 val, int depth) { - u64 a; - - mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) { - u64 t = mdesc_arc_target(hp, a); - const char *name; - const u64 *id; + u64 arc; - name = mdesc_node_name(hp, t); - if (!strcmp(name, "cpu")) { - id = mdesc_get_property(hp, t, "id", NULL); - if (*id < NR_CPUS) - cpu_data(*id).core_id = core_id; - } else { - u64 j; + /* Since we have an estimate of recursion depth, do a sanity check. 
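+ * The callers below pass a depth of 10, plenty for the cache-to-cpu arc chains while still keeping the recursion bounded.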
*/ + if (depth == 0) + return; - mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) { - u64 n = mdesc_arc_target(hp, j); - const char *n_name; + mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) { + u64 n = mdesc_arc_target(hp, arc); + const char *name = mdesc_node_name(hp, n); - n_name = mdesc_node_name(hp, n); - if (strcmp(n_name, "cpu")) - continue; + if (!strcmp(srch_val, name)) + (*func)(hp, n, val); - id = mdesc_get_property(hp, n, "id", NULL); - if (*id < NR_CPUS) - cpu_data(*id).core_id = core_id; - } - } + find_back_node_value(hp, n, srch_val, func, val, depth-1); } } +static void __mark_core_id(struct mdesc_handle *hp, u64 node, + int core_id) +{ + const u64 *id = mdesc_get_property(hp, node, "id", NULL); + + if (*id < num_possible_cpus()) + cpu_data(*id).core_id = core_id; +} + +static void __mark_sock_id(struct mdesc_handle *hp, u64 node, + int sock_id) +{ + const u64 *id = mdesc_get_property(hp, node, "id", NULL); + + if (*id < num_possible_cpus()) + cpu_data(*id).sock_id = sock_id; +} + +static void mark_core_ids(struct mdesc_handle *hp, u64 mp, + int core_id) +{ + find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10); +} + +static void mark_sock_ids(struct mdesc_handle *hp, u64 mp, + int sock_id) +{ + find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10); +} + static void set_core_ids(struct mdesc_handle *hp) { int idx; u64 mp; idx = 1; + + /* Identify unique cores by looking for cpus backpointed to by + * level 1 instruction caches. + */ mdesc_for_each_node_by_name(hp, mp, "cache") { const u64 *level; const char *type; @@ -667,11 +690,72 @@ static void set_core_ids(struct mdesc_handle *hp) continue; mark_core_ids(hp, mp, idx); + idx++; + } +} + +static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level) +{ + u64 mp; + int idx = 1; + int fnd = 0; + + /* Identify unique sockets by looking for cpus backpointed to by + * shared level n caches. + */ + mdesc_for_each_node_by_name(hp, mp, "cache") { + const u64 *cur_lvl; + + cur_lvl = mdesc_get_property(hp, mp, "level", NULL); + if (*cur_lvl != level) + continue; + + mark_sock_ids(hp, mp, idx); + idx++; + fnd = 1; + } + return fnd; +} + +static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp) +{ + int idx = 1; + mdesc_for_each_node_by_name(hp, mp, "socket") { + u64 a; + + mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { + u64 t = mdesc_arc_target(hp, a); + const char *name; + const u64 *id; + + name = mdesc_node_name(hp, t); + if (strcmp(name, "cpu")) + continue; + + id = mdesc_get_property(hp, t, "id", NULL); + if (*id < num_possible_cpus()) + cpu_data(*id).sock_id = idx; + } idx++; } } +static void set_sock_ids(struct mdesc_handle *hp) +{ + u64 mp; + + /* If the machine description exposes sockets data, use it. + * Otherwise fall back to shared L3 or L2 caches.
+ */ + mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets"); + if (mp != MDESC_NODE_NULL) + return set_sock_ids_by_socket(hp, mp); + + if (!set_sock_ids_by_cache(hp, 3)) + set_sock_ids_by_cache(hp, 2); +} + static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) { u64 a; @@ -707,7 +791,6 @@ static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name) continue; mark_proc_ids(hp, mp, idx); - idx++; } } @@ -900,6 +983,7 @@ void mdesc_fill_in_cpu_data(cpumask_t *mask) set_core_ids(hp); set_proc_ids(hp); + set_sock_ids(hp); mdesc_release(hp); diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 6f7251fd2eab..c928bc64b4ba 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -1002,6 +1002,38 @@ static int __init pcibios_init(void) subsys_initcall(pcibios_init); #ifdef CONFIG_SYSFS + +#define SLOT_NAME_SIZE 11 /* Max decimal digits + null in u32 */ + +static void pcie_bus_slot_names(struct pci_bus *pbus) +{ + struct pci_dev *pdev; + struct pci_bus *bus; + + list_for_each_entry(pdev, &pbus->devices, bus_list) { + char name[SLOT_NAME_SIZE]; + struct pci_slot *pci_slot; + const u32 *slot_num; + int len; + + slot_num = of_get_property(pdev->dev.of_node, + "physical-slot#", &len); + + if (slot_num == NULL || len != 4) + continue; + + snprintf(name, sizeof(name), "%u", slot_num[0]); + pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL); + + if (IS_ERR(pci_slot)) + pr_err("PCI: pci_create_slot returned %ld.\n", + PTR_ERR(pci_slot)); + } + + list_for_each_entry(bus, &pbus->children, node) + pcie_bus_slot_names(bus); +} + static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus) { const struct pci_slot_names { @@ -1053,18 +1085,29 @@ static int __init of_pci_slot_init(void) while ((pbus = pci_find_next_bus(pbus)) != NULL) { struct device_node *node; + struct pci_dev *pdev; + + pdev = list_first_entry(&pbus->devices, struct pci_dev, + bus_list); - if (pbus->self) { - /* PCI->PCI bridge */ - node = pbus->self->dev.of_node; + if (pdev && pci_is_pcie(pdev)) { + pcie_bus_slot_names(pbus); } else { - struct pci_pbm_info *pbm = pbus->sysdata; - /* Host PCI controller */ - node = pbm->op->dev.of_node; - } + if (pbus->self) { + + /* PCI->PCI bridge */ + node = pbus->self->dev.of_node; + + } else { + struct pci_pbm_info *pbm = pbus->sysdata; - pci_bus_slot_names(node, pbus); + /* Host PCI controller */ + node = pbm->op->dev.of_node; + } + + pci_bus_slot_names(node, pbus); + } } return 0; diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index c38d19fc27ba..f7b261749383 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -255,6 +255,24 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start, } } +void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start, + struct sun4v_2insn_patch_entry *end) +{ + while (start < end) { + unsigned long addr = start->addr; + + *(unsigned int *) (addr + 0) = start->insns[0]; + wmb(); + __asm__ __volatile__("flush %0" : : "r" (addr + 0)); + + *(unsigned int *) (addr + 4) = start->insns[1]; + wmb(); + __asm__ __volatile__("flush %0" : : "r" (addr + 4)); + + start++; + } +} + static void __init sun4v_patch(void) { extern void sun4v_hvapi_init(void); @@ -267,6 +285,9 @@ static void __init sun4v_patch(void) sun4v_patch_2insn_range(&__sun4v_2insn_patch, &__sun4v_2insn_patch_end); + if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7) + sun_m7_patch_2insn_range(&__sun_m7_2insn_patch, + &__sun_m7_2insn_patch_end); sun4v_hvapi_init(); 
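/* In sun_m7_patch_2insn_range() above, each patched word is made visible with wmb() and an explicit flush before the next word is written. */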
} diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 61139d9924ca..19cd08d18672 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -60,8 +60,12 @@ DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; cpumask_t cpu_core_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; +cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = { + [0 ... NR_CPUS-1] = CPU_MASK_NONE }; + EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_SYMBOL(cpu_core_map); +EXPORT_SYMBOL(cpu_core_sib_map); static cpumask_t smp_commenced_mask; @@ -1243,6 +1247,15 @@ void smp_fill_in_sib_core_maps(void) } } + for_each_present_cpu(i) { + unsigned int j; + + for_each_present_cpu(j) { + if (cpu_data(i).sock_id == cpu_data(j).sock_id) + cpumask_set_cpu(j, &cpu_core_sib_map[i]); + } + } + for_each_present_cpu(i) { unsigned int j; diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 09243057cb0b..f1a2f688b28a 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -138,6 +138,11 @@ SECTIONS *(.pause_3insn_patch) __pause_3insn_patch_end = .; } + .sun_m7_2insn_patch : { + __sun_m7_2insn_patch = .; + *(.sun_m7_2insn_patch) + __sun_m7_2insn_patch_end = .; + } PERCPU_SECTION(SMP_CACHE_BYTES) . = ALIGN(PAGE_SIZE); diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 4ca0d6ba5ec8..559cb744112c 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -54,6 +54,7 @@ #include "init_64.h" unsigned long kern_linear_pte_xor[4] __read_mostly; +static unsigned long page_cache4v_flag; /* A bitmap, two bits for every 256MB of physical memory. These two * bits determine what page size we use for kernel linear @@ -1909,11 +1910,24 @@ static void __init sun4u_linear_pte_xor_finalize(void) static void __init sun4v_linear_pte_xor_finalize(void) { + unsigned long pagecv_flag; + + /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead + * enables MCD error. Do not set bit 9 on M7 processor. 
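+ * (MCD is the M7 memory corruption detection feature; the longer comment added in paging_init() below has the full story.)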
+ */ + switch (sun4v_chip_type) { + case SUN4V_CHIP_SPARC_M7: + pagecv_flag = 0x00; + break; + default: + pagecv_flag = _PAGE_CV_4V; + break; + } #ifndef CONFIG_DEBUG_PAGEALLOC if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) { kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ PAGE_OFFSET; - kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | + kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag | _PAGE_P_4V | _PAGE_W_4V); } else { kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; @@ -1922,7 +1936,7 @@ static void __init sun4v_linear_pte_xor_finalize(void) if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) { kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^ PAGE_OFFSET; - kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V | + kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag | _PAGE_P_4V | _PAGE_W_4V); } else { kern_linear_pte_xor[2] = kern_linear_pte_xor[1]; @@ -1931,7 +1945,7 @@ static void __init sun4v_linear_pte_xor_finalize(void) if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) { kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^ PAGE_OFFSET; - kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V | + kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag | _PAGE_P_4V | _PAGE_W_4V); } else { kern_linear_pte_xor[3] = kern_linear_pte_xor[2]; @@ -1958,6 +1972,13 @@ static phys_addr_t __init available_memory(void) return available; } +#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) +#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) +#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) +#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) +#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) +#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) + /* We need to exclude reserved regions. This exclusion will include * vmlinux and initrd. To be more precise the initrd size could be used to * compute a new lower limit because it is freed later during initialization. @@ -2034,6 +2055,25 @@ void __init paging_init(void) memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); #endif + /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde + * bit on M7 processor. This is a conflicting usage of the same + * bit. Enabling TTE.cv on M7 would turn on Memory Corruption + * Detection error on all pages and this will lead to problems + * later. Kernel does not run with MCD enabled and hence rest + * of the required steps to fully configure memory corruption + * detection are not taken. We need to ensure TTE.mcde is not + * set on M7 processor. Compute the value of cacheability + * flag for use later taking this into consideration. 
+ */ + switch (sun4v_chip_type) { + case SUN4V_CHIP_SPARC_M7: + page_cache4v_flag = _PAGE_CP_4V; + break; + default: + page_cache4v_flag = _PAGE_CACHE_4V; + break; + } + if (tlb_type == hypervisor) sun4v_pgprot_init(); else @@ -2274,13 +2314,6 @@ void free_initrd_mem(unsigned long start, unsigned long end) } #endif -#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) -#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) -#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) -#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) -#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) -#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) - pgprot_t PAGE_KERNEL __read_mostly; EXPORT_SYMBOL(PAGE_KERNEL); @@ -2312,8 +2345,7 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend, _PAGE_P_4U | _PAGE_W_4U); if (tlb_type == hypervisor) pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | - _PAGE_CP_4V | _PAGE_CV_4V | - _PAGE_P_4V | _PAGE_W_4V); + page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V); pte_base |= _PAGE_PMD_HUGE; @@ -2450,14 +2482,14 @@ static void __init sun4v_pgprot_init(void) int i; PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | - _PAGE_CACHE_4V | _PAGE_P_4V | + page_cache4v_flag | _PAGE_P_4V | __ACCESS_BITS_4V | __DIRTY_BITS_4V | _PAGE_EXEC_4V); PAGE_KERNEL_LOCKED = PAGE_KERNEL; _PAGE_IE = _PAGE_IE_4V; _PAGE_E = _PAGE_E_4V; - _PAGE_CACHE = _PAGE_CACHE_4V; + _PAGE_CACHE = page_cache4v_flag; #ifdef CONFIG_DEBUG_PAGEALLOC kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET; @@ -2465,8 +2497,8 @@ static void __init sun4v_pgprot_init(void) kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ PAGE_OFFSET; #endif - kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | - _PAGE_P_4V | _PAGE_W_4V); + kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V | + _PAGE_W_4V); for (i = 1; i < 4; i++) kern_linear_pte_xor[i] = kern_linear_pte_xor[0]; @@ -2479,12 +2511,12 @@ static void __init sun4v_pgprot_init(void) _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); - page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; - page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag; + page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); - page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | __ACCESS_BITS_4V | _PAGE_EXEC_4V); - page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | __ACCESS_BITS_4V | _PAGE_EXEC_4V); page_exec_bit = _PAGE_EXEC_4V; @@ -2542,7 +2574,7 @@ static unsigned long kern_large_tte(unsigned long paddr) _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); if (tlb_type == hypervisor) val = (_PAGE_VALID | _PAGE_SZ4MB_4V | - _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | + page_cache4v_flag | _PAGE_P_4V | _PAGE_EXEC_4V | _PAGE_W_4V); return val | paddr; diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 89dd0d78013a..805d25ca5f1d 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -2,15 +2,14 @@ #define BOOT_COMPRESSED_MISC_H /* - * we have to be careful, because no indirections are allowed here, and - * paravirt_ops is a kind of one. 
As it will only run in baremetal anyway, - * we just keep it from happening + * Special hack: we have to be careful, because no indirections are allowed here, + * and paravirt_ops is a kind of one. As it will only run in baremetal anyway, + * we just keep it from happening. (This list needs to be extended when new + * paravirt and debugging variants are added.) */ #undef CONFIG_PARAVIRT +#undef CONFIG_PARAVIRT_SPINLOCKS #undef CONFIG_KASAN -#ifdef CONFIG_X86_32 -#define _ASM_X86_DESC_H 1 -#endif #include #include diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 19507ffa5d28..5fabf1362942 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -107,7 +107,7 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) static inline int user_mode(struct pt_regs *regs) { #ifdef CONFIG_X86_32 - return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; + return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL; #else return !!(regs->cs & 3); #endif diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 5a9856eb12ba..7d5a1929d76b 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -231,11 +231,21 @@ #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8) #ifdef __KERNEL__ + +/* + * early_idt_handler_array is an array of entry points referenced in the + * early IDT. For simplicity, it's a real array with one entry point + * every nine bytes. That leaves room for an optional 'push $0' if the + * vector has no error code (two bytes), a 'push $vector_number' (two + * bytes), and a jump to the common entry code (up to five bytes). + */ +#define EARLY_IDT_HANDLER_SIZE 9 + #ifndef __ASSEMBLY__ -extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5]; +extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE]; #ifdef CONFIG_TRACING -# define trace_early_idt_handlers early_idt_handlers +# define trace_early_idt_handler_array early_idt_handler_array #endif /* diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index c469490db4a8..3c6bb342a48f 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h @@ -140,6 +140,7 @@ #define MSR_CORE_C3_RESIDENCY 0x000003fc #define MSR_CORE_C6_RESIDENCY 0x000003fd #define MSR_CORE_C7_RESIDENCY 0x000003fe +#define MSR_KNL_CORE_C6_RESIDENCY 0x000003ff #define MSR_PKG_C2_RESIDENCY 0x0000060d #define MSR_PKG_C8_RESIDENCY 0x00000630 #define MSR_PKG_C9_RESIDENCY 0x00000631 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index e535533d5ab8..20190bdac9d5 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -708,6 +708,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, struct pt_regs *regs) { int i, ret = 0; + char *tmp; for (i = 0; i < mca_cfg.banks; i++) { m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); @@ -716,9 +717,11 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, if (quirk_no_way_out) quirk_no_way_out(i, m, regs); } - if (mce_severity(m, mca_cfg.tolerant, msg, true) >= - MCE_PANIC_SEVERITY) + + if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { + *msg = tmp; ret = 1; + } } return ret; } diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 87848ebe2bb7..4f7001f28936 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ 
b/arch/x86/kernel/cpu/perf_event.c @@ -190,6 +190,7 @@ static bool check_hw_exists(void) u64 val, val_fail, val_new= ~0; int i, reg, reg_fail, ret = 0; int bios_fail = 0; + int reg_safe = -1; /* * Check to see if the BIOS enabled any of the counters, if so @@ -204,6 +205,8 @@ static bool check_hw_exists(void) bios_fail = 1; val_fail = val; reg_fail = reg; + } else { + reg_safe = i; } } @@ -221,12 +224,23 @@ static bool check_hw_exists(void) } } + /* + * If all the counters are enabled, the below test will always + * fail. The tools will also become useless in this scenario. + * Just fail and disable the hardware counters. + */ + + if (reg_safe == -1) { + reg = reg_safe; + goto msr_fail; + } + /* * Read the current value, change it and read it back to see if it * matches, this is needed to detect certain hardware emulators * (qemu/kvm) that don't trap on the MSR access and always return 0s. */ - reg = x86_pmu_event_addr(0); + reg = x86_pmu_event_addr(reg_safe); if (rdmsrl_safe(reg, &val)) goto msr_fail; val ^= 0xffffUL; @@ -611,6 +625,7 @@ struct sched_state { int event; /* event index */ int counter; /* counter index */ int unassigned; /* number of events to be assigned left */ + int nr_gp; /* number of GP counters used */ unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; }; @@ -620,27 +635,29 @@ struct sched_state { struct perf_sched { int max_weight; int max_events; - struct perf_event **events; - struct sched_state state; + int max_gp; int saved_states; + struct event_constraint **constraints; + struct sched_state state; struct sched_state saved[SCHED_STATES_MAX]; }; /* * Initialize interator that runs through all events and counters. */ -static void perf_sched_init(struct perf_sched *sched, struct perf_event **events, - int num, int wmin, int wmax) +static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints, + int num, int wmin, int wmax, int gpmax) { int idx; memset(sched, 0, sizeof(*sched)); sched->max_events = num; sched->max_weight = wmax; - sched->events = events; + sched->max_gp = gpmax; + sched->constraints = constraints; for (idx = 0; idx < num; idx++) { - if (events[idx]->hw.constraint->weight == wmin) + if (constraints[idx]->weight == wmin) break; } @@ -687,7 +704,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched) if (sched->state.event >= sched->max_events) return false; - c = sched->events[sched->state.event]->hw.constraint; + c = sched->constraints[sched->state.event]; /* Prefer fixed purpose counters */ if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) { idx = INTEL_PMC_IDX_FIXED; @@ -696,11 +713,16 @@ static bool __perf_sched_find_counter(struct perf_sched *sched) goto done; } } + /* Grab the first unused counter starting with idx */ idx = sched->state.counter; for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) { - if (!__test_and_set_bit(idx, sched->state.used)) + if (!__test_and_set_bit(idx, sched->state.used)) { + if (sched->state.nr_gp++ >= sched->max_gp) + return false; + goto done; + } } return false; @@ -745,7 +767,7 @@ static bool perf_sched_next_event(struct perf_sched *sched) if (sched->state.weight > sched->max_weight) return false; } - c = sched->events[sched->state.event]->hw.constraint; + c = sched->constraints[sched->state.event]; } while (c->weight != sched->state.weight); sched->state.counter = 0; /* start with first counter */ @@ -756,12 +778,12 @@ static bool perf_sched_next_event(struct perf_sched *sched) /* * Assign a counter for each event. 
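* The new gpmax argument caps how many generic counters one scheduling pass may hand out; callers that want no cap (the uncore code, for instance) simply pass n.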
*/ -int perf_assign_events(struct perf_event **events, int n, - int wmin, int wmax, int *assign) +int perf_assign_events(struct event_constraint **constraints, int n, + int wmin, int wmax, int gpmax, int *assign) { struct perf_sched sched; - perf_sched_init(&sched, events, n, wmin, wmax); + perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax); do { if (!perf_sched_find_counter(&sched)) @@ -788,9 +810,9 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) x86_pmu.start_scheduling(cpuc); for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) { - hwc = &cpuc->event_list[i]->hw; + cpuc->event_constraint[i] = NULL; c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]); - hwc->constraint = c; + cpuc->event_constraint[i] = c; wmin = min(wmin, c->weight); wmax = max(wmax, c->weight); @@ -801,7 +823,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) */ for (i = 0; i < n; i++) { hwc = &cpuc->event_list[i]->hw; - c = hwc->constraint; + c = cpuc->event_constraint[i]; /* never assigned */ if (hwc->idx == -1) @@ -821,9 +843,26 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) } /* slow path */ - if (i != n) - unsched = perf_assign_events(cpuc->event_list, n, wmin, - wmax, assign); + if (i != n) { + int gpmax = x86_pmu.num_counters; + + /* + * Do not allow scheduling of more than half the available + * generic counters. + * + * This helps avoid counter starvation of the sibling thread by + * ensuring at most half the counters cannot be in exclusive + * mode. There are no designated counters for the limits. Any + * N/2 counters can be used. This helps with events with + * specific counter constraints. + */ + if (is_ht_workaround_enabled() && !cpuc->is_fake && + READ_ONCE(cpuc->excl_cntrs->exclusive_present)) + gpmax /= 2; + + unsched = perf_assign_events(cpuc->event_constraint, n, wmin, + wmax, gpmax, assign); + } /* * In case of success (unsched = 0), mark events as committed, @@ -840,7 +879,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) e = cpuc->event_list[i]; e->hw.flags |= PERF_X86_EVENT_COMMITTED; if (x86_pmu.commit_scheduling) - x86_pmu.commit_scheduling(cpuc, e, assign[i]); + x86_pmu.commit_scheduling(cpuc, i, assign[i]); } } @@ -1292,8 +1331,10 @@ static void x86_pmu_del(struct perf_event *event, int flags) x86_pmu.put_event_constraints(cpuc, event); /* Delete the array entry.
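* Note that event_list[] and the new event_constraint[] are indexed in lockstep, so both arrays must be compacted together here.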
*/ - while (++i < cpuc->n_events) + while (++i < cpuc->n_events) { cpuc->event_list[i-1] = cpuc->event_list[i]; + cpuc->event_constraint[i-1] = cpuc->event_constraint[i]; + } --cpuc->n_events; perf_event_update_userpage(event); diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 6ac5cb7a9e14..ef78516850fb 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -74,6 +74,7 @@ struct event_constraint { #define PERF_X86_EVENT_EXCL 0x0040 /* HT exclusivity on counter */ #define PERF_X86_EVENT_DYNAMIC 0x0080 /* dynamic alloc'd constraint */ #define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */ +#define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */ struct amd_nb { @@ -134,8 +135,6 @@ enum intel_excl_state_type { struct intel_excl_states { enum intel_excl_state_type init_state[X86_PMC_IDX_MAX]; enum intel_excl_state_type state[X86_PMC_IDX_MAX]; - int num_alloc_cntrs;/* #counters allocated */ - int max_alloc_cntrs;/* max #counters allowed */ bool sched_started; /* true if scheduling has started */ }; @@ -144,6 +143,11 @@ struct intel_excl_cntrs { struct intel_excl_states states[2]; + union { + u16 has_exclusive[2]; + u32 exclusive_present; + }; + int refcnt; /* per-core: #HT threads */ unsigned core_id; /* per-core: core id */ }; @@ -172,7 +176,11 @@ struct cpu_hw_events { added in the current transaction */ int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ u64 tags[X86_PMC_IDX_MAX]; + struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ + struct event_constraint *event_constraint[X86_PMC_IDX_MAX]; + + int n_excl; /* the number of exclusive events */ unsigned int group_flag; int is_fake; @@ -519,9 +527,7 @@ struct x86_pmu { void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); - void (*commit_scheduling)(struct cpu_hw_events *cpuc, - struct perf_event *event, - int cntr); + void (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr); void (*start_scheduling)(struct cpu_hw_events *cpuc); @@ -717,8 +723,8 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, void x86_pmu_enable_all(int added); -int perf_assign_events(struct perf_event **events, int n, - int wmin, int wmax, int *assign); +int perf_assign_events(struct event_constraint **constraints, int n, + int wmin, int wmax, int gpmax, int *assign); int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign); void x86_pmu_stop(struct perf_event *event, int flags); @@ -929,4 +935,8 @@ static inline struct intel_shared_regs *allocate_shared_regs(int cpu) return NULL; } +static inline int is_ht_workaround_enabled(void) +{ + return 0; +} #endif /* CONFIG_CPU_SUP_INTEL */ diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 3998131d1a68..a1e35c9f06b9 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1923,7 +1923,6 @@ intel_start_scheduling(struct cpu_hw_events *cpuc) xl = &excl_cntrs->states[tid]; xl->sched_started = true; - xl->num_alloc_cntrs = 0; /* * lock shared state until we are done scheduling * in stop_event_scheduling() @@ -2000,6 +1999,11 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, * across HT threads */ is_excl = c->flags & PERF_X86_EVENT_EXCL; + if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) { + event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT; + if (!cpuc->n_excl++) + 
WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1); + } /* * xl = state of current HT @@ -2008,18 +2012,6 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, xl = &excl_cntrs->states[tid]; xlo = &excl_cntrs->states[o_tid]; - /* - * do not allow scheduling of more than max_alloc_cntrs - * which is set to half the available generic counters. - * this helps avoid counter starvation of sibling thread - * by ensuring at most half the counters cannot be in - * exclusive mode. There is not designated counters for the - * limits. Any N/2 counters can be used. This helps with - * events with specifix counter constraints - */ - if (xl->num_alloc_cntrs++ == xl->max_alloc_cntrs) - return &emptyconstraint; - cx = c; /* @@ -2106,7 +2098,7 @@ static struct event_constraint * intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) { - struct event_constraint *c1 = event->hw.constraint; + struct event_constraint *c1 = cpuc->event_constraint[idx]; struct event_constraint *c2; /* @@ -2150,6 +2142,11 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc, xl = &excl_cntrs->states[tid]; xlo = &excl_cntrs->states[o_tid]; + if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) { + hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT; + if (!--cpuc->n_excl) + WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0); + } /* * put_constraint may be called from x86_schedule_events() @@ -2188,8 +2185,6 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, static void intel_put_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { - struct event_constraint *c = event->hw.constraint; - intel_put_shared_regs_event_constraints(cpuc, event); /* @@ -2197,19 +2192,14 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc, * all events are subject to and must call the * put_excl_constraints() routine */ - if (c && cpuc->excl_cntrs) + if (cpuc->excl_cntrs) intel_put_excl_constraints(cpuc, event); - - /* cleanup dynamic constraint */ - if (c && (c->flags & PERF_X86_EVENT_DYNAMIC)) - event->hw.constraint = NULL; } -static void intel_commit_scheduling(struct cpu_hw_events *cpuc, - struct perf_event *event, int cntr) +static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) { struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; - struct event_constraint *c = event->hw.constraint; + struct event_constraint *c = cpuc->event_constraint[idx]; struct intel_excl_states *xlo, *xl; int tid = cpuc->excl_thread_id; int o_tid = 1 - tid; @@ -2639,8 +2629,6 @@ static void intel_pmu_cpu_starting(int cpu) cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { - int h = x86_pmu.num_counters >> 1; - for_each_cpu(i, topology_thread_cpumask(cpu)) { struct intel_excl_cntrs *c; @@ -2654,11 +2642,6 @@ static void intel_pmu_cpu_starting(int cpu) } cpuc->excl_cntrs->core_id = core_id; cpuc->excl_cntrs->refcnt++; - /* - * set hard limit to half the number of generic counters - */ - cpuc->excl_cntrs->states[0].max_alloc_cntrs = h; - cpuc->excl_cntrs->states[1].max_alloc_cntrs = h; } } diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 813f75d71175..7f73b3553e2e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -706,9 +706,9 @@ void intel_pmu_pebs_disable(struct perf_event *event) cpuc->pebs_enabled &= ~(1ULL << hwc->idx); - if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_LDLAT) + 
if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32)); - else if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_ST) + else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) cpuc->pebs_enabled &= ~(1ULL << 63); if (cpuc->enabled) diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c index ffe666c2c6b5..123ff1bb2f60 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_pt.c +++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c @@ -151,7 +151,7 @@ static int __init pt_pmu_hw_init(void) de_attr->attr.attr.name = pt_caps[i].name; - sysfs_attr_init(&de_attrs->attr.attr); + sysfs_attr_init(&de_attr->attr.attr); de_attr->attr.attr.mode = S_IRUGO; de_attr->attr.show = pt_cap_show; @@ -615,7 +615,8 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf, struct perf_output_handle *handle) { - unsigned long idx, npages, end; + unsigned long head = local64_read(&buf->head); + unsigned long idx, npages, wakeup; if (buf->snapshot) return 0; @@ -634,17 +635,26 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf, buf->topa_index[buf->stop_pos]->stop = 0; buf->topa_index[buf->intr_pos]->intr = 0; - if (pt_cap_get(PT_CAP_topa_multiple_entries)) { - npages = (handle->size + 1) >> PAGE_SHIFT; - end = (local64_read(&buf->head) >> PAGE_SHIFT) + npages; - /*if (end > handle->wakeup >> PAGE_SHIFT) - end = handle->wakeup >> PAGE_SHIFT;*/ - idx = end & (buf->nr_pages - 1); - buf->stop_pos = idx; - idx = (local64_read(&buf->head) >> PAGE_SHIFT) + npages - 1; - idx &= buf->nr_pages - 1; - buf->intr_pos = idx; - } + /* how many pages till the STOP marker */ + npages = handle->size >> PAGE_SHIFT; + + /* if it's on a page boundary, fill up one more page */ + if (!offset_in_page(head + handle->size + 1)) + npages++; + + idx = (head >> PAGE_SHIFT) + npages; + idx &= buf->nr_pages - 1; + buf->stop_pos = idx; + + wakeup = handle->wakeup >> PAGE_SHIFT; + + /* in the worst case, wake up the consumer one page before hard stop */ + idx = (head >> PAGE_SHIFT) + npages - 1; + if (idx > wakeup) + idx = wakeup; + + idx &= buf->nr_pages - 1; + buf->intr_pos = idx; buf->topa_index[buf->stop_pos]->stop = 1; buf->topa_index[buf->intr_pos]->intr = 1; diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index c635b8b49e93..90b7c501c95b 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -365,9 +365,8 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX); for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) { - hwc = &box->event_list[i]->hw; c = uncore_get_event_constraint(box, box->event_list[i]); - hwc->constraint = c; + box->event_constraint[i] = c; wmin = min(wmin, c->weight); wmax = max(wmax, c->weight); } @@ -375,7 +374,7 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int /* fastpath, try to reuse previous register */ for (i = 0; i < n; i++) { hwc = &box->event_list[i]->hw; - c = hwc->constraint; + c = box->event_constraint[i]; /* never assigned */ if (hwc->idx == -1) @@ -395,8 +394,8 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int } /* slow path */ if (i != n) - ret = perf_assign_events(box->event_list, n, - wmin, wmax, assign); + ret = perf_assign_events(box->event_constraint, n, + wmin, wmax, n, assign); if (!assign || ret) { for (i = 0; i < n; i++) @@ -840,6 +839,7 @@ static int 
uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id box->phys_id = phys_id; box->pci_dev = pdev; box->pmu = pmu; + uncore_box_init(box); pci_set_drvdata(pdev, box); raw_spin_lock(&uncore_box_lock); @@ -1003,8 +1003,10 @@ static int uncore_cpu_starting(int cpu) pmu = &type->pmus[j]; box = *per_cpu_ptr(pmu->box, cpu); /* called by uncore_cpu_init? */ - if (box && box->phys_id >= 0) + if (box && box->phys_id >= 0) { + uncore_box_init(box); continue; + } for_each_online_cpu(k) { exist = *per_cpu_ptr(pmu->box, k); @@ -1020,8 +1022,10 @@ static int uncore_cpu_starting(int cpu) } } - if (box) + if (box) { box->phys_id = phys_id; + uncore_box_init(box); + } } } return 0; diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 6c8c1e7e69d8..ceac8f5dc018 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -97,6 +97,7 @@ struct intel_uncore_box { atomic_t refcnt; struct perf_event *events[UNCORE_PMC_IDX_MAX]; struct perf_event *event_list[UNCORE_PMC_IDX_MAX]; + struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX]; unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; u64 tags[UNCORE_PMC_IDX_MAX]; struct pci_dev *pci_dev; @@ -257,14 +258,6 @@ static inline int uncore_num_counters(struct intel_uncore_box *box) return box->pmu->type->num_counters; } -static inline void uncore_box_init(struct intel_uncore_box *box) -{ - if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { - if (box->pmu->type->ops->init_box) - box->pmu->type->ops->init_box(box); - } -} - static inline void uncore_disable_box(struct intel_uncore_box *box) { if (box->pmu->type->ops->disable_box) @@ -273,8 +266,6 @@ static inline void uncore_disable_box(struct intel_uncore_box *box) static inline void uncore_enable_box(struct intel_uncore_box *box) { - uncore_box_init(box); - if (box->pmu->type->ops->enable_box) box->pmu->type->ops->enable_box(box); } @@ -297,6 +288,14 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box, return box->pmu->type->ops->read_counter(box, event); } +static inline void uncore_box_init(struct intel_uncore_box *box) +{ + if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { + if (box->pmu->type->ops->init_box) + box->pmu->type->ops->init_box(box); + } +} + static inline bool uncore_box_is_fake(struct intel_uncore_box *box) { return (box->phys_id < 0); diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c index 12d9548457e7..6d6e85dd5849 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c @@ -164,8 +164,8 @@ ((1ULL << (n)) - 1))) /* Haswell-EP Ubox */ -#define HSWEP_U_MSR_PMON_CTR0 0x705 -#define HSWEP_U_MSR_PMON_CTL0 0x709 +#define HSWEP_U_MSR_PMON_CTR0 0x709 +#define HSWEP_U_MSR_PMON_CTL0 0x705 #define HSWEP_U_MSR_PMON_FILTER 0x707 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL 0x703 @@ -1914,7 +1914,7 @@ static struct intel_uncore_type hswep_uncore_cbox = { .name = "cbox", .num_counters = 4, .num_boxes = 18, - .perf_ctr_bits = 44, + .perf_ctr_bits = 48, .event_ctl = HSWEP_C0_MSR_PMON_CTL0, .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 2b55ee6db053..5a4668136e98 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -167,7 +167,7 @@ asmlinkage __visible void __init 
x86_64_start_kernel(char * real_mode_data) clear_bss(); for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) - set_intr_gate(i, early_idt_handlers[i]); + set_intr_gate(i, early_idt_handler_array[i]); load_idt((const struct desc_ptr *)&idt_descr); copy_bootdata(__va(real_mode_data)); diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index d031bad9e07e..53eeb226657c 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -478,21 +478,22 @@ is486: __INIT setup_once: /* - * Set up a idt with 256 entries pointing to ignore_int, - * interrupt gates. It doesn't actually load idt - that needs - * to be done on each CPU. Interrupts are enabled elsewhere, - * when we can be relatively sure everything is ok. + * Set up an idt with 256 interrupt gates that push zero if there + * is no error code and then jump to early_idt_handler_common. + * It doesn't actually load the idt - that needs to be done on + * each CPU. Interrupts are enabled elsewhere, when we can be + * relatively sure everything is ok. */ movl $idt_table,%edi - movl $early_idt_handlers,%eax + movl $early_idt_handler_array,%eax movl $NUM_EXCEPTION_VECTORS,%ecx 1: movl %eax,(%edi) movl %eax,4(%edi) /* interrupt gate, dpl=0, present */ movl $(0x8E000000 + __KERNEL_CS),2(%edi) - addl $9,%eax + addl $EARLY_IDT_HANDLER_SIZE,%eax addl $8,%edi loop 1b @@ -524,26 +525,28 @@ setup_once: andl $0,setup_once_ref /* Once is enough, thanks */ ret -ENTRY(early_idt_handlers) +ENTRY(early_idt_handler_array) # 36(%esp) %eflags # 32(%esp) %cs # 28(%esp) %eip # 24(%esp) error code i = 0 .rept NUM_EXCEPTION_VECTORS - .if (EXCEPTION_ERRCODE_MASK >> i) & 1 - ASM_NOP2 - .else + .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 pushl $0 # Dummy error code, to make stack frame uniform .endif pushl $i # 20(%esp) Vector number - jmp early_idt_handler + jmp early_idt_handler_common i = i + 1 + .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr -ENDPROC(early_idt_handlers) +ENDPROC(early_idt_handler_array) - /* This is global to keep gas from relaxing the jumps */ -ENTRY(early_idt_handler) +early_idt_handler_common: + /* + * The stack is the hardware frame, an error code or zero, and the + * vector number. + */ cld cmpl $2,(%esp) # X86_TRAP_NMI @@ -603,7 +606,7 @@ ex_entry: is_nmi: addl $8,%esp /* drop vector number and error code */ iret -ENDPROC(early_idt_handler) +ENDPROC(early_idt_handler_common) /* This is the default interrupt "handler" :-) */ ALIGN diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index ae6588b301c2..df7e78057ae0 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -321,26 +321,28 @@ bad_address: jmp bad_address __INIT - .globl early_idt_handlers -early_idt_handlers: +ENTRY(early_idt_handler_array) # 104(%rsp) %rflags # 96(%rsp) %cs # 88(%rsp) %rip # 80(%rsp) error code i = 0 .rept NUM_EXCEPTION_VECTORS - .if (EXCEPTION_ERRCODE_MASK >> i) & 1 - ASM_NOP2 - .else + .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 pushq $0 # Dummy error code, to make stack frame uniform .endif pushq $i # 72(%rsp) Vector number - jmp early_idt_handler + jmp early_idt_handler_common i = i + 1 + .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr +ENDPROC(early_idt_handler_array) -/* This is global to keep gas from relaxing the jumps */ -ENTRY(early_idt_handler) +early_idt_handler_common: + /* + * The stack is the hardware frame, an error code or zero, and the + * vector number.
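+ * From the top of the stack: the vector number pushed by the stub in + * early_idt_handler_array, then the error code (or the dummy zero), and + * then the %rip/%cs/%rflags frame pushed by the hardware.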
+ */ cld cmpl $2,(%rsp) # X86_TRAP_NMI @@ -412,7 +414,7 @@ ENTRY(early_idt_handler) is_nmi: addq $16,%rsp # drop vector number and error code INTERRUPT_RETURN -ENDPROC(early_idt_handler) +ENDPROC(early_idt_handler_common) __INITDATA diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 009183276bb7..6185d3141219 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -173,6 +173,21 @@ static void init_thread_xstate(void) xstate_size = sizeof(struct i387_fxsave_struct); else xstate_size = sizeof(struct i387_fsave_struct); + + /* + * Quirk: we don't yet handle the XSAVES* instructions + * correctly, as we don't correctly convert between + * standard and compacted format when interfacing + * with user-space - so disable it for now. + * + * The difference is small: with recent CPUs the + * compacted format is only marginally smaller than + * the standard FPU state format. + * + * ( This is easy to backport while we are fixing + * XSAVES* support. ) + */ + setup_clear_cpu_cap(X86_FEATURE_XSAVES); } /* diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 629af0f1c5c4..4c7deb4f78a1 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1090,6 +1090,17 @@ static void update_divide_count(struct kvm_lapic *apic) apic->divide_count); } +static void apic_update_lvtt(struct kvm_lapic *apic) +{ + u32 timer_mode = kvm_apic_get_reg(apic, APIC_LVTT) & + apic->lapic_timer.timer_mode_mask; + + if (apic->lapic_timer.timer_mode != timer_mode) { + apic->lapic_timer.timer_mode = timer_mode; + hrtimer_cancel(&apic->lapic_timer.timer); + } +} + static void apic_timer_expired(struct kvm_lapic *apic) { struct kvm_vcpu *vcpu = apic->vcpu; @@ -1298,6 +1309,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) apic_set_reg(apic, APIC_LVTT + 0x10 * i, lvt_val | APIC_LVT_MASKED); } + apic_update_lvtt(apic); atomic_set(&apic->lapic_timer.pending, 0); } @@ -1330,20 +1342,13 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) break; - case APIC_LVTT: { - u32 timer_mode = val & apic->lapic_timer.timer_mode_mask; - - if (apic->lapic_timer.timer_mode != timer_mode) { - apic->lapic_timer.timer_mode = timer_mode; - hrtimer_cancel(&apic->lapic_timer.timer); - } - + case APIC_LVTT: if (!kvm_apic_sw_enabled(apic)) val |= APIC_LVT_MASKED; val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask); apic_set_reg(apic, APIC_LVTT, val); + apic_update_lvtt(apic); break; - } case APIC_TMICT: if (apic_lvtt_tscdeadline(apic)) @@ -1576,7 +1581,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu) for (i = 0; i < APIC_LVT_NUM; i++) apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); - apic->lapic_timer.timer_mode = 0; + apic_update_lvtt(apic); apic_set_reg(apic, APIC_LVT0, SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); @@ -1802,6 +1807,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, apic_update_ppr(apic); hrtimer_cancel(&apic->lapic_timer.timer); + apic_update_lvtt(apic); update_divide_count(apic); start_apic_timer(apic); apic->irr_pending = true; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 44a7d2515497..b73337634214 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4215,13 +4215,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, u64 entry, gentry, *spte; int npte; bool remote_flush, local_flush, zap_page; - union kvm_mmu_page_role mask = (union kvm_mmu_page_role) { - .cr0_wp = 1, - .cr4_pae = 1, - .nxe = 1, - .smep_andnot_wp = 1, - .smap_andnot_wp = 1, - }; + union kvm_mmu_page_role mask = { }; + + 
mask.cr0_wp = 1; + mask.cr4_pae = 1; + mask.nxe = 1; + mask.smep_andnot_wp = 1; + mask.smap_andnot_wp = 1; /* * If we don't have indirect shadow pages, it means no page is diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 2ca777635d8e..579a8fd74be0 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1068,7 +1068,12 @@ void bpf_int_jit_compile(struct bpf_prog *prog) } ctx.cleanup_addr = proglen; - for (pass = 0; pass < 10; pass++) { + /* JITed image shrinks with every pass and the loop iterates + * until the image stops shrinking. Very large bpf programs + * may converge on the last pass. In such case do one more + * pass to emit the final image + */ + for (pass = 0; pass < 10 || image; pass++) { proglen = do_jit(prog, addrs, image, oldproglen, &ctx); if (proglen <= 0) { image = NULL; diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index d93963340c3c..14a63ed6fe09 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -482,9 +482,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { - struct pci_sysdata *sd = bridge->bus->sysdata; - - ACPI_COMPANION_SET(&bridge->dev, sd->companion); + /* + * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL + * here, pci_create_root_bus() has been called by someone else and + * sysdata is likely to be different from what we expect. Let it go in + * that case. + */ + if (!bridge->dev.parent) { + struct pci_sysdata *sd = bridge->bus->sysdata; + ACPI_COMPANION_SET(&bridge->dev, sd->companion); + } return 0; } diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 172a02a6ad14..ba78ccf651e7 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -185,4 +185,17 @@ static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, return -EINVAL; } +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) +{ + return NULL; +} + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ +} + #endif /* _XTENSA_DMA_MAPPING_H */ diff --git a/block/blk-mq.c b/block/blk-mq.c index e68b71b85a7e..594eea04266e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1600,6 +1600,7 @@ static int blk_mq_hctx_notify(void *data, unsigned long action, return NOTIFY_OK; } +/* hctx->ctxs will be freed in queue's release handler */ static void blk_mq_exit_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) @@ -1618,7 +1619,6 @@ static void blk_mq_exit_hctx(struct request_queue *q, blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); blk_free_flush_queue(hctx->fq); - kfree(hctx->ctxs); blk_mq_free_bitmap(&hctx->ctx_map); } @@ -1891,8 +1891,12 @@ void blk_mq_release(struct request_queue *q) unsigned int i; /* hctx kobj stays in hctx */ - queue_for_each_hw_ctx(q, hctx, i) + queue_for_each_hw_ctx(q, hctx, i) { + if (!hctx) + continue; + kfree(hctx->ctxs); kfree(hctx); + } kfree(q->queue_hw_ctx); diff --git a/block/genhd.c b/block/genhd.c index 0a536dc05f3b..ea982eadaf63 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -422,9 +422,9 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt) /* allocate ext devt */ idr_preload(GFP_KERNEL); - spin_lock(&ext_devt_lock); + spin_lock_bh(&ext_devt_lock); idx = 
idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT); - spin_unlock(&ext_devt_lock); + spin_unlock_bh(&ext_devt_lock); idr_preload_end(); if (idx < 0) @@ -449,9 +449,9 @@ void blk_free_devt(dev_t devt) return; if (MAJOR(devt) == BLOCK_EXT_MAJOR) { - spin_lock(&ext_devt_lock); + spin_lock_bh(&ext_devt_lock); idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); - spin_unlock(&ext_devt_lock); + spin_unlock_bh(&ext_devt_lock); } } @@ -653,7 +653,6 @@ void del_gendisk(struct gendisk *disk) disk->flags &= ~GENHD_FL_UP; sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); - bdi_unregister(&disk->queue->backing_dev_info); blk_unregister_queue(disk); blk_unregister_region(disk_devt(disk), disk->minors); @@ -691,13 +690,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno) } else { struct hd_struct *part; - spin_lock(&ext_devt_lock); + spin_lock_bh(&ext_devt_lock); part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); if (part && get_disk(part_to_disk(part))) { *partno = part->partno; disk = part_to_disk(part); } - spin_unlock(&ext_devt_lock); + spin_unlock_bh(&ext_devt_lock); } return disk; diff --git a/crypto/Kconfig b/crypto/Kconfig index 8aaf298a80e1..362905e7c841 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1512,15 +1512,6 @@ config CRYPTO_USER_API_RNG This option enables the user-spaces interface for random number generator algorithms. -config CRYPTO_USER_API_AEAD - tristate "User-space interface for AEAD cipher algorithms" - depends on NET - select CRYPTO_AEAD - select CRYPTO_USER_API - help - This option enables the user-spaces interface for AEAD - cipher algorithms. - config CRYPTO_HASH_INFO bool diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index 23716dd8a7ec..5928d0746a27 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c @@ -45,7 +45,7 @@ static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv, writel((cs->mbus_attr << 8) | (dram->mbus_dram_target_id << 4) | 1, hpriv->mmio + AHCI_WINDOW_CTRL(i)); - writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i)); + writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i)); writel(((cs->size - 1) & 0xffff0000), hpriv->mmio + AHCI_WINDOW_SIZE(i)); } diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 80a80548ad0a..27245957eee3 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -1053,7 +1053,7 @@ static struct of_device_id octeon_cf_match[] = { }, {}, }; -MODULE_DEVICE_TABLE(of, octeon_i2c_match); +MODULE_DEVICE_TABLE(of, octeon_cf_match); static struct platform_driver octeon_cf_driver = { .probe = octeon_cf_probe, diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 0237271e80fc..a8da3a50e374 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -117,7 +117,7 @@ static short nvpibits = -1; static short nvcibits = -1; static short rx_skb_reserve = 16; static bool irq_coalesce = true; -static bool sdh = 0; +static bool sdh; /* Read from EEPROM = 0000 0011b */ static unsigned int readtab[] = { diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 924f8e26789d..65e65903faa0 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -2618,7 +2618,7 @@ static void ia_close(struct atm_vcc *vcc) if (vcc->qos.txtp.traffic_class != ATM_NONE) { iadev->close_pending++; prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE); - schedule_timeout(50); + schedule_timeout(msecs_to_jiffies(500)); finish_wait(&iadev->timeout_wait, &wait); spin_lock_irqsave(&iadev->tx_lock, flags); while((skb = 
skb_dequeue(&iadev->tx_backlog))) { diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 9c2ba1c97c42..df0c66cb7ad3 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -179,7 +179,7 @@ static int detect_cache_attributes(unsigned int cpu) { int ret; - if (init_cache_level(cpu)) + if (init_cache_level(cpu) || !cache_leaves(cpu)) return -ENOENT; per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), diff --git a/drivers/base/init.c b/drivers/base/init.c index da033d3bab3c..48c0e220acc0 100644 --- a/drivers/base/init.c +++ b/drivers/base/init.c @@ -8,6 +8,7 @@ #include <linux/device.h> #include <linux/init.h> #include <linux/memory.h> +#include <linux/of.h> #include "base.h" @@ -34,4 +35,5 @@ void __init driver_init(void) cpu_dev_init(); memory_dev_init(); container_dev_init(); + of_core_init(); } diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index eb1fed5bd516..3ccef9eba6f9 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -406,6 +406,7 @@ config BLK_DEV_RAM_DAX config BLK_DEV_PMEM tristate "Persistent memory block device support" + depends on HAS_IOMEM help Saying Y here will allow you to use a contiguous range of reserved memory as one or more persistent block devices. diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 85b8036deaa3..683dff272562 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -1750,6 +1750,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) struct nvme_iod *iod; dma_addr_t meta_dma = 0; void *meta = NULL; + void __user *metadata; if (copy_from_user(&io, uio, sizeof(io))) return -EFAULT; @@ -1763,6 +1764,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) meta_len = 0; } + metadata = (void __user *)(unsigned long)io.metadata; + write = io.opcode & 1; switch (io.opcode) { @@ -1786,13 +1789,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) if (meta_len) { meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len, &meta_dma, GFP_KERNEL); + if (!meta) { status = -ENOMEM; goto unmap; } if (write) { - if (copy_from_user(meta, (void __user *)io.metadata, - meta_len)) { + if (copy_from_user(meta, metadata, meta_len)) { status = -EFAULT; goto unmap; } @@ -1819,8 +1822,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) nvme_free_iod(dev, iod); if (meta) { if (status == NVME_SC_SUCCESS && !write) { - if (copy_to_user((void __user *)io.metadata, meta, - meta_len)) + if (copy_to_user(metadata, meta, meta_len)) status = -EFAULT; } dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 8dcbced0eafd..6e134f4759c0 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram) memset(&zram->stats, 0, sizeof(zram->stats)); zram->disksize = 0; zram->max_comp_streams = 1; + set_capacity(zram->disk, 0); + part_stat_set_all(&zram->disk->part0, 0); up_write(&zram->init_lock); /* I/O operation under all of CPU are done so let's free */ diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index ed5c2738bea2..2e777071e1dc 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -9,6 +9,10 @@ config BT_BCM tristate select FW_LOADER +config BT_RTL + tristate + select FW_LOADER + config BT_HCIBTUSB tristate "HCI USB driver" depends on USB @@ -32,6 +36,17 @@ config BT_HCIBTUSB_BCM Say Y here to compile support for Broadcom
protocol. +config BT_HCIBTUSB_RTL + bool "Realtek protocol support" + depends on BT_HCIBTUSB + select BT_RTL + default y + help + The Realtek protocol support enables firmware and configuration + download support for Realtek Bluetooth controllers. + + Say Y here to compile support for Realtek protocol. + config BT_HCIBTSDIO tristate "HCI SDIO driver" depends on MMC diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index dd0d9c40b999..f40e194e7080 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_BT_MRVL) += btmrvl.o obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o obj-$(CONFIG_BT_WILINK) += btwilink.o obj-$(CONFIG_BT_BCM) += btbcm.o +obj-$(CONFIG_BT_RTL) += btrtl.o btmrvl-y := btmrvl_main.o btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 4bba86677adc..728fce38a5a2 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -55,12 +55,6 @@ int btbcm_check_bdaddr(struct hci_dev *hdev) } bda = (struct hci_rp_read_bd_addr *)skb->data; - if (bda->status) { - BT_ERR("%s: BCM: Device address result failed (%02x)", - hdev->name, bda->status); - kfree_skb(skb); - return -bt_to_errno(bda->status); - } /* The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller * with no configured address. diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 2d43d4279b00..828f2f8d1568 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -53,12 +53,6 @@ int btintel_check_bdaddr(struct hci_dev *hdev) } bda = (struct hci_rp_read_bd_addr *)skb->data; - if (bda->status) { - BT_ERR("%s: Intel device address result failed (%02x)", - hdev->name, bda->status); - kfree_skb(skb); - return -bt_to_errno(bda->status); - } /* For some Intel based controllers, the default Bluetooth device * address 00:03:19:9E:8B:00 can be found. These controllers are diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 01d6da577eeb..b9a811900f6a 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -1217,7 +1217,7 @@ static void btmrvl_sdio_dump_firmware(struct btmrvl_private *priv) unsigned int reg, reg_start, reg_end; enum rdwr_status stat; u8 *dbg_ptr, *end_ptr, *fw_dump_data, *fw_dump_ptr; - u8 dump_num, idx, i, read_reg, doneflag = 0; + u8 dump_num = 0, idx, i, read_reg, doneflag = 0; u32 memory_size, fw_dump_len = 0; /* dump sdio register first */ diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c new file mode 100644 index 000000000000..84288938f7f2 --- /dev/null +++ b/drivers/bluetooth/btrtl.c @@ -0,0 +1,390 @@ +/* + * Bluetooth support for Realtek devices + * + * Copyright (C) 2015 Endless Mobile, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/module.h> +#include <linux/firmware.h> +#include <asm/unaligned.h> +#include <linux/usb.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> + +#include "btrtl.h" + +#define VERSION "0.1" + +#define RTL_EPATCH_SIGNATURE "Realtech" +#define RTL_ROM_LMP_3499 0x3499 +#define RTL_ROM_LMP_8723A 0x1200 +#define RTL_ROM_LMP_8723B 0x8723 +#define RTL_ROM_LMP_8821A 0x8821 +#define RTL_ROM_LMP_8761A 0x8761 + +static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) +{ + struct rtl_rom_version_evt *rom_version; + struct sk_buff *skb; + + /* Read RTL ROM version command */ + skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Read ROM version failed (%ld)", + hdev->name, PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != sizeof(*rom_version)) { + BT_ERR("%s: RTL version event length mismatch", hdev->name); + kfree_skb(skb); + return -EIO; + } + + rom_version = (struct rtl_rom_version_evt *)skb->data; + BT_INFO("%s: rom_version status=%x version=%x", + hdev->name, rom_version->status, rom_version->version); + + *version = rom_version->version; + + kfree_skb(skb); + return 0; +} + +static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver, + const struct firmware *fw, + unsigned char **_buf) +{ + const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 }; + struct rtl_epatch_header *epatch_info; + unsigned char *buf; + int i, ret, len; + size_t min_size; + u8 opcode, length, data, rom_version = 0; + int project_id = -1; + const unsigned char *fwptr, *chip_id_base; + const unsigned char *patch_length_base, *patch_offset_base; + u32 patch_offset = 0; + u16 patch_length, num_patches; + const u16 project_id_to_lmp_subver[] = { + RTL_ROM_LMP_8723A, + RTL_ROM_LMP_8723B, + RTL_ROM_LMP_8821A, + RTL_ROM_LMP_8761A + }; + + ret = rtl_read_rom_version(hdev, &rom_version); + if (ret) + return ret; + + min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3; + if (fw->size < min_size) + return -EINVAL; + + fwptr = fw->data + fw->size - sizeof(extension_sig); + if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) { + BT_ERR("%s: extension section signature mismatch", hdev->name); + return -EINVAL; + } + + /* Loop from the end of the firmware parsing instructions, until + * we find an instruction that identifies the "project ID" for the + * hardware supported by this firmware file. + * Once we have that, we double-check that that project_id is suitable + * for the hardware we are working with.
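+ * + * (The list is read back to front: each record ends with an opcode + * byte preceded by a length byte and its payload. Opcode 0xff ends + * the list; opcode 0x00 with length 1 carries the project ID in its + * single data byte.)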
+ */ + while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) { + opcode = *--fwptr; + length = *--fwptr; + data = *--fwptr; + + BT_DBG("check op=%x len=%x data=%x", opcode, length, data); + + if (opcode == 0xff) /* EOF */ + break; + + if (length == 0) { + BT_ERR("%s: found instruction with length 0", + hdev->name); + return -EINVAL; + } + + if (opcode == 0 && length == 1) { + project_id = data; + break; + } + + fwptr -= length; + } + + if (project_id < 0) { + BT_ERR("%s: failed to find version instruction", hdev->name); + return -EINVAL; + } + + if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) { + BT_ERR("%s: unknown project id %d", hdev->name, project_id); + return -EINVAL; + } + + if (lmp_subver != project_id_to_lmp_subver[project_id]) { + BT_ERR("%s: firmware is for %x but this is a %x", hdev->name, + project_id_to_lmp_subver[project_id], lmp_subver); + return -EINVAL; + } + + epatch_info = (struct rtl_epatch_header *)fw->data; + if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) { + BT_ERR("%s: bad EPATCH signature", hdev->name); + return -EINVAL; + } + + num_patches = le16_to_cpu(epatch_info->num_patches); + BT_DBG("fw_version=%x, num_patches=%d", + le32_to_cpu(epatch_info->fw_version), num_patches); + + /* After the rtl_epatch_header there is a funky patch metadata section. + * Assuming 2 patches, the layout is: + * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2 + * + * Find the right patch for this chip. + */ + min_size += 8 * num_patches; + if (fw->size < min_size) + return -EINVAL; + + chip_id_base = fw->data + sizeof(struct rtl_epatch_header); + patch_length_base = chip_id_base + (sizeof(u16) * num_patches); + patch_offset_base = patch_length_base + (sizeof(u16) * num_patches); + for (i = 0; i < num_patches; i++) { + u16 chip_id = get_unaligned_le16(chip_id_base + + (i * sizeof(u16))); + if (chip_id == rom_version + 1) { + patch_length = get_unaligned_le16(patch_length_base + + (i * sizeof(u16))); + patch_offset = get_unaligned_le32(patch_offset_base + + (i * sizeof(u32))); + break; + } + } + + if (!patch_offset) { + BT_ERR("%s: didn't find patch for chip id %d", + hdev->name, rom_version); + return -EINVAL; + } + + BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i); + min_size = patch_offset + patch_length; + if (fw->size < min_size) + return -EINVAL; + + /* Copy the firmware into a new buffer and write the version at + * the end. 
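+ * The final 4 bytes of the copied patch are overwritten with the + * fw_version field taken from the epatch header.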
+ */ + len = patch_length; + buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4); + + *_buf = buf; + return len; +} + +static int rtl_download_firmware(struct hci_dev *hdev, + const unsigned char *data, int fw_len) +{ + struct rtl_download_cmd *dl_cmd; + int frag_num = fw_len / RTL_FRAG_LEN + 1; + int frag_len = RTL_FRAG_LEN; + int ret = 0; + int i; + + dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL); + if (!dl_cmd) + return -ENOMEM; + + for (i = 0; i < frag_num; i++) { + struct sk_buff *skb; + + BT_DBG("download fw (%d/%d)", i, frag_num); + + dl_cmd->index = i; + if (i == (frag_num - 1)) { + dl_cmd->index |= 0x80; /* data end */ + frag_len = fw_len % RTL_FRAG_LEN; + } + memcpy(dl_cmd->data, data, frag_len); + + /* Send download command */ + skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: download fw command failed (%ld)", + hdev->name, PTR_ERR(skb)); + ret = -PTR_ERR(skb); + goto out; + } + + if (skb->len != sizeof(struct rtl_download_response)) { + BT_ERR("%s: download fw event length mismatch", + hdev->name); + kfree_skb(skb); + ret = -EIO; + goto out; + } + + kfree_skb(skb); + data += RTL_FRAG_LEN; + } + +out: + kfree(dl_cmd); + return ret; +} + +static int btrtl_setup_rtl8723a(struct hci_dev *hdev) +{ + const struct firmware *fw; + int ret; + + BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name); + ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &hdev->dev); + if (ret < 0) { + BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name); + return ret; + } + + if (fw->size < 8) { + ret = -EINVAL; + goto out; + } + + /* Check that the firmware doesn't have the epatch signature + * (which is only for RTL8723B and newer). 
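+ * RTL8723A firmware is a raw image that is downloaded to the + * controller as-is; there is no epatch header to parse.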
+ */ + if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) { + BT_ERR("%s: unexpected EPATCH signature!", hdev->name); + ret = -EINVAL; + goto out; + } + + ret = rtl_download_firmware(hdev, fw->data, fw->size); + +out: + release_firmware(fw); + return ret; +} + +static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver, + const char *fw_name) +{ + unsigned char *fw_data = NULL; + const struct firmware *fw; + int ret; + + BT_INFO("%s: rtl: loading %s", hdev->name, fw_name); + ret = request_firmware(&fw, fw_name, &hdev->dev); + if (ret < 0) { + BT_ERR("%s: Failed to load %s", hdev->name, fw_name); + return ret; + } + + ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data); + if (ret < 0) + goto out; + + ret = rtl_download_firmware(hdev, fw_data, ret); + kfree(fw_data); + if (ret < 0) + goto out; + +out: + release_firmware(fw); + return ret; +} + +static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)", + hdev->name, PTR_ERR(skb)); + return skb; + } + + if (skb->len != sizeof(struct hci_rp_read_local_version)) { + BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch", + hdev->name); + kfree_skb(skb); + return ERR_PTR(-EIO); + } + + return skb; +} + +int btrtl_setup_realtek(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct hci_rp_read_local_version *resp; + u16 lmp_subver; + + skb = btrtl_read_local_version(hdev); + if (IS_ERR(skb)) + return -PTR_ERR(skb); + + resp = (struct hci_rp_read_local_version *)skb->data; + BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x " + "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev, + resp->lmp_ver, resp->lmp_subver); + + lmp_subver = le16_to_cpu(resp->lmp_subver); + kfree_skb(skb); + + /* Match a set of subver values that correspond to stock firmware, + * which is not compatible with standard btusb. + * If matched, upload an alternative firmware that does conform to + * standard btusb. Once that firmware is uploaded, the subver changes + * to a different value. + */ + switch (lmp_subver) { + case RTL_ROM_LMP_8723A: + case RTL_ROM_LMP_3499: + return btrtl_setup_rtl8723a(hdev); + case RTL_ROM_LMP_8723B: + return btrtl_setup_rtl8723b(hdev, lmp_subver, + "rtl_bt/rtl8723b_fw.bin"); + case RTL_ROM_LMP_8821A: + return btrtl_setup_rtl8723b(hdev, lmp_subver, + "rtl_bt/rtl8821a_fw.bin"); + case RTL_ROM_LMP_8761A: + return btrtl_setup_rtl8723b(hdev, lmp_subver, + "rtl_bt/rtl8761a_fw.bin"); + default: + BT_INFO("rtl: assuming no firmware upload needed."); + return 0; + } +} +EXPORT_SYMBOL_GPL(btrtl_setup_realtek); + +MODULE_AUTHOR("Daniel Drake <drake@endlessm.com>"); +MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h new file mode 100644 index 000000000000..38ffe4890cd1 --- /dev/null +++ b/drivers/bluetooth/btrtl.h @@ -0,0 +1,52 @@ +/* + * Bluetooth support for Realtek devices + * + * Copyright (C) 2015 Endless Mobile, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define RTL_FRAG_LEN 252 + +struct rtl_download_cmd { + __u8 index; + __u8 data[RTL_FRAG_LEN]; +} __packed; + +struct rtl_download_response { + __u8 status; + __u8 index; +} __packed; + +struct rtl_rom_version_evt { + __u8 status; + __u8 version; +} __packed; + +struct rtl_epatch_header { + __u8 signature[8]; + __le32 fw_version; + __le16 num_patches; +} __packed; + +#if IS_ENABLED(CONFIG_BT_RTL) + +int btrtl_setup_realtek(struct hci_dev *hdev); + +#else + +static inline int btrtl_setup_realtek(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +#endif diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 3c10d4dfe9a7..94c6c048130f 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -31,13 +31,14 @@ #include "btintel.h" #include "btbcm.h" +#include "btrtl.h" #define VERSION "0.8" static bool disable_scofix; static bool force_scofix; -static bool reset = 1; +static bool reset = true; static struct usb_driver btusb_driver; @@ -330,6 +331,7 @@ static const struct usb_device_id blacklist_table[] = { #define BTUSB_FIRMWARE_LOADED 7 #define BTUSB_FIRMWARE_FAILED 8 #define BTUSB_BOOTING 9 +#define BTUSB_RESET_RESUME 10 struct btusb_data { struct hci_dev *hdev; @@ -1372,378 +1374,6 @@ static int btusb_setup_csr(struct hci_dev *hdev) return ret; } -#define RTL_FRAG_LEN 252 - -struct rtl_download_cmd { - __u8 index; - __u8 data[RTL_FRAG_LEN]; -} __packed; - -struct rtl_download_response { - __u8 status; - __u8 index; -} __packed; - -struct rtl_rom_version_evt { - __u8 status; - __u8 version; -} __packed; - -struct rtl_epatch_header { - __u8 signature[8]; - __le32 fw_version; - __le16 num_patches; -} __packed; - -#define RTL_EPATCH_SIGNATURE "Realtech" -#define RTL_ROM_LMP_3499 0x3499 -#define RTL_ROM_LMP_8723A 0x1200 -#define RTL_ROM_LMP_8723B 0x8723 -#define RTL_ROM_LMP_8821A 0x8821 -#define RTL_ROM_LMP_8761A 0x8761 - -static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) -{ - struct rtl_rom_version_evt *rom_version; - struct sk_buff *skb; - int ret; - - /* Read RTL ROM version command */ - skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - BT_ERR("%s: Read ROM version failed (%ld)", - hdev->name, PTR_ERR(skb)); - return PTR_ERR(skb); - } - - if (skb->len != sizeof(*rom_version)) { - BT_ERR("%s: RTL version event length mismatch", hdev->name); - kfree_skb(skb); - return -EIO; - } - - rom_version = (struct rtl_rom_version_evt *)skb->data; - BT_INFO("%s: rom_version status=%x version=%x", - hdev->name, rom_version->status, rom_version->version); - - ret = rom_version->status; - if (ret == 0) - *version = rom_version->version; - - kfree_skb(skb); - return ret; -} - -static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver, - const struct firmware *fw, - unsigned char **_buf) -{ - const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 }; - struct rtl_epatch_header *epatch_info; - unsigned char *buf; - int i, ret, len; - size_t min_size; - u8 opcode, length, data, rom_version = 0; - int project_id = -1; - const unsigned char *fwptr, *chip_id_base; - const unsigned char *patch_length_base, *patch_offset_base; - u32 patch_offset = 0; - u16 patch_length, num_patches; - const u16 project_id_to_lmp_subver[] = { - RTL_ROM_LMP_8723A, - 
RTL_ROM_LMP_8723B, - RTL_ROM_LMP_8821A, - RTL_ROM_LMP_8761A - }; - - ret = rtl_read_rom_version(hdev, &rom_version); - if (ret) - return -bt_to_errno(ret); - - min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3; - if (fw->size < min_size) - return -EINVAL; - - fwptr = fw->data + fw->size - sizeof(extension_sig); - if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) { - BT_ERR("%s: extension section signature mismatch", hdev->name); - return -EINVAL; - } - - /* Loop from the end of the firmware parsing instructions, until - * we find an instruction that identifies the "project ID" for the - * hardware supported by this firwmare file. - * Once we have that, we double-check that that project_id is suitable - * for the hardware we are working with. - */ - while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) { - opcode = *--fwptr; - length = *--fwptr; - data = *--fwptr; - - BT_DBG("check op=%x len=%x data=%x", opcode, length, data); - - if (opcode == 0xff) /* EOF */ - break; - - if (length == 0) { - BT_ERR("%s: found instruction with length 0", - hdev->name); - return -EINVAL; - } - - if (opcode == 0 && length == 1) { - project_id = data; - break; - } - - fwptr -= length; - } - - if (project_id < 0) { - BT_ERR("%s: failed to find version instruction", hdev->name); - return -EINVAL; - } - - if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) { - BT_ERR("%s: unknown project id %d", hdev->name, project_id); - return -EINVAL; - } - - if (lmp_subver != project_id_to_lmp_subver[project_id]) { - BT_ERR("%s: firmware is for %x but this is a %x", hdev->name, - project_id_to_lmp_subver[project_id], lmp_subver); - return -EINVAL; - } - - epatch_info = (struct rtl_epatch_header *)fw->data; - if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) { - BT_ERR("%s: bad EPATCH signature", hdev->name); - return -EINVAL; - } - - num_patches = le16_to_cpu(epatch_info->num_patches); - BT_DBG("fw_version=%x, num_patches=%d", - le32_to_cpu(epatch_info->fw_version), num_patches); - - /* After the rtl_epatch_header there is a funky patch metadata section. - * Assuming 2 patches, the layout is: - * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2 - * - * Find the right patch for this chip. - */ - min_size += 8 * num_patches; - if (fw->size < min_size) - return -EINVAL; - - chip_id_base = fw->data + sizeof(struct rtl_epatch_header); - patch_length_base = chip_id_base + (sizeof(u16) * num_patches); - patch_offset_base = patch_length_base + (sizeof(u16) * num_patches); - for (i = 0; i < num_patches; i++) { - u16 chip_id = get_unaligned_le16(chip_id_base + - (i * sizeof(u16))); - if (chip_id == rom_version + 1) { - patch_length = get_unaligned_le16(patch_length_base + - (i * sizeof(u16))); - patch_offset = get_unaligned_le32(patch_offset_base + - (i * sizeof(u32))); - break; - } - } - - if (!patch_offset) { - BT_ERR("%s: didn't find patch for chip id %d", - hdev->name, rom_version); - return -EINVAL; - } - - BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i); - min_size = patch_offset + patch_length; - if (fw->size < min_size) - return -EINVAL; - - /* Copy the firmware into a new buffer and write the version at - * the end. 
- */ - len = patch_length; - buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4); - - *_buf = buf; - return len; -} - -static int rtl_download_firmware(struct hci_dev *hdev, - const unsigned char *data, int fw_len) -{ - struct rtl_download_cmd *dl_cmd; - int frag_num = fw_len / RTL_FRAG_LEN + 1; - int frag_len = RTL_FRAG_LEN; - int ret = 0; - int i; - - dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL); - if (!dl_cmd) - return -ENOMEM; - - for (i = 0; i < frag_num; i++) { - struct rtl_download_response *dl_resp; - struct sk_buff *skb; - - BT_DBG("download fw (%d/%d)", i, frag_num); - - dl_cmd->index = i; - if (i == (frag_num - 1)) { - dl_cmd->index |= 0x80; /* data end */ - frag_len = fw_len % RTL_FRAG_LEN; - } - memcpy(dl_cmd->data, data, frag_len); - - /* Send download command */ - skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd, - HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - BT_ERR("%s: download fw command failed (%ld)", - hdev->name, PTR_ERR(skb)); - ret = -PTR_ERR(skb); - goto out; - } - - if (skb->len != sizeof(*dl_resp)) { - BT_ERR("%s: download fw event length mismatch", - hdev->name); - kfree_skb(skb); - ret = -EIO; - goto out; - } - - dl_resp = (struct rtl_download_response *)skb->data; - if (dl_resp->status != 0) { - kfree_skb(skb); - ret = bt_to_errno(dl_resp->status); - goto out; - } - - kfree_skb(skb); - data += RTL_FRAG_LEN; - } - -out: - kfree(dl_cmd); - return ret; -} - -static int btusb_setup_rtl8723a(struct hci_dev *hdev) -{ - struct btusb_data *data = dev_get_drvdata(&hdev->dev); - struct usb_device *udev = interface_to_usbdev(data->intf); - const struct firmware *fw; - int ret; - - BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name); - ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &udev->dev); - if (ret < 0) { - BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name); - return ret; - } - - if (fw->size < 8) { - ret = -EINVAL; - goto out; - } - - /* Check that the firmware doesn't have the epatch signature - * (which is only for RTL8723B and newer). 
- */ - if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) { - BT_ERR("%s: unexpected EPATCH signature!", hdev->name); - ret = -EINVAL; - goto out; - } - - ret = rtl_download_firmware(hdev, fw->data, fw->size); - -out: - release_firmware(fw); - return ret; -} - -static int btusb_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver, - const char *fw_name) -{ - struct btusb_data *data = dev_get_drvdata(&hdev->dev); - struct usb_device *udev = interface_to_usbdev(data->intf); - unsigned char *fw_data = NULL; - const struct firmware *fw; - int ret; - - BT_INFO("%s: rtl: loading %s", hdev->name, fw_name); - ret = request_firmware(&fw, fw_name, &udev->dev); - if (ret < 0) { - BT_ERR("%s: Failed to load %s", hdev->name, fw_name); - return ret; - } - - ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data); - if (ret < 0) - goto out; - - ret = rtl_download_firmware(hdev, fw_data, ret); - kfree(fw_data); - if (ret < 0) - goto out; - -out: - release_firmware(fw); - return ret; -} - -static int btusb_setup_realtek(struct hci_dev *hdev) -{ - struct sk_buff *skb; - struct hci_rp_read_local_version *resp; - u16 lmp_subver; - - skb = btusb_read_local_version(hdev); - if (IS_ERR(skb)) - return -PTR_ERR(skb); - - resp = (struct hci_rp_read_local_version *)skb->data; - BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x " - "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev, - resp->lmp_ver, resp->lmp_subver); - - lmp_subver = le16_to_cpu(resp->lmp_subver); - kfree_skb(skb); - - /* Match a set of subver values that correspond to stock firmware, - * which is not compatible with standard btusb. - * If matched, upload an alternative firmware that does conform to - * standard btusb. Once that firmware is uploaded, the subver changes - * to a different value. 
- */ - switch (lmp_subver) { - case RTL_ROM_LMP_8723A: - case RTL_ROM_LMP_3499: - return btusb_setup_rtl8723a(hdev); - case RTL_ROM_LMP_8723B: - return btusb_setup_rtl8723b(hdev, lmp_subver, - "rtl_bt/rtl8723b_fw.bin"); - case RTL_ROM_LMP_8821A: - return btusb_setup_rtl8723b(hdev, lmp_subver, - "rtl_bt/rtl8821a_fw.bin"); - case RTL_ROM_LMP_8761A: - return btusb_setup_rtl8723b(hdev, lmp_subver, - "rtl_bt/rtl8761a_fw.bin"); - default: - BT_INFO("rtl: assuming no firmware upload needed."); - return 0; - } -} - static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev, struct intel_version *ver) { @@ -1951,12 +1581,6 @@ static int btusb_setup_intel(struct hci_dev *hdev) } ver = (struct intel_version *)skb->data; - if (ver->status) { - BT_ERR("%s Intel fw version event failed (%02x)", hdev->name, - ver->status); - kfree_skb(skb); - return -bt_to_errno(ver->status); - } BT_INFO("%s: read Intel version: %02x%02x%02x%02x%02x%02x%02x%02x%02x", hdev->name, ver->hw_platform, ver->hw_variant, @@ -2004,15 +1628,6 @@ static int btusb_setup_intel(struct hci_dev *hdev) return PTR_ERR(skb); } - if (skb->data[0]) { - u8 evt_status = skb->data[0]; - - BT_ERR("%s enable Intel manufacturer mode event failed (%02x)", - hdev->name, evt_status); - kfree_skb(skb); - release_firmware(fw); - return -bt_to_errno(evt_status); - } kfree_skb(skb); disable_patch = 1; @@ -2358,13 +1973,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) } ver = (struct intel_version *)skb->data; - if (ver->status) { - BT_ERR("%s: Intel version command failure (%02x)", - hdev->name, ver->status); - err = -bt_to_errno(ver->status); - kfree_skb(skb); - return err; - } /* The hardware platform number has a fixed value of 0x37 and * for now only accept this single value. @@ -2439,13 +2047,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) } params = (struct intel_boot_params *)skb->data; - if (params->status) { - BT_ERR("%s: Intel boot parameters command failure (%02x)", - hdev->name, params->status); - err = -bt_to_errno(params->status); - kfree_skb(skb); - return err; - } BT_INFO("%s: Device revision is %u", hdev->name, le16_to_cpu(params->dev_revid)); @@ -2678,13 +2279,6 @@ static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code) return; } - if (skb->data[0] != 0x00) { - BT_ERR("%s: Exception info command failure (%02x)", - hdev->name, skb->data[0]); - kfree_skb(skb); - return; - } - BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1)); kfree_skb(skb); @@ -2792,6 +2386,7 @@ struct qca_device_info { static const struct qca_device_info qca_devices_table[] = { { 0x00000100, 20, 4, 10 }, /* Rome 1.0 */ { 0x00000101, 20, 4, 10 }, /* Rome 1.1 */ + { 0x00000200, 28, 4, 18 }, /* Rome 2.0 */ { 0x00000201, 28, 4, 18 }, /* Rome 2.1 */ { 0x00000300, 28, 4, 18 }, /* Rome 3.0 */ { 0x00000302, 28, 4, 18 }, /* Rome 3.2 */ @@ -3175,8 +2770,17 @@ static int btusb_probe(struct usb_interface *intf, hdev->set_bdaddr = btusb_set_bdaddr_ath3012; } - if (id->driver_info & BTUSB_REALTEK) - hdev->setup = btusb_setup_realtek; +#ifdef CONFIG_BT_HCIBTUSB_RTL + if (id->driver_info & BTUSB_REALTEK) { + hdev->setup = btrtl_setup_realtek; + + /* Realtek devices lose their updated firmware over suspend, + * but the USB hub doesn't notice any status change. + * Explicitly request a device reset on resume. 
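+ * The flag is acted upon in btusb_suspend(), and only when remote + * wakeup is disabled for the device.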
+ */ + set_bit(BTUSB_RESET_RESUME, &data->flags); + } +#endif if (id->driver_info & BTUSB_AMP) { /* AMP controllers do not support SCO packets */ @@ -3308,6 +2912,14 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message) btusb_stop_traffic(data); usb_kill_anchored_urbs(&data->tx_anchor); + /* Optionally request a device reset on resume, but only when + * wakeups are disabled. If wakeups are enabled we assume the + * device will stay powered up throughout suspend. + */ + if (test_bit(BTUSB_RESET_RESUME, &data->flags) && + !device_may_wakeup(&data->udev->dev)) + data->udev->reset_resume = 1; + return 0; } diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c index 55c135b7757a..7a722df97343 100644 --- a/drivers/bluetooth/btwilink.c +++ b/drivers/bluetooth/btwilink.c @@ -22,7 +22,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ -#define DEBUG + #include #include #include diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c index dc8e3d4356a0..fc0056a28b81 100644 --- a/drivers/bluetooth/hci_bcsp.c +++ b/drivers/bluetooth/hci_bcsp.c @@ -47,8 +47,8 @@ #include "hci_uart.h" -static bool txcrc = 1; -static bool hciextn = 1; +static bool txcrc = true; +static bool hciextn = true; #define BCSP_TXWINSIZE 4 diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c index 5bd792c68f9b..ab3bde16ecb4 100644 --- a/drivers/bus/mips_cdmm.c +++ b/drivers/bus/mips_cdmm.c @@ -453,7 +453,7 @@ void __iomem *mips_cdmm_early_probe(unsigned int dev_type) /* Look for a specific device type */ for (; drb < bus->drbs; drb += size + 1) { - acsr = readl(cdmm + drb * CDMM_DRB_SIZE); + acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE); type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT; if (type == dev_type) return cdmm + drb * CDMM_DRB_SIZE; @@ -500,7 +500,7 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus) bus->discovered = true; pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs); for (; drb < bus->drbs; drb += size + 1) { - acsr = readl(cdmm + drb * CDMM_DRB_SIZE); + acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE); type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT; size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT; rev = (acsr & CDMM_ACSR_DEVREV) >> CDMM_ACSR_DEVREV_SHIFT; diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index fb9ec6221730..6f047dcb94c2 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -58,7 +58,6 @@ #include #include #include -#include /* * DDR target is the same on all platforms. 
@@ -70,6 +69,7 @@ */ #define WIN_CTRL_OFF 0x0000 #define WIN_CTRL_ENABLE BIT(0) +/* Only on HW I/O coherency capable platforms */ #define WIN_CTRL_SYNCBARRIER BIT(1) #define WIN_CTRL_TGT_MASK 0xf0 #define WIN_CTRL_TGT_SHIFT 4 @@ -102,9 +102,7 @@ /* Relative to mbusbridge_base */ #define MBUS_BRIDGE_CTRL_OFF 0x0 -#define MBUS_BRIDGE_SIZE_MASK 0xffff0000 #define MBUS_BRIDGE_BASE_OFF 0x4 -#define MBUS_BRIDGE_BASE_MASK 0xffff0000 /* Maximum number of windows, for all known platforms */ #define MBUS_WINS_MAX 20 @@ -323,8 +321,9 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus, ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) | (attr << WIN_CTRL_ATTR_SHIFT) | (target << WIN_CTRL_TGT_SHIFT) | - WIN_CTRL_SYNCBARRIER | WIN_CTRL_ENABLE; + if (mbus->hw_io_coherency) + ctrl |= WIN_CTRL_SYNCBARRIER; writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF); writel(ctrl, addr + WIN_CTRL_OFF); @@ -577,106 +576,36 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win) return MVEBU_MBUS_NO_REMAP; } -/* - * Use the memblock information to find the MBus bridge hole in the - * physical address space. - */ -static void __init -mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end) -{ - struct memblock_region *r; - uint64_t s = 0; - - for_each_memblock(memory, r) { - /* - * This part of the memory is above 4 GB, so we don't - * care for the MBus bridge hole. - */ - if (r->base >= 0x100000000) - continue; - - /* - * The MBus bridge hole is at the end of the RAM under - * the 4 GB limit. - */ - if (r->base + r->size > s) - s = r->base + r->size; - } - - *start = s; - *end = 0x100000000; -} - static void __init mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) { int i; int cs; - uint64_t mbus_bridge_base, mbus_bridge_end; mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR; - mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end); - for (i = 0, cs = 0; i < 4; i++) { - u64 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i)); - u64 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i)); - u64 end; - struct mbus_dram_window *w; - - /* Ignore entries that are not enabled */ - if (!(size & DDR_SIZE_ENABLED)) - continue; - - /* - * Ignore entries whose base address is above 2^32, - * since devices cannot DMA to such high addresses - */ - if (base & DDR_BASE_CS_HIGH_MASK) - continue; - - base = base & DDR_BASE_CS_LOW_MASK; - size = (size | ~DDR_SIZE_MASK) + 1; - end = base + size; - - /* - * Adjust base/size of the current CS to make sure it - * doesn't overlap with the MBus bridge hole. This is - * particularly important for devices that do DMA from - * DRAM to a SRAM mapped in a MBus window, such as the - * CESA cryptographic engine. - */ + u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i)); + u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i)); /* - * The CS is fully enclosed inside the MBus bridge - * area, so ignore it. + * We only take care of entries for which the chip + * select is enabled, and that don't have high base + * address bits set (devices can only access the first + * 32 bits of the memory). */ - if (base >= mbus_bridge_base && end <= mbus_bridge_end) - continue; + if ((size & DDR_SIZE_ENABLED) && + !(base & DDR_BASE_CS_HIGH_MASK)) { + struct mbus_dram_window *w; - /* - * Beginning of CS overlaps with end of MBus, raise CS - * base address, and shrink its size. 
- */ - if (base >= mbus_bridge_base && end > mbus_bridge_end) { - size -= mbus_bridge_end - base; - base = mbus_bridge_end; + w = &mvebu_mbus_dram_info.cs[cs++]; + w->cs_index = i; + w->mbus_attr = 0xf & ~(1 << i); + if (mbus->hw_io_coherency) + w->mbus_attr |= ATTR_HW_COHERENCY; + w->base = base & DDR_BASE_CS_LOW_MASK; + w->size = (size | ~DDR_SIZE_MASK) + 1; } - - /* - * End of CS overlaps with beginning of MBus, shrink - * CS size. - */ - if (base < mbus_bridge_base && end > mbus_bridge_base) - size -= end - mbus_bridge_base; - - w = &mvebu_mbus_dram_info.cs[cs++]; - w->cs_index = i; - w->mbus_attr = 0xf & ~(1 << i); - if (mbus->hw_io_coherency) - w->mbus_attr |= ATTR_HW_COHERENCY; - w->base = base; - w->size = size; } mvebu_mbus_dram_info.num_cs = cs; } diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c index 597fed423d7d..df2c1afa52b4 100644 --- a/drivers/clk/at91/clk-peripheral.c +++ b/drivers/clk/at91/clk-peripheral.c @@ -29,7 +29,7 @@ #define PERIPHERAL_RSHIFT_MASK 0x3 #define PERIPHERAL_RSHIFT(val) (((val) >> 16) & PERIPHERAL_RSHIFT_MASK) -#define PERIPHERAL_MAX_SHIFT 4 +#define PERIPHERAL_MAX_SHIFT 3 struct clk_peripheral { struct clk_hw hw; @@ -242,7 +242,7 @@ static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw, return *parent_rate; if (periph->range.max) { - for (; shift < PERIPHERAL_MAX_SHIFT; shift++) { + for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) { cur_rate = *parent_rate >> shift; if (cur_rate <= periph->range.max) break; @@ -254,7 +254,7 @@ static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw, best_diff = cur_rate - rate; best_rate = cur_rate; - for (; shift < PERIPHERAL_MAX_SHIFT; shift++) { + for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) { cur_rate = *parent_rate >> shift; if (cur_rate < rate) cur_diff = rate - cur_rate; @@ -289,7 +289,7 @@ static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw, if (periph->range.max && rate > periph->range.max) return -EINVAL; - for (shift = 0; shift < PERIPHERAL_MAX_SHIFT; shift++) { + for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) { if (parent_rate >> shift == rate) { periph->auto_div = false; periph->div = shift; diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c index 6ec79dbc0840..cbbe40377ad6 100644 --- a/drivers/clk/at91/clk-pll.c +++ b/drivers/clk/at91/clk-pll.c @@ -173,8 +173,7 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate, int i = 0; /* Check if parent_rate is a valid input rate */ - if (parent_rate < characteristics->input.min || - parent_rate > characteristics->input.max) + if (parent_rate < characteristics->input.min) return -ERANGE; /* @@ -187,6 +186,15 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate, if (!mindiv) mindiv = 1; + if (parent_rate > characteristics->input.max) { + tmpdiv = DIV_ROUND_UP(parent_rate, characteristics->input.max); + if (tmpdiv > PLL_DIV_MAX) + return -ERANGE; + + if (tmpdiv > mindiv) + mindiv = tmpdiv; + } + /* * Calculate the maximum divider which is limited by PLL register * layout (limited by the MUL or DIV field size). 
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 69abb08cf146..eb8e5dc9076d 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -121,7 +121,7 @@ extern void __init of_at91sam9x5_clk_smd_setup(struct device_node *np, struct at91_pmc *pmc); #endif -#if defined(CONFIG_HAVE_AT91_SMD) +#if defined(CONFIG_HAVE_AT91_H32MX) extern void __init of_sama5d4_clk_h32mx_setup(struct device_node *np, struct at91_pmc *pmc); #endif diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c index a50936a17376..563969942a1d 100644 --- a/drivers/clk/qcom/gcc-ipq806x.c +++ b/drivers/clk/qcom/gcc-ipq806x.c @@ -140,12 +140,47 @@ static struct clk_regmap pll14_vote = { }, }; +#define NSS_PLL_RATE(f, _l, _m, _n, i) \ + { \ + .freq = f, \ + .l = _l, \ + .m = _m, \ + .n = _n, \ + .ibits = i, \ + } + +static struct pll_freq_tbl pll18_freq_tbl[] = { + NSS_PLL_RATE(550000000, 44, 0, 1, 0x01495625), + NSS_PLL_RATE(733000000, 58, 16, 25, 0x014b5625), +}; + +static struct clk_pll pll18 = { + .l_reg = 0x31a4, + .m_reg = 0x31a8, + .n_reg = 0x31ac, + .config_reg = 0x31b4, + .mode_reg = 0x31a0, + .status_reg = 0x31b8, + .status_bit = 16, + .post_div_shift = 16, + .post_div_width = 1, + .freq_tbl = pll18_freq_tbl, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pll18", + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .ops = &clk_pll_ops, + }, +}; + enum { P_PXO, P_PLL8, P_PLL3, P_PLL0, P_CXO, + P_PLL14, + P_PLL18, }; static const struct parent_map gcc_pxo_pll8_map[] = { @@ -197,6 +232,22 @@ static const char *gcc_pxo_pll8_pll0_map[] = { "pll0_vote", }; +static const struct parent_map gcc_pxo_pll8_pll14_pll18_pll0_map[] = { + { P_PXO, 0 }, + { P_PLL8, 4 }, + { P_PLL0, 2 }, + { P_PLL14, 5 }, + { P_PLL18, 1 } +}; + +static const char *gcc_pxo_pll8_pll14_pll18_pll0[] = { + "pxo", + "pll8_vote", + "pll0_vote", + "pll14", + "pll18", +}; + static struct freq_tbl clk_tbl_gsbi_uart[] = { { 1843200, P_PLL8, 2, 6, 625 }, { 3686400, P_PLL8, 2, 12, 625 }, @@ -2202,6 +2253,472 @@ static struct clk_branch ebi2_aon_clk = { }, }; +static const struct freq_tbl clk_tbl_gmac[] = { + { 133000000, P_PLL0, 1, 50, 301 }, + { 266000000, P_PLL0, 1, 127, 382 }, + { } +}; + +static struct clk_dyn_rcg gmac_core1_src = { + .ns_reg[0] = 0x3cac, + .ns_reg[1] = 0x3cb0, + .md_reg[0] = 0x3ca4, + .md_reg[1] = 0x3ca8, + .bank_reg = 0x3ca0, + .mn[0] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .mn[1] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .s[0] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .s[1] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .p[0] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .p[1] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .mux_sel_bit = 0, + .freq_tbl = clk_tbl_gmac, + .clkr = { + .enable_reg = 0x3ca0, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core1_src", + .parent_names = gcc_pxo_pll8_pll14_pll18_pll0, + .num_parents = 5, + .ops = &clk_dyn_rcg_ops, + }, + }, +}; + +static struct clk_branch gmac_core1_clk = { + .halt_reg = 0x3c20, + .halt_bit = 4, + .hwcg_reg = 0x3cb4, + .hwcg_bit = 6, + .clkr = { + .enable_reg = 0x3cb4, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core1_clk", + .parent_names = (const char *[]){ + 
"gmac_core1_src", + }, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_dyn_rcg gmac_core2_src = { + .ns_reg[0] = 0x3ccc, + .ns_reg[1] = 0x3cd0, + .md_reg[0] = 0x3cc4, + .md_reg[1] = 0x3cc8, + .bank_reg = 0x3ca0, + .mn[0] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .mn[1] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .s[0] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .s[1] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .p[0] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .p[1] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .mux_sel_bit = 0, + .freq_tbl = clk_tbl_gmac, + .clkr = { + .enable_reg = 0x3cc0, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core2_src", + .parent_names = gcc_pxo_pll8_pll14_pll18_pll0, + .num_parents = 5, + .ops = &clk_dyn_rcg_ops, + }, + }, +}; + +static struct clk_branch gmac_core2_clk = { + .halt_reg = 0x3c20, + .halt_bit = 5, + .hwcg_reg = 0x3cd4, + .hwcg_bit = 6, + .clkr = { + .enable_reg = 0x3cd4, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core2_clk", + .parent_names = (const char *[]){ + "gmac_core2_src", + }, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_dyn_rcg gmac_core3_src = { + .ns_reg[0] = 0x3cec, + .ns_reg[1] = 0x3cf0, + .md_reg[0] = 0x3ce4, + .md_reg[1] = 0x3ce8, + .bank_reg = 0x3ce0, + .mn[0] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .mn[1] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .s[0] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .s[1] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .p[0] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .p[1] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .mux_sel_bit = 0, + .freq_tbl = clk_tbl_gmac, + .clkr = { + .enable_reg = 0x3ce0, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core3_src", + .parent_names = gcc_pxo_pll8_pll14_pll18_pll0, + .num_parents = 5, + .ops = &clk_dyn_rcg_ops, + }, + }, +}; + +static struct clk_branch gmac_core3_clk = { + .halt_reg = 0x3c20, + .halt_bit = 6, + .hwcg_reg = 0x3cf4, + .hwcg_bit = 6, + .clkr = { + .enable_reg = 0x3cf4, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core3_clk", + .parent_names = (const char *[]){ + "gmac_core3_src", + }, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_dyn_rcg gmac_core4_src = { + .ns_reg[0] = 0x3d0c, + .ns_reg[1] = 0x3d10, + .md_reg[0] = 0x3d04, + .md_reg[1] = 0x3d08, + .bank_reg = 0x3d00, + .mn[0] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .mn[1] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .s[0] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .s[1] = { + 
.src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .p[0] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .p[1] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .mux_sel_bit = 0, + .freq_tbl = clk_tbl_gmac, + .clkr = { + .enable_reg = 0x3d00, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core4_src", + .parent_names = gcc_pxo_pll8_pll14_pll18_pll0, + .num_parents = 5, + .ops = &clk_dyn_rcg_ops, + }, + }, +}; + +static struct clk_branch gmac_core4_clk = { + .halt_reg = 0x3c20, + .halt_bit = 7, + .hwcg_reg = 0x3d14, + .hwcg_bit = 6, + .clkr = { + .enable_reg = 0x3d14, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core4_clk", + .parent_names = (const char *[]){ + "gmac_core4_src", + }, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static const struct freq_tbl clk_tbl_nss_tcm[] = { + { 266000000, P_PLL0, 3, 0, 0 }, + { 400000000, P_PLL0, 2, 0, 0 }, + { } +}; + +static struct clk_dyn_rcg nss_tcm_src = { + .ns_reg[0] = 0x3dc4, + .ns_reg[1] = 0x3dc8, + .bank_reg = 0x3dc0, + .s[0] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .s[1] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .p[0] = { + .pre_div_shift = 3, + .pre_div_width = 4, + }, + .p[1] = { + .pre_div_shift = 3, + .pre_div_width = 4, + }, + .mux_sel_bit = 0, + .freq_tbl = clk_tbl_nss_tcm, + .clkr = { + .enable_reg = 0x3dc0, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "nss_tcm_src", + .parent_names = gcc_pxo_pll8_pll14_pll18_pll0, + .num_parents = 5, + .ops = &clk_dyn_rcg_ops, + }, + }, +}; + +static struct clk_branch nss_tcm_clk = { + .halt_reg = 0x3c20, + .halt_bit = 14, + .clkr = { + .enable_reg = 0x3dd0, + .enable_mask = BIT(6) | BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "nss_tcm_clk", + .parent_names = (const char *[]){ + "nss_tcm_src", + }, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static const struct freq_tbl clk_tbl_nss[] = { + { 110000000, P_PLL18, 1, 1, 5 }, + { 275000000, P_PLL18, 2, 0, 0 }, + { 550000000, P_PLL18, 1, 0, 0 }, + { 733000000, P_PLL18, 1, 0, 0 }, + { } +}; + +static struct clk_dyn_rcg ubi32_core1_src_clk = { + .ns_reg[0] = 0x3d2c, + .ns_reg[1] = 0x3d30, + .md_reg[0] = 0x3d24, + .md_reg[1] = 0x3d28, + .bank_reg = 0x3d20, + .mn[0] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .mn[1] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .s[0] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .s[1] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .p[0] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .p[1] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .mux_sel_bit = 0, + .freq_tbl = clk_tbl_nss, + .clkr = { + .enable_reg = 0x3d20, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "ubi32_core1_src_clk", + .parent_names = gcc_pxo_pll8_pll14_pll18_pll0, + .num_parents = 5, + .ops = &clk_dyn_rcg_ops, + .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, + }, + }, +}; + +static struct clk_dyn_rcg ubi32_core2_src_clk = { + .ns_reg[0] = 0x3d4c, + .ns_reg[1] = 0x3d50, + .md_reg[0] = 0x3d44, + .md_reg[1] = 0x3d48, + .bank_reg = 0x3d40, 
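+ /* Two register banks: the dynamic RCG flips between them so rate switches are glitch-free. */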
+ .mn[0] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .mn[1] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .s[0] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .s[1] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .p[0] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .p[1] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .mux_sel_bit = 0, + .freq_tbl = clk_tbl_nss, + .clkr = { + .enable_reg = 0x3d40, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "ubi32_core2_src_clk", + .parent_names = gcc_pxo_pll8_pll14_pll18_pll0, + .num_parents = 5, + .ops = &clk_dyn_rcg_ops, + .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, + }, + }, +}; + static struct clk_regmap *gcc_ipq806x_clks[] = { [PLL0] = &pll0.clkr, [PLL0_VOTE] = &pll0_vote, @@ -2211,6 +2728,7 @@ static struct clk_regmap *gcc_ipq806x_clks[] = { [PLL8_VOTE] = &pll8_vote, [PLL14] = &pll14.clkr, [PLL14_VOTE] = &pll14_vote, + [PLL18] = &pll18.clkr, [GSBI1_UART_SRC] = &gsbi1_uart_src.clkr, [GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr, [GSBI2_UART_SRC] = &gsbi2_uart_src.clkr, @@ -2307,6 +2825,18 @@ static struct clk_regmap *gcc_ipq806x_clks[] = { [USB_FS1_SYSTEM_CLK] = &usb_fs1_sys_clk.clkr, [EBI2_CLK] = &ebi2_clk.clkr, [EBI2_AON_CLK] = &ebi2_aon_clk.clkr, + [GMAC_CORE1_CLK_SRC] = &gmac_core1_src.clkr, + [GMAC_CORE1_CLK] = &gmac_core1_clk.clkr, + [GMAC_CORE2_CLK_SRC] = &gmac_core2_src.clkr, + [GMAC_CORE2_CLK] = &gmac_core2_clk.clkr, + [GMAC_CORE3_CLK_SRC] = &gmac_core3_src.clkr, + [GMAC_CORE3_CLK] = &gmac_core3_clk.clkr, + [GMAC_CORE4_CLK_SRC] = &gmac_core4_src.clkr, + [GMAC_CORE4_CLK] = &gmac_core4_clk.clkr, + [UBI32_CORE1_CLK_SRC] = &ubi32_core1_src_clk.clkr, + [UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr, + [NSSTCM_CLK_SRC] = &nss_tcm_src.clkr, + [NSSTCM_CLK] = &nss_tcm_clk.clkr, }; static const struct qcom_reset_map gcc_ipq806x_resets[] = { @@ -2425,6 +2955,48 @@ static const struct qcom_reset_map gcc_ipq806x_resets[] = { [USB30_1_PHY_RESET] = { 0x3b58, 0 }, [NSSFB0_RESET] = { 0x3b60, 6 }, [NSSFB1_RESET] = { 0x3b60, 7 }, + [UBI32_CORE1_CLKRST_CLAMP_RESET] = { 0x3d3c, 3}, + [UBI32_CORE1_CLAMP_RESET] = { 0x3d3c, 2 }, + [UBI32_CORE1_AHB_RESET] = { 0x3d3c, 1 }, + [UBI32_CORE1_AXI_RESET] = { 0x3d3c, 0 }, + [UBI32_CORE2_CLKRST_CLAMP_RESET] = { 0x3d5c, 3 }, + [UBI32_CORE2_CLAMP_RESET] = { 0x3d5c, 2 }, + [UBI32_CORE2_AHB_RESET] = { 0x3d5c, 1 }, + [UBI32_CORE2_AXI_RESET] = { 0x3d5c, 0 }, + [GMAC_CORE1_RESET] = { 0x3cbc, 0 }, + [GMAC_CORE2_RESET] = { 0x3cdc, 0 }, + [GMAC_CORE3_RESET] = { 0x3cfc, 0 }, + [GMAC_CORE4_RESET] = { 0x3d1c, 0 }, + [GMAC_AHB_RESET] = { 0x3e24, 0 }, + [NSS_CH0_RST_RX_CLK_N_RESET] = { 0x3b60, 0 }, + [NSS_CH0_RST_TX_CLK_N_RESET] = { 0x3b60, 1 }, + [NSS_CH0_RST_RX_125M_N_RESET] = { 0x3b60, 2 }, + [NSS_CH0_HW_RST_RX_125M_N_RESET] = { 0x3b60, 3 }, + [NSS_CH0_RST_TX_125M_N_RESET] = { 0x3b60, 4 }, + [NSS_CH1_RST_RX_CLK_N_RESET] = { 0x3b60, 5 }, + [NSS_CH1_RST_TX_CLK_N_RESET] = { 0x3b60, 6 }, + [NSS_CH1_RST_RX_125M_N_RESET] = { 0x3b60, 7 }, + [NSS_CH1_HW_RST_RX_125M_N_RESET] = { 0x3b60, 8 }, + [NSS_CH1_RST_TX_125M_N_RESET] = { 0x3b60, 9 }, + [NSS_CH2_RST_RX_CLK_N_RESET] = { 0x3b60, 10 }, + [NSS_CH2_RST_TX_CLK_N_RESET] = { 0x3b60, 11 }, + [NSS_CH2_RST_RX_125M_N_RESET] = { 0x3b60, 12 }, + [NSS_CH2_HW_RST_RX_125M_N_RESET] = { 0x3b60, 13 }, + 
[NSS_CH2_RST_TX_125M_N_RESET] = { 0x3b60, 14 }, + [NSS_CH3_RST_RX_CLK_N_RESET] = { 0x3b60, 15 }, + [NSS_CH3_RST_TX_CLK_N_RESET] = { 0x3b60, 16 }, + [NSS_CH3_RST_RX_125M_N_RESET] = { 0x3b60, 17 }, + [NSS_CH3_HW_RST_RX_125M_N_RESET] = { 0x3b60, 18 }, + [NSS_CH3_RST_TX_125M_N_RESET] = { 0x3b60, 19 }, + [NSS_RST_RX_250M_125M_N_RESET] = { 0x3b60, 20 }, + [NSS_RST_TX_250M_125M_N_RESET] = { 0x3b60, 21 }, + [NSS_QSGMII_TXPI_RST_N_RESET] = { 0x3b60, 22 }, + [NSS_QSGMII_CDR_RST_N_RESET] = { 0x3b60, 23 }, + [NSS_SGMII2_CDR_RST_N_RESET] = { 0x3b60, 24 }, + [NSS_SGMII3_CDR_RST_N_RESET] = { 0x3b60, 25 }, + [NSS_CAL_PRBS_RST_N_RESET] = { 0x3b60, 26 }, + [NSS_LCKDT_RST_N_RESET] = { 0x3b60, 27 }, + [NSS_SRDS_N_RESET] = { 0x3b60, 28 }, }; static const struct regmap_config gcc_ipq806x_regmap_config = { @@ -2453,6 +3025,8 @@ static int gcc_ipq806x_probe(struct platform_device *pdev) { struct clk *clk; struct device *dev = &pdev->dev; + struct regmap *regmap; + int ret; /* Temporary until RPM clocks supported */ clk = clk_register_fixed_rate(dev, "cxo", NULL, CLK_IS_ROOT, 25000000); @@ -2463,7 +3037,25 @@ static int gcc_ipq806x_probe(struct platform_device *pdev) if (IS_ERR(clk)) return PTR_ERR(clk); - return qcom_cc_probe(pdev, &gcc_ipq806x_desc); + ret = qcom_cc_probe(pdev, &gcc_ipq806x_desc); + if (ret) + return ret; + + regmap = dev_get_regmap(dev, NULL); + if (!regmap) + return -ENODEV; + + /* Setup PLL18 static bits */ + regmap_update_bits(regmap, 0x31a4, 0xffffffc0, 0x40000400); + regmap_write(regmap, 0x31b0, 0x3080); + + /* Set GMAC footswitch sleep/wakeup values */ + regmap_write(regmap, 0x3cb8, 8); + regmap_write(regmap, 0x3cd8, 8); + regmap_write(regmap, 0x3cf8, 8); + regmap_write(regmap, 0x3d18, 8); + + return 0; } static int gcc_ipq806x_remove(struct platform_device *pdev) diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index ba0532efd3ae..332c8ef8dae2 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1544,6 +1544,8 @@ static int ahash_init(struct ahash_request *req) state->current_buf = 0; state->buf_dma = 0; + state->buflen_0 = 0; + state->buflen_1 = 0; return 0; } diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 26a544b505f1..5095337205b8 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -56,7 +56,7 @@ /* Buffer, its dma address and lock */ struct buf_data { - u8 buf[RN_BUF_SIZE]; + u8 buf[RN_BUF_SIZE] ____cacheline_aligned; dma_addr_t addr; struct completion filled; u32 hw_desc[DESC_JOB_O_LEN]; diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 933e4b338459..7992164ea9ec 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -174,6 +174,8 @@ #define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */ #define AT_XDMAC_MAX_CHAN 0x20 +#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */ +#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */ #define AT_XDMAC_DMA_BUSWIDTHS\ (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ @@ -192,20 +194,17 @@ struct at_xdmac_chan { struct dma_chan chan; void __iomem *ch_regs; u32 mask; /* Channel Mask */ - u32 cfg[2]; /* Channel Configuration Register */ - #define AT_XDMAC_DEV_TO_MEM_CFG 0 /* Predifined dev to mem channel conf */ - #define AT_XDMAC_MEM_TO_DEV_CFG 1 /* Predifined mem to dev channel conf */ + u32 cfg; /* Channel Configuration Register */ u8 perid; /* Peripheral ID */ u8 perif; /* Peripheral Interface */ u8 memif; /* Memory Interface */ - u32 per_src_addr; - u32 per_dst_addr; u32 save_cc; u32 save_cim; u32 
save_cnda; u32 save_cndc; unsigned long status; struct tasklet_struct tasklet; + struct dma_slave_config sconfig; spinlock_t lock; @@ -415,8 +414,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) struct at_xdmac_desc *desc = txd_to_at_desc(tx); struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan); dma_cookie_t cookie; + unsigned long irqflags; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, irqflags); cookie = dma_cookie_assign(tx); dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", @@ -425,7 +425,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) if (list_is_singular(&atchan->xfers_list)) at_xdmac_start_xfer(atchan, desc); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, irqflags); return cookie; } @@ -494,61 +494,94 @@ static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec, return chan; } +static int at_xdmac_compute_chan_conf(struct dma_chan *chan, + enum dma_transfer_direction direction) +{ + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + int csize, dwidth; + + if (direction == DMA_DEV_TO_MEM) { + atchan->cfg = + AT91_XDMAC_DT_PERID(atchan->perid) + | AT_XDMAC_CC_DAM_INCREMENTED_AM + | AT_XDMAC_CC_SAM_FIXED_AM + | AT_XDMAC_CC_DIF(atchan->memif) + | AT_XDMAC_CC_SIF(atchan->perif) + | AT_XDMAC_CC_SWREQ_HWR_CONNECTED + | AT_XDMAC_CC_DSYNC_PER2MEM + | AT_XDMAC_CC_MBSIZE_SIXTEEN + | AT_XDMAC_CC_TYPE_PER_TRAN; + csize = ffs(atchan->sconfig.src_maxburst) - 1; + if (csize < 0) { + dev_err(chan2dev(chan), "invalid src maxburst value\n"); + return -EINVAL; + } + atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); + dwidth = ffs(atchan->sconfig.src_addr_width) - 1; + if (dwidth < 0) { + dev_err(chan2dev(chan), "invalid src addr width value\n"); + return -EINVAL; + } + atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); + } else if (direction == DMA_MEM_TO_DEV) { + atchan->cfg = + AT91_XDMAC_DT_PERID(atchan->perid) + | AT_XDMAC_CC_DAM_FIXED_AM + | AT_XDMAC_CC_SAM_INCREMENTED_AM + | AT_XDMAC_CC_DIF(atchan->perif) + | AT_XDMAC_CC_SIF(atchan->memif) + | AT_XDMAC_CC_SWREQ_HWR_CONNECTED + | AT_XDMAC_CC_DSYNC_MEM2PER + | AT_XDMAC_CC_MBSIZE_SIXTEEN + | AT_XDMAC_CC_TYPE_PER_TRAN; + csize = ffs(atchan->sconfig.dst_maxburst) - 1; + if (csize < 0) { + dev_err(chan2dev(chan), "invalid dst maxburst value\n"); + return -EINVAL; + } + atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); + dwidth = ffs(atchan->sconfig.dst_addr_width) - 1; + if (dwidth < 0) { + dev_err(chan2dev(chan), "invalid dst addr width value\n"); + return -EINVAL; + } + atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); + } + + dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg); + + return 0; +} + +/* + * Only check that maxburst and addr width values are supported by + * the controller but not that the configuration is good to perform the + * transfer since we don't know the direction at this stage.
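+ * The direction-dependent checks happen later, in + * at_xdmac_compute_chan_conf(), once the transfer is actually prepared.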
+ */ +static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig) +{ + if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE) + || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE)) + return -EINVAL; + + if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH) + || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH)) + return -EINVAL; + + return 0; +} + static int at_xdmac_set_slave_config(struct dma_chan *chan, struct dma_slave_config *sconfig) { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); - u8 dwidth; - int csize; - atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] = - AT91_XDMAC_DT_PERID(atchan->perid) - | AT_XDMAC_CC_DAM_INCREMENTED_AM - | AT_XDMAC_CC_SAM_FIXED_AM - | AT_XDMAC_CC_DIF(atchan->memif) - | AT_XDMAC_CC_SIF(atchan->perif) - | AT_XDMAC_CC_SWREQ_HWR_CONNECTED - | AT_XDMAC_CC_DSYNC_PER2MEM - | AT_XDMAC_CC_MBSIZE_SIXTEEN - | AT_XDMAC_CC_TYPE_PER_TRAN; - csize = at_xdmac_csize(sconfig->src_maxburst); - if (csize < 0) { - dev_err(chan2dev(chan), "invalid src maxburst value\n"); + if (at_xdmac_check_slave_config(sconfig)) { + dev_err(chan2dev(chan), "invalid slave configuration\n"); return -EINVAL; } - atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize); - dwidth = ffs(sconfig->src_addr_width) - 1; - atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth); - - - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] = - AT91_XDMAC_DT_PERID(atchan->perid) - | AT_XDMAC_CC_DAM_FIXED_AM - | AT_XDMAC_CC_SAM_INCREMENTED_AM - | AT_XDMAC_CC_DIF(atchan->perif) - | AT_XDMAC_CC_SIF(atchan->memif) - | AT_XDMAC_CC_SWREQ_HWR_CONNECTED - | AT_XDMAC_CC_DSYNC_MEM2PER - | AT_XDMAC_CC_MBSIZE_SIXTEEN - | AT_XDMAC_CC_TYPE_PER_TRAN; - csize = at_xdmac_csize(sconfig->dst_maxburst); - if (csize < 0) { - dev_err(chan2dev(chan), "invalid src maxburst value\n"); - return -EINVAL; - } - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize); - dwidth = ffs(sconfig->dst_addr_width) - 1; - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth); - - /* Src and dst addr are needed to configure the link list descriptor. */ - atchan->per_src_addr = sconfig->src_addr; - atchan->per_dst_addr = sconfig->dst_addr; - dev_dbg(chan2dev(chan), - "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n", - __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG], - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG], - atchan->per_src_addr, atchan->per_dst_addr); + memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig)); return 0; } @@ -563,6 +596,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct scatterlist *sg; int i; unsigned int xfer_size = 0; + unsigned long irqflags; + struct dma_async_tx_descriptor *ret = NULL; if (!sgl) return NULL; @@ -578,7 +613,10 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, flags); /* Protect dma_sconfig field that can be modified by set_slave_conf. */ - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, irqflags); + + if (at_xdmac_compute_chan_conf(chan, direction)) + goto spin_unlock; /* Prepare descriptors. 
*/ for_each_sg(sgl, sg, sg_len, i) { @@ -589,8 +627,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, mem = sg_dma_address(sg); if (unlikely(!len)) { dev_err(chan2dev(chan), "sg data length is zero\n"); - spin_unlock_bh(&atchan->lock); - return NULL; + goto spin_unlock; } dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n", __func__, i, len, mem); @@ -600,20 +637,18 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, dev_err(chan2dev(chan), "can't get descriptor\n"); if (first) list_splice_init(&first->descs_list, &atchan->free_descs_list); - spin_unlock_bh(&atchan->lock); - return NULL; + goto spin_unlock; } /* Linked list descriptor setup. */ if (direction == DMA_DEV_TO_MEM) { - desc->lld.mbr_sa = atchan->per_src_addr; + desc->lld.mbr_sa = atchan->sconfig.src_addr; desc->lld.mbr_da = mem; - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; } else { desc->lld.mbr_sa = mem; - desc->lld.mbr_da = atchan->per_dst_addr; - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; + desc->lld.mbr_da = atchan->sconfig.dst_addr; } + desc->lld.mbr_cfg = atchan->cfg; dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) @@ -645,13 +680,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, xfer_size += len; } - spin_unlock_bh(&atchan->lock); first->tx_dma_desc.flags = flags; first->xfer_size = xfer_size; first->direction = direction; + ret = &first->tx_dma_desc; - return &first->tx_dma_desc; +spin_unlock: + spin_unlock_irqrestore(&atchan->lock, irqflags); + return ret; } static struct dma_async_tx_descriptor * @@ -664,6 +701,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, struct at_xdmac_desc *first = NULL, *prev = NULL; unsigned int periods = buf_len / period_len; int i; + unsigned long irqflags; dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n", __func__, &buf_addr, buf_len, period_len, @@ -679,32 +717,34 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, return NULL; } + if (at_xdmac_compute_chan_conf(chan, direction)) + return NULL; + for (i = 0; i < periods; i++) { struct at_xdmac_desc *desc = NULL; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, irqflags); desc = at_xdmac_get_desc(atchan); if (!desc) { dev_err(chan2dev(chan), "can't get descriptor\n"); if (first) list_splice_init(&first->descs_list, &atchan->free_descs_list); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, irqflags); return NULL; } - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, irqflags); dev_dbg(chan2dev(chan), "%s: desc=0x%p, tx_dma_desc.phys=%pad\n", __func__, desc, &desc->tx_dma_desc.phys); if (direction == DMA_DEV_TO_MEM) { - desc->lld.mbr_sa = atchan->per_src_addr; + desc->lld.mbr_sa = atchan->sconfig.src_addr; desc->lld.mbr_da = buf_addr + i * period_len; - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; } else { desc->lld.mbr_sa = buf_addr + i * period_len; - desc->lld.mbr_da = atchan->per_dst_addr; - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; + desc->lld.mbr_da = atchan->sconfig.dst_addr; } + desc->lld.mbr_cfg = atchan->cfg; desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 | AT_XDMAC_MBR_UBC_NDEN | AT_XDMAC_MBR_UBC_NSEN @@ -766,6 +806,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | AT_XDMAC_CC_SIF(0) | AT_XDMAC_CC_MBSIZE_SIXTEEN | 
AT_XDMAC_CC_TYPE_MEM_TRAN; + unsigned long irqflags; dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n", __func__, &src, &dest, len, flags); @@ -798,9 +839,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size); - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, irqflags); desc = at_xdmac_get_desc(atchan); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, irqflags); if (!desc) { dev_err(chan2dev(chan), "can't get descriptor\n"); if (first) @@ -886,6 +927,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, int residue; u32 cur_nda, mask, value; u8 dwidth = 0; + unsigned long flags; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_COMPLETE) @@ -894,7 +936,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, if (!txstate) return ret; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); @@ -904,8 +946,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, */ if (!desc->active_xfer) { dma_set_residue(txstate, desc->xfer_size); - spin_unlock_bh(&atchan->lock); - return ret; + goto spin_unlock; } residue = desc->xfer_size; @@ -936,14 +977,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, } residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth; - spin_unlock_bh(&atchan->lock); - dma_set_residue(txstate, residue); dev_dbg(chan2dev(chan), "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n", __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue); +spin_unlock: + spin_unlock_irqrestore(&atchan->lock, flags); return ret; } @@ -964,8 +1005,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan, static void at_xdmac_advance_work(struct at_xdmac_chan *atchan) { struct at_xdmac_desc *desc; + unsigned long flags; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); /* * If channel is enabled, do nothing, advance_work will be triggered @@ -980,7 +1022,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan) at_xdmac_start_xfer(atchan, desc); } - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); } static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) @@ -1116,12 +1158,13 @@ static int at_xdmac_device_config(struct dma_chan *chan, { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); int ret; + unsigned long flags; dev_dbg(chan2dev(chan), "%s\n", __func__); - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); ret = at_xdmac_set_slave_config(chan, config); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return ret; } @@ -1130,18 +1173,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan) { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + unsigned long flags; dev_dbg(chan2dev(chan), "%s\n", __func__); if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status)) return 0; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask); while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP)) cpu_relax(); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return 0; } @@ -1150,18 +1194,19 @@ 
static int at_xdmac_device_resume(struct dma_chan *chan) { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + unsigned long flags; dev_dbg(chan2dev(chan), "%s\n", __func__); - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); if (!at_xdmac_chan_is_paused(atchan)) { - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return 0; } at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask); clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return 0; } @@ -1171,10 +1216,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan) struct at_xdmac_desc *desc, *_desc; struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + unsigned long flags; dev_dbg(chan2dev(chan), "%s\n", __func__); - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) cpu_relax(); @@ -1184,7 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan) at_xdmac_remove_xfer(atchan, desc); clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return 0; } @@ -1194,8 +1240,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan) struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac_desc *desc; int i; + unsigned long flags; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); if (at_xdmac_chan_is_enabled(atchan)) { dev_err(chan2dev(chan), @@ -1226,7 +1273,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan) dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); spin_unlock: - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return i; } diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 2890d744bb1b..3ddfd1f6c23c 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -487,7 +487,11 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) caps->directions = device->directions; caps->residue_granularity = device->residue_granularity; - caps->cmd_pause = !!device->device_pause; + /* + * Some devices implement only pause (e.g. to get residuum) but no + * resume. However cmd_pause is advertised as pause AND resume. 
+ */ + caps->cmd_pause = !!(device->device_pause && device->device_resume); caps->cmd_terminate = !!device->device_terminate_all; return 0; diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c index 9b84def7a353..f42f71e37e73 100644 --- a/drivers/dma/hsu/hsu.c +++ b/drivers/dma/hsu/hsu.c @@ -384,7 +384,10 @@ static int hsu_dma_terminate_all(struct dma_chan *chan) spin_lock_irqsave(&hsuc->vchan.lock, flags); hsu_dma_stop_channel(hsuc); - hsuc->desc = NULL; + if (hsuc->desc) { + hsu_dma_desc_free(&hsuc->desc->vdesc); + hsuc->desc = NULL; + } vchan_get_all_descriptors(&hsuc->vchan, &head); spin_unlock_irqrestore(&hsuc->vchan.lock, flags); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index a7d9d3029b14..340f9e607cd8 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2127,6 +2127,7 @@ static int pl330_terminate_all(struct dma_chan *chan) struct pl330_dmac *pl330 = pch->dmac; LIST_HEAD(list); + pm_runtime_get_sync(pl330->ddma.dev); spin_lock_irqsave(&pch->lock, flags); spin_lock(&pl330->lock); _stop(pch->thread); @@ -2151,6 +2152,8 @@ static int pl330_terminate_all(struct dma_chan *chan) list_splice_tail_init(&pch->work_list, &pl330->desc_pool); list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); spin_unlock_irqrestore(&pch->lock, flags); + pm_runtime_mark_last_busy(pl330->ddma.dev); + pm_runtime_put_autosuspend(pl330->ddma.dev); return 0; } diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 071c2c969eec..72791232e46b 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -186,8 +186,20 @@ struct ibft_kobject { static struct iscsi_boot_kset *boot_kset; +/* fully null address */ static const char nulls[16]; +/* IPv4-mapped IPv6 ::ffff:0.0.0.0 */ +static const char mapped_nulls[16] = { 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00 }; + +static int address_not_null(u8 *ip) +{ + return (memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16)); +} + /* * Helper functions to parse data properly. 
*/ @@ -445,7 +457,7 @@ static umode_t ibft_check_nic_for(void *data, int type) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_IP_ADDR: - if (memcmp(nic->ip_addr, nulls, sizeof(nic->ip_addr))) + if (address_not_null(nic->ip_addr)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_SUBNET_MASK: @@ -456,21 +468,19 @@ static umode_t ibft_check_nic_for(void *data, int type) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_GATEWAY: - if (memcmp(nic->gateway, nulls, sizeof(nic->gateway))) + if (address_not_null(nic->gateway)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_PRIMARY_DNS: - if (memcmp(nic->primary_dns, nulls, - sizeof(nic->primary_dns))) + if (address_not_null(nic->primary_dns)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_SECONDARY_DNS: - if (memcmp(nic->secondary_dns, nulls, - sizeof(nic->secondary_dns))) + if (address_not_null(nic->secondary_dns)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_DHCP: - if (memcmp(nic->dhcp, nulls, sizeof(nic->dhcp))) + if (address_not_null(nic->dhcp)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_VLAN: @@ -536,23 +546,19 @@ static umode_t __init ibft_check_initiator_for(void *data, int type) rc = S_IRUGO; break; case ISCSI_BOOT_INI_ISNS_SERVER: - if (memcmp(init->isns_server, nulls, - sizeof(init->isns_server))) + if (address_not_null(init->isns_server)) rc = S_IRUGO; break; case ISCSI_BOOT_INI_SLP_SERVER: - if (memcmp(init->slp_server, nulls, - sizeof(init->slp_server))) + if (address_not_null(init->slp_server)) rc = S_IRUGO; break; case ISCSI_BOOT_INI_PRI_RADIUS_SERVER: - if (memcmp(init->pri_radius_server, nulls, - sizeof(init->pri_radius_server))) + if (address_not_null(init->pri_radius_server)) rc = S_IRUGO; break; case ISCSI_BOOT_INI_SEC_RADIUS_SERVER: - if (memcmp(init->sec_radius_server, nulls, - sizeof(init->sec_radius_server))) + if (address_not_null(init->sec_radius_server)) rc = S_IRUGO; break; case ISCSI_BOOT_INI_INITIATOR_NAME: diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c index 6b8115f34208..83f281dda1e0 100644 --- a/drivers/gpio/gpio-kempld.c +++ b/drivers/gpio/gpio-kempld.c @@ -117,7 +117,7 @@ static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset) = container_of(chip, struct kempld_gpio_data, chip); struct kempld_device_data *pld = gpio->pld; - return kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset); + return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset); } static int kempld_gpio_pincount(struct kempld_device_data *pld) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 59eaa23767d8..6bc612b8a49f 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -53,6 +53,11 @@ static DEFINE_MUTEX(gpio_lookup_lock); static LIST_HEAD(gpio_lookup_list); LIST_HEAD(gpio_chips); + +static void gpiochip_free_hogs(struct gpio_chip *chip); +static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); + + static inline void desc_set_label(struct gpio_desc *d, const char *label) { d->label = label; @@ -297,6 +302,7 @@ int gpiochip_add(struct gpio_chip *chip) err_remove_chip: acpi_gpiochip_remove(chip); + gpiochip_free_hogs(chip); of_gpiochip_remove(chip); spin_lock_irqsave(&gpio_lock, flags); list_del(&chip->list); @@ -313,10 +319,6 @@ int gpiochip_add(struct gpio_chip *chip) } EXPORT_SYMBOL_GPL(gpiochip_add); -/* Forward-declaration */ -static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); -static void gpiochip_free_hogs(struct gpio_chip *chip); - /** * gpiochip_remove() - unregister a gpio_chip * @chip: the chip to unregister diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index e469c4b2e8cc..c25728bc388a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -684,8 +684,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.cpu_core_id_base); sysfs_show_32bit_prop(buffer, "simd_id_base", dev->node_props.simd_id_base); - sysfs_show_32bit_prop(buffer, "capability", - dev->node_props.capability); sysfs_show_32bit_prop(buffer, "max_waves_per_simd", dev->node_props.max_waves_per_simd); sysfs_show_32bit_prop(buffer, "lds_size_in_kb", @@ -736,6 +734,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->gpu->kfd2kgd->get_fw_version( dev->gpu->kgd, KGD_ENGINE_MEC1)); + sysfs_show_32bit_prop(buffer, "capability", + dev->node_props.capability); } return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute", diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 40c1db9ad7c3..2f0ed11024eb 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -465,6 +465,9 @@ int drm_plane_helper_commit(struct drm_plane *plane, if (!crtc[i]) continue; + if (crtc[i]->cursor == plane) + continue; + /* There's no other way to figure out whether the crtc is running. */ ret = drm_crtc_vblank_get(crtc[i]); if (ret == 0) { diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index ffc305fc2076..eb7e61078a5b 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -217,7 +217,7 @@ static ssize_t status_store(struct device *device, mutex_unlock(&dev->mode_config.mutex); - return ret; + return ret ? ret : count; } static ssize_t status_show(struct device *device, diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 007c7d7d8295..dc55c51964ab 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1667,12 +1667,15 @@ static int i915_sr_status(struct seq_file *m, void *unused) if (HAS_PCH_SPLIT(dev)) sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; - else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) + else if (IS_CRESTLINE(dev) || IS_G4X(dev) || + IS_I945G(dev) || IS_I945GM(dev)) sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; else if (IS_I915GM(dev)) sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; else if (IS_PINEVIEW(dev)) sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; + else if (IS_VALLEYVIEW(dev)) + sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; intel_runtime_pm_put(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 53394f998a1f..2d0995e7afc3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3003,8 +3003,8 @@ int i915_vma_unbind(struct i915_vma *vma) } else if (vma->ggtt_view.pages) { sg_free_table(vma->ggtt_view.pages); kfree(vma->ggtt_view.pages); - vma->ggtt_view.pages = NULL; } + vma->ggtt_view.pages = NULL; } drm_mm_remove_node(&vma->node); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f27346e907b1..d714a4b5711e 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -880,10 +880,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, DP_AUX_CH_CTL_RECEIVE_ERROR)) continue; if (status & DP_AUX_CH_CTL_DONE) - break; + goto done; } - if (status & DP_AUX_CH_CTL_DONE) - break; } if ((status & DP_AUX_CH_CTL_DONE) == 0) { 
@@ -892,6 +890,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, goto out; } +done: /* Check for timeout or receive error. * Timeouts occur when the sink is not connected */ diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 56e437e31580..ae628001fd97 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct intel_gmbus, adapter); struct drm_i915_private *dev_priv = bus->dev_priv; - int i, reg_offset; + int i = 0, inc, try = 0, reg_offset; int ret = 0; intel_aux_display_runtime_get(dev_priv); @@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter, reg_offset = dev_priv->gpio_mmio_base; +retry: I915_WRITE(GMBUS0 + reg_offset, bus->reg0); - for (i = 0; i < num; i++) { + for (; i < num; i += inc) { + inc = 1; if (gmbus_is_index_read(msgs, i, num)) { ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); - i += 1; /* set i to the index of the read xfer */ + inc = 2; /* an index read is two msgs */ } else if (msgs[i].flags & I2C_M_RD) { ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); } else { @@ -525,6 +527,18 @@ gmbus_xfer(struct i2c_adapter *adapter, adapter->name, msgs[i].addr, (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); + /* + * Passive adapters sometimes NAK the first probe. Retry the first + * message once on -ENXIO for GMBUS transfers; the bit banging algorithm + * has retries internally. See also the retry loop in + * drm_do_probe_ddc_edid, which bails out on the first -ENXIO. + */ + if (ret == -ENXIO && i == 0 && try++ == 0) { + DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n", + adapter->name); + goto retry; + } + goto out; timeout: diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 09df74b8e917..424e62197787 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1134,6 +1134,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); + if (ring->status_page.obj) { + I915_WRITE(RING_HWS_PGA(ring->mmio_base), + (u32)ring->status_page.gfx_addr); + POSTING_READ(RING_HWS_PGA(ring->mmio_base)); + } + I915_WRITE(RING_MODE_GEN7(ring), _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 441e2502b889..005b5e04de4d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -901,13 +901,6 @@ static int chv_init_workarounds(struct intel_engine_cs *ring) GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4); - if (INTEL_REVID(dev) == SKL_REVID_C0 || - INTEL_REVID(dev) == SKL_REVID_D0) - /* WaBarrierPerformanceFixDisable:skl */ - WA_SET_BIT_MASKED(HDC_CHICKEN0, - HDC_FENCE_DEST_SLM_DISABLE | - HDC_BARRIER_PERFORMANCE_DISABLE); - return 0; } @@ -1024,6 +1017,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) WA_SET_BIT_MASKED(HIZ_CHICKEN, BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); + if (INTEL_REVID(dev) == SKL_REVID_C0 || + INTEL_REVID(dev) == SKL_REVID_D0) + /* WaBarrierPerformanceFixDisable:skl */ + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FENCE_DEST_SLM_DISABLE | + HDC_BARRIER_PERFORMANCE_DISABLE); + return skl_tune_iz_hashing(ring); } diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e87d2f418de4..987b81f31b0e 100644 --- 
a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2550,7 +2550,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) DRM_DEBUG_KMS("initialising analog device %d\n", device); - intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL); + intel_sdvo_connector = intel_sdvo_connector_alloc(); if (!intel_sdvo_connector) return false; diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 6e84df9369a6..ad4b9010dfb0 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1526,6 +1526,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector, return MODE_BANDWIDTH; } + if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 || + (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) { + return MODE_H_ILLEGAL; + } + if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 || mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 || mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 || diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index 0b5af0fe8659..64f8b2f687d2 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -14,7 +14,7 @@ #define FERMI_TWOD_A 0x0000902d -#define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x0000903d +#define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x00009039 #define KEPLER_INLINE_TO_MEMORY_A 0x0000a040 #define KEPLER_INLINE_TO_MEMORY_B 0x0000a140 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c index 2f5eadd12a9b..fdb1dcf16a59 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c @@ -329,7 +329,6 @@ gm204_gr_init(struct nvkm_object *object) nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008); for (gpc = 0; gpc < priv->gpc_nr; gpc++) { - printk(KERN_ERR "ppc %d %d\n", gpc, priv->ppc_nr[gpc]); for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000); nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c index e8778c67578e..c61102f70805 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c @@ -90,12 +90,14 @@ gf100_devinit_disable(struct nvkm_devinit *devinit) return disable; } -static int +int gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { + struct nvkm_devinit_impl *impl = (void *)oclass; struct nv50_devinit_priv *priv; + u64 disable; int ret; ret = nvkm_devinit_create(parent, engine, oclass, &priv); @@ -103,7 +105,8 @@ gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine, if (ret) return ret; - if (nv_rd32(priv, 0x022500) & 0x00000001) + disable = impl->disable(&priv->base); + if (disable & (1ULL << NVDEV_ENGINE_DISP)) priv->base.post = true; return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c index b345a53e881d..87ca0ece37b4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c @@ -48,7 +48,7 @@ struct nvkm_oclass * gm107_devinit_oclass = &(struct nvkm_devinit_impl) { .base.handle = 
NV_SUBDEV(DEVINIT, 0x07), .base.ofuncs = &(struct nvkm_ofuncs) { - .ctor = nv50_devinit_ctor, + .ctor = gf100_devinit_ctor, .dtor = _nvkm_devinit_dtor, .init = nv50_devinit_init, .fini = _nvkm_devinit_fini, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c index 535172c5f1ad..1076fcf0d716 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c @@ -161,7 +161,7 @@ struct nvkm_oclass * gm204_devinit_oclass = &(struct nvkm_devinit_impl) { .base.handle = NV_SUBDEV(DEVINIT, 0x07), .base.ofuncs = &(struct nvkm_ofuncs) { - .ctor = nv50_devinit_ctor, + .ctor = gf100_devinit_ctor, .dtor = _nvkm_devinit_dtor, .init = nv50_devinit_init, .fini = _nvkm_devinit_fini, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h index b882b65ff3cd..9243521c80ac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h @@ -15,6 +15,9 @@ int nv50_devinit_pll_set(struct nvkm_devinit *, u32, u32); int gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32); +int gf100_devinit_ctor(struct nvkm_object *, struct nvkm_object *, + struct nvkm_oclass *, void *, u32, + struct nvkm_object **); int gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32); u64 gm107_devinit_disable(struct nvkm_devinit *); diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 42b2ea3fdcf3..dac78ad24b31 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, else radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; - /* if there is no audio, set MINM_OVER_MAXP */ - if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) - radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; if (rdev->family < CHIP_RV770) radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; /* use frac fb div on APUs */ diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index a0c35bbc8546..ba50f3c1c2e0 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -5822,7 +5822,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) L2_CACHE_BIGK_FRAGMENT_SIZE(4)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(rdev->dummy_page.addr >> 12)); diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c index f04205170b8a..cfa3a84a2af0 100644 --- a/drivers/gpu/drm/radeon/dce3_1_afmt.c +++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c @@ -173,7 +173,7 @@ void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; - WREG32(HDMI0_ACR_PACKET_CONTROL + offset, + WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset, HDMI0_ACR_SOURCE | /* select SW CTS value */ HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 05e6d6ef5963..f848acfd3fc8 100644 --- 
a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2485,7 +2485,7 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 0926739c9fa7..9953356fe263 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -400,7 +400,7 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) if (enable) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) { WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, HDMI_AVI_INFO_SEND | /* enable AVI info frames */ HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */ @@ -438,7 +438,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) if (!dig || !dig->afmt) return; - if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) { + if (enable && connector && + drm_detect_monitor_audio(radeon_connector_edid(connector))) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector; diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index aba2f428c0a8..64d3a771920d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1282,7 +1282,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) L2_CACHE_BIGK_FRAGMENT_SIZE(6)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(rdev->dummy_page.addr >> 12)); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 25b4ac967742..8f6d862a1882 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1112,7 +1112,7 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index dcb779647c57..25191f126f3b 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -460,9 +460,6 
@@ void radeon_audio_detect(struct drm_connector *connector, if (!connector || !connector->encoder) return; - if (!radeon_encoder_is_digital(connector->encoder)) - return; - rdev = connector->encoder->dev->dev_private; if (!radeon_audio_chipset_supported(rdev)) @@ -471,26 +468,26 @@ void radeon_audio_detect(struct drm_connector *connector, radeon_encoder = to_radeon_encoder(connector->encoder); dig = radeon_encoder->enc_priv; - if (!dig->afmt) - return; - if (status == connector_status_connected) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector *radeon_connector; + int sink_type; + + if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) { + radeon_encoder->audio = NULL; + return; + } + + radeon_connector = to_radeon_connector(connector); + sink_type = radeon_dp_getsinktype(radeon_connector); if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && - radeon_dp_getsinktype(radeon_connector) == - CONNECTOR_OBJECT_ID_DISPLAYPORT + sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) radeon_encoder->audio = rdev->audio.dp_funcs; else radeon_encoder->audio = rdev->audio.hdmi_funcs; dig->afmt->pin = radeon_audio_get_pin(connector->encoder); - if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { - radeon_audio_enable(rdev, dig->afmt->pin, 0xf); - } else { - radeon_audio_enable(rdev, dig->afmt->pin, 0); - dig->afmt->pin = NULL; - } + radeon_audio_enable(rdev, dig->afmt->pin, 0xf); } else { radeon_audio_enable(rdev, dig->afmt->pin, 0); dig->afmt->pin = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index d17d251dbd4f..cebb65e07e1d 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1379,10 +1379,8 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) /* updated in get modes as well since we need to know if it's analog or digital */ radeon_connector_update_scratch_regs(connector, ret); - if (radeon_audio != 0) { - radeon_connector_get_edid(connector); + if (radeon_audio != 0) radeon_audio_detect(connector, ret); - } exit: pm_runtime_mark_last_busy(connector->dev->dev); @@ -1719,10 +1717,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force) radeon_connector_update_scratch_regs(connector, ret); - if (radeon_audio != 0) { - radeon_connector_get_edid(connector); + if (radeon_audio != 0) radeon_audio_detect(connector, ret); - } out: pm_runtime_mark_last_busy(connector->dev->dev); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index b7ca4c514621..a7fdfa4f0857 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1463,6 +1463,21 @@ int radeon_device_init(struct radeon_device *rdev, if (r) DRM_ERROR("ib ring test failed (%d).\n", r); + /* + * The Turks/Thames GPU will freeze the whole laptop if DPM is not + * restarted after the CP ring has chewed on at least one packet. Hence + * we stop and restart DPM after the radeon_ib_ring_tests().
+ */ + if (rdev->pm.dpm_enabled && + (rdev->pm.pm_method == PM_METHOD_DPM) && + (rdev->family == CHIP_TURKS) && + (rdev->flags & RADEON_IS_MOBILITY)) { + mutex_lock(&rdev->pm.mutex); + radeon_dpm_disable(rdev); + radeon_dpm_enable(rdev); + mutex_unlock(&rdev->pm.mutex); + } + if ((radeon_testing & 1)) { if (rdev->accel_working) radeon_test_moves(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 2b98ed3e684d..257b10be5cda 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -663,12 +663,17 @@ int radeon_dp_mst_probe(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + struct drm_device *dev = radeon_connector->base.dev; + struct radeon_device *rdev = dev->dev_private; int ret; u8 msg[1]; if (!radeon_mst) return 0; + if (!ASIC_IS_DCE5(rdev)) + return 0; + if (dig_connector->dpcd[DP_DPCD_REV] < 0x12) return 0; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 7b2a7335cc5d..b0acf50d9558 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -576,6 +576,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (radeon_get_allowed_info_register(rdev, *value, value)) return -EINVAL; break; + case RADEON_INFO_VA_UNMAP_WORKING: + *value = true; + break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index de42fc4a22b8..9c3377ca17b7 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, /* make sure object fit at this offset */ eoffset = soffset + size; if (soffset >= eoffset) { - return -EINVAL; + r = -EINVAL; + goto error_unreserve; } last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; if (last_pfn > rdev->vm_manager.max_pfn) { dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", last_pfn, rdev->vm_manager.max_pfn); - return -EINVAL; + r = -EINVAL; + goto error_unreserve; } } else { @@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, soffset, tmp->bo, tmp->it.start, tmp->it.last); mutex_unlock(&vm->mutex); - return -EINVAL; + r = -EINVAL; + goto error_unreserve; } } @@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); if (!tmp) { mutex_unlock(&vm->mutex); - return -ENOMEM; + r = -ENOMEM; + goto error_unreserve; } tmp->it.start = bo_va->it.start; tmp->it.last = bo_va->it.last; @@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, r = radeon_vm_clear_bo(rdev, pt); if (r) { radeon_bo_unref(&pt); - radeon_bo_reserve(bo_va->bo, false); return r; } @@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, mutex_unlock(&vm->mutex); return 0; + +error_unreserve: + radeon_bo_unreserve(bo_va->bo); + return r; } /** diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index c54d6313a46d..01ee96acb398 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -921,7 +921,7 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - 
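The radeon_vm.c hunks above all make the same change: every failure path taken after the buffer object's reservation is held now funnels through a single error_unreserve label that drops the reservation before returning, instead of returning with it still held. A minimal, self-contained sketch of that pattern follows; bo_reserve(), bo_unreserve() and struct bo are hypothetical stand-ins for illustration, not the radeon API.

    #include <errno.h>

    struct bo { int reserved; };

    static int bo_reserve(struct bo *bo)    { bo->reserved = 1; return 0; }
    static void bo_unreserve(struct bo *bo) { bo->reserved = 0; }

    static int bo_set_addr(struct bo *bo, unsigned long soffset,
                           unsigned long eoffset)
    {
            int r = bo_reserve(bo);

            if (r)
                    return r;

            if (soffset >= eoffset) {       /* invalid range */
                    r = -EINVAL;
                    goto error_unreserve;   /* was: return -EINVAL, leaking the
                                               reservation */
            }

            /* ... install the mapping ... */

            bo_unreserve(bo);
            return 0;

    error_unreserve:
            bo_unreserve(bo);               /* single exit: reservation dropped
                                               on every error */
            return r;
    }

The single-label exit also lets the patch delete the stray radeon_bo_reserve() that the old clear_bo failure path needed to rebalance.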
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 5326f753e107..4c679b802bc8 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -4303,7 +4303,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) L2_CACHE_BIGK_FRAGMENT_SIZE(4)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(rdev->dummy_page.addr >> 12)); diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile index 1055cb79096c..3f4c7b842028 100644 --- a/drivers/gpu/drm/vgem/Makefile +++ b/drivers/gpu/drm/vgem/Makefile @@ -1,4 +1,4 @@ ccflags-y := -Iinclude/drm -vgem-y := vgem_drv.o vgem_dma_buf.o +vgem-y := vgem_drv.o obj-$(CONFIG_DRM_VGEM) += vgem.o diff --git a/drivers/gpu/drm/vgem/vgem_dma_buf.c b/drivers/gpu/drm/vgem/vgem_dma_buf.c deleted file mode 100644 index 0254438ad1a6..000000000000 --- a/drivers/gpu/drm/vgem/vgem_dma_buf.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright © 2012 Intel Corporation - * Copyright © 2014 The Chromium OS Authors - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
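For reference, the five VM_CONTEXT0_PAGE_TABLE_END_ADDR hunks above (evergreen, ni, r600, rv770, si) all drop a "- 1". Radeon conventionally stores mc.gtt_end as an inclusive byte address, so gtt_end >> 12 already names the aperture's last valid page frame; if that convention holds here, the old expression excluded the final GTT page from the page-table range. A small arithmetic restatement, assuming 4 KiB GPU pages and an inclusive gtt_end:

    #include <stdint.h>

    #define GPU_PAGE_SHIFT 12       /* 4 KiB GPU pages (assumption) */

    /* gtt_end is an inclusive byte address: start 0x0, size 1 GiB gives
     * end 0x3FFFFFFF, and last_pfn() == 0x3FFFF, the final valid page. */
    static uint64_t last_pfn(uint64_t gtt_end)
    {
            return gtt_end >> GPU_PAGE_SHIFT;       /* written after the patch */
    }

    static uint64_t off_by_one_pfn(uint64_t gtt_end)
    {
            return (gtt_end >> GPU_PAGE_SHIFT) - 1; /* pre-patch: one page short */
    }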
- * - * Authors: - * Ben Widawsky - * - */ - -#include -#include "vgem_drv.h" - -struct sg_table *vgem_gem_prime_get_sg_table(struct drm_gem_object *gobj) -{ - struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); - BUG_ON(obj->pages == NULL); - - return drm_prime_pages_to_sg(obj->pages, obj->base.size / PAGE_SIZE); -} - -int vgem_gem_prime_pin(struct drm_gem_object *gobj) -{ - struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); - return vgem_gem_get_pages(obj); -} - -void vgem_gem_prime_unpin(struct drm_gem_object *gobj) -{ - struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); - vgem_gem_put_pages(obj); -} - -void *vgem_gem_prime_vmap(struct drm_gem_object *gobj) -{ - struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); - BUG_ON(obj->pages == NULL); - - return vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL); -} - -void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - vunmap(vaddr); -} - -struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) -{ - struct drm_vgem_gem_object *obj = NULL; - int ret; - - obj = kzalloc(sizeof(*obj), GFP_KERNEL); - if (obj == NULL) { - ret = -ENOMEM; - goto fail; - } - - ret = drm_gem_object_init(dev, &obj->base, dma_buf->size); - if (ret) { - ret = -ENOMEM; - goto fail_free; - } - - get_dma_buf(dma_buf); - - obj->base.dma_buf = dma_buf; - obj->use_dma_buf = true; - - return &obj->base; - -fail_free: - kfree(obj); -fail: - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index cb3b43525b2d..7a207ca547be 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -302,22 +302,13 @@ static const struct file_operations vgem_driver_fops = { }; static struct drm_driver vgem_driver = { - .driver_features = DRIVER_GEM | DRIVER_PRIME, + .driver_features = DRIVER_GEM, .gem_free_object = vgem_gem_free_object, .gem_vm_ops = &vgem_gem_vm_ops, .ioctls = vgem_ioctls, .fops = &vgem_driver_fops, .dumb_create = vgem_gem_dumb_create, .dumb_map_offset = vgem_gem_dumb_map, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = drm_gem_prime_export, - .gem_prime_import = vgem_gem_prime_import, - .gem_prime_pin = vgem_gem_prime_pin, - .gem_prime_unpin = vgem_gem_prime_unpin, - .gem_prime_get_sg_table = vgem_gem_prime_get_sg_table, - .gem_prime_vmap = vgem_gem_prime_vmap, - .gem_prime_vunmap = vgem_gem_prime_vunmap, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h index 57ab4d8f41f9..e9f92f7ee275 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.h +++ b/drivers/gpu/drm/vgem/vgem_drv.h @@ -43,15 +43,4 @@ struct drm_vgem_gem_object { extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj); extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj); -/* vgem_dma_buf.c */ -extern struct sg_table *vgem_gem_prime_get_sg_table( - struct drm_gem_object *gobj); -extern int vgem_gem_prime_pin(struct drm_gem_object *gobj); -extern void vgem_gem_prime_unpin(struct drm_gem_object *gobj); -extern void *vgem_gem_prime_vmap(struct drm_gem_object *gobj); -extern void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); -extern struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf); - - #endif diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c index f3830db02d46..37f01702d081 100644 --- a/drivers/hwmon/nct6683.c +++ 
b/drivers/hwmon/nct6683.c @@ -439,6 +439,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg, (*t)->dev_attr.attr.name, tg->base + i); if ((*t)->s2) { a2 = &su->u.a2; + sysfs_attr_init(&a2->dev_attr.attr); a2->dev_attr.attr.name = su->name; a2->nr = (*t)->u.s.nr + i; a2->index = (*t)->u.s.index; @@ -449,6 +450,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg, *attrs = &a2->dev_attr.attr; } else { a = &su->u.a1; + sysfs_attr_init(&a->dev_attr.attr); a->dev_attr.attr.name = su->name; a->index = (*t)->u.index + i; a->dev_attr.attr.mode = diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 4fcb48103299..bd1c99deac71 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -995,6 +995,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg, (*t)->dev_attr.attr.name, tg->base + i); if ((*t)->s2) { a2 = &su->u.a2; + sysfs_attr_init(&a2->dev_attr.attr); a2->dev_attr.attr.name = su->name; a2->nr = (*t)->u.s.nr + i; a2->index = (*t)->u.s.index; @@ -1005,6 +1006,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg, *attrs = &a2->dev_attr.attr; } else { a = &su->u.a1; + sysfs_attr_init(&a->dev_attr.attr); a->dev_attr.attr.name = su->name; a->index = (*t)->u.index + i; a->dev_attr.attr.mode = diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index 112e4d45e4a0..68800115876b 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -239,8 +239,10 @@ static struct ntc_thermistor_platform_data * ntc_thermistor_parse_dt(struct platform_device *pdev) { struct iio_channel *chan; + enum iio_chan_type type; struct device_node *np = pdev->dev.of_node; struct ntc_thermistor_platform_data *pdata; + int ret; if (!np) return NULL; @@ -253,6 +255,13 @@ ntc_thermistor_parse_dt(struct platform_device *pdev) if (IS_ERR(chan)) return ERR_CAST(chan); + ret = iio_get_channel_type(chan, &type); + if (ret < 0) + return ERR_PTR(ret); + + if (type != IIO_VOLTAGE) + return ERR_PTR(-EINVAL); + if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv)) return ERR_PTR(-ENODEV); if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm)) diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index 99664ebc738d..ccf4cffe0ee1 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c @@ -44,7 +44,7 @@ #include /* Addresses to scan */ -static const unsigned short normal_i2c[] = { 0x37, 0x48, 0x49, 0x4a, 0x4c, 0x4d, +static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 }; diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c index 8fe78d08e01c..7c6966434ee7 100644 --- a/drivers/i2c/busses/i2c-hix5hd2.c +++ b/drivers/i2c/busses/i2c-hix5hd2.c @@ -554,4 +554,4 @@ module_platform_driver(hix5hd2_i2c_driver); MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver"); MODULE_AUTHOR("Wei Yan "); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:i2c-hix5hd2"); +MODULE_ALIAS("platform:hix5hd2-i2c"); diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 958c8db4ec30..297e9c9ac943 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1143,6 +1143,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) return -ENOMEM; i2c->quirks = s3c24xx_get_device_quirks(pdev); + i2c->sysreg = ERR_PTR(-ENOENT); if (pdata) memcpy(i2c->pdata, pdata, 
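The two hwmon hunks above (nct6683, nct6775) add sysfs_attr_init() for attributes that live in dynamically allocated memory. Statically declared attributes get their lockdep class key from the declaration macros, but a kzalloc()'d attribute must initialize the key itself, or lockdep complains when the attribute is registered. A hedged, minimal illustration; the "demo" attribute and helper are invented for the example:

    #include <linux/device.h>
    #include <linux/slab.h>
    #include <linux/sysfs.h>

    static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
    {
            return sprintf(buf, "demo\n");
    }

    static int demo_add_attr(struct device *dev)
    {
            struct device_attribute *da = kzalloc(sizeof(*da), GFP_KERNEL);

            if (!da)
                    return -ENOMEM;

            sysfs_attr_init(&da->attr);     /* the call the patches above add */
            da->attr.name = "demo";
            da->attr.mode = 0444;
            da->show = demo_show;

            return device_create_file(dev, da);
    }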
sizeof(*pdata)); else diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c index 89d8aa1d2818..df12c57e6ce0 100644 --- a/drivers/iio/adc/twl6030-gpadc.c +++ b/drivers/iio/adc/twl6030-gpadc.c @@ -1001,7 +1001,7 @@ static struct platform_driver twl6030_gpadc_driver = { module_platform_driver(twl6030_gpadc_driver); -MODULE_ALIAS("platform: " DRIVER_NAME); +MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("Balaji T K "); MODULE_AUTHOR("Graeme Gregory "); MODULE_AUTHOR("Oleksandr Kozaruk adis; - uint16_t *tx; + unsigned int burst_length; + u8 *tx; if (st->variant->flags & ADIS16400_NO_BURST) return adis_update_scan_mode(indio_dev, scan_mask); @@ -26,26 +27,29 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev, kfree(adis->xfer); kfree(adis->buffer); + /* All but the timestamp channel */ + burst_length = (indio_dev->num_channels - 1) * sizeof(u16); + if (st->variant->flags & ADIS16400_BURST_DIAG_STAT) + burst_length += sizeof(u16); + adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL); if (!adis->xfer) return -ENOMEM; - adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16), - GFP_KERNEL); + adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL); if (!adis->buffer) return -ENOMEM; - tx = adis->buffer + indio_dev->scan_bytes; - + tx = adis->buffer + burst_length; tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD); tx[1] = 0; adis->xfer[0].tx_buf = tx; adis->xfer[0].bits_per_word = 8; adis->xfer[0].len = 2; - adis->xfer[1].tx_buf = tx; + adis->xfer[1].rx_buf = adis->buffer; adis->xfer[1].bits_per_word = 8; - adis->xfer[1].len = indio_dev->scan_bytes; + adis->xfer[1].len = burst_length; spi_message_init(&adis->msg); spi_message_add_tail(&adis->xfer[0], &adis->msg); @@ -61,6 +65,7 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p) struct adis16400_state *st = iio_priv(indio_dev); struct adis *adis = &st->adis; u32 old_speed_hz = st->adis.spi->max_speed_hz; + void *buffer; int ret; if (!adis->buffer) @@ -81,7 +86,12 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p) spi_setup(st->adis.spi); } - iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer, + if (st->variant->flags & ADIS16400_BURST_DIAG_STAT) + buffer = adis->buffer + sizeof(u16); + else + buffer = adis->buffer; + + iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index fa795dcd5f75..2fd68f2219a7 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c @@ -405,6 +405,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, *val = st->variant->temp_scale_nano / 1000000; *val2 = (st->variant->temp_scale_nano % 1000000); return IIO_VAL_INT_PLUS_MICRO; + case IIO_PRESSURE: + /* 20 uBar = 0.002kPascal */ + *val = 0; + *val2 = 2000; + return IIO_VAL_INT_PLUS_MICRO; default: return -EINVAL; } @@ -454,10 +459,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, } } -#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \ +#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ - .channel = 0, \ + .channel = chn, \ .extend_name = name, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE), \ @@ -474,10 +479,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, } #define ADIS16400_SUPPLY_CHAN(addr, bits) \ - ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY) + ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", 
ADIS16400_SCAN_SUPPLY, 0) #define ADIS16400_AUX_ADC_CHAN(addr, bits) \ - ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC) + ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1) #define ADIS16400_GYRO_CHAN(mod, addr, bits) { \ .type = IIO_ANGL_VEL, \ @@ -773,7 +778,8 @@ static struct adis16400_chip_info adis16400_chips[] = { .channels = adis16448_channels, .num_channels = ARRAY_SIZE(adis16448_channels), .flags = ADIS16400_HAS_PROD_ID | - ADIS16400_HAS_SERIAL_NUMBER, + ADIS16400_HAS_SERIAL_NUMBER | + ADIS16400_BURST_DIAG_STAT, .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */ .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */ .temp_scale_nano = 73860000, /* 0.07386 C */ @@ -791,11 +797,6 @@ static const struct iio_info adis16400_info = { .debugfs_reg_access = adis_debugfs_reg_access, }; -static const unsigned long adis16400_burst_scan_mask[] = { - ~0UL, - 0, -}; - static const char * const adis16400_status_error_msgs[] = { [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure", [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure", @@ -843,6 +844,20 @@ static const struct adis_data adis16400_data = { BIT(ADIS16400_DIAG_STAT_POWER_LOW), }; +static void adis16400_setup_chan_mask(struct adis16400_state *st) +{ + const struct adis16400_chip_info *chip_info = st->variant; + unsigned i; + + for (i = 0; i < chip_info->num_channels; i++) { + const struct iio_chan_spec *ch = &chip_info->channels[i]; + + if (ch->scan_index >= 0 && + ch->scan_index != ADIS16400_SCAN_TIMESTAMP) + st->avail_scan_mask[0] |= BIT(ch->scan_index); + } +} + static int adis16400_probe(struct spi_device *spi) { struct adis16400_state *st; @@ -866,8 +881,10 @@ static int adis16400_probe(struct spi_device *spi) indio_dev->info = &adis16400_info; indio_dev->modes = INDIO_DIRECT_MODE; - if (!(st->variant->flags & ADIS16400_NO_BURST)) - indio_dev->available_scan_masks = adis16400_burst_scan_mask; + if (!(st->variant->flags & ADIS16400_NO_BURST)) { + adis16400_setup_chan_mask(st); + indio_dev->available_scan_masks = st->avail_scan_mask; + } ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data); if (ret) diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 66bd6a2ad83b..d95a0c300b03 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -445,10 +445,10 @@ static int c4iw_get_mib(struct ib_device *ibdev, cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6); memset(stats, 0, sizeof *stats); - stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs; - stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs; - stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs; - stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutSegs; + stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs; + stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs; + stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs; + stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts; return 0; } diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index cc64400d41ac..024b0f745035 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1090,7 +1090,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0, MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); + MLX4_CMD_WRAPPED); if (ret == -ENOMEM) 
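On the adis16400 changes above: the burst transfer no longer sizes itself from indio_dev->scan_bytes. Its length is computed from the number of bus channels (each a 16-bit register, with the timestamp channel excluded), plus one leading DIAG_STAT word on chips flagged ADIS16400_BURST_DIAG_STAT, and that extra word is then skipped when pushing samples so the data stays aligned with the scan elements. A standalone restatement of the arithmetic in plain C, with hypothetical helper names:

    #include <stddef.h>
    #include <stdint.h>

    static size_t burst_length(unsigned int num_channels, int has_diag_stat)
    {
            /* every channel except the timestamp is one 16-bit register */
            size_t len = (num_channels - 1) * sizeof(uint16_t);

            if (has_diag_stat)
                    len += sizeof(uint16_t);        /* leading DIAG_STAT word */
            return len;
    }

    static const void *push_start(const uint8_t *rx_buf, int has_diag_stat)
    {
            /* skip DIAG_STAT so sample 0 lines up with scan element 0 */
            return has_diag_stat ? rx_buf + sizeof(uint16_t) : rx_buf;
    }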
pr_err("mcg table is full. Fail to register network rule.\n"); else if (ret == -ENXIO) @@ -1107,7 +1107,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id) int err; err = mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); + MLX4_CMD_WRAPPED); if (err) pr_err("Fail to detach network rule. registration id = 0x%llx\n", reg_id); @@ -2041,77 +2041,52 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev) static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) { - char name[80]; - int eq_per_port = 0; - int added_eqs = 0; - int total_eqs = 0; - int i, j, eq; - - /* Legacy mode or comp_pool is not large enough */ - if (dev->caps.comp_pool == 0 || - dev->caps.num_ports > dev->caps.comp_pool) - return; - - eq_per_port = dev->caps.comp_pool / dev->caps.num_ports; - - /* Init eq table */ - added_eqs = 0; - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) - added_eqs += eq_per_port; - - total_eqs = dev->caps.num_comp_vectors + added_eqs; + int i, j, eq = 0, total_eqs = 0; - ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL); + ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors, + sizeof(ibdev->eq_table[0]), GFP_KERNEL); if (!ibdev->eq_table) return; - ibdev->eq_added = added_eqs; - - eq = 0; - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) { - for (j = 0; j < eq_per_port; j++) { - snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s", - i, j, dev->persist->pdev->bus->name); - /* Set IRQ for specific name (per ring) */ - if (mlx4_assign_eq(dev, name, NULL, - &ibdev->eq_table[eq])) { - /* Use legacy (same as mlx4_en driver) */ - pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq); - ibdev->eq_table[eq] = - (eq % dev->caps.num_comp_vectors); - } - eq++; + for (i = 1; i <= dev->caps.num_ports; i++) { + for (j = 0; j < mlx4_get_eqs_per_port(dev, i); + j++, total_eqs++) { + if (i > 1 && mlx4_is_eq_shared(dev, total_eqs)) + continue; + ibdev->eq_table[eq] = total_eqs; + if (!mlx4_assign_eq(dev, i, + &ibdev->eq_table[eq])) + eq++; + else + ibdev->eq_table[eq] = -1; } } - /* Fill the reset of the vector with legacy EQ */ - for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++) - ibdev->eq_table[eq++] = i; + for (i = eq; i < dev->caps.num_comp_vectors; + ibdev->eq_table[i++] = -1) + ; /* Advertise the new number of EQs to clients */ - ibdev->ib_dev.num_comp_vectors = total_eqs; + ibdev->ib_dev.num_comp_vectors = eq; } static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) { int i; + int total_eqs = ibdev->ib_dev.num_comp_vectors; - /* no additional eqs were added */ + /* no eqs were allocated */ if (!ibdev->eq_table) return; /* Reset the advertised EQ number */ - ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; + ibdev->ib_dev.num_comp_vectors = 0; - /* Free only the added eqs */ - for (i = 0; i < ibdev->eq_added; i++) { - /* Don't free legacy eqs if used */ - if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors) - continue; + for (i = 0; i < total_eqs; i++) mlx4_release_eq(dev, ibdev->eq_table[i]); - } kfree(ibdev->eq_table); + ibdev->eq_table = NULL; } static void *mlx4_ib_add(struct mlx4_dev *dev) diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index fce3934372a1..ef80e6c99a68 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -523,7 +523,6 @@ struct mlx4_ib_dev { struct mlx4_ib_iboe iboe; int counters[MLX4_MAX_PORTS]; int *eq_table; - int eq_added; struct kobject *iov_parent; 
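A small detail worth noting in the mlx4 EQ rework above: the table allocation moves from kzalloc(total_eqs * sizeof(int)) to kcalloc(), which zeroes the same memory but also checks the count-times-size multiplication for overflow, and ties the element size to the array itself. A userspace stand-in for that allocation style:

    #include <stdlib.h>

    static int *alloc_eq_table(size_t num_comp_vectors)
    {
            int *table;

            /* kcalloc-style: the multiply is overflow-checked inside calloc,
             * and sizeof(*table) tracks the element type automatically */
            table = calloc(num_comp_vectors, sizeof(*table));
            return table;
    }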
struct kobject *ports_parent; struct kobject *dev_ports_parent[MLX4_MFUNC_MAX]; diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig index 10df386c6344..bce263b92821 100644 --- a/drivers/infiniband/hw/mlx5/Kconfig +++ b/drivers/infiniband/hw/mlx5/Kconfig @@ -1,8 +1,6 @@ config MLX5_INFINIBAND tristate "Mellanox Connect-IB HCA support" - depends on NETDEVICES && ETHERNET && PCI - select NET_VENDOR_MELLANOX - select MLX5_CORE + depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE ---help--- This driver provides low-level InfiniBand support for Mellanox Connect-IB PCI Express host channel adapters (HCAs). diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 2ee6b1051975..e2bea9ab93b3 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -590,8 +590,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf, { int err; - err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, - PAGE_SIZE * 2, &buf->buf); + err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf); if (err) return err; @@ -754,7 +753,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, return ERR_PTR(-EINVAL); entries = roundup_pow_of_two(entries + 1); - if (entries > dev->mdev->caps.gen.max_cqes) + if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) return ERR_PTR(-EINVAL); cq = kzalloc(sizeof(*cq), GFP_KERNEL); @@ -921,7 +920,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) int err; u32 fsel; - if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_CQ_MODER)) + if (!MLX5_CAP_GEN(dev->mdev, cq_moderation)) return -ENOSYS; in = kzalloc(sizeof(*in), GFP_KERNEL); @@ -1076,7 +1075,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) int uninitialized_var(cqe_size); unsigned long flags; - if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) { + if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) { pr_info("Firmware does not support resize CQ\n"); return -ENOSYS; } @@ -1085,7 +1084,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) return -EINVAL; entries = roundup_pow_of_two(entries + 1); - if (entries > dev->mdev->caps.gen.max_cqes + 1) + if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) return -EINVAL; if (entries == ibcq->cqe + 1) diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 9cf9a37bb5ff..a770490ebbf1 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port) packet_error = be16_to_cpu(out_mad->status); - dev->mdev->caps.gen.ext_port_cap[port - 1] = (!err && !packet_error) ? + dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ? 
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0; out: @@ -137,3 +137,300 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port) kfree(out_mad); return err; } + +int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev, + struct ib_smp *out_mad) +{ + struct ib_smp *in_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + if (!in_mad) + return -ENOMEM; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, + out_mad); + + kfree(in_mad); + return err; +} + +int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev, + __be64 *sys_image_guid) +{ + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!out_mad) + return -ENOMEM; + + err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad); + if (err) + goto out; + + memcpy(sys_image_guid, out_mad->data + 4, 8); + +out: + kfree(out_mad); + + return err; +} + +int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev, + u16 *max_pkeys) +{ + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!out_mad) + return -ENOMEM; + + err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad); + if (err) + goto out; + + *max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); + +out: + kfree(out_mad); + + return err; +} + +int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev, + u32 *vendor_id) +{ + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!out_mad) + return -ENOMEM; + + err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad); + if (err) + goto out; + + *vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff; + +out: + kfree(out_mad); + + return err; +} + +int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; + + err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memcpy(node_desc, out_mad->data, 64); +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + + err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memcpy(node_guid, out_mad->data + 12, 8); +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index, + u16 *pkey) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; + in_mad->attr_mod = cpu_to_be32(index / 32); + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, + out_mad); + if (err) + goto out; + + *pkey 
= be16_to_cpu(((__be16 *)out_mad->data)[index % 32]); + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, + out_mad); + if (err) + goto out; + + memcpy(gid->raw, out_mad->data + 8, 8); + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; + in_mad->attr_mod = cpu_to_be32(index / 8); + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, + out_mad); + if (err) + goto out; + + memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int ext_active_speed; + int err = -ENOMEM; + + if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) { + mlx5_ib_warn(dev, "invalid port number %d\n", port); + return -EINVAL; + } + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + memset(props, 0, sizeof(*props)); + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad); + if (err) { + mlx5_ib_warn(dev, "err %d\n", err); + goto out; + } + + props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16)); + props->lmc = out_mad->data[34] & 0x7; + props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18)); + props->sm_sl = out_mad->data[36] & 0xf; + props->state = out_mad->data[32] & 0xf; + props->phys_state = out_mad->data[33] >> 4; + props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20)); + props->gid_tbl_len = out_mad->data[50]; + props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg); + props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len; + props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46)); + props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48)); + props->active_width = out_mad->data[31] & 0xf; + props->active_speed = out_mad->data[35] >> 4; + props->max_mtu = out_mad->data[41] & 0xf; + props->active_mtu = out_mad->data[36] >> 4; + props->subnet_timeout = out_mad->data[51] & 0x1f; + props->max_vl_num = out_mad->data[37] >> 4; + props->init_type_reply = out_mad->data[41] >> 4; + + /* Check if extended speeds (EDR/FDR/...) 
are supported */ + if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) { + ext_active_speed = out_mad->data[62] >> 4; + + switch (ext_active_speed) { + case 1: + props->active_speed = 16; /* FDR */ + break; + case 2: + props->active_speed = 32; /* EDR */ + break; + } + } + + /* If reported active speed is QDR, check if is FDR-10 */ + if (props->active_speed == 4) { + if (mdev->port_caps[port - 1].ext_port_cap & + MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { + init_query_mad(in_mad); + in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx5_MAD_IFC(dev, 1, 1, port, + NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + /* Checking LinkSpeedActive for FDR-10 */ + if (out_mad->data[15] & 0x1) + props->active_speed = 8; + } + } + +out: + kfree(in_mad); + kfree(out_mad); + + return err; +} diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 57c9809e8b87..d4dea86052d6 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #include "user.h" @@ -62,32 +63,168 @@ static char mlx5_version[] = DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v" DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; +static enum rdma_link_layer +mlx5_ib_port_link_layer(struct ib_device *device) +{ + struct mlx5_ib_dev *dev = to_mdev(device); + + switch (MLX5_CAP_GEN(dev->mdev, port_type)) { + case MLX5_CAP_PORT_TYPE_IB: + return IB_LINK_LAYER_INFINIBAND; + case MLX5_CAP_PORT_TYPE_ETH: + return IB_LINK_LAYER_ETHERNET; + default: + return IB_LINK_LAYER_UNSPECIFIED; + } +} + +static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev) +{ + return !dev->mdev->issi; +} + +enum { + MLX5_VPORT_ACCESS_METHOD_MAD, + MLX5_VPORT_ACCESS_METHOD_HCA, + MLX5_VPORT_ACCESS_METHOD_NIC, +}; + +static int mlx5_get_vport_access_method(struct ib_device *ibdev) +{ + if (mlx5_use_mad_ifc(to_mdev(ibdev))) + return MLX5_VPORT_ACCESS_METHOD_MAD; + + if (mlx5_ib_port_link_layer(ibdev) == + IB_LINK_LAYER_ETHERNET) + return MLX5_VPORT_ACCESS_METHOD_NIC; + + return MLX5_VPORT_ACCESS_METHOD_HCA; +} + +static int mlx5_query_system_image_guid(struct ib_device *ibdev, + __be64 *sys_image_guid) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; + u64 tmp; + int err; + + switch (mlx5_get_vport_access_method(ibdev)) { + case MLX5_VPORT_ACCESS_METHOD_MAD: + return mlx5_query_mad_ifc_system_image_guid(ibdev, + sys_image_guid); + + case MLX5_VPORT_ACCESS_METHOD_HCA: + err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp); + if (!err) + *sys_image_guid = cpu_to_be64(tmp); + return err; + + default: + return -EINVAL; + } +} + +static int mlx5_query_max_pkeys(struct ib_device *ibdev, + u16 *max_pkeys) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; + + switch (mlx5_get_vport_access_method(ibdev)) { + case MLX5_VPORT_ACCESS_METHOD_MAD: + return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys); + + case MLX5_VPORT_ACCESS_METHOD_HCA: + case MLX5_VPORT_ACCESS_METHOD_NIC: + *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, + pkey_table_size)); + return 0; + + default: + return -EINVAL; + } +} + +static int mlx5_query_vendor_id(struct ib_device *ibdev, + u32 *vendor_id) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + + switch (mlx5_get_vport_access_method(ibdev)) { + case MLX5_VPORT_ACCESS_METHOD_MAD: + return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id); + + case MLX5_VPORT_ACCESS_METHOD_HCA: + case 
MLX5_VPORT_ACCESS_METHOD_NIC: + return mlx5_core_query_vendor_id(dev->mdev, vendor_id); + + default: + return -EINVAL; + } +} + +static int mlx5_query_node_guid(struct mlx5_ib_dev *dev, + __be64 *node_guid) +{ + u64 tmp; + int err; + + switch (mlx5_get_vport_access_method(&dev->ib_dev)) { + case MLX5_VPORT_ACCESS_METHOD_MAD: + return mlx5_query_mad_ifc_node_guid(dev, node_guid); + + case MLX5_VPORT_ACCESS_METHOD_HCA: + err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp); + if (!err) + *node_guid = cpu_to_be64(tmp); + return err; + + default: + return -EINVAL; + } +} + +struct mlx5_reg_node_desc { + u8 desc[64]; +}; + +static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc) +{ + struct mlx5_reg_node_desc in; + + if (mlx5_use_mad_ifc(dev)) + return mlx5_query_mad_ifc_node_desc(dev, node_desc); + + memset(&in, 0, sizeof(in)); + + return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc, + sizeof(struct mlx5_reg_node_desc), + MLX5_REG_NODE_DESC, 0, 0); +} + static int mlx5_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; - struct mlx5_general_caps *gen; + struct mlx5_core_dev *mdev = dev->mdev; int err = -ENOMEM; int max_rq_sg; int max_sq_sg; - u64 flags; - gen = &dev->mdev->caps.gen; - in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); - out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); - if (!in_mad || !out_mad) - goto out; - - init_query_mad(in_mad); - in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + memset(props, 0, sizeof(*props)); + err = mlx5_query_system_image_guid(ibdev, + &props->sys_image_guid); + if (err) + return err; - err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad); + err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys); if (err) - goto out; + return err; - memset(props, 0, sizeof(*props)); + err = mlx5_query_vendor_id(ibdev, &props->vendor_id); + if (err) + return err; props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) | (fw_rev_min(dev->mdev) << 16) | @@ -96,18 +233,18 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN; - flags = gen->flags; - if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) + + if (MLX5_CAP_GEN(mdev, pkv)) props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; - if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR) + if (MLX5_CAP_GEN(mdev, qkv)) props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; - if (flags & MLX5_DEV_CAP_FLAG_APM) + if (MLX5_CAP_GEN(mdev, apm)) props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; - if (flags & MLX5_DEV_CAP_FLAG_XRC) + if (MLX5_CAP_GEN(mdev, xrc)) props->device_cap_flags |= IB_DEVICE_XRC; props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; - if (flags & MLX5_DEV_CAP_FLAG_SIG_HAND_OVER) { + if (MLX5_CAP_GEN(mdev, sho)) { props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER; /* At this stage no support for signature handover */ props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 | @@ -116,221 +253,274 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->sig_guard_cap = IB_GUARD_T10DIF_CRC | IB_GUARD_T10DIF_CSUM; } - if (flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST) + if (MLX5_CAP_GEN(mdev, block_lb_mc)) props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; - props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & - 0xffffff; - props->vendor_part_id = be16_to_cpup((__be16 *)(out_mad->data + 30)); - 
props->hw_ver = be32_to_cpup((__be32 *)(out_mad->data + 32)); - memcpy(&props->sys_image_guid, out_mad->data + 4, 8); + props->vendor_part_id = mdev->pdev->device; + props->hw_ver = mdev->pdev->revision; props->max_mr_size = ~0ull; - props->page_size_cap = gen->min_page_sz; - props->max_qp = 1 << gen->log_max_qp; - props->max_qp_wr = gen->max_wqes; - max_rq_sg = gen->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg); - max_sq_sg = (gen->max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) / - sizeof(struct mlx5_wqe_data_seg); + props->page_size_cap = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz); + props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp); + props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); + max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) / + sizeof(struct mlx5_wqe_data_seg); + max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) - + sizeof(struct mlx5_wqe_ctrl_seg)) / + sizeof(struct mlx5_wqe_data_seg); props->max_sge = min(max_rq_sg, max_sq_sg); - props->max_cq = 1 << gen->log_max_cq; - props->max_cqe = gen->max_cqes - 1; - props->max_mr = 1 << gen->log_max_mkey; - props->max_pd = 1 << gen->log_max_pd; - props->max_qp_rd_atom = 1 << gen->log_max_ra_req_qp; - props->max_qp_init_rd_atom = 1 << gen->log_max_ra_res_qp; - props->max_srq = 1 << gen->log_max_srq; - props->max_srq_wr = gen->max_srq_wqes - 1; - props->local_ca_ack_delay = gen->local_ca_ack_delay; + props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); + props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1; + props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); + props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd); + props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp); + props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp); + props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq); + props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1; + props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay); props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; props->max_srq_sge = max_rq_sg - 1; props->max_fast_reg_page_list_len = (unsigned int)-1; - props->local_ca_ack_delay = gen->local_ca_ack_delay; props->atomic_cap = IB_ATOMIC_NONE; props->masked_atomic_cap = IB_ATOMIC_NONE; - props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); - props->max_mcast_grp = 1 << gen->log_max_mcg; - props->max_mcast_qp_attach = gen->max_qp_mcg; + props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg); + props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg); props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */ #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - if (dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG) + if (MLX5_CAP_GEN(mdev, pg)) props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; props->odp_caps = dev->odp_caps; #endif -out: - kfree(in_mad); - kfree(out_mad); - - return err; + return 0; } -int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, - struct ib_port_attr *props) +enum mlx5_ib_width { + MLX5_IB_WIDTH_1X = 1 << 0, + MLX5_IB_WIDTH_2X = 1 << 1, + MLX5_IB_WIDTH_4X = 1 << 2, + MLX5_IB_WIDTH_8X = 1 << 3, + MLX5_IB_WIDTH_12X = 1 << 4 +}; + +static int translate_active_width(struct ib_device *ibdev, u8 active_width, + u8 *ib_width) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; - struct mlx5_general_caps *gen; - int ext_active_speed; - int err = -ENOMEM; - - gen = &dev->mdev->caps.gen; 
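The mlx5 conversion above replaces the cached mlx5_general_caps fields with direct MLX5_CAP_GEN() reads. Most size limits are log2-encoded in the capability layout, so a limit is recovered as 1 << log_field, and ring-style "max entries" values take a further - 1. (In passing: the new max_cqe line reads log_max_eq_sz, while the cq.c hunks earlier bound CQ sizes with log_max_cq_sz; the EQ field there looks like an oversight in the original patch.) A plain-C stand-in for the decoding, not the mlx5 API:

    #include <stdint.h>

    static inline uint32_t cap_limit(uint8_t log_sz)
    {
            return UINT32_C(1) << log_sz;   /* e.g. log_max_qp -> max_qp */
    }

    static inline uint32_t cap_max_entries(uint8_t log_sz)
    {
            return cap_limit(log_sz) - 1;   /* e.g. log_max_srq_sz -> max_srq_wr */
    }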
- if (port < 1 || port > gen->num_ports) { - mlx5_ib_warn(dev, "invalid port number %d\n", port); - return -EINVAL; + int err = 0; + + if (active_width & MLX5_IB_WIDTH_1X) { + *ib_width = IB_WIDTH_1X; + } else if (active_width & MLX5_IB_WIDTH_2X) { + mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n", + (int)active_width); + err = -EINVAL; + } else if (active_width & MLX5_IB_WIDTH_4X) { + *ib_width = IB_WIDTH_4X; + } else if (active_width & MLX5_IB_WIDTH_8X) { + *ib_width = IB_WIDTH_8X; + } else if (active_width & MLX5_IB_WIDTH_12X) { + *ib_width = IB_WIDTH_12X; + } else { + mlx5_ib_dbg(dev, "Invalid active_width %d\n", + (int)active_width); + err = -EINVAL; } - in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); - out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); - if (!in_mad || !out_mad) - goto out; - - memset(props, 0, sizeof(*props)); - - init_query_mad(in_mad); - in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; - in_mad->attr_mod = cpu_to_be32(port); + return err; +} - err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad); - if (err) { - mlx5_ib_warn(dev, "err %d\n", err); - goto out; +static int mlx5_mtu_to_ib_mtu(int mtu) +{ + switch (mtu) { + case 256: return 1; + case 512: return 2; + case 1024: return 3; + case 2048: return 4; + case 4096: return 5; + default: + pr_warn("invalid mtu\n"); + return -1; } +} +enum ib_max_vl_num { + __IB_MAX_VL_0 = 1, + __IB_MAX_VL_0_1 = 2, + __IB_MAX_VL_0_3 = 3, + __IB_MAX_VL_0_7 = 4, + __IB_MAX_VL_0_14 = 5, +}; - props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16)); - props->lmc = out_mad->data[34] & 0x7; - props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18)); - props->sm_sl = out_mad->data[36] & 0xf; - props->state = out_mad->data[32] & 0xf; - props->phys_state = out_mad->data[33] >> 4; - props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20)); - props->gid_tbl_len = out_mad->data[50]; - props->max_msg_sz = 1 << gen->log_max_msg; - props->pkey_tbl_len = gen->port[port - 1].pkey_table_len; - props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46)); - props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48)); - props->active_width = out_mad->data[31] & 0xf; - props->active_speed = out_mad->data[35] >> 4; - props->max_mtu = out_mad->data[41] & 0xf; - props->active_mtu = out_mad->data[36] >> 4; - props->subnet_timeout = out_mad->data[51] & 0x1f; - props->max_vl_num = out_mad->data[37] >> 4; - props->init_type_reply = out_mad->data[41] >> 4; - - /* Check if extended speeds (EDR/FDR/...) 
are supported */ - if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) { - ext_active_speed = out_mad->data[62] >> 4; - - switch (ext_active_speed) { - case 1: - props->active_speed = 16; /* FDR */ - break; - case 2: - props->active_speed = 32; /* EDR */ - break; - } - } +enum mlx5_vl_hw_cap { + MLX5_VL_HW_0 = 1, + MLX5_VL_HW_0_1 = 2, + MLX5_VL_HW_0_2 = 3, + MLX5_VL_HW_0_3 = 4, + MLX5_VL_HW_0_4 = 5, + MLX5_VL_HW_0_5 = 6, + MLX5_VL_HW_0_6 = 7, + MLX5_VL_HW_0_7 = 8, + MLX5_VL_HW_0_14 = 15 +}; - /* If reported active speed is QDR, check if is FDR-10 */ - if (props->active_speed == 4) { - if (gen->ext_port_cap[port - 1] & - MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { - init_query_mad(in_mad); - in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; - in_mad->attr_mod = cpu_to_be32(port); - - err = mlx5_MAD_IFC(dev, 1, 1, port, - NULL, NULL, in_mad, out_mad); - if (err) - goto out; - - /* Checking LinkSpeedActive for FDR-10 */ - if (out_mad->data[15] & 0x1) - props->active_speed = 8; - } - } +static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap, + u8 *max_vl_num) +{ + switch (vl_hw_cap) { + case MLX5_VL_HW_0: + *max_vl_num = __IB_MAX_VL_0; + break; + case MLX5_VL_HW_0_1: + *max_vl_num = __IB_MAX_VL_0_1; + break; + case MLX5_VL_HW_0_3: + *max_vl_num = __IB_MAX_VL_0_3; + break; + case MLX5_VL_HW_0_7: + *max_vl_num = __IB_MAX_VL_0_7; + break; + case MLX5_VL_HW_0_14: + *max_vl_num = __IB_MAX_VL_0_14; + break; -out: - kfree(in_mad); - kfree(out_mad); + default: + return -EINVAL; + } - return err; + return 0; } -static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, - union ib_gid *gid) +static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) { - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; - int err = -ENOMEM; + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; + struct mlx5_hca_vport_context *rep; + int max_mtu; + int oper_mtu; + int err; + u8 ib_link_width_oper; + u8 vl_hw_cap; - in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); - out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); - if (!in_mad || !out_mad) + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (!rep) { + err = -ENOMEM; goto out; + } - init_query_mad(in_mad); - in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; - in_mad->attr_mod = cpu_to_be32(port); + memset(props, 0, sizeof(*props)); + + err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep); + if (err) + goto out; - err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); + props->lid = rep->lid; + props->lmc = rep->lmc; + props->sm_lid = rep->sm_lid; + props->sm_sl = rep->sm_sl; + props->state = rep->vport_state; + props->phys_state = rep->port_physical_state; + props->port_cap_flags = rep->cap_mask1; + props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size)); + props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg); + props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size)); + props->bad_pkey_cntr = rep->pkey_violation_counter; + props->qkey_viol_cntr = rep->qkey_violation_counter; + props->subnet_timeout = rep->subnet_timeout; + props->init_type_reply = rep->init_type_reply; + + err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port); if (err) goto out; - memcpy(gid->raw, out_mad->data + 8, 8); + err = translate_active_width(ibdev, ib_link_width_oper, + &props->active_width); + if (err) + goto out; + err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB, + port); + if 
(err) + goto out; + + err = mlx5_query_port_max_mtu(mdev, &max_mtu, port); + if (err) + goto out; - init_query_mad(in_mad); - in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; - in_mad->attr_mod = cpu_to_be32(index / 8); + props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu); - err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); + err = mlx5_query_port_oper_mtu(mdev, &oper_mtu, port); if (err) goto out; - memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); + props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu); + + err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port); + if (err) + goto out; + err = translate_max_vl_num(ibdev, vl_hw_cap, + &props->max_vl_num); out: - kfree(in_mad); - kfree(out_mad); + kfree(rep); return err; } -static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, - u16 *pkey) +int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) { - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; - int err = -ENOMEM; + switch (mlx5_get_vport_access_method(ibdev)) { + case MLX5_VPORT_ACCESS_METHOD_MAD: + return mlx5_query_mad_ifc_port(ibdev, port, props); - in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); - out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); - if (!in_mad || !out_mad) - goto out; + case MLX5_VPORT_ACCESS_METHOD_HCA: + return mlx5_query_hca_port(ibdev, port, props); - init_query_mad(in_mad); - in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; - in_mad->attr_mod = cpu_to_be32(index / 32); + default: + return -EINVAL; + } +} - err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); - if (err) - goto out; +static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; - *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]); + switch (mlx5_get_vport_access_method(ibdev)) { + case MLX5_VPORT_ACCESS_METHOD_MAD: + return mlx5_query_mad_ifc_gids(ibdev, port, index, gid); + + case MLX5_VPORT_ACCESS_METHOD_HCA: + return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid); + + default: + return -EINVAL; + } -out: - kfree(in_mad); - kfree(out_mad); - return err; } -struct mlx5_reg_node_desc { - u8 desc[64]; -}; +static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, + u16 *pkey) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; + + switch (mlx5_get_vport_access_method(ibdev)) { + case MLX5_VPORT_ACCESS_METHOD_MAD: + return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey); + + case MLX5_VPORT_ACCESS_METHOD_HCA: + case MLX5_VPORT_ACCESS_METHOD_NIC: + return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index, + pkey); + default: + return -EINVAL; + } +} static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask, struct ib_device_modify *props) @@ -392,7 +582,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, struct mlx5_ib_alloc_ucontext_req_v2 req; struct mlx5_ib_alloc_ucontext_resp resp; struct mlx5_ib_ucontext *context; - struct mlx5_general_caps *gen; struct mlx5_uuar_info *uuari; struct mlx5_uar *uars; int gross_uuars; @@ -403,7 +592,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, int i; size_t reqlen; - gen = &dev->mdev->caps.gen; if (!dev->ib_active) return ERR_PTR(-EAGAIN); @@ -436,14 +624,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; 
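The query-path restructuring above routes every port, GID and pkey query through mlx5_get_vport_access_method(): when mdev->issi is zero (the older firmware interface), queries stay on SMP MADs; otherwise they go through HCA (InfiniBand) or NIC (Ethernet) vport commands. A compact sketch of that dispatch shape, with invented names standing in for the mlx5 symbols:

    #include <errno.h>

    enum access_method { ACCESS_MAD, ACCESS_HCA, ACCESS_NIC };

    static enum access_method pick_access(int issi_is_zero, int eth_link)
    {
            if (issi_is_zero)
                    return ACCESS_MAD;      /* old firmware: SMP MADs */
            return eth_link ? ACCESS_NIC : ACCESS_HCA;
    }

    static int query_max_pkeys(enum access_method m, unsigned int *max_pkeys)
    {
            switch (m) {
            case ACCESS_MAD:
                    return -ENOSYS;         /* would issue a NODE_INFO MAD here */
            case ACCESS_HCA:
            case ACCESS_NIC:
                    *max_pkeys = 128;       /* stand-in for the HCA capability */
                    return 0;
            }
            return -EINVAL;
    }

Choosing the method once and switching on it in each helper keeps the MAD fallback alive for old firmware without scattering issi checks through every query function.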
gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; - resp.qp_tab_size = 1 << gen->log_max_qp; - resp.bf_reg_size = gen->bf_reg_size; - resp.cache_line_size = L1_CACHE_BYTES; - resp.max_sq_desc_sz = gen->max_sq_desc_sz; - resp.max_rq_desc_sz = gen->max_rq_desc_sz; - resp.max_send_wqebb = gen->max_wqes; - resp.max_recv_wr = gen->max_wqes; - resp.max_srq_recv_wr = gen->max_srq_wqes; + resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); + resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); + resp.cache_line_size = L1_CACHE_BYTES; + resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); + resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); + resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); + resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); + resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) @@ -493,7 +681,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, mutex_init(&context->db_page_mutex); resp.tot_uuars = req.total_num_uuars; - resp.num_ports = gen->num_ports; + resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports); err = ib_copy_to_udata(udata, &resp, sizeof(resp) - sizeof(resp.reserved)); if (err) @@ -731,37 +919,15 @@ static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) static int init_node_data(struct mlx5_ib_dev *dev) { - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; - int err = -ENOMEM; - - in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); - out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); - if (!in_mad || !out_mad) - goto out; - - init_query_mad(in_mad); - in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; - - err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); - if (err) - goto out; - - memcpy(dev->ib_dev.node_desc, out_mad->data, 64); - - in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + int err; - err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); + err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc); if (err) - goto out; + return err; - dev->mdev->rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32)); - memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); + dev->mdev->rev_id = dev->mdev->pdev->revision; -out: - kfree(in_mad); - kfree(out_mad); - return err; + return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid); } static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr, @@ -895,11 +1061,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, static void get_ext_port_caps(struct mlx5_ib_dev *dev) { - struct mlx5_general_caps *gen; int port; - gen = &dev->mdev->caps.gen; - for (port = 1; port <= gen->num_ports; port++) + for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) mlx5_query_ext_port_caps(dev, port); } @@ -907,11 +1071,9 @@ static int get_port_caps(struct mlx5_ib_dev *dev) { struct ib_device_attr *dprops = NULL; struct ib_port_attr *pprops = NULL; - struct mlx5_general_caps *gen; int err = -ENOMEM; int port; - gen = &dev->mdev->caps.gen; pprops = kmalloc(sizeof(*pprops), GFP_KERNEL); if (!pprops) goto out; @@ -926,14 +1088,17 @@ static int get_port_caps(struct mlx5_ib_dev *dev) goto out; } - for (port = 1; port <= gen->num_ports; port++) { + for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) { err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); if (err) { - mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err); + mlx5_ib_warn(dev, "query_port %d failed %d\n", + port, 
err); break; } - gen->port[port - 1].pkey_table_len = dprops->max_pkeys; - gen->port[port - 1].gid_table_len = pprops->gid_tbl_len; + dev->mdev->port_caps[port - 1].pkey_table_len = + dprops->max_pkeys; + dev->mdev->port_caps[port - 1].gid_table_len = + pprops->gid_tbl_len; mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", dprops->max_pkeys, pprops->gid_tbl_len); } @@ -1159,8 +1324,29 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) atomic_inc(&devr->p0->usecnt); atomic_set(&devr->s0->usecnt, 0); + memset(&attr, 0, sizeof(attr)); + attr.attr.max_sge = 1; + attr.attr.max_wr = 1; + attr.srq_type = IB_SRQT_BASIC; + devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL); + if (IS_ERR(devr->s1)) { + ret = PTR_ERR(devr->s1); + goto error5; + } + devr->s1->device = &dev->ib_dev; + devr->s1->pd = devr->p0; + devr->s1->uobject = NULL; + devr->s1->event_handler = NULL; + devr->s1->srq_context = NULL; + devr->s1->srq_type = IB_SRQT_BASIC; + devr->s1->ext.xrc.cq = devr->c0; + atomic_inc(&devr->p0->usecnt); + atomic_set(&devr->s0->usecnt, 0); + return 0; +error5: + mlx5_ib_destroy_srq(devr->s0); error4: mlx5_ib_dealloc_xrcd(devr->x1); error3: @@ -1175,6 +1361,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) static void destroy_dev_resources(struct mlx5_ib_resources *devr) { + mlx5_ib_destroy_srq(devr->s1); mlx5_ib_destroy_srq(devr->s0); mlx5_ib_dealloc_xrcd(devr->x0); mlx5_ib_dealloc_xrcd(devr->x1); @@ -1188,6 +1375,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) int err; int i; + /* don't create IB instance over Eth ports, no RoCE yet! */ + if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) + return NULL; + printk_once(KERN_INFO "%s", mlx5_version); dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); @@ -1200,15 +1391,16 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) if (err) goto err_dealloc; - get_ext_port_caps(dev); + if (mlx5_use_mad_ifc(dev)) + get_ext_port_caps(dev); MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); dev->ib_dev.owner = THIS_MODULE; dev->ib_dev.node_type = RDMA_NODE_IB_CA; - dev->ib_dev.local_dma_lkey = mdev->caps.gen.reserved_lkey; - dev->num_ports = mdev->caps.gen.num_ports; + dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; + dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); dev->ib_dev.phys_port_cnt = dev->num_ports; dev->ib_dev.num_comp_vectors = dev->mdev->priv.eq_table.num_comp_vectors; @@ -1286,9 +1478,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list; dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; - mlx5_ib_internal_query_odp_caps(dev); + mlx5_ib_internal_fill_odp_caps(dev); - if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) { + if (MLX5_CAP_GEN(mdev, xrc)) { dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd; dev->ib_dev.uverbs_cmd_mask |= diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index dff1cfcdf476..873dc354766a 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -415,6 +415,7 @@ struct mlx5_ib_resources { struct ib_xrcd *x1; struct ib_pd *p0; struct ib_srq *s0; + struct ib_srq *s1; }; struct mlx5_ib_dev { @@ -594,6 +595,22 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd); int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); int 
mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port); +int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev, + struct ib_smp *out_mad); +int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev, + __be64 *sys_image_guid); +int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev, + u16 *max_pkeys); +int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev, + u32 *vendor_id); +int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc); +int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid); +int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index, + u16 *pkey); +int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid); +int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props); int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props); int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev); @@ -617,7 +634,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING extern struct workqueue_struct *mlx5_ib_page_fault_wq; -int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev); +void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev); void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault); void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp); @@ -631,9 +648,9 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, unsigned long end); #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ -static inline int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev) +static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) { - return 0; + return; } static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {} diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 71c593583864..bc9a0de897cb 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -975,8 +975,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, struct mlx5_ib_mr *mr; int inlen; int err; - bool pg_cap = !!(dev->mdev->caps.gen.flags & - MLX5_DEV_CAP_FLAG_ON_DMND_PG); + bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)); mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 5099db08afd2..aa8391e75385 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -109,40 +109,33 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, ib_umem_odp_unmap_dma_pages(umem, start, end); } -#define COPY_ODP_BIT_MLX_TO_IB(reg, ib_caps, field_name, bit_name) do { \ - if (be32_to_cpu(reg.field_name) & MLX5_ODP_SUPPORT_##bit_name) \ - ib_caps->field_name |= IB_ODP_SUPPORT_##bit_name; \ -} while (0) - -int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev) +void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) { - int err; - struct mlx5_odp_caps hw_caps; struct ib_odp_caps *caps = &dev->odp_caps; memset(caps, 0, sizeof(*caps)); - if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)) - return 0; - - err = mlx5_query_odp_caps(dev->mdev, &hw_caps); - if (err) - goto out; + if (!MLX5_CAP_GEN(dev->mdev, pg)) + return; caps->general_caps = IB_ODP_SUPPORT; - COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.ud_odp_caps, - SEND); - COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps, - SEND); - 
COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps, - RECV); - COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps, - WRITE); - COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps, - READ); - -out: - return err; + + if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send)) + caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND; + + if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send)) + caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND; + + if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive)) + caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV; + + if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write)) + caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE; + + if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read)) + caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ; + + return; } static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev, diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index d35f62d4f4c5..203c8a45e095 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -220,13 +220,11 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) { - struct mlx5_general_caps *gen; int wqe_size; int wq_size; - gen = &dev->mdev->caps.gen; /* Sanity check RQ size before proceeding */ - if (cap->max_recv_wr > gen->max_wqes) + if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) return -EINVAL; if (!has_rq) { @@ -246,10 +244,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size; wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB); qp->rq.wqe_cnt = wq_size / wqe_size; - if (wqe_size > gen->max_rq_desc_sz) { + if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) { mlx5_ib_dbg(dev, "wqe_size %d, max %d\n", wqe_size, - gen->max_rq_desc_sz); + MLX5_CAP_GEN(dev->mdev, + max_wqe_sz_rq)); return -EINVAL; } qp->rq.wqe_shift = ilog2(wqe_size); @@ -330,11 +329,9 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr) static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, struct mlx5_ib_qp *qp) { - struct mlx5_general_caps *gen; int wqe_size; int wq_size; - gen = &dev->mdev->caps.gen; if (!attr->cap.max_send_wr) return 0; @@ -343,9 +340,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, if (wqe_size < 0) return wqe_size; - if (wqe_size > gen->max_sq_desc_sz) { + if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n", - wqe_size, gen->max_sq_desc_sz); + wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); return -EINVAL; } @@ -358,9 +355,10 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; - if (qp->sq.wqe_cnt > gen->max_wqes) { + if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n", - qp->sq.wqe_cnt, gen->max_wqes); + qp->sq.wqe_cnt, + 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); return -ENOMEM; } qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); @@ -375,13 +373,11 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) { - struct mlx5_general_caps *gen; int 
desc_sz = 1 << qp->sq.wqe_shift; - gen = &dev->mdev->caps.gen; - if (desc_sz > gen->max_sq_desc_sz) { + if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n", - desc_sz, gen->max_sq_desc_sz); + desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); return -EINVAL; } @@ -393,9 +389,10 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev, qp->sq.wqe_cnt = ucmd->sq_wqe_count; - if (qp->sq.wqe_cnt > gen->max_wqes) { + if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n", - qp->sq.wqe_cnt, gen->max_wqes); + qp->sq.wqe_cnt, + 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); return -EINVAL; } @@ -768,7 +765,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); - err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf); + err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf); if (err) { mlx5_ib_dbg(dev, "err %d\n", err); goto err_uuar; @@ -866,22 +863,21 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct ib_udata *udata, struct mlx5_ib_qp *qp) { struct mlx5_ib_resources *devr = &dev->devr; + struct mlx5_core_dev *mdev = dev->mdev; struct mlx5_ib_create_qp_resp resp; struct mlx5_create_qp_mbox_in *in; - struct mlx5_general_caps *gen; struct mlx5_ib_create_qp ucmd; int inlen = sizeof(*in); int err; mlx5_ib_odp_create_qp(qp); - gen = &dev->mdev->caps.gen; mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { - if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) { + if (!MLX5_CAP_GEN(mdev, block_lb_mc)) { mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n"); return -EINVAL; } else { @@ -914,15 +910,17 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, if (pd) { if (pd->uobject) { + __u32 max_wqes = + 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count); if (ucmd.rq_wqe_shift != qp->rq.wqe_shift || ucmd.rq_wqe_count != qp->rq.wqe_cnt) { mlx5_ib_dbg(dev, "invalid rq params\n"); return -EINVAL; } - if (ucmd.sq_wqe_count > gen->max_wqes) { + if (ucmd.sq_wqe_count > max_wqes) { mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n", - ucmd.sq_wqe_count, gen->max_wqes); + ucmd.sq_wqe_count, max_wqes); return -EINVAL; } err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); @@ -1014,7 +1012,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn); } else { in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); - in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); + in->ctx.rq_type_srqn |= + cpu_to_be32(to_msrq(devr->s1)->msrq.srqn); } } @@ -1226,7 +1225,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { - struct mlx5_general_caps *gen; struct mlx5_ib_dev *dev; struct mlx5_ib_qp *qp; u16 xrcdn = 0; @@ -1244,12 +1242,11 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, } dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device); } - gen = &dev->mdev->caps.gen; switch (init_attr->qp_type) { case IB_QPT_XRC_TGT: case IB_QPT_XRC_INI: - if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) { + if (!MLX5_CAP_GEN(dev->mdev, xrc)) { mlx5_ib_dbg(dev, "XRC not supported\n"); return 
ERR_PTR(-ENOSYS); } @@ -1356,9 +1353,6 @@ enum { static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) { - struct mlx5_general_caps *gen; - - gen = &dev->mdev->caps.gen; if (rate == IB_RATE_PORT_CURRENT) { return 0; } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) { @@ -1366,7 +1360,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) } else { while (rate != IB_RATE_2_5_GBPS && !(1 << (rate + MLX5_STAT_RATE_OFFSET) & - gen->stat_rate_support)) + MLX5_CAP_GEN(dev->mdev, stat_rate_support))) --rate; } @@ -1377,10 +1371,8 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah, struct mlx5_qp_path *path, u8 port, int attr_mask, u32 path_flags, const struct ib_qp_attr *attr) { - struct mlx5_general_caps *gen; int err; - gen = &dev->mdev->caps.gen; path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0; @@ -1391,9 +1383,11 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah, path->rlid = cpu_to_be16(ah->dlid); if (ah->ah_flags & IB_AH_GRH) { - if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) { + if (ah->grh.sgid_index >= + dev->mdev->port_caps[port - 1].gid_table_len) { pr_err("sgid_index (%u) too large. max is %d\n", - ah->grh.sgid_index, gen->port[port - 1].gid_table_len); + ah->grh.sgid_index, + dev->mdev->port_caps[port - 1].gid_table_len); return -EINVAL; } path->grh_mlid |= 1 << 7; @@ -1570,7 +1564,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, struct mlx5_ib_qp *qp = to_mqp(ibqp); struct mlx5_ib_cq *send_cq, *recv_cq; struct mlx5_qp_context *context; - struct mlx5_general_caps *gen; struct mlx5_modify_qp_mbox_in *in; struct mlx5_ib_pd *pd; enum mlx5_qp_state mlx5_cur, mlx5_new; @@ -1579,7 +1572,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, int mlx5_st; int err; - gen = &dev->mdev->caps.gen; in = kzalloc(sizeof(*in), GFP_KERNEL); if (!in) return -ENOMEM; @@ -1619,7 +1611,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, err = -EINVAL; goto out; } - context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg; + context->mtu_msgmax = (attr->path_mtu << 5) | + (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg); } if (attr_mask & IB_QP_DEST_QPN) @@ -1777,11 +1770,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, struct mlx5_ib_dev *dev = to_mdev(ibqp->device); struct mlx5_ib_qp *qp = to_mqp(ibqp); enum ib_qp_state cur_state, new_state; - struct mlx5_general_caps *gen; int err = -EINVAL; int port; - gen = &dev->mdev->caps.gen; mutex_lock(&qp->mutex); cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; @@ -1793,21 +1784,25 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, goto out; if ((attr_mask & IB_QP_PORT) && - (attr->port_num == 0 || attr->port_num > gen->num_ports)) + (attr->port_num == 0 || + attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports))) goto out; if (attr_mask & IB_QP_PKEY_INDEX) { port = attr_mask & IB_QP_PORT ? 
attr->port_num : qp->port; - if (attr->pkey_index >= gen->port[port - 1].pkey_table_len) + if (attr->pkey_index >= + dev->mdev->port_caps[port - 1].pkey_table_len) goto out; } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && - attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp)) + attr->max_rd_atomic > + (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) goto out; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && - attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp)) + attr->max_dest_rd_atomic > + (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) goto out; if (cur_state == new_state && cur_state == IB_QPS_RESET) { @@ -3009,7 +3004,7 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at ib_ah_attr->port_num = path->port; if (ib_ah_attr->port_num == 0 || - ib_ah_attr->port_num > dev->caps.gen.num_ports) + ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports)) return; ib_ah_attr->sl = path->sl & 0xf; @@ -3135,12 +3130,10 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - struct mlx5_general_caps *gen; struct mlx5_ib_xrcd *xrcd; int err; - gen = &dev->mdev->caps.gen; - if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) + if (!MLX5_CAP_GEN(dev->mdev, xrc)) return ERR_PTR(-ENOSYS); xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL); diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 02d77a29764d..e008505e96e9 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -165,7 +165,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, return err; } - if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) { + if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) { mlx5_ib_dbg(dev, "buf alloc failed\n"); err = -ENOMEM; goto err_db; @@ -236,7 +236,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct mlx5_general_caps *gen; struct mlx5_ib_srq *srq; int desc_size; int buf_size; @@ -245,13 +244,13 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, int uninitialized_var(inlen); int is_xrc; u32 flgs, xrcdn; + __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); - gen = &dev->mdev->caps.gen; /* Sanity check SRQ size before proceeding */ - if (init_attr->attr.max_wr >= gen->max_srq_wqes) { + if (init_attr->attr.max_wr >= max_srq_wqes) { mlx5_ib_dbg(dev, "max_wr %d, cap %d\n", init_attr->attr.max_wr, - gen->max_srq_wqes); + max_srq_wqes); return ERR_PTR(-EINVAL); } @@ -303,7 +302,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn); in->ctx.db_record = cpu_to_be64(srq->db.dma); - err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen); + err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc); kvfree(in); if (err) { mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 327529ee85eb..575a072d765f 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -65,6 +65,8 @@ static int isert_rdma_accept(struct isert_conn *isert_conn); struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); +static void isert_release_work(struct work_struct *work); + static inline bool isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) { @@ -547,11 +549,11 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc, return 0; 
err_prot_mr: - ib_dereg_mr(desc->pi_ctx->prot_mr); + ib_dereg_mr(pi_ctx->prot_mr); err_prot_frpl: - ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); + ib_free_fast_reg_page_list(pi_ctx->prot_frpl); err_pi_ctx: - kfree(desc->pi_ctx); + kfree(pi_ctx); return ret; } @@ -648,6 +650,7 @@ isert_init_conn(struct isert_conn *isert_conn) mutex_init(&isert_conn->mutex); spin_lock_init(&isert_conn->pool_lock); INIT_LIST_HEAD(&isert_conn->fr_pool); + INIT_WORK(&isert_conn->release_work, isert_release_work); } static void @@ -925,6 +928,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, { struct isert_np *isert_np = cma_id->context; struct isert_conn *isert_conn; + bool terminating = false; if (isert_np->np_cm_id == cma_id) return isert_np_cma_handler(cma_id->context, event); @@ -932,12 +936,25 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, isert_conn = cma_id->qp->qp_context; mutex_lock(&isert_conn->mutex); + terminating = (isert_conn->state == ISER_CONN_TERMINATING); isert_conn_terminate(isert_conn); mutex_unlock(&isert_conn->mutex); isert_info("conn %p completing wait\n", isert_conn); complete(&isert_conn->wait); + if (terminating) + goto out; + + mutex_lock(&isert_np->np_accept_mutex); + if (!list_empty(&isert_conn->accept_node)) { + list_del_init(&isert_conn->accept_node); + isert_put_conn(isert_conn); + queue_work(isert_release_wq, &isert_conn->release_work); + } + mutex_unlock(&isert_np->np_accept_mutex); + +out: return 0; } @@ -2380,7 +2397,6 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, page_off = offset % PAGE_SIZE; send_wr->sg_list = ib_sge; - send_wr->num_sge = sg_nents; send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; /* * Perform mapping of TCM scatterlist memory ib_sge dma_addr. @@ -2400,14 +2416,17 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, ib_sge->addr, ib_sge->length, ib_sge->lkey); page_off = 0; data_left -= ib_sge->length; + if (!data_left) + break; ib_sge++; isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge); } + send_wr->num_sge = ++i; isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", send_wr->sg_list, send_wr->num_sge); - return sg_nents; + return send_wr->num_sge; } static int @@ -3366,7 +3385,6 @@ static void isert_wait_conn(struct iscsi_conn *conn) isert_wait4flush(isert_conn); isert_wait4logout(isert_conn); - INIT_WORK(&isert_conn->release_work, isert_release_work); queue_work(isert_release_wq, &isert_conn->release_work); } @@ -3374,6 +3392,7 @@ static void isert_free_conn(struct iscsi_conn *conn) { struct isert_conn *isert_conn = conn->context; + isert_wait4flush(isert_conn); isert_put_conn(isert_conn); } diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 7752bd59d4b7..a353b7de6d22 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1063,9 +1063,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse) right = (packet[1] & 0x02) >> 1; middle = (packet[1] & 0x04) >> 2; - /* Divide 2 since trackpoint's speed is too fast */ - input_report_rel(dev2, REL_X, (char)x / 2); - input_report_rel(dev2, REL_Y, -((char)y / 2)); + input_report_rel(dev2, REL_X, (char)x); + input_report_rel(dev2, REL_Y, -((char)y)); input_report_key(dev2, BTN_LEFT, left); input_report_key(dev2, BTN_RIGHT, right); diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 79363b687195..ce3d40004458 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1376,10 +1376,11 @@ 
static bool elantech_is_signature_valid(const unsigned char *param) return true; /* - * Some models have a revision higher then 20. Meaning param[2] may - * be 10 or 20, skip the rates check for these. + * Some hw_version >= 4 models have a revision higher than 20, meaning + * that param[2] may be 10 or 20; skip the rates check for these. */ - if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40) + if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f && + param[2] < 40) return true; for (i = 0; i < ARRAY_SIZE(rates); i++) @@ -1555,6 +1556,7 @@ static int elantech_set_properties(struct elantech_data *etd) case 9: case 10: case 13: + case 14: etd->hw_version = 4; break; default: diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 630af73e98c4..35c8d0ceabee 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -150,6 +150,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = { {ANY_BOARD_ID, 2961}, 1024, 5112, 2024, 4832 }, + { + (const char * const []){"LEN2000", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, + 1024, 5113, 2021, 4832 + }, { (const char * const []){"LEN2001", NULL}, {ANY_BOARD_ID, ANY_BOARD_ID}, @@ -191,7 +196,7 @@ static const char * const topbuttonpad_pnp_ids[] = { "LEN0045", "LEN0047", "LEN0049", - "LEN2000", + "LEN2000", /* S540 */ "LEN2001", /* Edge E431 */ "LEN2002", /* Edge E531 */ "LEN2003", diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index e43d48956dea..e1c7e9e51045 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2930,6 +2930,7 @@ static void *alloc_coherent(struct device *dev, size_t size, size = PAGE_ALIGN(size); dma_mask = dev->coherent_dma_mask; flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); + flag |= __GFP_ZERO; page = alloc_pages(flag | __GFP_NOWARN, get_order(size)); if (!page) { diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 68d43beccb7e..5ecfaf29933a 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -422,6 +422,14 @@ static int dmar_map_gfx = 1; static int dmar_forcedac; static int intel_iommu_strict; static int intel_iommu_superpage = 1; +static int intel_iommu_ecs = 1; + +/* We only actually use ECS when PASID support (on the new bit 40) + * is also advertised. Some early implementations (the ones with + * PASID support on bit 28) have issues even when we *only* use + * extended root/context tables.
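+ * ECS is therefore gated on both capability bits: the ecs_enabled() + * macro below checks them together, and also honours the + * intel_iommu=ecs_off command line option via intel_iommu_ecs.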
*/ +#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \ + ecap_pasid(iommu->ecap)) int intel_iommu_gfx_mapped; EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); @@ -465,6 +473,10 @@ static int __init intel_iommu_setup(char *str) printk(KERN_INFO "Intel-IOMMU: disable supported super page\n"); intel_iommu_superpage = 0; + } else if (!strncmp(str, "ecs_off", 7)) { + printk(KERN_INFO + "Intel-IOMMU: disable extended context table support\n"); + intel_iommu_ecs = 0; } str += strcspn(str, ","); @@ -669,7 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu struct context_entry *context; u64 *entry; - if (ecap_ecs(iommu->ecap)) { + if (ecs_enabled(iommu)) { if (devfn >= 0x80) { devfn -= 0x80; entry = &root->hi; @@ -696,6 +708,11 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu return &context[devfn]; } +static int iommu_dummy(struct device *dev) +{ + return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; +} + static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) { struct dmar_drhd_unit *drhd = NULL; @@ -705,6 +722,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf u16 segment = 0; int i; + if (iommu_dummy(dev)) + return NULL; + if (dev_is_pci(dev)) { pdev = to_pci_dev(dev); segment = pci_domain_nr(pdev->bus); @@ -798,7 +818,7 @@ static void free_context_table(struct intel_iommu *iommu) if (context) free_pgtable_page(context); - if (!ecap_ecs(iommu->ecap)) + if (!ecs_enabled(iommu)) continue; context = iommu_context_addr(iommu, i, 0x80, 0); @@ -1133,7 +1153,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu) unsigned long flag; addr = virt_to_phys(iommu->root_entry); - if (ecap_ecs(iommu->ecap)) + if (ecs_enabled(iommu)) addr |= DMA_RTADDR_RTT; raw_spin_lock_irqsave(&iommu->register_lock, flag); @@ -2969,11 +2989,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev) return __get_valid_domain_for_dev(dev); } -static int iommu_dummy(struct device *dev) -{ - return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; -} - /* Check if the dev needs to go through non-identity map and unmap process.*/ static int iommu_no_mapping(struct device *dev) { diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 9687f8afebff..1b7e155869f6 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -828,7 +828,14 @@ static int its_alloc_tables(struct its_node *its) u64 typer = readq_relaxed(its->base + GITS_TYPER); u32 ids = GITS_TYPER_DEVBITS(typer); - order = get_order((1UL << ids) * entry_size); + /* + * 'order' was initialized earlier to the default page + * granule of the ITS. We can't have an allocation + * smaller than that. If the requested allocation + * is smaller, round up to the default page granule.
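+ * Taking the max() of the two orders below keeps the larger one, + * so the table allocation never drops below one ITS page.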
+ */ + order = max(get_order((1UL << ids) * entry_size), + order); if (order >= MAX_ORDER) { order = MAX_ORDER - 1; pr_warn("%s: Device Table too large, reduce its page order to %u\n", diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 57f09cb54464..269c2354c431 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -271,7 +271,7 @@ int gic_get_c0_fdc_int(void) GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC)); } -static void gic_handle_shared_int(void) +static void gic_handle_shared_int(bool chained) { unsigned int i, intr, virq; unsigned long *pcpu_mask; @@ -299,7 +299,10 @@ static void gic_handle_shared_int(void) while (intr != gic_shared_intrs) { virq = irq_linear_revmap(gic_irq_domain, GIC_SHARED_TO_HWIRQ(intr)); - do_IRQ(virq); + if (chained) + generic_handle_irq(virq); + else + do_IRQ(virq); /* go to next pending bit */ bitmap_clear(pending, intr, 1); @@ -431,7 +434,7 @@ static struct irq_chip gic_edge_irq_controller = { #endif }; -static void gic_handle_local_int(void) +static void gic_handle_local_int(bool chained) { unsigned long pending, masked; unsigned int intr, virq; @@ -445,7 +448,10 @@ static void gic_handle_local_int(void) while (intr != GIC_NUM_LOCAL_INTRS) { virq = irq_linear_revmap(gic_irq_domain, GIC_LOCAL_TO_HWIRQ(intr)); - do_IRQ(virq); + if (chained) + generic_handle_irq(virq); + else + do_IRQ(virq); /* go to next pending bit */ bitmap_clear(&pending, intr, 1); @@ -509,13 +515,14 @@ static struct irq_chip gic_all_vpes_local_irq_controller = { static void __gic_irq_dispatch(void) { - gic_handle_local_int(); - gic_handle_shared_int(); + gic_handle_local_int(false); + gic_handle_shared_int(false); } static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc) { - __gic_irq_dispatch(); + gic_handle_local_int(true); + gic_handle_shared_int(true); } #ifdef CONFIG_MIPS_GIC_IPI diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c index 4a9ce5b50c5b..6b2b582433bd 100644 --- a/drivers/irqchip/irq-sunxi-nmi.c +++ b/drivers/irqchip/irq-sunxi-nmi.c @@ -104,7 +104,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) irqd_set_trigger_type(data, flow_type); irq_setup_alt_chip(data, flow_type); - for (i = 0; i <= gc->num_ct; i++, ct++) + for (i = 0; i < gc->num_ct; i++, ct++) if (ct->type & flow_type) ctrl_off = ct->regs.type; diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c index 1cc6ca8bfbda..85cfa4f8691f 100644 --- a/drivers/isdn/capi/capidrv.c +++ b/drivers/isdn/capi/capidrv.c @@ -2264,7 +2264,7 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp) return -1; } card->owner = THIS_MODULE; - init_timer(&card->listentimer); + setup_timer(&card->listentimer, listentimerfunc, (unsigned long)card); strcpy(card->name, id); card->contrnr = contr; card->nbchan = profp->nbchannel; @@ -2331,8 +2331,6 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp) card->cipmask = 0x1FFF03FF; /* any */ card->cipmask2 = 0; - card->listentimer.data = (unsigned long)card; - card->listentimer.function = listentimerfunc; send_listen(card); mod_timer(&card->listentimer, jiffies + 60 * HZ); diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c index ead0a4fb7448..a0fdbc074b98 100644 --- a/drivers/isdn/hisax/st5481_usb.c +++ b/drivers/isdn/hisax/st5481_usb.c @@ -267,8 +267,8 @@ int st5481_setup_usb(struct st5481_adapter *adapter) } // The descriptor is wrong for some early samples of the ST5481 chip - 
altsetting->endpoint[3].desc.wMaxPacketSize = __constant_cpu_to_le16(32); - altsetting->endpoint[4].desc.wMaxPacketSize = __constant_cpu_to_le16(32); + altsetting->endpoint[3].desc.wMaxPacketSize = cpu_to_le16(32); + altsetting->endpoint[4].desc.wMaxPacketSize = cpu_to_le16(32); // Use alternative setting 3 on interface 0 to have 2B+D if ((status = usb_set_interface(dev, 0, 3)) < 0) { diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 7dc93aa004c8..312ffd3d0017 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -173,7 +173,7 @@ static void unmap_switcher(void) bool lguest_address_ok(const struct lguest *lg, unsigned long addr, unsigned long len) { - return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); + return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr); } /* diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 63953477a07c..eff7bdd7731d 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -429,9 +429,11 @@ static int __multipath_map(struct dm_target *ti, struct request *clone, /* blk-mq request-based interface */ *__clone = blk_get_request(bdev_get_queue(bdev), rq_data_dir(rq), GFP_ATOMIC); - if (IS_ERR(*__clone)) + if (IS_ERR(*__clone)) { /* ENOMEM, requeue */ + clear_mapinfo(m, map_context); return r; + } (*__clone)->bio = (*__clone)->biotail = NULL; (*__clone)->rq_disk = bdev->bd_disk; (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index d9b00b8565c6..16ba55ad7089 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -820,6 +820,12 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args) } EXPORT_SYMBOL(dm_consume_args); +static bool __table_type_request_based(unsigned table_type) +{ + return (table_type == DM_TYPE_REQUEST_BASED || + table_type == DM_TYPE_MQ_REQUEST_BASED); +} + static int dm_table_set_type(struct dm_table *t) { unsigned i; @@ -852,8 +858,7 @@ static int dm_table_set_type(struct dm_table *t) * Determine the type from the live device. * Default to bio-based if device is new. 
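 * (Request-based here covers both DM_TYPE_REQUEST_BASED and * DM_TYPE_MQ_REQUEST_BASED; __table_type_request_based() above * checks for either.)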
*/ - if (live_md_type == DM_TYPE_REQUEST_BASED || - live_md_type == DM_TYPE_MQ_REQUEST_BASED) + if (__table_type_request_based(live_md_type)) request_based = 1; else bio_based = 1; @@ -903,7 +908,7 @@ static int dm_table_set_type(struct dm_table *t) } t->type = DM_TYPE_MQ_REQUEST_BASED; - } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) { + } else if (list_empty(devices) && __table_type_request_based(live_md_type)) { /* inherit live MD type */ t->type = live_md_type; @@ -925,10 +930,7 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t) bool dm_table_request_based(struct dm_table *t) { - unsigned table_type = dm_table_get_type(t); - - return (table_type == DM_TYPE_REQUEST_BASED || - table_type == DM_TYPE_MQ_REQUEST_BASED); + return __table_type_request_based(dm_table_get_type(t)); } bool dm_table_mq_request_based(struct dm_table *t) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index a930b72314ac..2caf492890d6 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1082,13 +1082,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) dm_put(md); } -static void free_rq_clone(struct request *clone, bool must_be_mapped) +static void free_rq_clone(struct request *clone) { struct dm_rq_target_io *tio = clone->end_io_data; struct mapped_device *md = tio->md; - WARN_ON_ONCE(must_be_mapped && !clone->q); - blk_rq_unprep_clone(clone); if (md->type == DM_TYPE_MQ_REQUEST_BASED) @@ -1132,7 +1130,7 @@ static void dm_end_request(struct request *clone, int error) rq->sense_len = clone->sense_len; } - free_rq_clone(clone, true); + free_rq_clone(clone); if (!rq->q->mq_ops) blk_end_request_all(rq, error); else @@ -1151,7 +1149,7 @@ static void dm_unprep_request(struct request *rq) } if (clone) - free_rq_clone(clone, false); + free_rq_clone(clone); } /* @@ -1164,6 +1162,7 @@ static void old_requeue_request(struct request *rq) spin_lock_irqsave(q->queue_lock, flags); blk_requeue_request(q, rq); + blk_run_queue_async(q); spin_unlock_irqrestore(q->queue_lock, flags); } @@ -1724,8 +1723,7 @@ static int dm_merge_bvec(struct request_queue *q, struct mapped_device *md = q->queuedata; struct dm_table *map = dm_get_live_table_fast(md); struct dm_target *ti; - sector_t max_sectors; - int max_size = 0; + sector_t max_sectors, max_size = 0; if (unlikely(!map)) goto out; @@ -1740,8 +1738,16 @@ static int dm_merge_bvec(struct request_queue *q, max_sectors = min(max_io_len(bvm->bi_sector, ti), (sector_t) queue_max_sectors(q)); max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; - if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */ - max_size = 0; + + /* + * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t + * to the targets' merge function since it holds sectors not bytes). + * Just doing this as an interim fix for stable@ because the more + * comprehensive cleanup of switching to sector_t will impact every + * DM target that implements a ->merge hook. 
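+ * Until then, clamping max_size to INT_MAX below keeps the (int) + * cast at the ->merge call from truncating the value.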
+ */ + if (max_size > INT_MAX) + max_size = INT_MAX; /* * merge_bvec_fn() returns number of bytes @@ -1749,7 +1755,7 @@ static int dm_merge_bvec(struct request_queue *q, * max is precomputed maximal io size */ if (max_size && ti->type->merge) - max_size = ti->type->merge(ti, bvm, biovec, max_size); + max_size = ti->type->merge(ti, bvm, biovec, (int) max_size); /* * If the target doesn't support merge method and some of the devices * provided their merge_bvec method (we know this by looking for the @@ -1971,8 +1977,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq, dm_kill_unmapped_request(rq, r); return r; } - if (IS_ERR(clone)) - return DM_MAPIO_REQUEUE; + if (r != DM_MAPIO_REMAPPED) + return r; if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { /* -ENOMEM */ ti->type->release_clone_rq(clone); @@ -2753,13 +2759,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) { /* clone request is allocated at the end of the pdu */ tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io); - if (!clone_rq(rq, md, tio, GFP_ATOMIC)) - return BLK_MQ_RQ_QUEUE_BUSY; + (void) clone_rq(rq, md, tio, GFP_ATOMIC); queue_kthread_work(&md->kworker, &tio->work); } else { /* Direct call is fine since .queue_rq allows allocations */ - if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) - dm_requeue_unmapped_original_request(md, rq); + if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) { + /* Undo dm_start_request() before requeuing */ + rq_completed(md, rq_data_dir(rq), false); + return BLK_MQ_RQ_QUEUE_BUSY; + } } return BLK_MQ_RQ_QUEUE_OK; diff --git a/drivers/md/md.c b/drivers/md/md.c index 593a02476c78..4dbed4a67aaf 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3834,7 +3834,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) err = -EBUSY; } spin_unlock(&mddev->lock); - return err; + return err ?: len; } err = mddev_lock(mddev); if (err) @@ -4211,34 +4211,36 @@ action_store(struct mddev *mddev, const char *page, size_t len) if (!mddev->pers || !mddev->pers->sync_request) return -EINVAL; - if (cmd_match(page, "frozen")) - set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - else - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { - flush_workqueue(md_misc_wq); - if (mddev->sync_thread) { - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - if (mddev_lock(mddev) == 0) { + if (cmd_match(page, "frozen")) + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + else + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && + mddev_lock(mddev) == 0) { + flush_workqueue(md_misc_wq); + if (mddev->sync_thread) { + set_bit(MD_RECOVERY_INTR, &mddev->recovery); md_reap_sync_thread(mddev); - mddev_unlock(mddev); } + mddev_unlock(mddev); } } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) return -EBUSY; else if (cmd_match(page, "resync")) - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); else if (cmd_match(page, "recover")) { + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } else if (cmd_match(page, "reshape")) { int err; if (mddev->pers->start_reshape == NULL) return -EINVAL; err = mddev_lock(mddev); if (!err) { + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); err = mddev->pers->start_reshape(mddev); 
mddev_unlock(mddev); } @@ -4250,6 +4252,7 @@ action_store(struct mddev *mddev, const char *page, size_t len) set_bit(MD_RECOVERY_CHECK, &mddev->recovery); else if (!cmd_match(page, "repair")) return -EINVAL; + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); set_bit(MD_RECOVERY_SYNC, &mddev->recovery); } @@ -8259,6 +8262,7 @@ void md_reap_sync_thread(struct mddev *mddev) if (mddev_is_clustered(mddev)) md_cluster_ops->metadata_update_finish(mddev); clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + clear_bit(MD_RECOVERY_DONE, &mddev->recovery); clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index e793ab6b3570..f55c3f35b746 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -4156,6 +4156,7 @@ static int raid10_start_reshape(struct mddev *mddev) clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + clear_bit(MD_RECOVERY_DONE, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index b9f2b9cc6060..b6793d2e051f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -749,6 +749,7 @@ static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) static bool stripe_can_batch(struct stripe_head *sh) { return test_bit(STRIPE_BATCH_READY, &sh->state) && + !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && is_full_stripe_write(sh); } @@ -837,6 +838,15 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh < IO_THRESHOLD) md_wakeup_thread(conf->mddev->thread); + if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { + int seq = sh->bm_seq; + if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && + sh->batch_head->bm_seq > seq) + seq = sh->batch_head->bm_seq; + set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); + sh->batch_head->bm_seq = seq; + } + atomic_inc(&sh->count); unlock_out: unlock_two_stripes(head, sh); @@ -2987,14 +2997,32 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", (unsigned long long)(*bip)->bi_iter.bi_sector, (unsigned long long)sh->sector, dd_idx); - spin_unlock_irq(&sh->stripe_lock); if (conf->mddev->bitmap && firstwrite) { + /* Cannot hold spinlock over bitmap_startwrite, + * but must ensure this isn't added to a batch until + * we have added to the bitmap and set bm_seq. + * So set STRIPE_BITMAP_PENDING to prevent + * batching. + * If multiple add_stripe_bio() calls race here they + * must all set STRIPE_BITMAP_PENDING. So only the first one + * to complete "bitmap_startwrite" gets to set + * STRIPE_BIT_DELAY. This is important as once a stripe + * is added to a batch, STRIPE_BIT_DELAY cannot be changed + * any more.
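+ * stripe_can_batch() refuses stripes with STRIPE_BITMAP_PENDING set, + * so the stripe stays out of any batch until the flag is cleared + * again below.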
+ */ + set_bit(STRIPE_BITMAP_PENDING, &sh->state); + spin_unlock_irq(&sh->stripe_lock); bitmap_startwrite(conf->mddev->bitmap, sh->sector, STRIPE_SECTORS, 0); - sh->bm_seq = conf->seq_flush+1; - set_bit(STRIPE_BIT_DELAY, &sh->state); + spin_lock_irq(&sh->stripe_lock); + clear_bit(STRIPE_BITMAP_PENDING, &sh->state); + if (!sh->batch_head) { + sh->bm_seq = conf->seq_flush+1; + set_bit(STRIPE_BIT_DELAY, &sh->state); + } } + spin_unlock_irq(&sh->stripe_lock); if (stripe_can_batch(sh)) stripe_add_to_batch_list(conf, sh); @@ -3392,6 +3420,8 @@ static void handle_stripe_fill(struct stripe_head *sh, set_bit(STRIPE_HANDLE, &sh->state); } +static void break_stripe_batch_list(struct stripe_head *head_sh, + unsigned long handle_flags); /* handle_stripe_clean_event * any written block on an uptodate or failed drive can be returned. * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but @@ -3405,7 +3435,6 @@ static void handle_stripe_clean_event(struct r5conf *conf, int discard_pending = 0; struct stripe_head *head_sh = sh; bool do_endio = false; - int wakeup_nr = 0; for (i = disks; i--; ) if (sh->dev[i].written) { @@ -3494,44 +3523,8 @@ static void handle_stripe_clean_event(struct r5conf *conf, if (atomic_dec_and_test(&conf->pending_full_writes)) md_wakeup_thread(conf->mddev->thread); - if (!head_sh->batch_head || !do_endio) - return; - for (i = 0; i < head_sh->disks; i++) { - if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) - wakeup_nr++; - } - while (!list_empty(&head_sh->batch_list)) { - int i; - sh = list_first_entry(&head_sh->batch_list, - struct stripe_head, batch_list); - list_del_init(&sh->batch_list); - - set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, - head_sh->state & ~((1 << STRIPE_ACTIVE) | - (1 << STRIPE_PREREAD_ACTIVE) | - STRIPE_EXPAND_SYNC_FLAG)); - sh->check_state = head_sh->check_state; - sh->reconstruct_state = head_sh->reconstruct_state; - for (i = 0; i < sh->disks; i++) { - if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) - wakeup_nr++; - sh->dev[i].flags = head_sh->dev[i].flags; - } - - spin_lock_irq(&sh->stripe_lock); - sh->batch_head = NULL; - spin_unlock_irq(&sh->stripe_lock); - if (sh->state & STRIPE_EXPAND_SYNC_FLAG) - set_bit(STRIPE_HANDLE, &sh->state); - release_stripe(sh); - } - - spin_lock_irq(&head_sh->stripe_lock); - head_sh->batch_head = NULL; - spin_unlock_irq(&head_sh->stripe_lock); - wake_up_nr(&conf->wait_for_overlap, wakeup_nr); - if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG) - set_bit(STRIPE_HANDLE, &head_sh->state); + if (head_sh->batch_head && do_endio) + break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); } static void handle_stripe_dirtying(struct r5conf *conf, @@ -4172,9 +4165,13 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) static int clear_batch_ready(struct stripe_head *sh) { + /* Return '1' if this is a member of batch, or + * '0' if it is a lone stripe or a head which can now be + * handled. 
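+ * handle_stripe() uses this to back off from batch members and + * leave them to be processed via the batch head.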
+ */ struct stripe_head *tmp; if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) - return 0; + return (sh->batch_head && sh->batch_head != sh); spin_lock(&sh->stripe_lock); if (!sh->batch_head) { spin_unlock(&sh->stripe_lock); @@ -4202,38 +4199,65 @@ static int clear_batch_ready(struct stripe_head *sh) return 0; } -static void check_break_stripe_batch_list(struct stripe_head *sh) +static void break_stripe_batch_list(struct stripe_head *head_sh, + unsigned long handle_flags) { - struct stripe_head *head_sh, *next; + struct stripe_head *sh, *next; int i; - - if (!test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) - return; - - head_sh = sh; + int do_wakeup = 0; list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { list_del_init(&sh->batch_list); - set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, - head_sh->state & ~((1 << STRIPE_ACTIVE) | - (1 << STRIPE_PREREAD_ACTIVE) | - (1 << STRIPE_DEGRADED) | - STRIPE_EXPAND_SYNC_FLAG)); + WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | + (1 << STRIPE_SYNCING) | + (1 << STRIPE_REPLACED) | + (1 << STRIPE_PREREAD_ACTIVE) | + (1 << STRIPE_DELAYED) | + (1 << STRIPE_BIT_DELAY) | + (1 << STRIPE_FULL_WRITE) | + (1 << STRIPE_BIOFILL_RUN) | + (1 << STRIPE_COMPUTE_RUN) | + (1 << STRIPE_OPS_REQ_PENDING) | + (1 << STRIPE_DISCARD) | + (1 << STRIPE_BATCH_READY) | + (1 << STRIPE_BATCH_ERR) | + (1 << STRIPE_BITMAP_PENDING))); + WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) | + (1 << STRIPE_REPLACED))); + + set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | + (1 << STRIPE_DEGRADED)), + head_sh->state & (1 << STRIPE_INSYNC)); + sh->check_state = head_sh->check_state; sh->reconstruct_state = head_sh->reconstruct_state; - for (i = 0; i < sh->disks; i++) + for (i = 0; i < sh->disks; i++) { + if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) + do_wakeup = 1; sh->dev[i].flags = head_sh->dev[i].flags & (~((1 << R5_WriteError) | (1 << R5_Overlap))); - + } spin_lock_irq(&sh->stripe_lock); sh->batch_head = NULL; spin_unlock_irq(&sh->stripe_lock); - - set_bit(STRIPE_HANDLE, &sh->state); + if (handle_flags == 0 || + sh->state & handle_flags) + set_bit(STRIPE_HANDLE, &sh->state); release_stripe(sh); } + spin_lock_irq(&head_sh->stripe_lock); + head_sh->batch_head = NULL; + spin_unlock_irq(&head_sh->stripe_lock); + for (i = 0; i < head_sh->disks; i++) + if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) + do_wakeup = 1; + if (head_sh->state & handle_flags) + set_bit(STRIPE_HANDLE, &head_sh->state); + + if (do_wakeup) + wake_up(&head_sh->raid_conf->wait_for_overlap); } static void handle_stripe(struct stripe_head *sh) @@ -4258,7 +4282,8 @@ static void handle_stripe(struct stripe_head *sh) return; } - check_break_stripe_batch_list(sh); + if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) + break_stripe_batch_list(sh, 0); if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { spin_lock(&sh->stripe_lock); @@ -4312,6 +4337,7 @@ static void handle_stripe(struct stripe_head *sh) if (s.failed > conf->max_degraded) { sh->check_state = 0; sh->reconstruct_state = 0; + break_stripe_batch_list(sh, 0); if (s.to_read+s.to_write+s.written) handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); if (s.syncing + s.replacing) @@ -7328,6 +7354,7 @@ static int raid5_start_reshape(struct mddev *mddev) clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + clear_bit(MD_RECOVERY_DONE, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 
mddev->sync_thread = md_register_thread(md_do_sync, mddev, diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 7dc0dd86074b..896d603ad0da 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -337,9 +337,12 @@ enum { STRIPE_ON_RELEASE_LIST, STRIPE_BATCH_READY, STRIPE_BATCH_ERR, + STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add + * to batch yet. + */ }; -#define STRIPE_EXPAND_SYNC_FLAG \ +#define STRIPE_EXPAND_SYNC_FLAGS \ ((1 << STRIPE_EXPAND_SOURCE) |\ (1 << STRIPE_EXPAND_READY) |\ (1 << STRIPE_EXPANDING) |\ diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index 3ef0f90b128f..157099243d61 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -97,6 +97,7 @@ config MEDIA_CONTROLLER config MEDIA_CONTROLLER_DVB bool "Enable Media controller for DVB" depends on MEDIA_CONTROLLER + depends on BROKEN ---help--- Enable the media controller API support for DVB. diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c index ae498b53ee40..46e3840c7a37 100644 --- a/drivers/mfd/da9052-core.c +++ b/drivers/mfd/da9052-core.c @@ -431,6 +431,10 @@ int da9052_adc_read_temp(struct da9052 *da9052) EXPORT_SYMBOL_GPL(da9052_adc_read_temp); static const struct mfd_cell da9052_subdev_info[] = { + { + .name = "da9052-regulator", + .id = 0, + }, { .name = "da9052-regulator", .id = 1, @@ -483,10 +487,6 @@ static const struct mfd_cell da9052_subdev_info[] = { .name = "da9052-regulator", .id = 13, }, - { - .name = "da9052-regulator", - .id = 14, - }, { .name = "da9052-onkey", }, diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2268438f3f63..19eb990d398c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3059,8 +3059,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph)))) return false; iph = ip_hdr(skb); - fk->addrs.src = iph->saddr; - fk->addrs.dst = iph->daddr; + iph_to_flow_copy_v4addrs(fk, iph); noff += iph->ihl << 2; if (!ip_is_fragment(iph)) proto = iph->protocol; @@ -3068,8 +3067,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6)))) return false; iph6 = ipv6_hdr(skb); - fk->addrs.src = (__force __be32)ipv6_addr_hash(&iph6->saddr); - fk->addrs.dst = (__force __be32)ipv6_addr_hash(&iph6->daddr); + iph_to_flow_copy_v6addrs(fk, iph6); noff += sizeof(*iph6); proto = iph6->nexthdr; } else { @@ -3103,7 +3101,8 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) hash = bond_eth_hash(skb); else hash = (__force u32)flow.ports.ports; - hash ^= (__force u32)flow.addrs.dst ^ (__force u32)flow.addrs.src; + hash ^= (__force u32)flow_get_u32_dst(&flow) ^ + (__force u32)flow_get_u32_src(&flow); hash ^= (hash >> 16); hash ^= (hash >> 8); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 7fba330ce702..39530fa142b0 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -703,8 +703,8 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, u32 high = 0; if (s->reg >= 0x100) { - ret = mv88e6xxx_reg_read(ds, REG_PORT(port), - s->reg - 0x100); + ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), + s->reg - 0x100); if (ret < 0) goto error; low = ret; diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index eadcb053807e..9a8308553520 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -34,6 +34,7 @@ source "drivers/net/ethernet/adi/Kconfig" 
source "drivers/net/ethernet/broadcom/Kconfig" source "drivers/net/ethernet/brocade/Kconfig" source "drivers/net/ethernet/calxeda/Kconfig" +source "drivers/net/ethernet/cavium/Kconfig" source "drivers/net/ethernet/chelsio/Kconfig" source "drivers/net/ethernet/cirrus/Kconfig" source "drivers/net/ethernet/cisco/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 1367afcd0a8b..4395d99115a0 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_NET_BFIN) += adi/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ +obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/ obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index c75204909702..1e9c28d19ef8 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -424,7 +424,7 @@ static void xgbe_tx_timer(unsigned long data) if (napi_schedule_prep(napi)) { /* Disable Tx and Rx interrupts */ if (pdata->per_channel_irq) - disable_irq(channel->dma_irq); + disable_irq_nosync(channel->dma_irq); else xgbe_disable_rx_tx_ints(pdata); diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 77363d680532..a3b1c07ae0af 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -2464,6 +2464,7 @@ static int b44_init_one(struct ssb_device *sdev, ssb_bus_may_powerdown(sdev->bus); err_out_free_dev: + netif_napi_del(&bp->napi); free_netdev(dev); out: @@ -2480,6 +2481,7 @@ static void b44_remove_one(struct ssb_device *sdev) b44_unregister_phy_one(bp); ssb_device_disable(sdev, 0); ssb_bus_may_powerdown(sdev->bus); + netif_napi_del(&bp->napi); free_netdev(dev); ssb_pcihost_set_power_state(sdev, PCI_D3hot); ssb_set_drvdata(sdev, NULL); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 084a50a555de..909ad7a0d480 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -524,67 +524,70 @@ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) dma_unmap_addr_set(cb, dma_addr, 0); } -static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, - struct bcm_sysport_cb *cb) +static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, + struct bcm_sysport_cb *cb) { struct device *kdev = &priv->pdev->dev; struct net_device *ndev = priv->netdev; + struct sk_buff *skb, *rx_skb; dma_addr_t mapping; - int ret; - cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); - if (!cb->skb) { + /* Allocate a new SKB for a new packet */ + skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); + if (!skb) { + priv->mib.alloc_rx_buff_failed++; netif_err(priv, rx_err, ndev, "SKB alloc failed\n"); - return -ENOMEM; + return NULL; } - mapping = dma_map_single(kdev, cb->skb->data, + mapping = dma_map_single(kdev, skb->data, RX_BUF_LENGTH, DMA_FROM_DEVICE); - ret = dma_mapping_error(kdev, mapping); - if (ret) { + if (dma_mapping_error(kdev, mapping)) { priv->mib.rx_dma_failed++; - bcm_sysport_free_cb(cb); + dev_kfree_skb_any(skb); netif_err(priv, rx_err, ndev, "DMA mapping failure\n"); - return ret; + return NULL; } - dma_unmap_addr_set(cb, dma_addr, mapping); - dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping); + 
/* Grab the current SKB on the ring */ + rx_skb = cb->skb; + if (likely(rx_skb)) + dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), + RX_BUF_LENGTH, DMA_FROM_DEVICE); - priv->rx_bd_assign_index++; - priv->rx_bd_assign_index &= (priv->num_rx_bds - 1); - priv->rx_bd_assign_ptr = priv->rx_bds + - (priv->rx_bd_assign_index * DESC_SIZE); + /* Put the new SKB on the ring */ + cb->skb = skb; + dma_unmap_addr_set(cb, dma_addr, mapping); + dma_desc_set_addr(priv, cb->bd_addr, mapping); netif_dbg(priv, rx_status, ndev, "RX refill\n"); - return 0; + /* Return the current SKB to the caller */ + return rx_skb; } static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv) { struct bcm_sysport_cb *cb; - int ret = 0; + struct sk_buff *skb; unsigned int i; for (i = 0; i < priv->num_rx_bds; i++) { - cb = &priv->rx_cbs[priv->rx_bd_assign_index]; - if (cb->skb) - continue; - - ret = bcm_sysport_rx_refill(priv, cb); - if (ret) - break; + cb = &priv->rx_cbs[i]; + skb = bcm_sysport_rx_refill(priv, cb); + if (skb) + dev_kfree_skb(skb); + if (!cb->skb) + return -ENOMEM; } - return ret; + return 0; } /* Poll the hardware for up to budget packets to process */ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, unsigned int budget) { - struct device *kdev = &priv->pdev->dev; struct net_device *ndev = priv->netdev; unsigned int processed = 0, to_process; struct bcm_sysport_cb *cb; @@ -592,7 +595,6 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, unsigned int p_index; u16 len, status; struct bcm_rsb *rsb; - int ret; /* Determine how much we should process since last call */ p_index = rdma_readl(priv, RDMA_PROD_INDEX); @@ -610,13 +612,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, while ((processed < to_process) && (processed < budget)) { cb = &priv->rx_cbs[priv->rx_read_ptr]; - skb = cb->skb; + skb = bcm_sysport_rx_refill(priv, cb); - processed++; - priv->rx_read_ptr++; - - if (priv->rx_read_ptr == priv->num_rx_bds) - priv->rx_read_ptr = 0; /* We do not have a backing SKB, so we do not have a corresponding * DMA mapping for this incoming packet since @@ -627,12 +624,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, netif_err(priv, rx_err, ndev, "out of memory!\n"); ndev->stats.rx_dropped++; ndev->stats.rx_errors++; - goto refill; + goto next; } - dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), - RX_BUF_LENGTH, DMA_FROM_DEVICE); - /* Extract the Receive Status Block prepended */ rsb = (struct bcm_rsb *)skb->data; len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK; @@ -644,12 +638,20 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, p_index, priv->rx_c_index, priv->rx_read_ptr, len, status); + if (unlikely(len > RX_BUF_LENGTH)) { + netif_err(priv, rx_status, ndev, "oversized packet\n"); + ndev->stats.rx_length_errors++; + ndev->stats.rx_errors++; + dev_kfree_skb_any(skb); + goto next; + } + if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { netif_err(priv, rx_status, ndev, "fragmented packet!\n"); ndev->stats.rx_dropped++; ndev->stats.rx_errors++; - bcm_sysport_free_cb(cb); - goto refill; + dev_kfree_skb_any(skb); + goto next; } if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) { @@ -658,8 +660,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ndev->stats.rx_over_errors++; ndev->stats.rx_dropped++; ndev->stats.rx_errors++; - bcm_sysport_free_cb(cb); - goto refill; + dev_kfree_skb_any(skb); + goto next; } skb_put(skb, len); @@ 
-686,10 +688,12 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ndev->stats.rx_bytes += len; napi_gro_receive(&priv->napi, skb); -refill: - ret = bcm_sysport_rx_refill(priv, cb); - if (ret) - priv->mib.alloc_rx_buff_failed++; +next: + processed++; + priv->rx_read_ptr++; + + if (priv->rx_read_ptr == priv->num_rx_bds) + priv->rx_read_ptr = 0; } return processed; @@ -1330,14 +1334,14 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv, static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) { + struct bcm_sysport_cb *cb; u32 reg; int ret; + int i; /* Initialize SW view of the RX ring */ priv->num_rx_bds = NUM_RX_DESC; priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET; - priv->rx_bd_assign_ptr = priv->rx_bds; - priv->rx_bd_assign_index = 0; priv->rx_c_index = 0; priv->rx_read_ptr = 0; priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb), @@ -1347,6 +1351,11 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) return -ENOMEM; } + for (i = 0; i < priv->num_rx_bds; i++) { + cb = priv->rx_cbs + i; + cb->bd_addr = priv->rx_bds + i * DESC_SIZE; + } + ret = bcm_sysport_alloc_rx_bufs(priv); if (ret) { netif_err(priv, hw, priv->netdev, "SKB allocation failed\n"); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 42a4b4a0bc14..f28bf545d7f4 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -663,8 +663,6 @@ struct bcm_sysport_priv { /* Receive queue */ void __iomem *rx_bds; - void __iomem *rx_bd_assign_ptr; - unsigned int rx_bd_assign_index; struct bcm_sysport_cb *rx_cbs; unsigned int num_rx_bds; unsigned int rx_read_ptr; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index a3b0f7a0c61e..7a4aaa3c01b6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -357,6 +357,7 @@ struct sw_tx_bd { struct sw_rx_page { struct page *page; DEFINE_DMA_UNMAP_ADDR(mapping); + unsigned int offset; }; union db_prod { @@ -381,9 +382,10 @@ union db_prod { #define PAGES_PER_SGE_SHIFT 0 #define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) -#define SGE_PAGE_SIZE PAGE_SIZE -#define SGE_PAGE_SHIFT PAGE_SHIFT -#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) +#define SGE_PAGE_SHIFT 12 +#define SGE_PAGE_SIZE (1 << SGE_PAGE_SHIFT) +#define SGE_PAGE_MASK (~(SGE_PAGE_SIZE - 1)) +#define SGE_PAGE_ALIGN(addr) (((addr) + SGE_PAGE_SIZE - 1) & SGE_PAGE_MASK) #define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE) #define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \ SGE_PAGES), 0xffff) @@ -526,6 +528,12 @@ enum bnx2x_tpa_mode_t { TPA_MODE_GRO }; +struct bnx2x_alloc_pool { + struct page *page; + dma_addr_t dma; + unsigned int offset; +}; + struct bnx2x_fastpath { struct bnx2x *bp; /* parent */ @@ -599,6 +607,8 @@ struct bnx2x_fastpath { 4 (for the digits and to make it DWORD aligned) */ #define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) char name[FP_NAME_SIZE]; + + struct bnx2x_alloc_pool page_pool; }; #define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var) @@ -1774,7 +1784,7 @@ struct bnx2x { int stats_state; /* used for synchronization of concurrent threads statistics handling */ - struct mutex stats_lock; + struct semaphore stats_lock; /* used by dmae command loader */ struct dmae_command stats_dmae; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 2ef202d10948..e2a65334708d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -544,30 +544,49 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags, static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, u16 index, gfp_t gfp_mask) { - struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + struct bnx2x_alloc_pool *pool = &fp->page_pool; dma_addr_t mapping; - if (unlikely(page == NULL)) { - BNX2X_ERR("Can't alloc sge\n"); - return -ENOMEM; - } + if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) { - mapping = dma_map_page(&bp->pdev->dev, page, 0, - SGE_PAGES, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - __free_pages(page, PAGES_PER_SGE_SHIFT); - BNX2X_ERR("Can't map sge\n"); - return -ENOMEM; + /* put page reference used by the memory pool, since we + * won't be using this page as the mempool anymore. + */ + if (pool->page) + put_page(pool->page); + + pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); + if (unlikely(!pool->page)) { + BNX2X_ERR("Can't alloc sge\n"); + return -ENOMEM; + } + + pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, + pool->dma))) { + __free_pages(pool->page, PAGES_PER_SGE_SHIFT); + pool->page = NULL; + BNX2X_ERR("Can't map sge\n"); + return -ENOMEM; + } + pool->offset = 0; } - sw_buf->page = page; + get_page(pool->page); + sw_buf->page = pool->page; + sw_buf->offset = pool->offset; + + mapping = pool->dma + sw_buf->offset; dma_unmap_addr_set(sw_buf, mapping, mapping); sge->addr_hi = cpu_to_le32(U64_HI(mapping)); sge->addr_lo = cpu_to_le32(U64_LO(mapping)); + pool->offset += SGE_PAGE_SIZE; + return 0; } @@ -629,20 +648,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, return err; } - /* Unmap the page as we're going to pass it to the stack */ - dma_unmap_page(&bp->pdev->dev, - dma_unmap_addr(&old_rx_pg, mapping), - SGE_PAGES, DMA_FROM_DEVICE); + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(&old_rx_pg, mapping), + SGE_PAGE_SIZE, DMA_FROM_DEVICE); /* Add one frag and update the appropriate fields in the skb */ if (fp->mode == TPA_MODE_LRO) - skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); + skb_fill_page_desc(skb, j, old_rx_pg.page, + old_rx_pg.offset, frag_len); else { /* GRO */ int rem; int offset = 0; for (rem = frag_len; rem > 0; rem -= gro_size) { int len = rem > gro_size ? gro_size : rem; skb_fill_page_desc(skb, frag_id++, - old_rx_pg.page, offset, len); + old_rx_pg.page, + old_rx_pg.offset + offset, + len); if (offset) get_page(old_rx_pg.page); offset += len; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index d7a71758e876..2b30081ec26d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -804,9 +804,13 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp, if (!page) return; - dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), - SGE_PAGES, DMA_FROM_DEVICE); - __free_pages(page, PAGES_PER_SGE_SHIFT); + /* Since many fragments can share the same page, make sure to + * only unmap and free the page once. 
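+ * Each SGE fragment holds its own reference on the pool page (taken + * with get_page() in bnx2x_alloc_rx_sge()), so the put_page() below + * frees the page only after the last fragment using it is released.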
+ */ + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), + SGE_PAGE_SIZE, DMA_FROM_DEVICE); + + put_page(page); sw_buf->page = NULL; sge->addr_hi = 0; @@ -964,6 +968,25 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, ((u8 *)fw_lo)[1] = mac[4]; } +static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp, + struct bnx2x_alloc_pool *pool) +{ + if (!pool->page) + return; + + /* Page was not fully fragmented. Unmap unused space */ + if (pool->offset < PAGE_SIZE) { + dma_addr_t dma = pool->dma + pool->offset; + int size = PAGE_SIZE - pool->offset; + + dma_unmap_single(&bp->pdev->dev, dma, size, DMA_FROM_DEVICE); + } + + put_page(pool->page); + + pool->page = NULL; +} + static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, struct bnx2x_fastpath *fp, int last) { @@ -974,6 +997,8 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, for (i = 0; i < last; i++) bnx2x_free_rx_sge(bp, fp, i); + + bnx2x_free_rx_mem_pool(bp, &fp->page_pool); } static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index fd52ce95127e..33501bcddc48 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12054,7 +12054,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) mutex_init(&bp->port.phy_mutex); mutex_init(&bp->fw_mb_mutex); mutex_init(&bp->drv_info_mutex); - mutex_init(&bp->stats_lock); + sema_init(&bp->stats_lock, 1); bp->drv_info_mng_owner = false; INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); @@ -13690,9 +13690,10 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) cancel_delayed_work_sync(&bp->sp_task); cancel_delayed_work_sync(&bp->period_task); - mutex_lock(&bp->stats_lock); - bp->stats_state = STATS_STATE_DISABLED; - mutex_unlock(&bp->stats_lock); + if (!down_timeout(&bp->stats_lock, HZ / 10)) { + bp->stats_state = STATS_STATE_DISABLED; + up(&bp->stats_lock); + } bnx2x_save_statistics(bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 266b055c2360..69d699f0730a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1372,19 +1372,23 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) * that context in case someone is in the middle of a transition. * For other events, wait a bit until lock is taken. */ - if (!mutex_trylock(&bp->stats_lock)) { + if (down_trylock(&bp->stats_lock)) { if (event == STATS_EVENT_UPDATE) return; DP(BNX2X_MSG_STATS, "Unlikely stats' lock contention [event %d]\n", event); - mutex_lock(&bp->stats_lock); + if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) { + BNX2X_ERR("Failed to take stats lock [event %d]\n", + event); + return; + } } bnx2x_stats_stm[state][event].action(bp); bp->stats_state = bnx2x_stats_stm[state][event].next_state; - mutex_unlock(&bp->stats_lock); + up(&bp->stats_lock); if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", @@ -1970,7 +1974,11 @@ int bnx2x_stats_safe_exec(struct bnx2x *bp, /* Wait for statistics to end [while blocking further requests], * then run supplied function 'safely'. 
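 * Taking the semaphore with a timeout means this helper can fail; * callers must check the return value rather than assume the lock * was taken.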
*/ - mutex_lock(&bp->stats_lock); + rc = down_timeout(&bp->stats_lock, HZ / 10); + if (unlikely(rc)) { + BNX2X_ERR("Failed to take statistics lock for safe execution\n"); + goto out_no_lock; + } bnx2x_stats_comp(bp); while (bp->stats_pending && cnt--) @@ -1988,7 +1996,7 @@ int bnx2x_stats_safe_exec(struct bnx2x *bp, /* No need to restart statistics - if they're enabled, the timer * will restart the statistics. */ - mutex_unlock(&bp->stats_lock); - + up(&bp->stats_lock); +out_no_lock: return rc; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 6043734ea613..b43b2cb9b830 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2770,12 +2770,79 @@ static int bcmgenet_close(struct net_device *dev) return ret; } +static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = ring->priv; + u32 p_index, c_index, intsts, intmsk; + struct netdev_queue *txq; + unsigned int free_bds; + unsigned long flags; + bool txq_stopped; + + if (!netif_msg_tx_err(priv)) + return; + + txq = netdev_get_tx_queue(priv->dev, ring->queue); + + spin_lock_irqsave(&ring->lock, flags); + if (ring->index == DESC_INDEX) { + intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; + } else { + intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = 1 << ring->index; + } + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); + p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); + txq_stopped = netif_tx_queue_stopped(txq); + free_bds = ring->free_bds; + spin_unlock_irqrestore(&ring->lock, flags); + + netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" + "TX queue status: %s, interrupts: %s\n" + "(sw)free_bds: %d (sw)size: %d\n" + "(sw)p_index: %d (hw)p_index: %d\n" + "(sw)c_index: %d (hw)c_index: %d\n" + "(sw)clean_p: %d (sw)write_p: %d\n" + "(sw)cb_ptr: %d (sw)end_ptr: %d\n", + ring->index, ring->queue, + txq_stopped ? "stopped" : "active", + intsts & intmsk ? 
"enabled" : "disabled", + free_bds, ring->size, + ring->prod_index, p_index & DMA_P_INDEX_MASK, + ring->c_index, c_index & DMA_C_INDEX_MASK, + ring->clean_ptr, ring->write_ptr, + ring->cb_ptr, ring->end_ptr); +} + static void bcmgenet_timeout(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); + u32 int0_enable = 0; + u32 int1_enable = 0; + unsigned int q; netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); + bcmgenet_disable_tx_napi(priv); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + bcmgenet_dump_tx_queue(&priv->tx_rings[q]); + bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); + + bcmgenet_tx_reclaim_all(dev); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + int1_enable |= (1 << q); + + int0_enable = UMAC_IRQ_TXDMA_DONE; + + /* Re-enable TX interrupts if disabled */ + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); + + bcmgenet_enable_tx_napi(priv); + dev->trans_start = jiffies; dev->stats.tx_errors++; diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index e7651b3c6c57..420949cc55aa 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -299,9 +299,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) phy_name = "external RGMII (no delay)"; else phy_name = "external RGMII (TX delay)"; - reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); - reg |= RGMII_MODE_EN | id_mode_dis; - bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); break; @@ -310,6 +307,15 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) return -EINVAL; } + /* This is an external PHY (xMII), so we need to enable the RGMII + * block for the interface to work + */ + if (priv->ext_phy) { + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg |= RGMII_MODE_EN | id_mode_dis; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + } + if (init) dev_info(kdev, "configuring instance for %s\n", phy_name); diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 594a2ab36d31..68f3c13c9ef6 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -2414,7 +2414,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type, if (status == BFA_STATUS_OK) bfa_ioc_lpu_start(ioc); else - bfa_nw_iocpf_timeout(ioc); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); return status; } @@ -3029,7 +3029,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc) } if (ioc->iocpf.poll_time >= BFA_IOC_TOV) { - bfa_nw_iocpf_timeout(ioc); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); } else { ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; mod_timer(&ioc->iocpf_timer, jiffies + diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 37072a83f9d6..caae6cb2bc1a 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -3701,10 +3701,6 @@ bnad_pci_probe(struct pci_dev *pdev, setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, ((unsigned long)bnad)); - /* Now start the timer before calling IOC */ - mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer, - jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); - /* * Start the chip * If the call back comes with error, we bail out. 
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c index ebf462d8082f..badea368bdc8 100644 --- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c +++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c @@ -30,6 +30,7 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image, u32 *bfi_image_size, char *fw_name) { const struct firmware *fw; + u32 n; if (request_firmware(&fw, fw_name, &pdev->dev)) { pr_alert("Can't locate firmware %s\n", fw_name); @@ -40,6 +41,12 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image, *bfi_image_size = fw->size/sizeof(u32); bfi_fw = fw; + /* Convert the loaded firmware to host order, as it is stored in the + * file as a sequence of LE32 integers. + */ + for (n = 0; n < *bfi_image_size; n++) + le32_to_cpus(*bfi_image + n); + return *bfi_image; error: return NULL; diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig new file mode 100644 index 000000000000..fc3d8e3ee807 --- /dev/null +++ b/drivers/net/ethernet/cavium/Kconfig @@ -0,0 +1,40 @@ +# +# Cavium ethernet device configuration +# + +config NET_VENDOR_CAVIUM + tristate "Cavium ethernet drivers" + depends on PCI && 64BIT + ---help--- + Enable support for the Cavium ThunderX Network Interface + Controller (NIC). The NIC provides the controller and DMA + engines to move network traffic to/from memory. The NIC + works closely with TNS, BGX and SerDes to implement the + functions replacing and virtualizing those of a typical + standalone PCIe NIC chip. + + If you have a Cavium Thunder board, say Y. + +if NET_VENDOR_CAVIUM + +config THUNDER_NIC_PF + tristate "Thunder Physical function driver" + default NET_VENDOR_CAVIUM + select THUNDER_NIC_BGX + ---help--- + This driver supports Thunder's NIC physical function. + +config THUNDER_NIC_VF + tristate "Thunder Virtual function driver" + default NET_VENDOR_CAVIUM + ---help--- + This driver supports Thunder's NIC virtual function. + +config THUNDER_NIC_BGX + tristate "Thunder MAC interface driver (BGX)" + default NET_VENDOR_CAVIUM + ---help--- + This driver supports programming and controlling the MAC + interface from the NIC physical function driver. + +endif # NET_VENDOR_CAVIUM diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile new file mode 100644 index 000000000000..7aac4780d050 --- /dev/null +++ b/drivers/net/ethernet/cavium/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Cavium ethernet device drivers. +# + +obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/ diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile new file mode 100644 index 000000000000..5c4615ccaa14 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for Cavium's Thunder ethernet device +# + +obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o +obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o +obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o + +nicpf-y := nic_main.o +nicvf-y := nicvf_main.o nicvf_queues.o +nicvf-y += nicvf_ethtool.o diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h new file mode 100644 index 000000000000..a3b43e50a576 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -0,0 +1,422 @@ +/* + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ */ + +#ifndef NIC_H +#define NIC_H + +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include "thunder_bgx.h" + +/* PCI device IDs */ +#define PCI_DEVICE_ID_THUNDER_NIC_PF 0xA01E +#define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF 0x0011 +#define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034 +#define PCI_DEVICE_ID_THUNDER_BGX 0xA026 + +/* PCI BAR nos */ +#define PCI_CFG_REG_BAR_NUM 0 +#define PCI_MSIX_REG_BAR_NUM 4 + +/* NIC SRIOV VF count */ +#define MAX_NUM_VFS_SUPPORTED 128 +#define DEFAULT_NUM_VF_ENABLED 8 + +#define NIC_TNS_BYPASS_MODE 0 +#define NIC_TNS_MODE 1 + +/* NIC priv flags */ +#define NIC_SRIOV_ENABLED BIT(0) + +/* Min/Max packet size */ +#define NIC_HW_MIN_FRS 64 +#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */ + +/* Max pkinds */ +#define NIC_MAX_PKIND 16 + +/* Rx Channels */ +/* Receive channel configuration in TNS bypass mode + * Below is configuration in TNS bypass mode + * BGX0-LMAC0-CHAN0 - VNIC CHAN0 + * BGX0-LMAC1-CHAN0 - VNIC CHAN16 + * ... + * BGX1-LMAC0-CHAN0 - VNIC CHAN128 + * ... + * BGX1-LMAC3-CHAN0 - VNIC CHAN174 + */ +#define NIC_INTF_COUNT 2 /* Interfaces between VNIC and TNS/BGX */ +#define NIC_CHANS_PER_INF 128 +#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF) +#define NIC_CPI_COUNT 2048 /* No of channel parse indices */ + +/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */ +#define NIC_MAX_BGX MAX_BGX_PER_CN88XX +#define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX) +#define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */ +#define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX) + +/* Tx scheduling */ +#define NIC_MAX_TL4 1024 +#define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */ +#define NIC_MAX_TL3 256 +#define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */ +#define NIC_MAX_TL2 64 +#define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */ +#define NIC_MAX_TL1 2 + +/* TNS bypass mode */ +#define NIC_TL2_PER_BGX 32 +#define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX) +#define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF) + +/* NIC VF Interrupts */ +#define NICVF_INTR_CQ 0 +#define NICVF_INTR_SQ 1 +#define NICVF_INTR_RBDR 2 +#define NICVF_INTR_PKT_DROP 3 +#define NICVF_INTR_TCP_TIMER 4 +#define NICVF_INTR_MBOX 5 +#define NICVF_INTR_QS_ERR 6 + +#define NICVF_INTR_CQ_SHIFT 0 +#define NICVF_INTR_SQ_SHIFT 8 +#define NICVF_INTR_RBDR_SHIFT 16 +#define NICVF_INTR_PKT_DROP_SHIFT 20 +#define NICVF_INTR_TCP_TIMER_SHIFT 21 +#define NICVF_INTR_MBOX_SHIFT 22 +#define NICVF_INTR_QS_ERR_SHIFT 23 + +#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT) +#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT) +#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT) +#define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT) +#define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT) +#define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT) +#define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT) + +/* MSI-X interrupts */ +#define NIC_PF_MSIX_VECTORS 10 +#define NIC_VF_MSIX_VECTORS 20 + +#define NIC_PF_INTR_ID_ECC0_SBE 0 +#define NIC_PF_INTR_ID_ECC0_DBE 1 +#define NIC_PF_INTR_ID_ECC1_SBE 2 +#define NIC_PF_INTR_ID_ECC1_DBE 3 +#define NIC_PF_INTR_ID_ECC2_SBE 4 +#define NIC_PF_INTR_ID_ECC2_DBE 5 +#define NIC_PF_INTR_ID_ECC3_SBE 6 +#define NIC_PF_INTR_ID_ECC3_DBE 7 +#define NIC_PF_INTR_ID_MBOX0 8 +#define NIC_PF_INTR_ID_MBOX1 9 + +/* Global timer for CQ timer thresh interrupts + * Calculated for SCLK of 700MHz + * value written should be a 1/16th of what is expected + * + * 1 tick per 0.05usec = value of 2.2 + * This 10% would be 
covered in CQ timer thresh value + */ +#define NICPF_CLK_PER_INT_TICK 2 + +struct nicvf_cq_poll { + u8 cq_idx; /* Completion queue index */ + struct napi_struct napi; +}; + +#define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */ +#define NIC_MAX_RSS_HASH_BITS 8 +#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS) +#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */ + +struct nicvf_rss_info { + bool enable; +#define RSS_L2_EXTENDED_HASH_ENA BIT(0) +#define RSS_IP_HASH_ENA BIT(1) +#define RSS_TCP_HASH_ENA BIT(2) +#define RSS_TCP_SYN_DIS BIT(3) +#define RSS_UDP_HASH_ENA BIT(4) +#define RSS_L4_EXTENDED_HASH_ENA BIT(5) +#define RSS_ROCE_ENA BIT(6) +#define RSS_L3_BI_DIRECTION_ENA BIT(7) +#define RSS_L4_BI_DIRECTION_ENA BIT(8) + u64 cfg; + u8 hash_bits; + u16 rss_size; + u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE]; + u64 key[RSS_HASH_KEY_SIZE]; +} ____cacheline_aligned_in_smp; + +enum rx_stats_reg_offset { + RX_OCTS = 0x0, + RX_UCAST = 0x1, + RX_BCAST = 0x2, + RX_MCAST = 0x3, + RX_RED = 0x4, + RX_RED_OCTS = 0x5, + RX_ORUN = 0x6, + RX_ORUN_OCTS = 0x7, + RX_FCS = 0x8, + RX_L2ERR = 0x9, + RX_DRP_BCAST = 0xa, + RX_DRP_MCAST = 0xb, + RX_DRP_L3BCAST = 0xc, + RX_DRP_L3MCAST = 0xd, + RX_STATS_ENUM_LAST, +}; + +enum tx_stats_reg_offset { + TX_OCTS = 0x0, + TX_UCAST = 0x1, + TX_BCAST = 0x2, + TX_MCAST = 0x3, + TX_DROP = 0x4, + TX_STATS_ENUM_LAST, +}; + +struct nicvf_hw_stats { + u64 rx_bytes_ok; + u64 rx_ucast_frames_ok; + u64 rx_bcast_frames_ok; + u64 rx_mcast_frames_ok; + u64 rx_fcs_errors; + u64 rx_l2_errors; + u64 rx_drop_red; + u64 rx_drop_red_bytes; + u64 rx_drop_overrun; + u64 rx_drop_overrun_bytes; + u64 rx_drop_bcast; + u64 rx_drop_mcast; + u64 rx_drop_l3_bcast; + u64 rx_drop_l3_mcast; + u64 tx_bytes_ok; + u64 tx_ucast_frames_ok; + u64 tx_bcast_frames_ok; + u64 tx_mcast_frames_ok; + u64 tx_drops; +}; + +struct nicvf_drv_stats { + /* Rx */ + u64 rx_frames_ok; + u64 rx_frames_64; + u64 rx_frames_127; + u64 rx_frames_255; + u64 rx_frames_511; + u64 rx_frames_1023; + u64 rx_frames_1518; + u64 rx_frames_jumbo; + u64 rx_drops; + /* Tx */ + u64 tx_frames_ok; + u64 tx_drops; + u64 tx_busy; + u64 tx_tso; +}; + +struct nicvf { + struct net_device *netdev; + struct pci_dev *pdev; + u8 vf_id; + u8 node; + u8 tns_mode; + u16 mtu; + struct queue_set *qs; + void __iomem *reg_base; + bool link_up; + u8 duplex; + u32 speed; + struct page *rb_page; + u32 rb_page_offset; + bool rb_alloc_fail; + bool rb_work_scheduled; + struct delayed_work rbdr_work; + struct tasklet_struct rbdr_task; + struct tasklet_struct qs_err_task; + struct tasklet_struct cq_task; + struct nicvf_cq_poll *napi[8]; + struct nicvf_rss_info rss_info; + u8 cpi_alg; + /* Interrupt coalescing settings */ + u32 cq_coalesce_usecs; + + u32 msg_enable; + struct nicvf_hw_stats stats; + struct nicvf_drv_stats drv_stats; + struct bgx_stats bgx_stats; + struct work_struct reset_task; + + /* MSI-X */ + bool msix_enabled; + u8 num_vec; + struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS]; + char irq_name[NIC_VF_MSIX_VECTORS][20]; + bool irq_allocated[NIC_VF_MSIX_VECTORS]; + + bool pf_ready_to_rcv_msg; + bool pf_acked; + bool pf_nacked; + bool bgx_stats_acked; +} ____cacheline_aligned_in_smp; + +/* PF <--> VF Mailbox communication + * Eight 64bit registers are shared between PF and VF. + * Separate set for each VF. + * Writing '1' into last register mbx7 means end of message. 
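+ * (Note: the mailbox this driver actually uses is two 64-bit registers + * per VF, i.e. 128 bits -- see NIC_PF_VF_MAILBOX_SIZE below and the + * BUILD_BUG_ON(sizeof(union nic_mbx) > 16) in nic_main.c.)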
+ */ + +/* PF <--> VF mailbox communication */ +#define NIC_PF_VF_MAILBOX_SIZE 2 +#define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */ + +/* Mailbox message types */ +#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */ +#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */ +#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */ +#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */ +#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */ +#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */ +#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */ +#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */ +#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */ +#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */ +#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */ +#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */ +#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */ +#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */ +#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */ +#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */ +#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */ +#define NIC_MBOX_MSG_CFG_DONE 0x12 /* VF configuration done */ +#define NIC_MBOX_MSG_SHUTDOWN 0x13 /* VF is being shutdown */ + +struct nic_cfg_msg { + u8 msg; + u8 vf_id; + u8 tns_mode; + u8 node_id; + u8 mac_addr[ETH_ALEN]; +}; + +/* Qset configuration */ +struct qs_cfg_msg { + u8 msg; + u8 num; + u64 cfg; +}; + +/* Receive queue configuration */ +struct rq_cfg_msg { + u8 msg; + u8 qs_num; + u8 rq_num; + u64 cfg; +}; + +/* Send queue configuration */ +struct sq_cfg_msg { + u8 msg; + u8 qs_num; + u8 sq_num; + u64 cfg; +}; + +/* Set VF's MAC address */ +struct set_mac_msg { + u8 msg; + u8 vf_id; + u8 mac_addr[ETH_ALEN]; +}; + +/* Set Maximum frame size */ +struct set_frs_msg { + u8 msg; + u8 vf_id; + u16 max_frs; +}; + +/* Set CPI algorithm type */ +struct cpi_cfg_msg { + u8 msg; + u8 vf_id; + u8 rq_cnt; + u8 cpi_alg; +}; + +/* Get RSS table size */ +struct rss_sz_msg { + u8 msg; + u8 vf_id; + u16 ind_tbl_size; +}; + +/* Set RSS configuration */ +struct rss_cfg_msg { + u8 msg; + u8 vf_id; + u8 hash_bits; + u8 tbl_len; + u8 tbl_offset; +#define RSS_IND_TBL_LEN_PER_MBX_MSG 8 + u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG]; +}; + +struct bgx_stats_msg { + u8 msg; + u8 vf_id; + u8 rx; + u8 idx; + u64 stats; +}; + +/* Physical interface link status */ +struct bgx_link_status { + u8 msg; + u8 link_up; + u8 duplex; + u32 speed; +}; + +/* 128 bit shared memory between PF and each VF */ +union nic_mbx { + struct { u8 msg; } msg; + struct nic_cfg_msg nic_cfg; + struct qs_cfg_msg qs; + struct rq_cfg_msg rq; + struct sq_cfg_msg sq; + struct set_mac_msg mac; + struct set_frs_msg frs; + struct cpi_cfg_msg cpi_cfg; + struct rss_sz_msg rss_size; + struct rss_cfg_msg rss_cfg; + struct bgx_stats_msg bgx_stats; + struct bgx_link_status link_status; +}; + +#define NIC_NODE_ID_MASK 0x03 +#define NIC_NODE_ID_SHIFT 44 + +static inline int nic_get_node_id(struct pci_dev *pdev) +{ + u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM); + return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK); +} + +int nicvf_set_real_num_queues(struct net_device *netdev, + int tx_queues, int rx_queues); +int nicvf_open(struct net_device *netdev); +int nicvf_stop(struct net_device *netdev); +int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx); +void nicvf_config_rss(struct nicvf *nic); +void 
nicvf_set_rss_key(struct nicvf *nic); +void nicvf_set_ethtool_ops(struct net_device *netdev); +void nicvf_update_stats(struct nicvf *nic); +void nicvf_update_lmac_stats(struct nicvf *nic); + +#endif /* NIC_H */ diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c new file mode 100644 index 000000000000..6e0c03169a55 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -0,0 +1,932 @@ +/* + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +#include "nic_reg.h" +#include "nic.h" +#include "q_struct.h" +#include "thunder_bgx.h" + +#define DRV_NAME "thunder-nic" +#define DRV_VERSION "1.0" + +struct nicpf { + struct pci_dev *pdev; + u8 rev_id; + u8 node; + unsigned int flags; + u8 num_vf_en; /* No of VF enabled */ + bool vf_enabled[MAX_NUM_VFS_SUPPORTED]; + void __iomem *reg_base; /* Register start address */ + struct pkind_cfg pkind; +#define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF)) +#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) +#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) + u8 vf_lmac_map[MAX_LMAC]; + struct delayed_work dwork; + struct workqueue_struct *check_link; + u8 link[MAX_LMAC]; + u8 duplex[MAX_LMAC]; + u32 speed[MAX_LMAC]; + u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; + u16 rss_ind_tbl_size; + bool mbx_lock[MAX_NUM_VFS_SUPPORTED]; + + /* MSI-X */ + bool msix_enabled; + u8 num_vec; + struct msix_entry msix_entries[NIC_PF_MSIX_VECTORS]; + bool irq_allocated[NIC_PF_MSIX_VECTORS]; +}; + +/* Supported devices */ +static const struct pci_device_id nic_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) }, + { 0, } /* end of table */ +}; + +MODULE_AUTHOR("Sunil Goutham"); +MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, nic_id_table); + +/* The Cavium ThunderX network controller can *only* be found in SoCs + * containing the ThunderX ARM64 CPU implementation. All accesses to the device + * registers on this platform are implicitly strongly ordered with respect + * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use + * with no memory barriers in this driver. The readq()/writeq() functions add + * explicit ordering operations which in this case are redundant, and only + * add overhead. 
+ */ + +/* Register read/write APIs */ +static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val) +{ + writeq_relaxed(val, nic->reg_base + offset); +} + +static u64 nic_reg_read(struct nicpf *nic, u64 offset) +{ + return readq_relaxed(nic->reg_base + offset); +} + +/* PF -> VF mailbox communication APIs */ +static void nic_enable_mbx_intr(struct nicpf *nic) +{ + /* Enable mailbox interrupt for all 128 VFs */ + nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull); + nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull); +} + +static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg) +{ + nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf)); +} + +static u64 nic_get_mbx_addr(int vf) +{ + return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT); +} + +/* Send a mailbox message to VF + * @vf: vf to which this message is to be sent + * @mbx: Message to be sent + */ +static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx) +{ + void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf); + u64 *msg = (u64 *)mbx; + + /* In first revision HW, mbox interrupt is triggered + * when PF writes to MBOX(1), in next revisions when + * PF writes to MBOX(0) + */ + if (nic->rev_id == 0) { + /* see the comment for nic_reg_write()/nic_reg_read() + * functions above + */ + writeq_relaxed(msg[0], mbx_addr); + writeq_relaxed(msg[1], mbx_addr + 8); + } else { + writeq_relaxed(msg[1], mbx_addr + 8); + writeq_relaxed(msg[0], mbx_addr); + } +} + +/* Responds to VF's READY message with VF's + * ID, node, MAC address, etc. + * @vf: VF which sent READY message + */ +static void nic_mbx_send_ready(struct nicpf *nic, int vf) +{ + union nic_mbx mbx = {}; + int bgx_idx, lmac; + const char *mac; + + mbx.nic_cfg.msg = NIC_MBOX_MSG_READY; + mbx.nic_cfg.vf_id = vf; + + mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE; + + bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + + mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac); + if (mac) + ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac); + + mbx.nic_cfg.node_id = nic->node; + nic_send_msg_to_vf(nic, vf, &mbx); +} + +/* ACKs VF's mailbox message + * @vf: VF to which the ACK is to be sent + */ +static void nic_mbx_send_ack(struct nicpf *nic, int vf) +{ + union nic_mbx mbx = {}; + + mbx.msg.msg = NIC_MBOX_MSG_ACK; + nic_send_msg_to_vf(nic, vf, &mbx); +} + +/* NACKs VF's mailbox message when the PF is not able to + * complete the action + * @vf: VF to which the NACK is to be sent + */ +static void nic_mbx_send_nack(struct nicpf *nic, int vf) +{ + union nic_mbx mbx = {}; + + mbx.msg.msg = NIC_MBOX_MSG_NACK; + nic_send_msg_to_vf(nic, vf, &mbx); +} + +/* Flush all in flight receive packets to memory and + * bring down an active RQ + */ +static int nic_rcv_queue_sw_sync(struct nicpf *nic) +{ + u16 timeout = ~0x00; + + nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01); + /* Wait till sync cycle is finished */ + while (timeout) { + if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1) + break; + timeout--; + } + nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00); + if (!timeout) { + dev_err(&nic->pdev->dev, "Receive queue software sync failed"); + return 1; + } + return 0; +} + +/* Get BGX Rx/Tx stats and respond to VF's request */ +static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx) +{ + int bgx_idx, lmac; + union nic_mbx mbx = {}; + + bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]); + lmac = 
NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]); + + mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS; + mbx.bgx_stats.vf_id = bgx->vf_id; + mbx.bgx_stats.rx = bgx->rx; + mbx.bgx_stats.idx = bgx->idx; + if (bgx->rx) + mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx, + lmac, bgx->idx); + else + mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx, + lmac, bgx->idx); + nic_send_msg_to_vf(nic, bgx->vf_id, &mbx); +} + +/* Update hardware min/max frame size */ +static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf) +{ + if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) { + dev_err(&nic->pdev->dev, + "Invalid MTU setting from VF%d rejected, should be between %d and %d\n", + vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS); + return 1; + } + new_frs += ETH_HLEN; + if (new_frs <= nic->pkind.maxlen) + return 0; + + nic->pkind.maxlen = new_frs; + nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind); + return 0; +} + +/* Set minimum transmit packet size */ +static void nic_set_tx_pkt_pad(struct nicpf *nic, int size) +{ + int lmac; + u64 lmac_cfg; + + /* Max value that can be set is 60 */ + if (size > 60) + size = 60; + + for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) { + lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3)); + lmac_cfg &= ~(0xF << 2); + lmac_cfg |= ((size / 4) << 2); + nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg); + } +} + +/* Function to check number of LMACs present and set VF::LMAC mapping. + * Mapping will be used while initializing channels. + */ +static void nic_set_lmac_vf_mapping(struct nicpf *nic) +{ + unsigned bgx_map = bgx_get_map(nic->node); + int bgx, next_bgx_lmac = 0; + int lmac, lmac_cnt = 0; + u64 lmac_credit; + + nic->num_vf_en = 0; + + for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { + if (!(bgx_map & (1 << bgx))) + continue; + lmac_cnt = bgx_get_lmac_count(nic->node, bgx); + for (lmac = 0; lmac < lmac_cnt; lmac++) + nic->vf_lmac_map[next_bgx_lmac++] = + NIC_SET_VF_LMAC_MAP(bgx, lmac); + nic->num_vf_en += lmac_cnt; + + /* Program LMAC credits */ + lmac_credit = (1ull << 1); /* channel credit enable */ + lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */ + /* 48KB BGX Tx buffer size, each unit is of size 16bytes */ + lmac_credit |= (((((48 * 1024) / lmac_cnt) - + NIC_HW_MAX_FRS) / 16) << 12); + lmac = bgx * MAX_LMAC_PER_BGX; + for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++) + nic_reg_write(nic, + NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), + lmac_credit); + } +} + +#define BGX0_BLOCK 8 +#define BGX1_BLOCK 9 + +static void nic_init_hw(struct nicpf *nic) +{ + int i; + + /* Reset NIC, in case the driver is repeatedly inserted and removed */ + nic_reg_write(nic, NIC_PF_SOFT_RESET, 1); + + /* Enable NIC HW block */ + nic_reg_write(nic, NIC_PF_CFG, 0x3); + + /* Enable backpressure */ + nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03); + + /* Disable TNS mode on both interfaces */ + nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, + (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK); + nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), + (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK); + nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG, + (1ULL << 63) | BGX0_BLOCK); + nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8), + (1ULL << 63) | BGX1_BLOCK); + + /* PKIND configuration */ + nic->pkind.minlen = 0; + nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN; + nic->pkind.lenerr_en = 1; + nic->pkind.rx_hdr = 0; + nic->pkind.hdr_sl = 0; + + for (i = 0; i < NIC_MAX_PKIND; i++) + nic_reg_write(nic, 
NIC_PF_PKIND_0_15_CFG | (i << 3), + *(u64 *)&nic->pkind); + + nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS); + + /* Timer config */ + nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK); +} + +/* Channel parse index configuration */ +static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg) +{ + u32 vnic, bgx, lmac, chan; + u32 padd, cpi_count = 0; + u64 cpi_base, cpi, rssi_base, rssi; + u8 qset, rq_idx = 0; + + vnic = cfg->vf_id; + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); + + chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF); + cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX); + rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX); + + /* Rx channel configuration */ + nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3), + (1ull << 63) | (vnic << 0)); + nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3), + ((u64)cfg->cpi_alg << 62) | (cpi_base << 48)); + + if (cfg->cpi_alg == CPI_ALG_NONE) + cpi_count = 1; + else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */ + cpi_count = 8; + else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */ + cpi_count = 16; + else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */ + cpi_count = NIC_MAX_CPI_PER_LMAC; + + /* RSS Qset, Qidx mapping */ + qset = cfg->vf_id; + rssi = rssi_base; + for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) { + nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3), + (qset << 3) | rq_idx); + rq_idx++; + } + + rssi = 0; + cpi = cpi_base; + for (; cpi < (cpi_base + cpi_count); cpi++) { + /* Determine port to channel adder */ + if (cfg->cpi_alg != CPI_ALG_DIFF) + padd = cpi % cpi_count; + else + padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */ + + /* Leave RSS_SIZE as '0' to disable RSS */ + nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3), + (vnic << 24) | (padd << 16) | (rssi_base + rssi)); + + if ((rssi + 1) >= cfg->rq_cnt) + continue; + + if (cfg->cpi_alg == CPI_ALG_VLAN) + rssi++; + else if (cfg->cpi_alg == CPI_ALG_VLAN16) + rssi = ((cpi - cpi_base) & 0xe) >> 1; + else if (cfg->cpi_alg == CPI_ALG_DIFF) + rssi = ((cpi - cpi_base) & 0x38) >> 3; + } + nic->cpi_base[cfg->vf_id] = cpi_base; +} + +/* Responds to VF with its RSS indirection table size */ +static void nic_send_rss_size(struct nicpf *nic, int vf) +{ + union nic_mbx mbx = {}; + u64 *msg; + + msg = (u64 *)&mbx; + + mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE; + mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size; + nic_send_msg_to_vf(nic, vf, &mbx); +} + +/* Receive side scaling configuration + * configure: + * - RSS index + * - indir table, i.e. hash::RQ mapping + * - no of hash bits to consider + */ +static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg) +{ + u8 qset, idx = 0; + u64 cpi_cfg, cpi_base, rssi_base, rssi; + + cpi_base = nic->cpi_base[cfg->vf_id]; + cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3)); + rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset; + + rssi = rssi_base; + qset = cfg->vf_id; + + for (; rssi < (rssi_base + cfg->tbl_len); rssi++) { + nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3), + (qset << 3) | (cfg->ind_tbl[idx] & 0x7)); + idx++; + } + + cpi_cfg &= ~(0xFULL << 20); + cpi_cfg |= (cfg->hash_bits << 20); + nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg); +} + +/* 4 level transmit side scheduler configuration + * for TNS bypass mode + * + * Sample configuration for SQ0 + * VNIC0-SQ0 -> TL4(0) -> 
TL3[0] -> TL2[0] -> TL1[0] -> BGX0 + * VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0 + * VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0 + * VNIC3-SQ0 -> TL4(24) -> TL3[6] -> TL2[1] -> TL1[0] -> BGX0 + * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1 + * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1 + * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1 + * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1 + */ +static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx) +{ + u32 bgx, lmac, chan; + u32 tl2, tl3, tl4; + u32 rr_quantum; + + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); + /* 24 bytes for FCS, IPG and preamble */ + rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4); + + tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX); + tl4 += sq_idx; + tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3); + nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 | + ((u64)vnic << NIC_QS_ID_SHIFT) | + ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4); + nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3), + ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum); + + nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum); + chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF); + nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan); + /* Enable backpressure on the channel */ + nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1); + + tl2 = tl3 >> 2; + nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2); + nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum); + /* No priorities as of now */ + nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00); +} + +/* Interrupt handler to handle mailbox messages from VFs */ +static void nic_handle_mbx_intr(struct nicpf *nic, int vf) +{ + union nic_mbx mbx = {}; + u64 *mbx_data; + u64 mbx_addr; + u64 reg_addr; + int bgx, lmac; + int i; + int ret = 0; + + nic->mbx_lock[vf] = true; + + mbx_addr = nic_get_mbx_addr(vf); + mbx_data = (u64 *)&mbx; + + for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) { + *mbx_data = nic_reg_read(nic, mbx_addr); + mbx_data++; + mbx_addr += sizeof(u64); + } + + dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n", + __func__, mbx.msg.msg, vf); + switch (mbx.msg.msg) { + case NIC_MBOX_MSG_READY: + nic_mbx_send_ready(nic, vf); + nic->link[vf] = 0; + nic->duplex[vf] = 0; + nic->speed[vf] = 0; + ret = 1; + break; + case NIC_MBOX_MSG_QS_CFG: + reg_addr = NIC_PF_QSET_0_127_CFG | + (mbx.qs.num << NIC_QS_ID_SHIFT); + nic_reg_write(nic, reg_addr, mbx.qs.cfg); + break; + case NIC_MBOX_MSG_RQ_CFG: + reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG | + (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | + (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); + nic_reg_write(nic, reg_addr, mbx.rq.cfg); + break; + case NIC_MBOX_MSG_RQ_BP_CFG: + reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG | + (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | + (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); + nic_reg_write(nic, reg_addr, mbx.rq.cfg); + break; + case NIC_MBOX_MSG_RQ_SW_SYNC: + ret = nic_rcv_queue_sw_sync(nic); + break; + case NIC_MBOX_MSG_RQ_DROP_CFG: + reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG | + (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | + (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); + nic_reg_write(nic, reg_addr, mbx.rq.cfg); + break; + case NIC_MBOX_MSG_SQ_CFG: + reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG | + (mbx.sq.qs_num << NIC_QS_ID_SHIFT) | + (mbx.sq.sq_num << NIC_Q_NUM_SHIFT); + nic_reg_write(nic, reg_addr, mbx.sq.cfg); 
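+ /* also bind this SQ into the TL4 -> TL3 -> TL2 Tx scheduler tree */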
+ nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num); + break; + case NIC_MBOX_MSG_SET_MAC: + lmac = mbx.mac.vf_id; + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); + bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr); + break; + case NIC_MBOX_MSG_SET_MAX_FRS: + ret = nic_update_hw_frs(nic, mbx.frs.max_frs, + mbx.frs.vf_id); + break; + case NIC_MBOX_MSG_CPI_CFG: + nic_config_cpi(nic, &mbx.cpi_cfg); + break; + case NIC_MBOX_MSG_RSS_SIZE: + nic_send_rss_size(nic, vf); + goto unlock; + case NIC_MBOX_MSG_RSS_CFG: + case NIC_MBOX_MSG_RSS_CFG_CONT: + nic_config_rss(nic, &mbx.rss_cfg); + break; + case NIC_MBOX_MSG_CFG_DONE: + /* Last message of VF config msg sequence */ + nic->vf_enabled[vf] = true; + goto unlock; + case NIC_MBOX_MSG_SHUTDOWN: + /* First msg in VF teardown sequence */ + nic->vf_enabled[vf] = false; + break; + case NIC_MBOX_MSG_BGX_STATS: + nic_get_bgx_stats(nic, &mbx.bgx_stats); + goto unlock; + default: + dev_err(&nic->pdev->dev, + "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); + break; + } + + if (!ret) + nic_mbx_send_ack(nic, vf); + else if (mbx.msg.msg != NIC_MBOX_MSG_READY) + nic_mbx_send_nack(nic, vf); +unlock: + nic->mbx_lock[vf] = false; +} + +static void nic_mbx_intr_handler (struct nicpf *nic, int mbx) +{ + u64 intr; + u8 vf, vf_per_mbx_reg = 64; + + intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3)); + dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr); + for (vf = 0; vf < vf_per_mbx_reg; vf++) { + if (intr & (1ULL << vf)) { + dev_dbg(&nic->pdev->dev, "Intr from VF %d\n", + vf + (mbx * vf_per_mbx_reg)); + if ((vf + (mbx * vf_per_mbx_reg)) > nic->num_vf_en) + break; + nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg)); + nic_clear_mbx_intr(nic, vf, mbx); + } + } +} + +static irqreturn_t nic_mbx0_intr_handler (int irq, void *nic_irq) +{ + struct nicpf *nic = (struct nicpf *)nic_irq; + + nic_mbx_intr_handler(nic, 0); + + return IRQ_HANDLED; +} + +static irqreturn_t nic_mbx1_intr_handler (int irq, void *nic_irq) +{ + struct nicpf *nic = (struct nicpf *)nic_irq; + + nic_mbx_intr_handler(nic, 1); + + return IRQ_HANDLED; +} + +static int nic_enable_msix(struct nicpf *nic) +{ + int i, ret; + + nic->num_vec = NIC_PF_MSIX_VECTORS; + + for (i = 0; i < nic->num_vec; i++) + nic->msix_entries[i].entry = i; + + ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec); + if (ret) { + dev_err(&nic->pdev->dev, + "Request for #%d msix vectors failed\n", + nic->num_vec); + return ret; + } + + nic->msix_enabled = 1; + return 0; +} + +static void nic_disable_msix(struct nicpf *nic) +{ + if (nic->msix_enabled) { + pci_disable_msix(nic->pdev); + nic->msix_enabled = 0; + nic->num_vec = 0; + } +} + +static void nic_free_all_interrupts(struct nicpf *nic) +{ + int irq; + + for (irq = 0; irq < nic->num_vec; irq++) { + if (nic->irq_allocated[irq]) + free_irq(nic->msix_entries[irq].vector, nic); + nic->irq_allocated[irq] = false; + } +} + +static int nic_register_interrupts(struct nicpf *nic) +{ + int ret; + + /* Enable MSI-X */ + ret = nic_enable_msix(nic); + if (ret) + return ret; + + /* Register mailbox interrupt handlers */ + ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector, + nic_mbx0_intr_handler, 0, "NIC Mbox0", nic); + if (ret) + goto fail; + + nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true; + + ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector, + nic_mbx1_intr_handler, 0, "NIC Mbox1", nic); + if (ret) + goto fail; + + 
nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true; + + /* Enable mailbox interrupt */ + nic_enable_mbx_intr(nic); + return 0; + +fail: + dev_err(&nic->pdev->dev, "Request irq failed\n"); + nic_free_all_interrupts(nic); + return ret; +} + +static void nic_unregister_interrupts(struct nicpf *nic) +{ + nic_free_all_interrupts(nic); + nic_disable_msix(nic); +} + +static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic) +{ + int pos = 0; + int err; + u16 total_vf_cnt; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n"); + return -ENODEV; + } + + pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt); + if (total_vf_cnt < nic->num_vf_en) + nic->num_vf_en = total_vf_cnt; + + if (!total_vf_cnt) + return 0; + + err = pci_enable_sriov(pdev, nic->num_vf_en); + if (err) { + dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n", + nic->num_vf_en); + nic->num_vf_en = 0; + return err; + } + + dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n", + nic->num_vf_en); + + nic->flags |= NIC_SRIOV_ENABLED; + return 0; +} + +/* Poll for BGX LMAC link status and update corresponding VF + * if there is a change, valid only if internal L2 switch + * is not present otherwise VF link is always treated as up + */ +static void nic_poll_for_link(struct work_struct *work) +{ + union nic_mbx mbx = {}; + struct nicpf *nic; + struct bgx_link_status link; + u8 vf, bgx, lmac; + + nic = container_of(work, struct nicpf, dwork.work); + + mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; + + for (vf = 0; vf < nic->num_vf_en; vf++) { + /* Poll only if VF is UP */ + if (!nic->vf_enabled[vf]) + continue; + + /* Get BGX, LMAC indices for the VF */ + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + /* Get interface link status */ + bgx_get_lmac_link_state(nic->node, bgx, lmac, &link); + + /* Inform VF only if link status changed */ + if (nic->link[vf] == link.link_up) + continue; + + if (!nic->mbx_lock[vf]) { + nic->link[vf] = link.link_up; + nic->duplex[vf] = link.duplex; + nic->speed[vf] = link.speed; + + /* Send a mbox message to VF with current link status */ + mbx.link_status.link_up = link.link_up; + mbx.link_status.duplex = link.duplex; + mbx.link_status.speed = link.speed; + nic_send_msg_to_vf(nic, vf, &mbx); + } + } + queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2); +} + +static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct nicpf *nic; + int err; + + BUILD_BUG_ON(sizeof(union nic_mbx) > 16); + + nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL); + if (!nic) + return -ENOMEM; + + pci_set_drvdata(pdev, nic); + + nic->pdev = pdev; + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + pci_set_drvdata(pdev, NULL); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "Unable to get usable DMA configuration\n"); + goto err_release_regions; + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n"); + goto err_release_regions; + } + + /* MAP PF's configuration registers */ + nic->reg_base = pcim_iomap(pdev, 
PCI_CFG_REG_BAR_NUM, 0); + if (!nic->reg_base) { + dev_err(dev, "Cannot map config register space, aborting\n"); + err = -ENOMEM; + goto err_release_regions; + } + + pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id); + + nic->node = nic_get_node_id(pdev); + + nic_set_lmac_vf_mapping(nic); + + /* Initialize hardware */ + nic_init_hw(nic); + + /* Set RSS TBL size for each VF */ + nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE; + + /* Register interrupts */ + err = nic_register_interrupts(nic); + if (err) + goto err_release_regions; + + /* Configure SRIOV */ + err = nic_sriov_init(pdev, nic); + if (err) + goto err_unregister_interrupts; + + /* Register a physical link status poll fn() */ + nic->check_link = alloc_workqueue("check_link_status", + WQ_UNBOUND | WQ_MEM_RECLAIM, 1); + if (!nic->check_link) { + err = -ENOMEM; + goto err_disable_sriov; + } + + INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link); + queue_delayed_work(nic->check_link, &nic->dwork, 0); + + return 0; + +err_disable_sriov: + if (nic->flags & NIC_SRIOV_ENABLED) + pci_disable_sriov(pdev); +err_unregister_interrupts: + nic_unregister_interrupts(nic); +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void nic_remove(struct pci_dev *pdev) +{ + struct nicpf *nic = pci_get_drvdata(pdev); + + if (nic->flags & NIC_SRIOV_ENABLED) + pci_disable_sriov(pdev); + + if (nic->check_link) { + /* Destroy work Queue */ + cancel_delayed_work(&nic->dwork); + flush_workqueue(nic->check_link); + destroy_workqueue(nic->check_link); + } + + nic_unregister_interrupts(nic); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver nic_driver = { + .name = DRV_NAME, + .id_table = nic_id_table, + .probe = nic_probe, + .remove = nic_remove, +}; + +static int __init nic_init_module(void) +{ + pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); + + return pci_register_driver(&nic_driver); +} + +static void __exit nic_cleanup_module(void) +{ + pci_unregister_driver(&nic_driver); +} + +module_init(nic_init_module); +module_exit(nic_cleanup_module); diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h new file mode 100644 index 000000000000..58197bb2f805 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h @@ -0,0 +1,213 @@ +/* + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ */ + +#ifndef NIC_REG_H +#define NIC_REG_H + +#define NIC_PF_REG_COUNT 29573 +#define NIC_VF_REG_COUNT 249 + +/* Physical function register offsets */ +#define NIC_PF_CFG (0x0000) +#define NIC_PF_STATUS (0x0010) +#define NIC_PF_INTR_TIMER_CFG (0x0030) +#define NIC_PF_BIST_STATUS (0x0040) +#define NIC_PF_SOFT_RESET (0x0050) +#define NIC_PF_TCP_TIMER (0x0060) +#define NIC_PF_BP_CFG (0x0080) +#define NIC_PF_RRM_CFG (0x0088) +#define NIC_PF_CQM_CF (0x00A0) +#define NIC_PF_CNM_CF (0x00A8) +#define NIC_PF_CNM_STATUS (0x00B0) +#define NIC_PF_CQ_AVG_CFG (0x00C0) +#define NIC_PF_RRM_AVG_CFG (0x00C8) +#define NIC_PF_INTF_0_1_SEND_CFG (0x0200) +#define NIC_PF_INTF_0_1_BP_CFG (0x0208) +#define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210) +#define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220) +#define NIC_PF_RBDR_BP_STATE_0_3 (0x0240) +#define NIC_PF_MAILBOX_INT (0x0410) +#define NIC_PF_MAILBOX_INT_W1S (0x0430) +#define NIC_PF_MAILBOX_ENA_W1C (0x0450) +#define NIC_PF_MAILBOX_ENA_W1S (0x0470) +#define NIC_PF_RX_ETYPE_0_7 (0x0500) +#define NIC_PF_PKIND_0_15_CFG (0x0600) +#define NIC_PF_ECC0_FLIP0 (0x1000) +#define NIC_PF_ECC1_FLIP0 (0x1008) +#define NIC_PF_ECC2_FLIP0 (0x1010) +#define NIC_PF_ECC3_FLIP0 (0x1018) +#define NIC_PF_ECC0_FLIP1 (0x1080) +#define NIC_PF_ECC1_FLIP1 (0x1088) +#define NIC_PF_ECC2_FLIP1 (0x1090) +#define NIC_PF_ECC3_FLIP1 (0x1098) +#define NIC_PF_ECC0_CDIS (0x1100) +#define NIC_PF_ECC1_CDIS (0x1108) +#define NIC_PF_ECC2_CDIS (0x1110) +#define NIC_PF_ECC3_CDIS (0x1118) +#define NIC_PF_BIST0_STATUS (0x1280) +#define NIC_PF_BIST1_STATUS (0x1288) +#define NIC_PF_BIST2_STATUS (0x1290) +#define NIC_PF_BIST3_STATUS (0x1298) +#define NIC_PF_ECC0_SBE_INT (0x2000) +#define NIC_PF_ECC0_SBE_INT_W1S (0x2008) +#define NIC_PF_ECC0_SBE_ENA_W1C (0x2010) +#define NIC_PF_ECC0_SBE_ENA_W1S (0x2018) +#define NIC_PF_ECC0_DBE_INT (0x2100) +#define NIC_PF_ECC0_DBE_INT_W1S (0x2108) +#define NIC_PF_ECC0_DBE_ENA_W1C (0x2110) +#define NIC_PF_ECC0_DBE_ENA_W1S (0x2118) +#define NIC_PF_ECC1_SBE_INT (0x2200) +#define NIC_PF_ECC1_SBE_INT_W1S (0x2208) +#define NIC_PF_ECC1_SBE_ENA_W1C (0x2210) +#define NIC_PF_ECC1_SBE_ENA_W1S (0x2218) +#define NIC_PF_ECC1_DBE_INT (0x2300) +#define NIC_PF_ECC1_DBE_INT_W1S (0x2308) +#define NIC_PF_ECC1_DBE_ENA_W1C (0x2310) +#define NIC_PF_ECC1_DBE_ENA_W1S (0x2318) +#define NIC_PF_ECC2_SBE_INT (0x2400) +#define NIC_PF_ECC2_SBE_INT_W1S (0x2408) +#define NIC_PF_ECC2_SBE_ENA_W1C (0x2410) +#define NIC_PF_ECC2_SBE_ENA_W1S (0x2418) +#define NIC_PF_ECC2_DBE_INT (0x2500) +#define NIC_PF_ECC2_DBE_INT_W1S (0x2508) +#define NIC_PF_ECC2_DBE_ENA_W1C (0x2510) +#define NIC_PF_ECC2_DBE_ENA_W1S (0x2518) +#define NIC_PF_ECC3_SBE_INT (0x2600) +#define NIC_PF_ECC3_SBE_INT_W1S (0x2608) +#define NIC_PF_ECC3_SBE_ENA_W1C (0x2610) +#define NIC_PF_ECC3_SBE_ENA_W1S (0x2618) +#define NIC_PF_ECC3_DBE_INT (0x2700) +#define NIC_PF_ECC3_DBE_INT_W1S (0x2708) +#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710) +#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718) +#define NIC_PF_CPI_0_2047_CFG (0x200000) +#define NIC_PF_RSSI_0_4097_RQ (0x220000) +#define NIC_PF_LMAC_0_7_CFG (0x240000) +#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000) +#define NIC_PF_LMAC_0_7_CREDIT (0x244000) +#define NIC_PF_CHAN_0_255_TX_CFG (0x400000) +#define NIC_PF_CHAN_0_255_RX_CFG (0x420000) +#define NIC_PF_CHAN_0_255_SW_XOFF (0x440000) +#define NIC_PF_CHAN_0_255_CREDIT (0x460000) +#define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000) +#define NIC_PF_SW_SYNC_RX (0x490000) +#define NIC_PF_SW_SYNC_RX_DONE (0x490008) +#define NIC_PF_TL2_0_63_CFG (0x500000) +#define NIC_PF_TL2_0_63_PRI (0x520000) 
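+/* Note on the wide ranges above and below (CPI_0_2047, TL2_0_63, ...):
+ * each is a bank of identical registers, and an instance is selected by
+ * shifting its index into the upper bits of the offset, e.g. (a sketch,
+ * the helper expression is illustrative and not part of this patch):
+ *
+ *	addr = NIC_PF_QSET_0_127_CFG | ((u64)qset << NIC_QS_ID_SHIFT);
+ *
+ * The shift values are defined under "Offsets within registers" below.
+ */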
+#define NIC_PF_TL2_0_63_SH_STATUS (0x580000) +#define NIC_PF_TL3A_0_63_CFG (0x5F0000) +#define NIC_PF_TL3_0_255_CFG (0x600000) +#define NIC_PF_TL3_0_255_CHAN (0x620000) +#define NIC_PF_TL3_0_255_PIR (0x640000) +#define NIC_PF_TL3_0_255_SW_XOFF (0x660000) +#define NIC_PF_TL3_0_255_CNM_RATE (0x680000) +#define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000) +#define NIC_PF_TL4A_0_255_CFG (0x6F0000) +#define NIC_PF_TL4_0_1023_CFG (0x800000) +#define NIC_PF_TL4_0_1023_SW_XOFF (0x820000) +#define NIC_PF_TL4_0_1023_SH_STATUS (0x840000) +#define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000) +#define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000) +#define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030) +#define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000) +#define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100) +#define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000) +#define NIC_PF_QSET_0_127_CFG (0x20010000) +#define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400) +#define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420) +#define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500) +#define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600) +#define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00) +#define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08) +#define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00) + +#define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000) +#define NIC_PF_MSIX_VEC_0_CTL (0x000008) +#define NIC_PF_MSIX_PBA_0 (0x0F0000) + +/* Virtual function register offsets */ +#define NIC_VNIC_CFG (0x000020) +#define NIC_VF_PF_MAILBOX_0_1 (0x000130) +#define NIC_VF_INT (0x000200) +#define NIC_VF_INT_W1S (0x000220) +#define NIC_VF_ENA_W1C (0x000240) +#define NIC_VF_ENA_W1S (0x000260) + +#define NIC_VNIC_RSS_CFG (0x0020E0) +#define NIC_VNIC_RSS_KEY_0_4 (0x002200) +#define NIC_VNIC_TX_STAT_0_4 (0x004000) +#define NIC_VNIC_RX_STAT_0_13 (0x004100) +#define NIC_QSET_RQ_GEN_CFG (0x010010) + +#define NIC_QSET_CQ_0_7_CFG (0x010400) +#define NIC_QSET_CQ_0_7_CFG2 (0x010408) +#define NIC_QSET_CQ_0_7_THRESH (0x010410) +#define NIC_QSET_CQ_0_7_BASE (0x010420) +#define NIC_QSET_CQ_0_7_HEAD (0x010428) +#define NIC_QSET_CQ_0_7_TAIL (0x010430) +#define NIC_QSET_CQ_0_7_DOOR (0x010438) +#define NIC_QSET_CQ_0_7_STATUS (0x010440) +#define NIC_QSET_CQ_0_7_STATUS2 (0x010448) +#define NIC_QSET_CQ_0_7_DEBUG (0x010450) + +#define NIC_QSET_RQ_0_7_CFG (0x010600) +#define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700) + +#define NIC_QSET_SQ_0_7_CFG (0x010800) +#define NIC_QSET_SQ_0_7_THRESH (0x010810) +#define NIC_QSET_SQ_0_7_BASE (0x010820) +#define NIC_QSET_SQ_0_7_HEAD (0x010828) +#define NIC_QSET_SQ_0_7_TAIL (0x010830) +#define NIC_QSET_SQ_0_7_DOOR (0x010838) +#define NIC_QSET_SQ_0_7_STATUS (0x010840) +#define NIC_QSET_SQ_0_7_DEBUG (0x010848) +#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860) +#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900) + +#define NIC_QSET_RBDR_0_1_CFG (0x010C00) +#define NIC_QSET_RBDR_0_1_THRESH (0x010C10) +#define NIC_QSET_RBDR_0_1_BASE (0x010C20) +#define NIC_QSET_RBDR_0_1_HEAD (0x010C28) +#define NIC_QSET_RBDR_0_1_TAIL (0x010C30) +#define NIC_QSET_RBDR_0_1_DOOR (0x010C38) +#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40) +#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48) +#define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50) + +#define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000) +#define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008) +#define NIC_VF_MSIX_PBA (0x0F0000) + +/* Offsets within registers */ +#define NIC_MSIX_VEC_SHIFT 4 +#define NIC_Q_NUM_SHIFT 18 +#define NIC_QS_ID_SHIFT 21 +#define NIC_VF_NUM_SHIFT 21 + +/* Port kind configuration register */ +struct pkind_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + 
u64 reserved_42_63:22;
+	u64 hdr_sl:5;	/* Header skip length */
+	u64 rx_hdr:3;	/* TNS Receive header present */
+	u64 lenerr_en:1; /* L2 length error check enable */
+	u64 reserved_32_32:1;
+	u64 maxlen:16;	/* Max frame size */
+	u64 minlen:16;	/* Min frame size */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64 minlen:16;
+	u64 maxlen:16;
+	u64 reserved_32_32:1;
+	u64 lenerr_en:1;
+	u64 rx_hdr:3;
+	u64 hdr_sl:5;
+	u64 reserved_42_63:22;
+#endif
+};
+
+#endif /* NIC_REG_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
new file mode 100644
index 000000000000..16bd2d772db9
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -0,0 +1,600 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+/* ETHTOOL Support for VNIC_VF Device */
+
+#include <linux/pci.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME	"thunder-nicvf"
+#define DRV_VERSION	"1.0"
+
+struct nicvf_stat {
+	char name[ETH_GSTRING_LEN];
+	unsigned int index;
+};
+
+#define NICVF_HW_STAT(stat) { \
+	.name = #stat, \
+	.index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
+}
+
+#define NICVF_DRV_STAT(stat) { \
+	.name = #stat, \
+	.index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
+}
+
+static const struct nicvf_stat nicvf_hw_stats[] = {
+	NICVF_HW_STAT(rx_bytes_ok),
+	NICVF_HW_STAT(rx_ucast_frames_ok),
+	NICVF_HW_STAT(rx_bcast_frames_ok),
+	NICVF_HW_STAT(rx_mcast_frames_ok),
+	NICVF_HW_STAT(rx_fcs_errors),
+	NICVF_HW_STAT(rx_l2_errors),
+	NICVF_HW_STAT(rx_drop_red),
+	NICVF_HW_STAT(rx_drop_red_bytes),
+	NICVF_HW_STAT(rx_drop_overrun),
+	NICVF_HW_STAT(rx_drop_overrun_bytes),
+	NICVF_HW_STAT(rx_drop_bcast),
+	NICVF_HW_STAT(rx_drop_mcast),
+	NICVF_HW_STAT(rx_drop_l3_bcast),
+	NICVF_HW_STAT(rx_drop_l3_mcast),
+	NICVF_HW_STAT(tx_bytes_ok),
+	NICVF_HW_STAT(tx_ucast_frames_ok),
+	NICVF_HW_STAT(tx_bcast_frames_ok),
+	NICVF_HW_STAT(tx_mcast_frames_ok),
+};
+
+static const struct nicvf_stat nicvf_drv_stats[] = {
+	NICVF_DRV_STAT(rx_frames_ok),
+	NICVF_DRV_STAT(rx_frames_64),
+	NICVF_DRV_STAT(rx_frames_127),
+	NICVF_DRV_STAT(rx_frames_255),
+	NICVF_DRV_STAT(rx_frames_511),
+	NICVF_DRV_STAT(rx_frames_1023),
+	NICVF_DRV_STAT(rx_frames_1518),
+	NICVF_DRV_STAT(rx_frames_jumbo),
+	NICVF_DRV_STAT(rx_drops),
+	NICVF_DRV_STAT(tx_frames_ok),
+	NICVF_DRV_STAT(tx_busy),
+	NICVF_DRV_STAT(tx_tso),
+	NICVF_DRV_STAT(tx_drops),
+};
+
+static const struct nicvf_stat nicvf_queue_stats[] = {
+	{ "bytes", 0 },
+	{ "frames", 1 },
+};
+
+static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
+static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
+static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);
+
+static int nicvf_get_settings(struct net_device *netdev,
+			      struct ethtool_cmd *cmd)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	cmd->supported = 0;
+	cmd->transceiver = XCVR_EXTERNAL;
+	if (nic->speed <= 1000) {
+		cmd->port = PORT_MII;
+		cmd->autoneg = AUTONEG_ENABLE;
+	} else {
+		cmd->port = PORT_FIBRE;
+		cmd->autoneg = AUTONEG_DISABLE;
+	}
+	cmd->duplex = nic->duplex;
+	ethtool_cmd_speed_set(cmd, nic->speed);
+
+	return 0;
+}
+
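The two stat tables above both rely on the same trick: each macro records an ethtool name plus the field's u64 slot inside the stats struct, computed with offsetof(), so the dump code can index the struct as a flat u64 array. For illustration, one invocation expands to roughly the following initializer (a sketch of the macro expansion, not text from the patch):

	/* NICVF_HW_STAT(rx_fcs_errors) produces: */
	{
		.name  = "rx_fcs_errors",
		.index = offsetof(struct nicvf_hw_stats, rx_fcs_errors) / sizeof(u64),
	}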
+static void nicvf_get_drvinfo(struct net_device *netdev,
+			      struct ethtool_drvinfo *info)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
+}
+
+static u32 nicvf_get_msglevel(struct net_device *netdev)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	return nic->msg_enable;
+}
+
+static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	nic->msg_enable = lvl;
+}
+
+static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+	int stats, qidx;
+
+	if (sset != ETH_SS_STATS)
+		return;
+
+	for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
+		memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+
+	for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
+		memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+
+	for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+			sprintf(data, "rxq%d: %s", qidx,
+				nicvf_queue_stats[stats].name);
+			data += ETH_GSTRING_LEN;
+		}
+	}
+
+	for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+			sprintf(data, "txq%d: %s", qidx,
+				nicvf_queue_stats[stats].name);
+			data += ETH_GSTRING_LEN;
+		}
+	}
+
+	for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
+		sprintf(data, "bgx_rxstat%d: ", stats);
+		data += ETH_GSTRING_LEN;
+	}
+
+	for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
+		sprintf(data, "bgx_txstat%d: ", stats);
+		data += ETH_GSTRING_LEN;
+	}
+}
+
+static int nicvf_get_sset_count(struct net_device *netdev, int sset)
+{
+	if (sset != ETH_SS_STATS)
+		return -EINVAL;
+
+	return nicvf_n_hw_stats + nicvf_n_drv_stats +
+		(nicvf_n_queue_stats *
+		 (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) +
+		BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
+}
+
+static void nicvf_get_ethtool_stats(struct net_device *netdev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	int stat, qidx;
+
+	nicvf_update_stats(nic);
+
+	/* Update LMAC stats */
+	nicvf_update_lmac_stats(nic);
+
+	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
+		*(data++) = ((u64 *)&nic->stats)
+				[nicvf_hw_stats[stat].index];
+	for (stat = 0; stat < nicvf_n_drv_stats; stat++)
+		*(data++) = ((u64 *)&nic->drv_stats)
+				[nicvf_drv_stats[stat].index];
+
+	for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+			*(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
+					[nicvf_queue_stats[stat].index];
+	}
+
+	for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+			*(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
+					[nicvf_queue_stats[stat].index];
+	}
+
+	for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
+		*(data++) = nic->bgx_stats.rx_stats[stat];
+	for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
+		*(data++) = nic->bgx_stats.tx_stats[stat];
+}
+
+static int nicvf_get_regs_len(struct net_device *dev)
+{
+	return sizeof(u64) * NIC_VF_REG_COUNT;
+}
+
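nicvf_get_regs() below fills the buffer whose size nicvf_get_regs_len() just advertised: exactly NIC_VF_REG_COUNT u64 words. A defensive variant of the fill pattern could make that contract explicit; nicvf_regs_put() is a hypothetical helper sketched here, not part of the patch:

	/* Bounds-checked store into the ethtool register dump buffer */
	static void nicvf_regs_put(u64 *p, int *i, u64 val)
	{
		if (*i < NIC_VF_REG_COUNT)
			p[(*i)++] = val;
	}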
+static void nicvf_get_regs(struct net_device *dev,
+			   struct ethtool_regs *regs, void *reg)
+{
+	struct nicvf *nic = netdev_priv(dev);
+	u64 *p = (u64 *)reg;
+	u64 reg_offset;
+	int mbox, key, stat, q;
+	int i = 0;
+
+	regs->version = 0;
+	memset(p, 0, NIC_VF_REG_COUNT * sizeof(u64));
+
+	p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
+	/* Mailbox registers */
+	for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
+		p[i++] = nicvf_reg_read(nic,
+					NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));
+
+	p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
+	p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
+	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
+	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+	p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+	for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
+		p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));
+
+	/* Tx/Rx statistics */
+	for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
+		p[i++] = nicvf_reg_read(nic,
+					NIC_VNIC_TX_STAT_0_4 | (stat << 3));
+
+	for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
+		p[i++] = nicvf_reg_read(nic,
+					NIC_VNIC_RX_STAT_0_13 | (stat << 3));
+
+	p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);
+
+	/* All completion queues' registers */
+	for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
+	}
+
+	/* All receive queues' registers */
+	for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
+		p[i++] = nicvf_queue_reg_read(nic,
+					      NIC_QSET_RQ_0_7_STAT_0_1, q);
+		reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
+		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+	}
+
+	for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
+		reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
+		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+	}
+
+	for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
+		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
+		p[i++] = nicvf_queue_reg_read(nic,
+					      NIC_QSET_RBDR_0_1_STATUS0, q);
+		p[i++] = nicvf_queue_reg_read(nic,
+					      NIC_QSET_RBDR_0_1_STATUS1, q);
+		reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
+		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+	}
+}
+
+static int nicvf_get_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *cmd)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
+	return 0;
+}
+
+static 
void nicvf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nicvf *nic = netdev_priv(netdev); + struct queue_set *qs = nic->qs; + + ring->rx_max_pending = MAX_RCV_BUF_COUNT; + ring->rx_pending = qs->rbdr_len; + ring->tx_max_pending = MAX_SND_QUEUE_LEN; + ring->tx_pending = qs->sq_len; +} + +static int nicvf_get_rss_hash_opts(struct nicvf *nic, + struct ethtool_rxnfc *info) +{ + info->data = 0; + + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + case IPV4_FLOW: + case IPV6_FLOW: + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int nicvf_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rules) +{ + struct nicvf *nic = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = nic->qs->rq_cnt; + ret = 0; + break; + case ETHTOOL_GRXFH: + return nicvf_get_rss_hash_opts(nic, info); + default: + break; + } + return ret; +} + +static int nicvf_set_rss_hash_opts(struct nicvf *nic, + struct ethtool_rxnfc *info) +{ + struct nicvf_rss_info *rss = &nic->rss_info; + u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG); + + if (!rss->enable) + netdev_err(nic->netdev, + "RSS is disabled, hash cannot be set\n"); + + netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n", + info->flow_type, info->data); + + if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST)) + return -EINVAL; + + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + rss_cfg &= ~(1ULL << RSS_HASH_TCP); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_cfg |= (1ULL << RSS_HASH_TCP); + break; + default: + return -EINVAL; + } + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + rss_cfg &= ~(1ULL << RSS_HASH_UDP); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_cfg |= (1ULL << RSS_HASH_UDP); + break; + default: + return -EINVAL; + } + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + rss_cfg &= ~(1ULL << RSS_HASH_L4ETC); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_cfg |= (1ULL << RSS_HASH_L4ETC); + break; + default: + return -EINVAL; + } + break; + case IPV4_FLOW: + case IPV6_FLOW: + rss_cfg = RSS_HASH_IP; + break; + default: + return -EINVAL; + } + + nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg); + return 0; +} + +static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) +{ + struct nicvf *nic = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_SRXFH: + return nicvf_set_rss_hash_opts(nic, info); + default: + break; + } + return -EOPNOTSUPP; +} + +static u32 nicvf_get_rxfh_key_size(struct net_device *netdev) +{ + return RSS_HASH_KEY_SIZE * sizeof(u64); +} + +static u32 nicvf_get_rxfh_indir_size(struct net_device *dev) +{ + struct nicvf *nic = netdev_priv(dev); + + return nic->rss_info.rss_size; +} + +static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey, + u8 *hfunc) +{ + struct nicvf *nic = netdev_priv(dev); + struct nicvf_rss_info *rss = &nic->rss_info; + int idx; + + if (indir) { + for (idx = 0; idx < rss->rss_size; idx++) + indir[idx] = rss->ind_tbl[idx]; + } + + if (hkey) + memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64)); + + if 
(hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *hkey, u8 hfunc) +{ + struct nicvf *nic = netdev_priv(dev); + struct nicvf_rss_info *rss = &nic->rss_info; + int idx; + + if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) { + rss->enable = false; + rss->hash_bits = 0; + return -EIO; + } + + /* We do not allow change in unsupported parameters */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + rss->enable = true; + if (indir) { + for (idx = 0; idx < rss->rss_size; idx++) + rss->ind_tbl[idx] = indir[idx]; + } + + if (hkey) { + memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64)); + nicvf_set_rss_key(nic); + } + + nicvf_config_rss(nic); + return 0; +} + +/* Get no of queues device supports and current queue count */ +static void nicvf_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct nicvf *nic = netdev_priv(dev); + + memset(channel, 0, sizeof(*channel)); + + channel->max_rx = MAX_RCV_QUEUES_PER_QS; + channel->max_tx = MAX_SND_QUEUES_PER_QS; + + channel->rx_count = nic->qs->rq_cnt; + channel->tx_count = nic->qs->sq_cnt; +} + +/* Set no of Tx, Rx queues to be used */ +static int nicvf_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct nicvf *nic = netdev_priv(dev); + int err = 0; + + if (!channel->rx_count || !channel->tx_count) + return -EINVAL; + if (channel->rx_count > MAX_RCV_QUEUES_PER_QS) + return -EINVAL; + if (channel->tx_count > MAX_SND_QUEUES_PER_QS) + return -EINVAL; + + nic->qs->rq_cnt = channel->rx_count; + nic->qs->sq_cnt = channel->tx_count; + nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt); + + err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt); + if (err) + return err; + + if (!netif_running(dev)) + return err; + + nicvf_stop(dev); + nicvf_open(dev); + netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", + nic->qs->sq_cnt, nic->qs->rq_cnt); + + return err; +} + +static const struct ethtool_ops nicvf_ethtool_ops = { + .get_settings = nicvf_get_settings, + .get_link = ethtool_op_get_link, + .get_drvinfo = nicvf_get_drvinfo, + .get_msglevel = nicvf_get_msglevel, + .set_msglevel = nicvf_set_msglevel, + .get_strings = nicvf_get_strings, + .get_sset_count = nicvf_get_sset_count, + .get_ethtool_stats = nicvf_get_ethtool_stats, + .get_regs_len = nicvf_get_regs_len, + .get_regs = nicvf_get_regs, + .get_coalesce = nicvf_get_coalesce, + .get_ringparam = nicvf_get_ringparam, + .get_rxnfc = nicvf_get_rxnfc, + .set_rxnfc = nicvf_set_rxnfc, + .get_rxfh_key_size = nicvf_get_rxfh_key_size, + .get_rxfh_indir_size = nicvf_get_rxfh_indir_size, + .get_rxfh = nicvf_get_rxfh, + .set_rxfh = nicvf_set_rxfh, + .get_channels = nicvf_get_channels, + .set_channels = nicvf_set_channels, + .get_ts_info = ethtool_op_get_ts_info, +}; + +void nicvf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &nicvf_ethtool_ops; +} diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c new file mode 100644 index 000000000000..02da802d3288 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -0,0 +1,1331 @@ +/* + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/log2.h>
+#include <linux/prefetch.h>
+#include <linux/irq.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME	"thunder-nicvf"
+#define DRV_VERSION	"1.0"
+
+/* Supported devices */
+static const struct pci_device_id nicvf_id_table[] = {
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+			 PCI_DEVICE_ID_THUNDER_NIC_VF,
+			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
+			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+	{ 0, }	/* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nicvf_id_table);
+
+static int debug = 0x00;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug message level bitmap");
+
+static int cpi_alg = CPI_ALG_NONE;
+module_param(cpi_alg, int, S_IRUGO);
+MODULE_PARM_DESC(cpi_alg,
+		 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
+
+static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
+					  struct sk_buff *skb)
+{
+	if (skb->len <= 64)
+		nic->drv_stats.rx_frames_64++;
+	else if (skb->len <= 127)
+		nic->drv_stats.rx_frames_127++;
+	else if (skb->len <= 255)
+		nic->drv_stats.rx_frames_255++;
+	else if (skb->len <= 511)
+		nic->drv_stats.rx_frames_511++;
+	else if (skb->len <= 1023)
+		nic->drv_stats.rx_frames_1023++;
+	else if (skb->len <= 1518)
+		nic->drv_stats.rx_frames_1518++;
+	else
+		nic->drv_stats.rx_frames_jumbo++;
+}
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operations which in this case are redundant, and only
+ * add overhead.
+ */
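Relaxed accessors only drop ordering against other MMIO; when descriptors in coherent memory must be visible to the NIC before a doorbell write, an explicit barrier is still required (nicvf_open() later in this file uses wmb() for exactly this). A sketch of the publish-then-kick pattern, with a hypothetical helper name that is not part of the patch:

	/* Illustration: make descriptor stores visible before the doorbell */
	static void example_sq_doorbell(struct nicvf *nic, u64 qidx, int desc_cnt)
	{
		dma_wmb();	/* order descriptor writes vs. the MMIO kick */
		nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, desc_cnt);
	}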
+
+/* Register read/write APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
+{
+	writeq_relaxed(val, nic->reg_base + offset);
+}
+
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
+{
+	return readq_relaxed(nic->reg_base + offset);
+}
+
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+			   u64 qidx, u64 val)
+{
+	void __iomem *addr = nic->reg_base + offset;
+
+	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
+{
+	void __iomem *addr = nic->reg_base + offset;
+
+	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+/* VF -> PF mailbox communication */
+
+static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
+{
+	u64 *msg = (u64 *)mbx;
+
+	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
+	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
+}
+
+int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
+{
+	int timeout = NIC_MBOX_MSG_TIMEOUT;
+	int sleep = 10;
+
+	nic->pf_acked = false;
+	nic->pf_nacked = false;
+
+	nicvf_write_to_mbx(nic, mbx);
+
+	/* Wait for previous message to be acked, timeout 2sec */
+	while (!nic->pf_acked) {
+		if (nic->pf_nacked)
+			return -EINVAL;
+		msleep(sleep);
+		if (nic->pf_acked)
+			break;
+		timeout -= sleep;
+		if (!timeout) {
+			netdev_err(nic->netdev,
+				   "PF didn't ACK mbox msg %d from VF%d\n",
+				   (mbx->msg.msg & 0xFF), nic->vf_id);
+			return -EBUSY;
+		}
+	}
+	return 0;
+}
+
+/* Checks if VF is able to communicate with PF
+ * and also gets the VNIC number this VF is associated with.
+ */
+static int nicvf_check_pf_ready(struct nicvf *nic)
+{
+	int timeout = 5000, sleep = 20;
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_READY;
+
+	nic->pf_ready_to_rcv_msg = false;
+
+	nicvf_write_to_mbx(nic, &mbx);
+
+	while (!nic->pf_ready_to_rcv_msg) {
+		msleep(sleep);
+		if (nic->pf_ready_to_rcv_msg)
+			break;
+		timeout -= sleep;
+		if (!timeout) {
+			netdev_err(nic->netdev,
+				   "PF didn't respond to READY msg\n");
+			return 0;
+		}
+	}
+	return 1;
+}
+
+static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
+{
+	if (bgx->rx)
+		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
+	else
+		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
+}
+
+static void nicvf_handle_mbx_intr(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+	u64 *mbx_data;
+	u64 mbx_addr;
+	int i;
+
+	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+	mbx_data = (u64 *)&mbx;
+
+	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+		*mbx_data = nicvf_reg_read(nic, mbx_addr);
+		mbx_data++;
+		mbx_addr += sizeof(u64);
+	}
+
+	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
+	switch (mbx.msg.msg) {
+	case NIC_MBOX_MSG_READY:
+		nic->pf_ready_to_rcv_msg = true;
+		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
+		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
+		nic->node = mbx.nic_cfg.node_id;
+		ether_addr_copy(nic->netdev->dev_addr, mbx.nic_cfg.mac_addr);
+		nic->link_up = false;
+		nic->duplex = 0;
+		nic->speed = 0;
+		break;
+	case NIC_MBOX_MSG_ACK:
+		nic->pf_acked = true;
+		break;
+	case NIC_MBOX_MSG_NACK:
+		nic->pf_nacked = true;
+		break;
+	case NIC_MBOX_MSG_RSS_SIZE:
+		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
+		nic->pf_acked = true;
+		break;
+	case NIC_MBOX_MSG_BGX_STATS:
+		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
+		nic->pf_acked = true;
+		nic->bgx_stats_acked = true;
+		break;
+	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+		nic->pf_acked = true;
+		nic->link_up = mbx.link_status.link_up;
+		nic->duplex = mbx.link_status.duplex;
+		nic->speed = 
mbx.link_status.speed; + if (nic->link_up) { + netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n", + nic->netdev->name, nic->speed, + nic->duplex == DUPLEX_FULL ? + "Full duplex" : "Half duplex"); + netif_carrier_on(nic->netdev); + netif_tx_wake_all_queues(nic->netdev); + } else { + netdev_info(nic->netdev, "%s: Link is Down\n", + nic->netdev->name); + netif_carrier_off(nic->netdev); + netif_tx_stop_all_queues(nic->netdev); + } + break; + default: + netdev_err(nic->netdev, + "Invalid message from PF, msg 0x%x\n", mbx.msg.msg); + break; + } + nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0); +} + +static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev) +{ + union nic_mbx mbx = {}; + + mbx.mac.msg = NIC_MBOX_MSG_SET_MAC; + mbx.mac.vf_id = nic->vf_id; + ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr); + + return nicvf_send_msg_to_pf(nic, &mbx); +} + +static void nicvf_config_cpi(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + + mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG; + mbx.cpi_cfg.vf_id = nic->vf_id; + mbx.cpi_cfg.cpi_alg = nic->cpi_alg; + mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt; + + nicvf_send_msg_to_pf(nic, &mbx); +} + +static void nicvf_get_rss_size(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + + mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE; + mbx.rss_size.vf_id = nic->vf_id; + nicvf_send_msg_to_pf(nic, &mbx); +} + +void nicvf_config_rss(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + struct nicvf_rss_info *rss = &nic->rss_info; + int ind_tbl_len = rss->rss_size; + int i, nextq = 0; + + mbx.rss_cfg.vf_id = nic->vf_id; + mbx.rss_cfg.hash_bits = rss->hash_bits; + while (ind_tbl_len) { + mbx.rss_cfg.tbl_offset = nextq; + mbx.rss_cfg.tbl_len = min(ind_tbl_len, + RSS_IND_TBL_LEN_PER_MBX_MSG); + mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ? 
+ NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG; + + for (i = 0; i < mbx.rss_cfg.tbl_len; i++) + mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++]; + + nicvf_send_msg_to_pf(nic, &mbx); + + ind_tbl_len -= mbx.rss_cfg.tbl_len; + } +} + +void nicvf_set_rss_key(struct nicvf *nic) +{ + struct nicvf_rss_info *rss = &nic->rss_info; + u64 key_addr = NIC_VNIC_RSS_KEY_0_4; + int idx; + + for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) { + nicvf_reg_write(nic, key_addr, rss->key[idx]); + key_addr += sizeof(u64); + } +} + +static int nicvf_rss_init(struct nicvf *nic) +{ + struct nicvf_rss_info *rss = &nic->rss_info; + int idx; + + nicvf_get_rss_size(nic); + + if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) { + rss->enable = false; + rss->hash_bits = 0; + return 0; + } + + rss->enable = true; + + /* Using the HW reset value for now */ + rss->key[0] = 0xFEED0BADFEED0BADULL; + rss->key[1] = 0xFEED0BADFEED0BADULL; + rss->key[2] = 0xFEED0BADFEED0BADULL; + rss->key[3] = 0xFEED0BADFEED0BADULL; + rss->key[4] = 0xFEED0BADFEED0BADULL; + + nicvf_set_rss_key(nic); + + rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA; + nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg); + + rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size)); + + for (idx = 0; idx < rss->rss_size; idx++) + rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx, + nic->qs->rq_cnt); + nicvf_config_rss(nic); + return 1; +} + +int nicvf_set_real_num_queues(struct net_device *netdev, + int tx_queues, int rx_queues) +{ + int err = 0; + + err = netif_set_real_num_tx_queues(netdev, tx_queues); + if (err) { + netdev_err(netdev, + "Failed to set no of Tx queues: %d\n", tx_queues); + return err; + } + + err = netif_set_real_num_rx_queues(netdev, rx_queues); + if (err) + netdev_err(netdev, + "Failed to set no of Rx queues: %d\n", rx_queues); + return err; +} + +static int nicvf_init_resources(struct nicvf *nic) +{ + int err; + union nic_mbx mbx = {}; + + mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; + + /* Enable Qset */ + nicvf_qset_config(nic, true); + + /* Initialize queues and HW for data transfer */ + err = nicvf_config_data_transfer(nic, true); + if (err) { + netdev_err(nic->netdev, + "Failed to alloc/config VF's QSet resources\n"); + return err; + } + + /* Send VF config done msg to PF */ + nicvf_write_to_mbx(nic, &mbx); + + return 0; +} + +static void nicvf_snd_pkt_handler(struct net_device *netdev, + struct cmp_queue *cq, + struct cqe_send_t *cqe_tx, int cqe_type) +{ + struct sk_buff *skb = NULL; + struct nicvf *nic = netdev_priv(netdev); + struct snd_queue *sq; + struct sq_hdr_subdesc *hdr; + + sq = &nic->qs->sq[cqe_tx->sq_idx]; + + hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr); + if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) + return; + + netdev_dbg(nic->netdev, + "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n", + __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, + cqe_tx->sqe_ptr, hdr->subdesc_cnt); + + nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); + nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); + skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; + /* For TSO offloaded packets only one head SKB needs to be freed */ + if (skb) { + prefetch(skb); + dev_consume_skb_any(skb); + } +} + +static void nicvf_rcv_pkt_handler(struct net_device *netdev, + struct napi_struct *napi, + struct cmp_queue *cq, + struct cqe_rx_t *cqe_rx, int cqe_type) +{ + struct sk_buff *skb; + struct nicvf *nic = netdev_priv(netdev); + int err = 0; + + /* Check for errors */ + err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); + if (err && !cqe_rx->rb_cnt) 
+ return; + + skb = nicvf_get_rcv_skb(nic, cqe_rx); + if (!skb) { + netdev_dbg(nic->netdev, "Packet not received\n"); + return; + } + + if (netif_msg_pktdata(nic)) { + netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name, + skb, skb->len); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb->len, true); + } + + nicvf_set_rx_frame_cnt(nic, skb); + + skb_record_rx_queue(skb, cqe_rx->rq_idx); + if (netdev->hw_features & NETIF_F_RXCSUM) { + /* HW by default verifies TCP/UDP/SCTP checksums */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb_checksum_none_assert(skb); + } + + skb->protocol = eth_type_trans(skb, netdev); + + if (napi && (netdev->features & NETIF_F_GRO)) + napi_gro_receive(napi, skb); + else + netif_receive_skb(skb); +} + +static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, + struct napi_struct *napi, int budget) +{ + int processed_cqe, work_done = 0; + int cqe_count, cqe_head; + struct nicvf *nic = netdev_priv(netdev); + struct queue_set *qs = nic->qs; + struct cmp_queue *cq = &qs->cq[cq_idx]; + struct cqe_rx_t *cq_desc; + + spin_lock_bh(&cq->lock); +loop: + processed_cqe = 0; + /* Get no of valid CQ entries to process */ + cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx); + cqe_count &= CQ_CQE_COUNT; + if (!cqe_count) + goto done; + + /* Get head of the valid CQ entries */ + cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; + cqe_head &= 0xFFFF; + + netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n", + __func__, cqe_count, cqe_head); + while (processed_cqe < cqe_count) { + /* Get the CQ descriptor */ + cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); + cqe_head++; + cqe_head &= (cq->dmem.q_len - 1); + /* Initiate prefetch for next descriptor */ + prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head)); + + if ((work_done >= budget) && napi && + (cq_desc->cqe_type != CQE_TYPE_SEND)) { + break; + } + + netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n", + cq_desc->cqe_type); + switch (cq_desc->cqe_type) { + case CQE_TYPE_RX: + nicvf_rcv_pkt_handler(netdev, napi, cq, + cq_desc, CQE_TYPE_RX); + work_done++; + break; + case CQE_TYPE_SEND: + nicvf_snd_pkt_handler(netdev, cq, + (void *)cq_desc, CQE_TYPE_SEND); + break; + case CQE_TYPE_INVALID: + case CQE_TYPE_RX_SPLIT: + case CQE_TYPE_RX_TCP: + case CQE_TYPE_SEND_PTP: + /* Ignore for now */ + break; + } + processed_cqe++; + } + netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n", + __func__, processed_cqe, work_done, budget); + + /* Ring doorbell to inform H/W to reuse processed CQEs */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, + cq_idx, processed_cqe); + + if ((work_done < budget) && napi) + goto loop; + +done: + spin_unlock_bh(&cq->lock); + return work_done; +} + +static int nicvf_poll(struct napi_struct *napi, int budget) +{ + u64 cq_head; + int work_done = 0; + struct net_device *netdev = napi->dev; + struct nicvf *nic = netdev_priv(netdev); + struct nicvf_cq_poll *cq; + struct netdev_queue *txq; + + cq = container_of(napi, struct nicvf_cq_poll, napi); + work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget); + + txq = netdev_get_tx_queue(netdev, cq->cq_idx); + if (netif_tx_queue_stopped(txq)) + netif_tx_wake_queue(txq); + + if (work_done < budget) { + /* Slow packet rate, exit polling */ + napi_complete(napi); + /* Re-enable interrupts */ + cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, + cq->cq_idx); + nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx); + 
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD, + cq->cq_idx, cq_head); + nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx); + } + return work_done; +} + +/* Qset error interrupt handler + * + * As of now only CQ errors are handled + */ +static void nicvf_handle_qs_err(unsigned long data) +{ + struct nicvf *nic = (struct nicvf *)data; + struct queue_set *qs = nic->qs; + int qidx; + u64 status; + + netif_tx_disable(nic->netdev); + + /* Check if it is CQ err */ + for (qidx = 0; qidx < qs->cq_cnt; qidx++) { + status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, + qidx); + if (!(status & CQ_ERR_MASK)) + continue; + /* Process already queued CQEs and reconfig CQ */ + nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); + nicvf_sq_disable(nic, qidx); + nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0); + nicvf_cmp_queue_config(nic, qs, qidx, true); + nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx); + nicvf_sq_enable(nic, &qs->sq[qidx], qidx); + + nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx); + } + + netif_tx_start_all_queues(nic->netdev); + /* Re-enable Qset error interrupt */ + nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0); +} + +static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq) +{ + struct nicvf *nic = (struct nicvf *)nicvf_irq; + u64 intr; + + intr = nicvf_reg_read(nic, NIC_VF_INT); + /* Check for spurious interrupt */ + if (!(intr & NICVF_INTR_MBOX_MASK)) + return IRQ_HANDLED; + + nicvf_handle_mbx_intr(nic); + + return IRQ_HANDLED; +} + +static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq) +{ + u64 qidx, intr, clear_intr = 0; + u64 cq_intr, rbdr_intr, qs_err_intr; + struct nicvf *nic = (struct nicvf *)nicvf_irq; + struct queue_set *qs = nic->qs; + struct nicvf_cq_poll *cq_poll = NULL; + + intr = nicvf_reg_read(nic, NIC_VF_INT); + if (netif_msg_intr(nic)) + netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n", + nic->netdev->name, intr); + + qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK; + if (qs_err_intr) { + /* Disable Qset err interrupt and schedule softirq */ + nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0); + tasklet_hi_schedule(&nic->qs_err_task); + clear_intr |= qs_err_intr; + } + + /* Disable interrupts and start polling */ + cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT; + for (qidx = 0; qidx < qs->cq_cnt; qidx++) { + if (!(cq_intr & (1 << qidx))) + continue; + if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx)) + continue; + + nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); + clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT); + + cq_poll = nic->napi[qidx]; + /* Schedule NAPI */ + if (cq_poll) + napi_schedule(&cq_poll->napi); + } + + /* Handle RBDR interrupts */ + rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT; + if (rbdr_intr) { + /* Disable RBDR interrupt and schedule softirq */ + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { + if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx)) + continue; + nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx); + tasklet_hi_schedule(&nic->rbdr_task); + clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT); + } + } + + /* Clear interrupts */ + nicvf_reg_write(nic, NIC_VF_INT, clear_intr); + return IRQ_HANDLED; +} + +static int nicvf_enable_msix(struct nicvf *nic) +{ + int ret, vec; + + nic->num_vec = NIC_VF_MSIX_VECTORS; + + for (vec = 0; vec < nic->num_vec; vec++) + nic->msix_entries[vec].entry = vec; + + ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec); + if (ret) { + netdev_err(nic->netdev, + "Req for #%d msix vectors failed\n", nic->num_vec); + return 0; + 
} + nic->msix_enabled = 1; + return 1; +} + +static void nicvf_disable_msix(struct nicvf *nic) +{ + if (nic->msix_enabled) { + pci_disable_msix(nic->pdev); + nic->msix_enabled = 0; + nic->num_vec = 0; + } +} + +static int nicvf_register_interrupts(struct nicvf *nic) +{ + int irq, free, ret = 0; + int vector; + + for_each_cq_irq(irq) + sprintf(nic->irq_name[irq], "NICVF%d CQ%d", + nic->vf_id, irq); + + for_each_sq_irq(irq) + sprintf(nic->irq_name[irq], "NICVF%d SQ%d", + nic->vf_id, irq - NICVF_INTR_ID_SQ); + + for_each_rbdr_irq(irq) + sprintf(nic->irq_name[irq], "NICVF%d RBDR%d", + nic->vf_id, irq - NICVF_INTR_ID_RBDR); + + /* Register all interrupts except mailbox */ + for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) { + vector = nic->msix_entries[irq].vector; + ret = request_irq(vector, nicvf_intr_handler, + 0, nic->irq_name[irq], nic); + if (ret) + break; + nic->irq_allocated[irq] = true; + } + + for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) { + vector = nic->msix_entries[irq].vector; + ret = request_irq(vector, nicvf_intr_handler, + 0, nic->irq_name[irq], nic); + if (ret) + break; + nic->irq_allocated[irq] = true; + } + + sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], + "NICVF%d Qset error", nic->vf_id); + if (!ret) { + vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector; + irq = NICVF_INTR_ID_QS_ERR; + ret = request_irq(vector, nicvf_intr_handler, + 0, nic->irq_name[irq], nic); + if (!ret) + nic->irq_allocated[irq] = true; + } + + if (ret) { + netdev_err(nic->netdev, "Request irq failed\n"); + for (free = 0; free < irq; free++) + free_irq(nic->msix_entries[free].vector, nic); + return ret; + } + + return 0; +} + +static void nicvf_unregister_interrupts(struct nicvf *nic) +{ + int irq; + + /* Free registered interrupts */ + for (irq = 0; irq < nic->num_vec; irq++) { + if (nic->irq_allocated[irq]) + free_irq(nic->msix_entries[irq].vector, nic); + nic->irq_allocated[irq] = false; + } + + /* Disable MSI-X */ + nicvf_disable_msix(nic); +} + +/* Initialize MSIX vectors and register MISC interrupt. 
+ * Send READY message to PF to check if it's alive
+ */
+static int nicvf_register_misc_interrupt(struct nicvf *nic)
+{
+	int ret = 0;
+	int irq = NICVF_INTR_ID_MISC;
+
+	/* Return if mailbox interrupt is already registered */
+	if (nic->msix_enabled)
+		return 0;
+
+	/* Enable MSI-X */
+	if (!nicvf_enable_msix(nic))
+		return 1;
+
+	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
+	/* Register Misc interrupt */
+	ret = request_irq(nic->msix_entries[irq].vector,
+			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
+
+	if (ret)
+		return ret;
+	nic->irq_allocated[irq] = true;
+
+	/* Enable mailbox interrupt */
+	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
+
+	/* Check if VF is able to communicate with PF */
+	if (!nicvf_check_pf_ready(nic)) {
+		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+		nicvf_unregister_interrupts(nic);
+		return 1;
+	}
+
+	return 0;
+}
+
+static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	int qid = skb_get_queue_mapping(skb);
+	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
+
+	/* Check for minimum packet length */
+	if (skb->len <= ETH_HLEN) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) {
+		netif_tx_stop_queue(txq);
+		nic->drv_stats.tx_busy++;
+		if (netif_msg_tx_err(nic))
+			netdev_warn(netdev,
+				    "%s: Transmit ring full, stopping SQ%d\n",
+				    netdev->name, qid);
+
+		return NETDEV_TX_BUSY;
+	}
+
+	return NETDEV_TX_OK;
+}
+
+int nicvf_stop(struct net_device *netdev)
+{
+	int irq, qidx;
+	struct nicvf *nic = netdev_priv(netdev);
+	struct queue_set *qs = nic->qs;
+	struct nicvf_cq_poll *cq_poll = NULL;
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
+	nicvf_send_msg_to_pf(nic, &mbx);
+
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
+	/* Disable RBDR & QS error interrupts */
+	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
+	}
+	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+	/* Wait for pending IRQ handlers to finish */
+	for (irq = 0; irq < nic->num_vec; irq++)
+		synchronize_irq(nic->msix_entries[irq].vector);
+
+	tasklet_kill(&nic->rbdr_task);
+	tasklet_kill(&nic->qs_err_task);
+	if (nic->rb_work_scheduled)
+		cancel_delayed_work_sync(&nic->rbdr_work);
+
+	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
+		cq_poll = nic->napi[qidx];
+		if (!cq_poll)
+			continue;
+		nic->napi[qidx] = NULL;
+		napi_synchronize(&cq_poll->napi);
+		/* CQ intr is enabled while napi_complete,
+		 * so disable it now
+		 */
+		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
+		napi_disable(&cq_poll->napi);
+		netif_napi_del(&cq_poll->napi);
+		kfree(cq_poll);
+	}
+
+	/* Free resources */
+	nicvf_config_data_transfer(nic, false);
+
+	/* Disable HW Qset */
+	nicvf_qset_config(nic, false);
+
+	/* disable mailbox interrupt */
+	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+
+	nicvf_unregister_interrupts(nic);
+
+	return 0;
+}
+
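nicvf_stop() above and nicvf_open() below also serve as the driver's reconfiguration primitive: nicvf_set_channels() in the ethtool code and nicvf_reset_task() later in this file both bounce the interface through the pair. The pattern those callers use:

	/* Restart the VF datapath to apply a new queue configuration */
	if (netif_running(netdev)) {
		nicvf_stop(netdev);
		nicvf_open(netdev);
	}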
+int nicvf_open(struct net_device *netdev)
+{
+	int err, qidx;
+	struct nicvf *nic = netdev_priv(netdev);
+	struct queue_set *qs = nic->qs;
+	struct nicvf_cq_poll *cq_poll = NULL;
+
+	nic->mtu = netdev->mtu;
+
+	netif_carrier_off(netdev);
+
+	err = nicvf_register_misc_interrupt(nic);
+	if (err)
+		return err;
+
+	/* Register NAPI handler for processing CQEs */
+	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
+		if (!cq_poll) {
+			err = -ENOMEM;
+			goto napi_del;
+		}
+		cq_poll->cq_idx = qidx;
+		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
+			       NAPI_POLL_WEIGHT);
+		napi_enable(&cq_poll->napi);
+		nic->napi[qidx] = cq_poll;
+	}
+
+	/* Check if we got MAC address from PF or else generate a random MAC */
+	if (is_zero_ether_addr(netdev->dev_addr)) {
+		eth_hw_addr_random(netdev);
+		nicvf_hw_set_mac_addr(nic, netdev);
+	}
+
+	/* Init tasklet for handling Qset err interrupt */
+	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
+		     (unsigned long)nic);
+
+	/* Init RBDR tasklet which will refill RBDR */
+	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
+		     (unsigned long)nic);
+	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
+
+	/* Configure CPI algorithm */
+	nic->cpi_alg = cpi_alg;
+	nicvf_config_cpi(nic);
+
+	/* Configure receive side scaling */
+	nicvf_rss_init(nic);
+
+	err = nicvf_register_interrupts(nic);
+	if (err)
+		goto cleanup;
+
+	/* Initialize the queues */
+	err = nicvf_init_resources(nic);
+	if (err)
+		goto cleanup;
+
+	/* Make sure queue initialization is written */
+	wmb();
+
+	nicvf_reg_write(nic, NIC_VF_INT, -1);
+	/* Enable Qset err interrupt */
+	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+	/* Enable completion queue interrupt */
+	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+
+	/* Enable RBDR threshold interrupt */
+	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
+
+	netif_carrier_on(netdev);
+	netif_tx_start_all_queues(netdev);
+
+	return 0;
+cleanup:
+	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+	nicvf_unregister_interrupts(nic);
+napi_del:
+	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+		cq_poll = nic->napi[qidx];
+		if (!cq_poll)
+			continue;
+		napi_disable(&cq_poll->napi);
+		netif_napi_del(&cq_poll->napi);
+		kfree(cq_poll);
+		nic->napi[qidx] = NULL;
+	}
+	return err;
+}
+
+static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
+{
+	union nic_mbx mbx = {};
+
+	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+	mbx.frs.max_frs = mtu;
+	mbx.frs.vf_id = nic->vf_id;
+
+	return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	if (new_mtu > NIC_HW_MAX_FRS)
+		return -EINVAL;
+
+	if (new_mtu < NIC_HW_MIN_FRS)
+		return -EINVAL;
+
+	if (nicvf_update_hw_max_frs(nic, new_mtu))
+		return -EINVAL;
+	netdev->mtu = new_mtu;
+	nic->mtu = new_mtu;
+
+	return 0;
+}
+
+static int nicvf_set_mac_address(struct net_device *netdev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct nicvf *nic = netdev_priv(netdev);
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+	if (nic->msix_enabled)
+		if (nicvf_hw_set_mac_addr(nic, netdev))
+			return -EBUSY;
+
+	return 0;
+}
+
+void nicvf_update_lmac_stats(struct nicvf *nic)
+{
+	int stat = 0;
+	union nic_mbx mbx = {};
+	int timeout;
+
+	if (!netif_running(nic->netdev))
+		return;
+
+	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+	mbx.bgx_stats.vf_id = nic->vf_id;
+	/* Rx stats */
+	mbx.bgx_stats.rx = 1;
+	while (stat < BGX_RX_STATS_COUNT) {
+		nic->bgx_stats_acked = 0;
+		mbx.bgx_stats.idx = stat;
+		nicvf_send_msg_to_pf(nic, &mbx);
+		timeout = 0;
+		while ((!nic->bgx_stats_acked) && (timeout < 10)) {
+			msleep(2);
+			timeout++;
+		}
+		stat++;
+	}
+
+	stat = 0;
+
+	/* Tx stats */
+	mbx.bgx_stats.rx = 0;
+	while (stat < BGX_TX_STATS_COUNT) {
+ nic->bgx_stats_acked = 0; + mbx.bgx_stats.idx = stat; + nicvf_send_msg_to_pf(nic, &mbx); + timeout = 0; + while ((!nic->bgx_stats_acked) && (timeout < 10)) { + msleep(2); + timeout++; + } + stat++; + } +} + +void nicvf_update_stats(struct nicvf *nic) +{ + int qidx; + struct nicvf_hw_stats *stats = &nic->stats; + struct nicvf_drv_stats *drv_stats = &nic->drv_stats; + struct queue_set *qs = nic->qs; + +#define GET_RX_STATS(reg) \ + nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3)) +#define GET_TX_STATS(reg) \ + nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3)) + + stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS); + stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST); + stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST); + stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST); + stats->rx_fcs_errors = GET_RX_STATS(RX_FCS); + stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR); + stats->rx_drop_red = GET_RX_STATS(RX_RED); + stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN); + stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST); + stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST); + stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); + stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST); + + stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS); + stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST); + stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST); + stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); + stats->tx_drops = GET_TX_STATS(TX_DROP); + + drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok + + stats->rx_bcast_frames_ok + + stats->rx_mcast_frames_ok; + drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + + stats->tx_bcast_frames_ok + + stats->tx_mcast_frames_ok; + drv_stats->rx_drops = stats->rx_drop_red + + stats->rx_drop_overrun; + drv_stats->tx_drops = stats->tx_drops; + + /* Update RQ and SQ stats */ + for (qidx = 0; qidx < qs->rq_cnt; qidx++) + nicvf_update_rq_stats(nic, qidx); + for (qidx = 0; qidx < qs->sq_cnt; qidx++) + nicvf_update_sq_stats(nic, qidx); +} + +static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct nicvf *nic = netdev_priv(netdev); + struct nicvf_hw_stats *hw_stats = &nic->stats; + struct nicvf_drv_stats *drv_stats = &nic->drv_stats; + + nicvf_update_stats(nic); + + stats->rx_bytes = hw_stats->rx_bytes_ok; + stats->rx_packets = drv_stats->rx_frames_ok; + stats->rx_dropped = drv_stats->rx_drops; + + stats->tx_bytes = hw_stats->tx_bytes_ok; + stats->tx_packets = drv_stats->tx_frames_ok; + stats->tx_dropped = drv_stats->tx_drops; + + return stats; +} + +static void nicvf_tx_timeout(struct net_device *dev) +{ + struct nicvf *nic = netdev_priv(dev); + + if (netif_msg_tx_err(nic)) + netdev_warn(dev, "%s: Transmit timed out, resetting\n", + dev->name); + + schedule_work(&nic->reset_task); +} + +static void nicvf_reset_task(struct work_struct *work) +{ + struct nicvf *nic; + + nic = container_of(work, struct nicvf, reset_task); + + if (!netif_running(nic->netdev)) + return; + + nicvf_stop(nic->netdev); + nicvf_open(nic->netdev); + nic->netdev->trans_start = jiffies; +} + +static const struct net_device_ops nicvf_netdev_ops = { + .ndo_open = nicvf_open, + .ndo_stop = nicvf_stop, + .ndo_start_xmit = nicvf_xmit, + .ndo_change_mtu = nicvf_change_mtu, + .ndo_set_mac_address = nicvf_set_mac_address, + .ndo_get_stats64 = nicvf_get_stats64, + .ndo_tx_timeout = nicvf_tx_timeout, +}; + +static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct net_device 
*netdev; + struct nicvf *nic; + struct queue_set *qs; + int err; + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "Unable to get usable DMA configuration\n"); + goto err_release_regions; + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n"); + goto err_release_regions; + } + + netdev = alloc_etherdev_mqs(sizeof(struct nicvf), + MAX_RCV_QUEUES_PER_QS, + MAX_SND_QUEUES_PER_QS); + if (!netdev) { + err = -ENOMEM; + goto err_release_regions; + } + + pci_set_drvdata(pdev, netdev); + + SET_NETDEV_DEV(netdev, &pdev->dev); + + nic = netdev_priv(netdev); + nic->netdev = netdev; + nic->pdev = pdev; + + /* MAP VF's configuration registers */ + nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!nic->reg_base) { + dev_err(dev, "Cannot map config register space, aborting\n"); + err = -ENOMEM; + goto err_free_netdev; + } + + err = nicvf_set_qset_resources(nic); + if (err) + goto err_free_netdev; + + qs = nic->qs; + + err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt); + if (err) + goto err_free_netdev; + + /* Check if PF is alive and get MAC address for this VF */ + err = nicvf_register_misc_interrupt(nic); + if (err) + goto err_free_netdev; + + netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_GRO); + netdev->hw_features = netdev->features; + + netdev->netdev_ops = &nicvf_netdev_ops; + + INIT_WORK(&nic->reset_task, nicvf_reset_task); + + err = register_netdev(netdev); + if (err) { + dev_err(dev, "Failed to register netdevice\n"); + goto err_unregister_interrupts; + } + + nic->msg_enable = debug; + + nicvf_set_ethtool_ops(netdev); + + return 0; + +err_unregister_interrupts: + nicvf_unregister_interrupts(nic); +err_free_netdev: + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + return err; +} + +static void nicvf_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nicvf *nic = netdev_priv(netdev); + + unregister_netdev(netdev); + nicvf_unregister_interrupts(nic); + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver nicvf_driver = { + .name = DRV_NAME, + .id_table = nicvf_id_table, + .probe = nicvf_probe, + .remove = nicvf_remove, +}; + +static int __init nicvf_init_module(void) +{ + pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); + + return pci_register_driver(&nicvf_driver); +} + +static void __exit nicvf_cleanup_module(void) +{ + pci_unregister_driver(&nicvf_driver); +} + +module_init(nicvf_init_module); +module_exit(nicvf_cleanup_module); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c new file mode 100644 index 000000000000..d69d228d11a0 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -0,0 +1,1545 @@ +/* + * Copyright (C) 2015 Cavium, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/ip.h> +#include <linux/etherdevice.h> +#include <net/ip.h> +#include <net/tso.h> + +#include "nic_reg.h" +#include "nic.h" +#include "q_struct.h" +#include "nicvf_queues.h" + +struct rbuf_info { + struct page *page; + void *data; + u64 offset; +}; + +#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES)) + +/* Poll a register for a specific value */ +static int nicvf_poll_reg(struct nicvf *nic, int qidx, + u64 reg, int bit_pos, int bits, int val) +{ + u64 bit_mask; + u64 reg_val; + int timeout = 10; + + bit_mask = (1ULL << bits) - 1; + bit_mask = (bit_mask << bit_pos); + + while (timeout) { + reg_val = nicvf_queue_reg_read(nic, reg, qidx); + if (((reg_val & bit_mask) >> bit_pos) == val) + return 0; + usleep_range(1000, 2000); + timeout--; + } + netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg); + return 1; +} + +/* Allocate memory for a queue's descriptors */ +static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, + int q_len, int desc_size, int align_bytes) +{ + dmem->q_len = q_len; + dmem->size = (desc_size * q_len) + align_bytes; + /* Save address, need it while freeing */ + dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, + &dmem->dma, GFP_KERNEL); + if (!dmem->unalign_base) + return -ENOMEM; + + /* Align memory address for 'align_bytes' */ + dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes); + dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma); + return 0; +} + +/* Free queue's descriptor memory */ +static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) +{ + if (!dmem) + return; + + dma_free_coherent(&nic->pdev->dev, dmem->size, + dmem->unalign_base, dmem->dma); + dmem->unalign_base = NULL; + dmem->base = NULL; +} + +/* Allocate buffer for packet reception + * HW returns memory address where packet is DMA'ed but not a pointer + * into RBDR ring, so save buffer address at the start of fragment and + * align the start address to a cache aligned address + */ +static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, + u32 buf_len, u64 **rbuf) +{ + u64 data; + struct rbuf_info *rinfo; + int order = get_order(buf_len); + + /* Check if request can be accommodated in previously allocated page */ + if (nic->rb_page) { + if ((nic->rb_page_offset + buf_len + buf_len) > + (PAGE_SIZE << order)) { + nic->rb_page = NULL; + } else { + nic->rb_page_offset += buf_len; + get_page(nic->rb_page); + } + } + + /* Allocate a new page */ + if (!nic->rb_page) { + nic->rb_page = alloc_pages(gfp | __GFP_COMP, order); + if (!nic->rb_page) { + netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n"); + return -ENOMEM; + } + nic->rb_page_offset = 0; + } + + data = (u64)page_address(nic->rb_page) + nic->rb_page_offset; + + /* Align buffer addr to cache line i.e. 128 bytes */ + rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data)); + /* Save page address for reference count update */ + rinfo->page = nic->rb_page; + /* Store start address for later retrieval */ + rinfo->data = (void *)data; + /* Store alignment offset */ + rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data); + + data += rinfo->offset; + + /* Give next aligned address to hw for DMA */ + *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES); + return 0; +} + +/* Retrieve actual buffer start address and build skb for received packet
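+ * (HW returns only the DMA'ed buffer address in the CQE; the rbuf_info
+ * saved just before that address by nicvf_alloc_rcv_buffer() is what
+ * lets us recover the page pointer and the original buffer start.)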
*/ +static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic, + u64 rb_ptr, int len) +{ + struct sk_buff *skb; + struct rbuf_info *rinfo; + + rb_ptr = (u64)phys_to_virt(rb_ptr); + /* Get buffer start address and alignment offset */ + rinfo = GET_RBUF_INFO(rb_ptr); + + /* Now build an skb to give to stack */ + skb = build_skb(rinfo->data, RCV_FRAG_LEN); + if (!skb) { + put_page(rinfo->page); + return NULL; + } + + /* Set correct skb->data */ + skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES); + + prefetch((void *)rb_ptr); + return skb; +} + +/* Allocate RBDR ring and populate receive buffers */ +static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, + int ring_len, int buf_size) +{ + int idx; + u64 *rbuf; + struct rbdr_entry_t *desc; + int err; + + err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len, + sizeof(struct rbdr_entry_t), + NICVF_RCV_BUF_ALIGN_BYTES); + if (err) + return err; + + rbdr->desc = rbdr->dmem.base; + /* Buffer size has to be in multiples of 128 bytes */ + rbdr->dma_size = buf_size; + rbdr->enable = true; + rbdr->thresh = RBDR_THRESH; + + nic->rb_page = NULL; + for (idx = 0; idx < ring_len; idx++) { + err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN, + &rbuf); + if (err) + return err; + + desc = GET_RBDR_DESC(rbdr, idx); + desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; + } + return 0; +} + +/* Free RBDR ring and its receive buffers */ +static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) +{ + int head, tail; + u64 buf_addr; + struct rbdr_entry_t *desc; + struct rbuf_info *rinfo; + + if (!rbdr) + return; + + rbdr->enable = false; + if (!rbdr->dmem.base) + return; + + head = rbdr->head; + tail = rbdr->tail; + + /* Free SKBs */ + while (head != tail) { + desc = GET_RBDR_DESC(rbdr, head); + buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; + rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); + put_page(rinfo->page); + head++; + head &= (rbdr->dmem.q_len - 1); + } + /* Free SKB of tail desc */ + desc = GET_RBDR_DESC(rbdr, tail); + buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; + rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); + put_page(rinfo->page); + + /* Free RBDR ring */ + nicvf_free_q_desc_mem(nic, &rbdr->dmem); +} + +/* Refill receive buffer descriptors with new buffers. 
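+ * Runs from both tasklet (atomic) and delayed-work (sleepable) context,
+ * so the caller passes GFP_ATOMIC or GFP_KERNEL accordingly.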
+ */ +static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp) +{ + struct queue_set *qs = nic->qs; + int rbdr_idx = qs->rbdr_cnt; + int tail, qcount; + int refill_rb_cnt; + struct rbdr *rbdr; + struct rbdr_entry_t *desc; + u64 *rbuf; + int new_rb = 0; + +refill: + if (!rbdr_idx) + return; + rbdr_idx--; + rbdr = &qs->rbdr[rbdr_idx]; + /* Check if it's enabled */ + if (!rbdr->enable) + goto next_rbdr; + + /* Get the number of descriptors to be refilled */ + qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx); + qcount &= 0x7FFFF; + /* Doorbell can be rung with a max of ring size minus 1 */ + if (qcount >= (qs->rbdr_len - 1)) + goto next_rbdr; + else + refill_rb_cnt = qs->rbdr_len - qcount - 1; + + /* Start filling descs from tail */ + tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3; + while (refill_rb_cnt) { + tail++; + tail &= (rbdr->dmem.q_len - 1); + + if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf)) + break; + + desc = GET_RBDR_DESC(rbdr, tail); + desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; + refill_rb_cnt--; + new_rb++; + } + + /* make sure all memory stores are done before ringing doorbell */ + smp_wmb(); + + /* Check if buffer allocation failed */ + if (refill_rb_cnt) + nic->rb_alloc_fail = true; + else + nic->rb_alloc_fail = false; + + /* Notify HW */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, + rbdr_idx, new_rb); +next_rbdr: + /* Re-enable RBDR interrupts only if buffer allocation was successful */ + if (!nic->rb_alloc_fail && rbdr->enable) + nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); + + if (rbdr_idx) + goto refill; +} + +/* Alloc rcv buffers in non-atomic mode for a better chance of success */ +void nicvf_rbdr_work(struct work_struct *work) +{ + struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work); + + nicvf_refill_rbdr(nic, GFP_KERNEL); + if (nic->rb_alloc_fail) + schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); + else + nic->rb_work_scheduled = false; +} + +/* In Softirq context, alloc rcv buffers in atomic mode */ +void nicvf_rbdr_task(unsigned long data) +{ + struct nicvf *nic = (struct nicvf *)data; + + nicvf_refill_rbdr(nic, GFP_ATOMIC); + if (nic->rb_alloc_fail) { + nic->rb_work_scheduled = true; + schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); + } +} + +/* Initialize completion queue */ +static int nicvf_init_cmp_queue(struct nicvf *nic, + struct cmp_queue *cq, int q_len) +{ + int err; + + err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE, + NICVF_CQ_BASE_ALIGN_BYTES); + if (err) + return err; + + cq->desc = cq->dmem.base; + cq->thresh = CMP_QUEUE_CQE_THRESH; + nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; + + return 0; +} + +static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) +{ + if (!cq) + return; + if (!cq->dmem.base) + return; + + nicvf_free_q_desc_mem(nic, &cq->dmem); +} + +/* Initialize transmit queue */ +static int nicvf_init_snd_queue(struct nicvf *nic, + struct snd_queue *sq, int q_len) +{ + int err; + + err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, + NICVF_SQ_BASE_ALIGN_BYTES); + if (err) + return err; + + sq->desc = sq->dmem.base; + sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL); + if (!sq->skbuff) + return -ENOMEM; + sq->head = 0; + sq->tail = 0; + atomic_set(&sq->free_cnt, q_len - 1); + sq->thresh = SND_QUEUE_THRESH; + + /* Preallocate memory for TSO segment's header */ + sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev, + q_len * TSO_HEADER_SIZE, +
&sq->tso_hdrs_phys, GFP_KERNEL); + if (!sq->tso_hdrs) + return -ENOMEM; + + return 0; +} + +static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) +{ + if (!sq) + return; + if (!sq->dmem.base) + return; + + if (sq->tso_hdrs) + dma_free_coherent(&nic->pdev->dev, + sq->dmem.q_len * TSO_HEADER_SIZE, + sq->tso_hdrs, sq->tso_hdrs_phys); + + kfree(sq->skbuff); + nicvf_free_q_desc_mem(nic, &sq->dmem); +} + +static void nicvf_reclaim_snd_queue(struct nicvf *nic, + struct queue_set *qs, int qidx) +{ + /* Disable send queue */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0); + /* Check if SQ is stopped */ + if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01)) + return; + /* Reset send queue */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); +} + +static void nicvf_reclaim_rcv_queue(struct nicvf *nic, + struct queue_set *qs, int qidx) +{ + union nic_mbx mbx = {}; + + /* Make sure all packets in the pipeline are written back into mem */ + mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC; + nicvf_send_msg_to_pf(nic, &mbx); +} + +static void nicvf_reclaim_cmp_queue(struct nicvf *nic, + struct queue_set *qs, int qidx) +{ + /* Disable timer threshold (doesn't get reset upon CQ reset) */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0); + /* Disable completion queue */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0); + /* Reset completion queue */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); +} + +static void nicvf_reclaim_rbdr(struct nicvf *nic, + struct rbdr *rbdr, int qidx) +{ + u64 tmp, fifo_state; + int timeout = 10; + + /* Save head and tail pointers for freeing up buffers */ + rbdr->head = nicvf_queue_reg_read(nic, + NIC_QSET_RBDR_0_1_HEAD, + qidx) >> 3; + rbdr->tail = nicvf_queue_reg_read(nic, + NIC_QSET_RBDR_0_1_TAIL, + qidx) >> 3; + + /* If RBDR FIFO is in 'FAIL' state then do a reset first + * before reclaiming.
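+ * ('FAIL' reads back as 0x3 in bits 63:62 of STATUS0; resetting clears
+ * the FIFO so the disable/drain sequence below can make progress.)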
+ */ + fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx); + if (((fifo_state >> 62) & 0x03) == 0x3) + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, + qidx, NICVF_RBDR_RESET); + + /* Disable RBDR */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0); + if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) + return; + while (1) { + tmp = nicvf_queue_reg_read(nic, + NIC_QSET_RBDR_0_1_PREFETCH_STATUS, + qidx); + if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF)) + break; + usleep_range(1000, 2000); + timeout--; + if (!timeout) { + netdev_err(nic->netdev, + "Failed polling on prefetch status\n"); + return; + } + } + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, + qidx, NICVF_RBDR_RESET); + + if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02)) + return; + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00); + if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) + return; +} + +/* Configures receive queue */ +static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, + int qidx, bool enable) +{ + union nic_mbx mbx = {}; + struct rcv_queue *rq; + struct rq_cfg rq_cfg; + + rq = &qs->rq[qidx]; + rq->enable = enable; + + /* Disable receive queue */ + nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0); + + if (!rq->enable) { + nicvf_reclaim_rcv_queue(nic, qs, qidx); + return; + } + + rq->cq_qs = qs->vnic_id; + rq->cq_idx = qidx; + rq->start_rbdr_qs = qs->vnic_id; + rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1; + rq->cont_rbdr_qs = qs->vnic_id; + rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1; + /* all writes of RBDR data to be loaded into L2 Cache as well*/ + rq->caching = 1; + + /* Send a mailbox msg to PF to config RQ */ + mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; + mbx.rq.qs_num = qs->vnic_id; + mbx.rq.rq_num = qidx; + mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | + (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | + (rq->cont_qs_rbdr_idx << 8) | + (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); + nicvf_send_msg_to_pf(nic, &mbx); + + mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG; + mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0); + nicvf_send_msg_to_pf(nic, &mbx); + + /* RQ drop config + * Enable CQ drop to reserve sufficient CQEs for all tx packets + */ + mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG; + mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); + nicvf_send_msg_to_pf(nic, &mbx); + + nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00); + + /* Enable Receive queue */ + rq_cfg.ena = 1; + rq_cfg.tcp_ena = 0; + nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg); +} + +/* Configures completion queue */ +void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, + int qidx, bool enable) +{ + struct cmp_queue *cq; + struct cq_cfg cq_cfg; + + cq = &qs->cq[qidx]; + cq->enable = enable; + + if (!cq->enable) { + nicvf_reclaim_cmp_queue(nic, qs, qidx); + return; + } + + /* Reset completion queue */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); + + if (!cq->enable) + return; + + spin_lock_init(&cq->lock); + /* Set completion queue base address */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, + qidx, (u64)(cq->dmem.phys_base)); + + /* Enable Completion queue */ + cq_cfg.ena = 1; + cq_cfg.reset = 0; + cq_cfg.caching = 0; + cq_cfg.qsize = CMP_QSIZE; + cq_cfg.avg_con = 0; + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg); + + /* Set threshold value for interrupt generation */ + nicvf_queue_reg_write(nic, 
NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, + qidx, nic->cq_coalesce_usecs); +} + +/* Configures transmit queue */ +static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, + int qidx, bool enable) +{ + union nic_mbx mbx = {}; + struct snd_queue *sq; + struct sq_cfg sq_cfg; + + sq = &qs->sq[qidx]; + sq->enable = enable; + + if (!sq->enable) { + nicvf_reclaim_snd_queue(nic, qs, qidx); + return; + } + + /* Reset send queue */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); + + sq->cq_qs = qs->vnic_id; + sq->cq_idx = qidx; + + /* Send a mailbox msg to PF to config SQ */ + mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; + mbx.sq.qs_num = qs->vnic_id; + mbx.sq.sq_num = qidx; + mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; + nicvf_send_msg_to_pf(nic, &mbx); + + /* Set queue base address */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, + qidx, (u64)(sq->dmem.phys_base)); + + /* Enable send queue & set queue size */ + sq_cfg.ena = 1; + sq_cfg.reset = 0; + sq_cfg.ldwb = 0; + sq_cfg.qsize = SND_QSIZE; + sq_cfg.tstmp_bgx_intf = 0; + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg); + + /* Set threshold value for interrupt generation */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); + + /* Set queue:cpu affinity for better load distribution */ + if (cpu_online(qidx)) { + cpumask_set_cpu(qidx, &sq->affinity_mask); + netif_set_xps_queue(nic->netdev, + &sq->affinity_mask, qidx); + } +} + +/* Configures receive buffer descriptor ring */ +static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, + int qidx, bool enable) +{ + struct rbdr *rbdr; + struct rbdr_cfg rbdr_cfg; + + rbdr = &qs->rbdr[qidx]; + nicvf_reclaim_rbdr(nic, rbdr, qidx); + if (!enable) + return; + + /* Set descriptor base address */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, + qidx, (u64)(rbdr->dmem.phys_base)); + + /* Enable RBDR & set queue size */ + /* Buffer size should be in multiples of 128 bytes */ + rbdr_cfg.ena = 1; + rbdr_cfg.reset = 0; + rbdr_cfg.ldwb = 0; + rbdr_cfg.qsize = RBDR_SIZE; + rbdr_cfg.avg_con = 0; + rbdr_cfg.lines = rbdr->dma_size / 128; + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, + qidx, *(u64 *)&rbdr_cfg); + + /* Notify HW */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, + qidx, qs->rbdr_len - 1); + + /* Set threshold value for interrupt generation */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, + qidx, rbdr->thresh - 1); +} + +/* Requests PF to assign and enable Qset */ +void nicvf_qset_config(struct nicvf *nic, bool enable) +{ + union nic_mbx mbx = {}; + struct queue_set *qs = nic->qs; + struct qs_cfg *qs_cfg; + + if (!qs) { + netdev_warn(nic->netdev, + "Qset is still not allocated, don't init queues\n"); + return; + } + + qs->enable = enable; + qs->vnic_id = nic->vf_id; + + /* Send a mailbox msg to PF to config Qset */ + mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; + mbx.qs.num = qs->vnic_id; + + mbx.qs.cfg = 0; + qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; + if (qs->enable) { + qs_cfg->ena = 1; +#ifdef __BIG_ENDIAN + qs_cfg->be = 1; +#endif + qs_cfg->vnic = qs->vnic_id; + } + nicvf_send_msg_to_pf(nic, &mbx); +} + +static void nicvf_free_resources(struct nicvf *nic) +{ + int qidx; + struct queue_set *qs = nic->qs; + + /* Free receive buffer descriptor ring */ + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) + nicvf_free_rbdr(nic, &qs->rbdr[qidx]); + + /* Free completion queue */ + for (qidx = 0; qidx < qs->cq_cnt; qidx++) + nicvf_free_cmp_queue(nic, 
&qs->cq[qidx]); + + /* Free send queue */ + for (qidx = 0; qidx < qs->sq_cnt; qidx++) + nicvf_free_snd_queue(nic, &qs->sq[qidx]); +} + +static int nicvf_alloc_resources(struct nicvf *nic) +{ + int qidx; + struct queue_set *qs = nic->qs; + + /* Alloc receive buffer descriptor ring */ + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { + if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len, + DMA_BUFFER_LEN)) + goto alloc_fail; + } + + /* Alloc send queue */ + for (qidx = 0; qidx < qs->sq_cnt; qidx++) { + if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len)) + goto alloc_fail; + } + + /* Alloc completion queue */ + for (qidx = 0; qidx < qs->cq_cnt; qidx++) { + if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len)) + goto alloc_fail; + } + + return 0; +alloc_fail: + nicvf_free_resources(nic); + return -ENOMEM; +} + +int nicvf_set_qset_resources(struct nicvf *nic) +{ + struct queue_set *qs; + + qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL); + if (!qs) + return -ENOMEM; + nic->qs = qs; + + /* Set count of each queue */ + qs->rbdr_cnt = RBDR_CNT; + qs->rq_cnt = RCV_QUEUE_CNT; + qs->sq_cnt = SND_QUEUE_CNT; + qs->cq_cnt = CMP_QUEUE_CNT; + + /* Set queue lengths */ + qs->rbdr_len = RCV_BUF_COUNT; + qs->sq_len = SND_QUEUE_LEN; + qs->cq_len = CMP_QUEUE_LEN; + return 0; +} + +int nicvf_config_data_transfer(struct nicvf *nic, bool enable) +{ + bool disable = false; + struct queue_set *qs = nic->qs; + int qidx; + + if (!qs) + return 0; + + if (enable) { + if (nicvf_alloc_resources(nic)) + return -ENOMEM; + + for (qidx = 0; qidx < qs->sq_cnt; qidx++) + nicvf_snd_queue_config(nic, qs, qidx, enable); + for (qidx = 0; qidx < qs->cq_cnt; qidx++) + nicvf_cmp_queue_config(nic, qs, qidx, enable); + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) + nicvf_rbdr_config(nic, qs, qidx, enable); + for (qidx = 0; qidx < qs->rq_cnt; qidx++) + nicvf_rcv_queue_config(nic, qs, qidx, enable); + } else { + for (qidx = 0; qidx < qs->rq_cnt; qidx++) + nicvf_rcv_queue_config(nic, qs, qidx, disable); + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) + nicvf_rbdr_config(nic, qs, qidx, disable); + for (qidx = 0; qidx < qs->sq_cnt; qidx++) + nicvf_snd_queue_config(nic, qs, qidx, disable); + for (qidx = 0; qidx < qs->cq_cnt; qidx++) + nicvf_cmp_queue_config(nic, qs, qidx, disable); + + nicvf_free_resources(nic); + } + + return 0; +} + +/* Get a free desc from SQ + * returns the descriptor number + */ +static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) +{ + int qentry; + + qentry = sq->tail; + atomic_sub(desc_cnt, &sq->free_cnt); + sq->tail += desc_cnt; + sq->tail &= (sq->dmem.q_len - 1); + + return qentry; +} + +/* Free descriptor back to SQ for future use */ +void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) +{ + atomic_add(desc_cnt, &sq->free_cnt); + sq->head += desc_cnt; + sq->head &= (sq->dmem.q_len - 1); +} + +static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry) +{ + qentry++; + qentry &= (sq->dmem.q_len - 1); + return qentry; +} + +void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx) +{ + u64 sq_cfg; + + sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); + sq_cfg |= NICVF_SQ_EN; + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); + /* Ring doorbell so that H/W restarts processing SQEs */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0); +} + +void nicvf_sq_disable(struct nicvf *nic, int qidx) +{ + u64 sq_cfg; + + sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); + sq_cfg &=
~NICVF_SQ_EN; + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); +} + +void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, + int qidx) +{ + u64 head, tail; + struct sk_buff *skb; + struct nicvf *nic = netdev_priv(netdev); + struct sq_hdr_subdesc *hdr; + + head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4; + tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4; + while (sq->head != head) { + hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); + if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) { + nicvf_put_sq_desc(sq, 1); + continue; + } + skb = (struct sk_buff *)sq->skbuff[sq->head]; + atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); + atomic64_add(hdr->tot_len, + (atomic64_t *)&netdev->stats.tx_bytes); + dev_kfree_skb_any(skb); + nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); + } +} + +/* Calculate the number of SQ subdescriptors needed to transmit all + * segments of this TSO packet. + * Taken from 'Tilera network driver' with a minor modification. + */ +static int nicvf_tso_count_subdescs(struct sk_buff *skb) +{ + struct skb_shared_info *sh = skb_shinfo(skb); + unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + unsigned int data_len = skb->len - sh_len; + unsigned int p_len = sh->gso_size; + long f_id = -1; /* id of the current fragment */ + long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ + long f_used = 0; /* bytes used from the current fragment */ + long n; /* size of the current piece of payload */ + int num_edescs = 0; + int segment; + + for (segment = 0; segment < sh->gso_segs; segment++) { + unsigned int p_used = 0; + + /* One edesc for header and for each piece of the payload. */ + for (num_edescs++; p_used < p_len; num_edescs++) { + /* Advance as needed. */ + while (f_used >= f_size) { + f_id++; + f_size = skb_frag_size(&sh->frags[f_id]); + f_used = 0; + } + + /* Use bytes from the current fragment. */ + n = p_len - p_used; + if (n > f_size - f_used) + n = f_size - f_used; + f_used += n; + p_used += n; + } + + /* The last segment may be less than gso_size. */ + data_len -= p_len; + if (data_len < p_len) + p_len = data_len; + } + + /* '+ gso_segs' for SQ_HDR_SUBDESCs, one for each segment */ + return num_edescs + sh->gso_segs; +} + +/* Get the number of SQ descriptors needed to xmit this skb */ +static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) +{ + int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT; + + if (skb_shinfo(skb)->gso_size) { + subdesc_cnt = nicvf_tso_count_subdescs(skb); + return subdesc_cnt; + } + + if (skb_shinfo(skb)->nr_frags) + subdesc_cnt += skb_shinfo(skb)->nr_frags; + + return subdesc_cnt; +} + +/* Add SQ HEADER subdescriptor. + * First subdescriptor for every send descriptor.
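+ * Carries the packet's total length, the count of subdescriptors that
+ * follow it and the checksum-offload fields derived from the skb.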
+ */ +static inline void +nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, + int subdesc_cnt, struct sk_buff *skb, int len) +{ + int proto; + struct sq_hdr_subdesc *hdr; + + hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); + sq->skbuff[qentry] = (u64)skb; + + memset(hdr, 0, SND_QUEUE_DESC_SIZE); + hdr->subdesc_type = SQ_DESC_TYPE_HEADER; + /* Enable notification via CQE after processing SQE */ + hdr->post_cqe = 1; + /* Number of subdescriptors following this */ + hdr->subdesc_cnt = subdesc_cnt; + hdr->tot_len = len; + + /* Offload checksum calculation to HW */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->protocol != htons(ETH_P_IP)) + return; + + hdr->csum_l3 = 1; /* Enable IP csum calculation */ + hdr->l3_offset = skb_network_offset(skb); + hdr->l4_offset = skb_transport_offset(skb); + + proto = ip_hdr(skb)->protocol; + switch (proto) { + case IPPROTO_TCP: + hdr->csum_l4 = SEND_L4_CSUM_TCP; + break; + case IPPROTO_UDP: + hdr->csum_l4 = SEND_L4_CSUM_UDP; + break; + case IPPROTO_SCTP: + hdr->csum_l4 = SEND_L4_CSUM_SCTP; + break; + } + } +} + +/* SQ GATHER subdescriptor + * Must follow HDR descriptor + */ +static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, + int size, u64 data) +{ + struct sq_gather_subdesc *gather; + + qentry &= (sq->dmem.q_len - 1); + gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry); + + memset(gather, 0, SND_QUEUE_DESC_SIZE); + gather->subdesc_type = SQ_DESC_TYPE_GATHER; + gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB; + gather->size = size; + gather->addr = data; +} + +/* Segment a TSO packet into 'gso_size' segments and append + * them to SQ for transfer + */ +static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, + int qentry, struct sk_buff *skb) +{ + struct tso_t tso; + int seg_subdescs = 0, desc_cnt = 0; + int seg_len, total_len, data_left; + int hdr_qentry = qentry; + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + + tso_start(skb, &tso); + total_len = skb->len - hdr_len; + while (total_len > 0) { + char *hdr; + + /* Save Qentry for adding HDR_SUBDESC at the end */ + hdr_qentry = qentry; + + data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); + total_len -= data_left; + + /* Add segment's header */ + qentry = nicvf_get_nxt_sqentry(sq, qentry); + hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE; + tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); + nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len, + sq->tso_hdrs_phys + + qentry * TSO_HEADER_SIZE); + /* HDR_SUBDESC + GATHER */ + seg_subdescs = 2; + seg_len = hdr_len; + + /* Add segment's payload fragments */ + while (data_left > 0) { + int size; + + size = min_t(int, tso.size, data_left); + + qentry = nicvf_get_nxt_sqentry(sq, qentry); + nicvf_sq_add_gather_subdesc(sq, qentry, size, + virt_to_phys(tso.data)); + seg_subdescs++; + seg_len += size; + + data_left -= size; + tso_build_data(skb, &tso, size); + } + nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, + seg_subdescs - 1, skb, seg_len); + sq->skbuff[hdr_qentry] = 0; + qentry = nicvf_get_nxt_sqentry(sq, qentry); + + desc_cnt += seg_subdescs; + } + /* Save SKB in the last segment for freeing */ + sq->skbuff[hdr_qentry] = (u64)skb; + + /* make sure all memory stores are done before ringing doorbell */ + smp_wmb(); + + /* Inform HW to xmit all TSO segments */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, + skb_get_queue_mapping(skb), desc_cnt); + return 1; +} + +/* Append an skb to a SQ for packet transfer.
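+ * Returns 1 on success, 0 when there aren't enough free SQ descriptors
+ * to hold all of the skb's subdescriptors.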
*/ +int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) +{ + int i, size; + int subdesc_cnt; + int sq_num, qentry; + struct queue_set *qs = nic->qs; + struct snd_queue *sq; + + sq_num = skb_get_queue_mapping(skb); + sq = &qs->sq[sq_num]; + + subdesc_cnt = nicvf_sq_subdesc_required(nic, skb); + if (subdesc_cnt > atomic_read(&sq->free_cnt)) + goto append_fail; + + qentry = nicvf_get_sq_desc(sq, subdesc_cnt); + + /* Check if it's a TSO packet */ + if (skb_shinfo(skb)->gso_size) + return nicvf_sq_append_tso(nic, sq, qentry, skb); + + /* Add SQ header subdesc */ + nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len); + + /* Add SQ gather subdescs */ + qentry = nicvf_get_nxt_sqentry(sq, qentry); + size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; + nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data)); + + /* Check for scattered buffer */ + if (!skb_is_nonlinear(skb)) + goto doorbell; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[i]; + + qentry = nicvf_get_nxt_sqentry(sq, qentry); + size = skb_frag_size(frag); + nicvf_sq_add_gather_subdesc(sq, qentry, size, + virt_to_phys( + skb_frag_address(frag))); + } + +doorbell: + /* make sure all memory stores are done before ringing doorbell */ + smp_wmb(); + + /* Inform HW to xmit new packet */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, + sq_num, subdesc_cnt); + return 1; + +append_fail: + netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n"); + return 0; +} + +static inline unsigned frag_num(unsigned i) +{ +#ifdef __BIG_ENDIAN + return (i & ~3) + 3 - (i & 3); +#else + return i; +#endif +} + +/* Returns SKB for a received packet */ +struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) +{ + int frag; + int payload_len = 0; + struct sk_buff *skb = NULL; + struct sk_buff *skb_frag = NULL; + struct sk_buff *prev_frag = NULL; + u16 *rb_lens = NULL; + u64 *rb_ptrs = NULL; + + rb_lens = (void *)cqe_rx + (3 * sizeof(u64)); + rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64)); + + netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n", + __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); + + for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { + payload_len = rb_lens[frag_num(frag)]; + if (!frag) { + /* First fragment */ + skb = nicvf_rb_ptr_to_skb(nic, + *rb_ptrs - cqe_rx->align_pad, + payload_len); + if (!skb) + return NULL; + skb_reserve(skb, cqe_rx->align_pad); + skb_put(skb, payload_len); + } else { + /* Add fragments */ + skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs, + payload_len); + if (!skb_frag) { + dev_kfree_skb(skb); + return NULL; + } + + if (!skb_shinfo(skb)->frag_list) + skb_shinfo(skb)->frag_list = skb_frag; + else + prev_frag->next = skb_frag; + + prev_frag = skb_frag; + skb->len += payload_len; + skb->data_len += payload_len; + skb_frag->len = payload_len; + } + /* Next buffer pointer */ + rb_ptrs++; + } + return skb; +} + +/* Enable interrupt */ +void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) +{ + u64 reg_val; + + reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); + + switch (int_type) { + case NICVF_INTR_CQ: + reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); + break; + case NICVF_INTR_SQ: + reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); + break; + case NICVF_INTR_RBDR: + reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); + break; + case NICVF_INTR_PKT_DROP: + reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); + break; + case
NICVF_INTR_TCP_TIMER: + reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); + break; + case NICVF_INTR_MBOX: + reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); + break; + case NICVF_INTR_QS_ERR: + reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); + break; + default: + netdev_err(nic->netdev, + "Failed to enable interrupt: unknown type\n"); + break; + } + + nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val); +} + +/* Disable interrupt */ +void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) +{ + u64 reg_val = 0; + + switch (int_type) { + case NICVF_INTR_CQ: + reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); + break; + case NICVF_INTR_SQ: + reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); + break; + case NICVF_INTR_RBDR: + reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); + break; + case NICVF_INTR_PKT_DROP: + reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); + break; + case NICVF_INTR_TCP_TIMER: + reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); + break; + case NICVF_INTR_MBOX: + reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); + break; + case NICVF_INTR_QS_ERR: + reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); + break; + default: + netdev_err(nic->netdev, + "Failed to disable interrupt: unknown type\n"); + break; + } + + nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val); +} + +/* Clear interrupt */ +void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) +{ + u64 reg_val = 0; + + switch (int_type) { + case NICVF_INTR_CQ: + reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); + break; + case NICVF_INTR_SQ: + reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); + break; + case NICVF_INTR_RBDR: + reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); + break; + case NICVF_INTR_PKT_DROP: + reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT); + break; + case NICVF_INTR_TCP_TIMER: + reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); + break; + case NICVF_INTR_MBOX: + reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT); + break; + case NICVF_INTR_QS_ERR: + reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); + break; + default: + netdev_err(nic->netdev, + "Failed to clear interrupt: unknown type\n"); + break; + } + + nicvf_reg_write(nic, NIC_VF_INT, reg_val); +} + +/* Check if interrupt is enabled */ +int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) +{ + u64 reg_val; + u64 mask = 0xff; + + reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); + + switch (int_type) { + case NICVF_INTR_CQ: + mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); + break; + case NICVF_INTR_SQ: + mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); + break; + case NICVF_INTR_RBDR: + mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); + break; + case NICVF_INTR_PKT_DROP: + mask = NICVF_INTR_PKT_DROP_MASK; + break; + case NICVF_INTR_TCP_TIMER: + mask = NICVF_INTR_TCP_TIMER_MASK; + break; + case NICVF_INTR_MBOX: + mask = NICVF_INTR_MBOX_MASK; + break; + case NICVF_INTR_QS_ERR: + mask = NICVF_INTR_QS_ERR_MASK; + break; + default: + netdev_err(nic->netdev, + "Failed to check interrupt enable: unknown type\n"); + break; + } + + return (reg_val & mask); +} + +void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) +{ + struct rcv_queue *rq; + +#define GET_RQ_STATS(reg) \ + nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\ + (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) + + rq = &nic->qs->rq[rq_idx]; + rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); + rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); +} + +void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) +{ + struct snd_queue *sq; + +#define GET_SQ_STATS(reg) \ + nicvf_reg_read(nic, 
NIC_QSET_SQ_0_7_STAT_0_1 |\ + (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) + + sq = &nic->qs->sq[sq_idx]; + sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); + sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); +} + +/* Check for errors in the receive completion queue entry */ +int nicvf_check_cqe_rx_errs(struct nicvf *nic, + struct cmp_queue *cq, struct cqe_rx_t *cqe_rx) +{ + struct cmp_queue_stats *stats = &cq->stats; + + if (!cqe_rx->err_level && !cqe_rx->err_opcode) { + stats->rx.errop.good++; + return 0; + } + + if (netif_msg_rx_err(nic)) + netdev_err(nic->netdev, + "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n", + nic->netdev->name, + cqe_rx->err_level, cqe_rx->err_opcode); + + switch (cqe_rx->err_level) { + case CQ_ERRLVL_MAC: + stats->rx.errlvl.mac_errs++; + break; + case CQ_ERRLVL_L2: + stats->rx.errlvl.l2_errs++; + break; + case CQ_ERRLVL_L3: + stats->rx.errlvl.l3_errs++; + break; + case CQ_ERRLVL_L4: + stats->rx.errlvl.l4_errs++; + break; + } + + switch (cqe_rx->err_opcode) { + case CQ_RX_ERROP_RE_PARTIAL: + stats->rx.errop.partial_pkts++; + break; + case CQ_RX_ERROP_RE_JABBER: + stats->rx.errop.jabber_errs++; + break; + case CQ_RX_ERROP_RE_FCS: + stats->rx.errop.fcs_errs++; + break; + case CQ_RX_ERROP_RE_TERMINATE: + stats->rx.errop.terminate_errs++; + break; + case CQ_RX_ERROP_RE_RX_CTL: + stats->rx.errop.bgx_rx_errs++; + break; + case CQ_RX_ERROP_PREL2_ERR: + stats->rx.errop.prel2_errs++; + break; + case CQ_RX_ERROP_L2_FRAGMENT: + stats->rx.errop.l2_frags++; + break; + case CQ_RX_ERROP_L2_OVERRUN: + stats->rx.errop.l2_overruns++; + break; + case CQ_RX_ERROP_L2_PFCS: + stats->rx.errop.l2_pfcs++; + break; + case CQ_RX_ERROP_L2_PUNY: + stats->rx.errop.l2_puny++; + break; + case CQ_RX_ERROP_L2_MAL: + stats->rx.errop.l2_hdr_malformed++; + break; + case CQ_RX_ERROP_L2_OVERSIZE: + stats->rx.errop.l2_oversize++; + break; + case CQ_RX_ERROP_L2_UNDERSIZE: + stats->rx.errop.l2_undersize++; + break; + case CQ_RX_ERROP_L2_LENMISM: + stats->rx.errop.l2_len_mismatch++; + break; + case CQ_RX_ERROP_L2_PCLP: + stats->rx.errop.l2_pclp++; + break; + case CQ_RX_ERROP_IP_NOT: + stats->rx.errop.non_ip++; + break; + case CQ_RX_ERROP_IP_CSUM_ERR: + stats->rx.errop.ip_csum_err++; + break; + case CQ_RX_ERROP_IP_MAL: + stats->rx.errop.ip_hdr_malformed++; + break; + case CQ_RX_ERROP_IP_MALD: + stats->rx.errop.ip_payload_malformed++; + break; + case CQ_RX_ERROP_IP_HOP: + stats->rx.errop.ip_hop_errs++; + break; + case CQ_RX_ERROP_L3_ICRC: + stats->rx.errop.l3_icrc_errs++; + break; + case CQ_RX_ERROP_L3_PCLP: + stats->rx.errop.l3_pclp++; + break; + case CQ_RX_ERROP_L4_MAL: + stats->rx.errop.l4_malformed++; + break; + case CQ_RX_ERROP_L4_CHK: + stats->rx.errop.l4_csum_errs++; + break; + case CQ_RX_ERROP_UDP_LEN: + stats->rx.errop.udp_len_err++; + break; + case CQ_RX_ERROP_L4_PORT: + stats->rx.errop.bad_l4_port++; + break; + case CQ_RX_ERROP_TCP_FLAG: + stats->rx.errop.bad_tcp_flag++; + break; + case CQ_RX_ERROP_TCP_OFFSET: + stats->rx.errop.tcp_offset_errs++; + break; + case CQ_RX_ERROP_L4_PCLP: + stats->rx.errop.l4_pclp++; + break; + case CQ_RX_ERROP_RBDR_TRUNC: + stats->rx.errop.pkt_truncated++; + break; + } + + return 1; +} + +/* Check for errors in the send completion queue entry */ +int nicvf_check_cqe_tx_errs(struct nicvf *nic, + struct cmp_queue *cq, struct cqe_send_t *cqe_tx) +{ + struct cmp_queue_stats *stats = &cq->stats; + + switch (cqe_tx->send_status) { + case CQ_TX_ERROP_GOOD: + stats->tx.good++; + return 0; + case CQ_TX_ERROP_DESC_FAULT: + stats->tx.desc_fault++; + break; + case CQ_TX_ERROP_HDR_CONS_ERR: +
stats->tx.hdr_cons_err++; + break; + case CQ_TX_ERROP_SUBDC_ERR: + stats->tx.subdesc_err++; + break; + case CQ_TX_ERROP_IMM_SIZE_OFLOW: + stats->tx.imm_size_oflow++; + break; + case CQ_TX_ERROP_DATA_SEQUENCE_ERR: + stats->tx.data_seq_err++; + break; + case CQ_TX_ERROP_MEM_SEQUENCE_ERR: + stats->tx.mem_seq_err++; + break; + case CQ_TX_ERROP_LOCK_VIOL: + stats->tx.lock_viol++; + break; + case CQ_TX_ERROP_DATA_FAULT: + stats->tx.data_fault++; + break; + case CQ_TX_ERROP_TSTMP_CONFLICT: + stats->tx.tstmp_conflict++; + break; + case CQ_TX_ERROP_TSTMP_TIMEOUT: + stats->tx.tstmp_timeout++; + break; + case CQ_TX_ERROP_MEM_FAULT: + stats->tx.mem_fault++; + break; + case CQ_TX_ERROP_CK_OVERLAP: + stats->tx.csum_overlap++; + break; + case CQ_TX_ERROP_CK_OFLOW: + stats->tx.csum_overflow++; + break; + } + + return 1; +} diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h new file mode 100644 index 000000000000..8341bdf755d1 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -0,0 +1,381 @@ +/* + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#ifndef NICVF_QUEUES_H +#define NICVF_QUEUES_H + +#include <linux/netdevice.h> +#include "q_struct.h" + +#define MAX_QUEUE_SET 128 +#define MAX_RCV_QUEUES_PER_QS 8 +#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2 +#define MAX_SND_QUEUES_PER_QS 8 +#define MAX_CMP_QUEUES_PER_QS 8 + +/* VF's queue interrupt ranges */ +#define NICVF_INTR_ID_CQ 0 +#define NICVF_INTR_ID_SQ 8 +#define NICVF_INTR_ID_RBDR 16 +#define NICVF_INTR_ID_MISC 18 +#define NICVF_INTR_ID_QS_ERR 19 + +#define for_each_cq_irq(irq) \ + for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++) +#define for_each_sq_irq(irq) \ + for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++) +#define for_each_rbdr_irq(irq) \ + for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++) + +#define RBDR_SIZE0 0ULL /* 8K entries */ +#define RBDR_SIZE1 1ULL /* 16K entries */ +#define RBDR_SIZE2 2ULL /* 32K entries */ +#define RBDR_SIZE3 3ULL /* 64K entries */ +#define RBDR_SIZE4 4ULL /* 128K entries */ +#define RBDR_SIZE5 5ULL /* 256K entries */ +#define RBDR_SIZE6 6ULL /* 512K entries */ + +#define SND_QUEUE_SIZE0 0ULL /* 1K entries */ +#define SND_QUEUE_SIZE1 1ULL /* 2K entries */ +#define SND_QUEUE_SIZE2 2ULL /* 4K entries */ +#define SND_QUEUE_SIZE3 3ULL /* 8K entries */ +#define SND_QUEUE_SIZE4 4ULL /* 16K entries */ +#define SND_QUEUE_SIZE5 5ULL /* 32K entries */ +#define SND_QUEUE_SIZE6 6ULL /* 64K entries */ + +#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */ +#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */ +#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */ +#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */ +#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */ +#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */ +#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */ + +/* Default queue count per QS, its lengths and threshold values */ +#define RBDR_CNT 1 +#define RCV_QUEUE_CNT 8 +#define SND_QUEUE_CNT 8 +#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */ + +#define SND_QSIZE SND_QUEUE_SIZE4 +#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) +#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) +#define SND_QUEUE_THRESH 2ULL +#define MIN_SQ_DESC_PER_PKT_XMIT 2 +/* Timestamping is not enabled, otherwise this would be 2 */ +#define MAX_CQE_PER_PKT_XMIT 1 + +#define CMP_QSIZE
CMP_QUEUE_SIZE4 +#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) +#define CMP_QUEUE_CQE_THRESH 0 +#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ + +#define RBDR_SIZE RBDR_SIZE0 +#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) +#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13)) +#define RBDR_THRESH (RCV_BUF_COUNT / 2) +#define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */ +#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ + (NICVF_RCV_BUF_ALIGN_BYTES * 2)) +#define RCV_DATA_OFFSET NICVF_RCV_BUF_ALIGN_BYTES + +#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ + MAX_CQE_PER_PKT_XMIT) +#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256) + +/* Descriptor size in bytes */ +#define SND_QUEUE_DESC_SIZE 16 +#define CMP_QUEUE_DESC_SIZE 512 + +/* Buffer / descriptor alignments */ +#define NICVF_RCV_BUF_ALIGN 7 +#define NICVF_RCV_BUF_ALIGN_BYTES (1ULL << NICVF_RCV_BUF_ALIGN) +#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */ +#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */ + +#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES) +#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\ + (NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES) +#define NICVF_RCV_BUF_ALIGN_LEN(X)\ + (NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X) + +/* Queue enable/disable */ +#define NICVF_SQ_EN BIT_ULL(19) + +/* Queue reset */ +#define NICVF_CQ_RESET BIT_ULL(41) +#define NICVF_SQ_RESET BIT_ULL(17) +#define NICVF_RBDR_RESET BIT_ULL(43) + +enum CQ_RX_ERRLVL_E { + CQ_ERRLVL_MAC, + CQ_ERRLVL_L2, + CQ_ERRLVL_L3, + CQ_ERRLVL_L4, +}; + +enum CQ_RX_ERROP_E { + CQ_RX_ERROP_RE_NONE = 0x0, + CQ_RX_ERROP_RE_PARTIAL = 0x1, + CQ_RX_ERROP_RE_JABBER = 0x2, + CQ_RX_ERROP_RE_FCS = 0x7, + CQ_RX_ERROP_RE_TERMINATE = 0x9, + CQ_RX_ERROP_RE_RX_CTL = 0xb, + CQ_RX_ERROP_PREL2_ERR = 0x1f, + CQ_RX_ERROP_L2_FRAGMENT = 0x20, + CQ_RX_ERROP_L2_OVERRUN = 0x21, + CQ_RX_ERROP_L2_PFCS = 0x22, + CQ_RX_ERROP_L2_PUNY = 0x23, + CQ_RX_ERROP_L2_MAL = 0x24, + CQ_RX_ERROP_L2_OVERSIZE = 0x25, + CQ_RX_ERROP_L2_UNDERSIZE = 0x26, + CQ_RX_ERROP_L2_LENMISM = 0x27, + CQ_RX_ERROP_L2_PCLP = 0x28, + CQ_RX_ERROP_IP_NOT = 0x41, + CQ_RX_ERROP_IP_CSUM_ERR = 0x42, + CQ_RX_ERROP_IP_MAL = 0x43, + CQ_RX_ERROP_IP_MALD = 0x44, + CQ_RX_ERROP_IP_HOP = 0x45, + CQ_RX_ERROP_L3_ICRC = 0x46, + CQ_RX_ERROP_L3_PCLP = 0x47, + CQ_RX_ERROP_L4_MAL = 0x61, + CQ_RX_ERROP_L4_CHK = 0x62, + CQ_RX_ERROP_UDP_LEN = 0x63, + CQ_RX_ERROP_L4_PORT = 0x64, + CQ_RX_ERROP_TCP_FLAG = 0x65, + CQ_RX_ERROP_TCP_OFFSET = 0x66, + CQ_RX_ERROP_L4_PCLP = 0x67, + CQ_RX_ERROP_RBDR_TRUNC = 0x70, +}; + +enum CQ_TX_ERROP_E { + CQ_TX_ERROP_GOOD = 0x0, + CQ_TX_ERROP_DESC_FAULT = 0x10, + CQ_TX_ERROP_HDR_CONS_ERR = 0x11, + CQ_TX_ERROP_SUBDC_ERR = 0x12, + CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80, + CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81, + CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82, + CQ_TX_ERROP_LOCK_VIOL = 0x83, + CQ_TX_ERROP_DATA_FAULT = 0x84, + CQ_TX_ERROP_TSTMP_CONFLICT = 0x85, + CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86, + CQ_TX_ERROP_MEM_FAULT = 0x87, + CQ_TX_ERROP_CK_OVERLAP = 0x88, + CQ_TX_ERROP_CK_OFLOW = 0x89, + CQ_TX_ERROP_ENUM_LAST = 0x8a, +}; + +struct cmp_queue_stats { + struct rx_stats { + struct { + u64 mac_errs; + u64 l2_errs; + u64 l3_errs; + u64 l4_errs; + } errlvl; + struct { + u64 good; + u64 partial_pkts; + u64 jabber_errs; + u64 fcs_errs; + u64 terminate_errs; + u64 bgx_rx_errs; + u64 prel2_errs; + u64 l2_frags; + u64 l2_overruns; + u64 l2_pfcs; + u64 l2_puny; + u64 l2_hdr_malformed; + u64 l2_oversize; + u64 
l2_undersize; + u64 l2_len_mismatch; + u64 l2_pclp; + u64 non_ip; + u64 ip_csum_err; + u64 ip_hdr_malformed; + u64 ip_payload_malformed; + u64 ip_hop_errs; + u64 l3_icrc_errs; + u64 l3_pclp; + u64 l4_malformed; + u64 l4_csum_errs; + u64 udp_len_err; + u64 bad_l4_port; + u64 bad_tcp_flag; + u64 tcp_offset_errs; + u64 l4_pclp; + u64 pkt_truncated; + } errop; + } rx; + struct tx_stats { + u64 good; + u64 desc_fault; + u64 hdr_cons_err; + u64 subdesc_err; + u64 imm_size_oflow; + u64 data_seq_err; + u64 mem_seq_err; + u64 lock_viol; + u64 data_fault; + u64 tstmp_conflict; + u64 tstmp_timeout; + u64 mem_fault; + u64 csum_overlap; + u64 csum_overflow; + } tx; +} ____cacheline_aligned_in_smp; + +enum RQ_SQ_STATS { + RQ_SQ_STATS_OCTS, + RQ_SQ_STATS_PKTS, +}; + +struct rx_tx_queue_stats { + u64 bytes; + u64 pkts; +} ____cacheline_aligned_in_smp; + +struct q_desc_mem { + dma_addr_t dma; + u64 size; + u16 q_len; + dma_addr_t phys_base; + void *base; + void *unalign_base; +}; + +struct rbdr { + bool enable; + u32 dma_size; + u32 frag_len; + u32 thresh; /* Threshold level for interrupt */ + void *desc; + u32 head; + u32 tail; + struct q_desc_mem dmem; +} ____cacheline_aligned_in_smp; + +struct rcv_queue { + bool enable; + struct rbdr *rbdr_start; + struct rbdr *rbdr_cont; + bool en_tcp_reassembly; + u8 cq_qs; /* CQ's QS to which this RQ is assigned */ + u8 cq_idx; /* CQ index (0 to 7) in the QS */ + u8 cont_rbdr_qs; /* Continue buffer ptrs - QS num */ + u8 cont_qs_rbdr_idx; /* RBDR idx in the cont QS */ + u8 start_rbdr_qs; /* First buffer ptrs - QS num */ + u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */ + u8 caching; + struct rx_tx_queue_stats stats; +} ____cacheline_aligned_in_smp; + +struct cmp_queue { + bool enable; + u16 thresh; + spinlock_t lock; /* lock to serialize processing CQEs */ + void *desc; + struct q_desc_mem dmem; + struct cmp_queue_stats stats; +} ____cacheline_aligned_in_smp; + +struct snd_queue { + bool enable; + u8 cq_qs; /* CQ's QS to which this SQ is pointing */ + u8 cq_idx; /* CQ index (0 to 7) in the above QS */ + u16 thresh; + atomic_t free_cnt; + u32 head; + u32 tail; + u64 *skbuff; + void *desc; + +#define TSO_HEADER_SIZE 128 + /* For TSO segment's header */ + char *tso_hdrs; + dma_addr_t tso_hdrs_phys; + + cpumask_t affinity_mask; + struct q_desc_mem dmem; + struct rx_tx_queue_stats stats; +} ____cacheline_aligned_in_smp; + +struct queue_set { + bool enable; + bool be_en; + u8 vnic_id; + u8 rq_cnt; + u8 cq_cnt; + u64 cq_len; + u8 sq_cnt; + u64 sq_len; + u8 rbdr_cnt; + u64 rbdr_len; + struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS]; + struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS]; + struct snd_queue sq[MAX_SND_QUEUES_PER_QS]; + struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS]; +} ____cacheline_aligned_in_smp; + +#define GET_RBDR_DESC(RING, idx)\ + (&(((struct rbdr_entry_t *)((RING)->desc))[idx])) +#define GET_SQ_DESC(RING, idx)\ + (&(((struct sq_hdr_subdesc *)((RING)->desc))[idx])) +#define GET_CQ_DESC(RING, idx)\ + (&(((union cq_desc_t *)((RING)->desc))[idx])) + +/* CQ status bits */ +#define CQ_WR_FULL BIT(26) +#define CQ_WR_DISABLE BIT(25) +#define CQ_WR_FAULT BIT(24) +#define CQ_CQE_COUNT (0xFFFF << 0) + +#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT) + +int nicvf_set_qset_resources(struct nicvf *nic); +int nicvf_config_data_transfer(struct nicvf *nic, bool enable); +void nicvf_qset_config(struct nicvf *nic, bool enable); +void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, + int qidx, bool enable); + +void nicvf_sq_enable(struct nicvf *nic, 
struct snd_queue *sq, int qidx); +void nicvf_sq_disable(struct nicvf *nic, int qidx); +void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt); +void nicvf_sq_free_used_descs(struct net_device *netdev, + struct snd_queue *sq, int qidx); +int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb); + +struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx); +void nicvf_rbdr_task(unsigned long data); +void nicvf_rbdr_work(struct work_struct *work); + +void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx); +void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx); +void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx); +int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx); + +/* Register access APIs */ +void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val); +u64 nicvf_reg_read(struct nicvf *nic, u64 offset); +void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val); +u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset); +void nicvf_queue_reg_write(struct nicvf *nic, u64 offset, + u64 qidx, u64 val); +u64 nicvf_queue_reg_read(struct nicvf *nic, + u64 offset, u64 qidx); + +/* Stats */ +void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); +void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); +int nicvf_check_cqe_rx_errs(struct nicvf *nic, + struct cmp_queue *cq, struct cqe_rx_t *cqe_rx); +int nicvf_check_cqe_tx_errs(struct nicvf *nic, + struct cmp_queue *cq, struct cqe_send_t *cqe_tx); +#endif /* NICVF_QUEUES_H */ diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h new file mode 100644 index 000000000000..3c1de97b1add --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/q_struct.h @@ -0,0 +1,701 @@ +/* + * This file contains HW queue descriptor formats, config register + * structures etc + * + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#ifndef Q_STRUCT_H +#define Q_STRUCT_H + +/* Load transaction types for reading segment bytes specified by + * NIC_SEND_GATHER_S[LD_TYPE]. 
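+ * (the driver uses LDWB for all gather loads; see
+ * nicvf_sq_add_gather_subdesc() in nicvf_queues.c)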
+ */ +enum nic_send_ld_type_e { + NIC_SEND_LD_TYPE_E_LDD = 0x0, + NIC_SEND_LD_TYPE_E_LDT = 0x1, + NIC_SEND_LD_TYPE_E_LDWB = 0x2, + NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3, +}; + +enum ether_type_algorithm { + ETYPE_ALG_NONE = 0x0, + ETYPE_ALG_SKIP = 0x1, + ETYPE_ALG_ENDPARSE = 0x2, + ETYPE_ALG_VLAN = 0x3, + ETYPE_ALG_VLAN_STRIP = 0x4, +}; + +enum layer3_type { + L3TYPE_NONE = 0x00, + L3TYPE_GRH = 0x01, + L3TYPE_IPV4 = 0x04, + L3TYPE_IPV4_OPTIONS = 0x05, + L3TYPE_IPV6 = 0x06, + L3TYPE_IPV6_OPTIONS = 0x07, + L3TYPE_ET_STOP = 0x0D, + L3TYPE_OTHER = 0x0E, +}; + +enum layer4_type { + L4TYPE_NONE = 0x00, + L4TYPE_IPSEC_ESP = 0x01, + L4TYPE_IPFRAG = 0x02, + L4TYPE_IPCOMP = 0x03, + L4TYPE_TCP = 0x04, + L4TYPE_UDP = 0x05, + L4TYPE_SCTP = 0x06, + L4TYPE_GRE = 0x07, + L4TYPE_ROCE_BTH = 0x08, + L4TYPE_OTHER = 0x0E, +}; + +/* CPI and RSSI configuration */ +enum cpi_algorithm_type { + CPI_ALG_NONE = 0x0, + CPI_ALG_VLAN = 0x1, + CPI_ALG_VLAN16 = 0x2, + CPI_ALG_DIFF = 0x3, +}; + +enum rss_algorithm_type { + RSS_ALG_NONE = 0x00, + RSS_ALG_PORT = 0x01, + RSS_ALG_IP = 0x02, + RSS_ALG_TCP_IP = 0x03, + RSS_ALG_UDP_IP = 0x04, + RSS_ALG_SCTP_IP = 0x05, + RSS_ALG_GRE_IP = 0x06, + RSS_ALG_ROCE = 0x07, +}; + +enum rss_hash_cfg { + RSS_HASH_L2ETC = 0x00, + RSS_HASH_IP = 0x01, + RSS_HASH_TCP = 0x02, + RSS_HASH_TCP_SYN_DIS = 0x03, + RSS_HASH_UDP = 0x04, + RSS_HASH_L4ETC = 0x05, + RSS_HASH_ROCE = 0x06, + RSS_L3_BIDI = 0x07, + RSS_L4_BIDI = 0x08, +}; + +/* Completion queue entry types */ +enum cqe_type { + CQE_TYPE_INVALID = 0x0, + CQE_TYPE_RX = 0x2, + CQE_TYPE_RX_SPLIT = 0x3, + CQE_TYPE_RX_TCP = 0x4, + CQE_TYPE_SEND = 0x8, + CQE_TYPE_SEND_PTP = 0x9, +}; + +enum cqe_rx_tcp_status { + CQE_RX_STATUS_VALID_TCP_CNXT = 0x00, + CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F, +}; + +enum cqe_send_status { + CQE_SEND_STATUS_GOOD = 0x00, + CQE_SEND_STATUS_DESC_FAULT = 0x01, + CQE_SEND_STATUS_HDR_CONS_ERR = 0x11, + CQE_SEND_STATUS_SUBDESC_ERR = 0x12, + CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80, + CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81, + CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82, + CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83, + CQE_SEND_STATUS_LOCK_VIOL = 0x84, + CQE_SEND_STATUS_LOCK_UFLOW = 0x85, + CQE_SEND_STATUS_DATA_FAULT = 0x86, + CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87, + CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88, + CQE_SEND_STATUS_MEM_FAULT = 0x89, + CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A, + CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B, +}; + +enum cqe_rx_tcp_end_reason { + CQE_RX_TCP_END_FIN_FLAG_DET = 0, + CQE_RX_TCP_END_INVALID_FLAG = 1, + CQE_RX_TCP_END_TIMEOUT = 2, + CQE_RX_TCP_END_OUT_OF_SEQ = 3, + CQE_RX_TCP_END_PKT_ERR = 4, + CQE_RX_TCP_END_QS_DISABLED = 0x0F, +}; + +/* Packet protocol level error enumeration */ +enum cqe_rx_err_level { + CQE_RX_ERRLVL_RE = 0x0, + CQE_RX_ERRLVL_L2 = 0x1, + CQE_RX_ERRLVL_L3 = 0x2, + CQE_RX_ERRLVL_L4 = 0x3, +}; + +/* Packet protocol level error type enumeration */ +enum cqe_rx_err_opcode { + CQE_RX_ERR_RE_NONE = 0x0, + CQE_RX_ERR_RE_PARTIAL = 0x1, + CQE_RX_ERR_RE_JABBER = 0x2, + CQE_RX_ERR_RE_FCS = 0x7, + CQE_RX_ERR_RE_TERMINATE = 0x9, + CQE_RX_ERR_RE_RX_CTL = 0xb, + CQE_RX_ERR_PREL2_ERR = 0x1f, + CQE_RX_ERR_L2_FRAGMENT = 0x20, + CQE_RX_ERR_L2_OVERRUN = 0x21, + CQE_RX_ERR_L2_PFCS = 0x22, + CQE_RX_ERR_L2_PUNY = 0x23, + CQE_RX_ERR_L2_MAL = 0x24, + CQE_RX_ERR_L2_OVERSIZE = 0x25, + CQE_RX_ERR_L2_UNDERSIZE = 0x26, + CQE_RX_ERR_L2_LENMISM = 0x27, + CQE_RX_ERR_L2_PCLP = 0x28, + CQE_RX_ERR_IP_NOT = 0x41, + CQE_RX_ERR_IP_CHK = 0x42, + CQE_RX_ERR_IP_MAL = 0x43, + CQE_RX_ERR_IP_MALD = 0x44, + CQE_RX_ERR_IP_HOP = 0x45, + CQE_RX_ERR_L3_ICRC = 0x46, + 
CQE_RX_ERR_L3_PCLP = 0x47, + CQE_RX_ERR_L4_MAL = 0x61, + CQE_RX_ERR_L4_CHK = 0x62, + CQE_RX_ERR_UDP_LEN = 0x63, + CQE_RX_ERR_L4_PORT = 0x64, + CQE_RX_ERR_TCP_FLAG = 0x65, + CQE_RX_ERR_TCP_OFFSET = 0x66, + CQE_RX_ERR_L4_PCLP = 0x67, + CQE_RX_ERR_RBDR_TRUNC = 0x70, +}; + +struct cqe_rx_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cqe_type:4; /* W0 */ + u64 stdn_fault:1; + u64 rsvd0:1; + u64 rq_qs:7; + u64 rq_idx:3; + u64 rsvd1:12; + u64 rss_alg:4; + u64 rsvd2:4; + u64 rb_cnt:4; + u64 vlan_found:1; + u64 vlan_stripped:1; + u64 vlan2_found:1; + u64 vlan2_stripped:1; + u64 l4_type:4; + u64 l3_type:4; + u64 l2_present:1; + u64 err_level:3; + u64 err_opcode:8; + + u64 pkt_len:16; /* W1 */ + u64 l2_ptr:8; + u64 l3_ptr:8; + u64 l4_ptr:8; + u64 cq_pkt_len:8; + u64 align_pad:3; + u64 rsvd3:1; + u64 chan:12; + + u64 rss_tag:32; /* W2 */ + u64 vlan_tci:16; + u64 vlan_ptr:8; + u64 vlan2_ptr:8; + + u64 rb3_sz:16; /* W3 */ + u64 rb2_sz:16; + u64 rb1_sz:16; + u64 rb0_sz:16; + + u64 rb7_sz:16; /* W4 */ + u64 rb6_sz:16; + u64 rb5_sz:16; + u64 rb4_sz:16; + + u64 rb11_sz:16; /* W5 */ + u64 rb10_sz:16; + u64 rb9_sz:16; + u64 rb8_sz:16; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 err_opcode:8; + u64 err_level:3; + u64 l2_present:1; + u64 l3_type:4; + u64 l4_type:4; + u64 vlan2_stripped:1; + u64 vlan2_found:1; + u64 vlan_stripped:1; + u64 vlan_found:1; + u64 rb_cnt:4; + u64 rsvd2:4; + u64 rss_alg:4; + u64 rsvd1:12; + u64 rq_idx:3; + u64 rq_qs:7; + u64 rsvd0:1; + u64 stdn_fault:1; + u64 cqe_type:4; /* W0 */ + u64 chan:12; + u64 rsvd3:1; + u64 align_pad:3; + u64 cq_pkt_len:8; + u64 l4_ptr:8; + u64 l3_ptr:8; + u64 l2_ptr:8; + u64 pkt_len:16; /* W1 */ + u64 vlan2_ptr:8; + u64 vlan_ptr:8; + u64 vlan_tci:16; + u64 rss_tag:32; /* W2 */ + u64 rb0_sz:16; + u64 rb1_sz:16; + u64 rb2_sz:16; + u64 rb3_sz:16; /* W3 */ + u64 rb4_sz:16; + u64 rb5_sz:16; + u64 rb6_sz:16; + u64 rb7_sz:16; /* W4 */ + u64 rb8_sz:16; + u64 rb9_sz:16; + u64 rb10_sz:16; + u64 rb11_sz:16; /* W5 */ +#endif + u64 rb0_ptr:64; + u64 rb1_ptr:64; + u64 rb2_ptr:64; + u64 rb3_ptr:64; + u64 rb4_ptr:64; + u64 rb5_ptr:64; + u64 rb6_ptr:64; + u64 rb7_ptr:64; + u64 rb8_ptr:64; + u64 rb9_ptr:64; + u64 rb10_ptr:64; + u64 rb11_ptr:64; +}; + +struct cqe_rx_tcp_err_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cqe_type:4; /* W0 */ + u64 rsvd0:60; + + u64 rsvd1:4; /* W1 */ + u64 partial_first:1; + u64 rsvd2:27; + u64 rbdr_bytes:8; + u64 rsvd3:24; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 rsvd0:60; + u64 cqe_type:4; + + u64 rsvd3:24; + u64 rbdr_bytes:8; + u64 rsvd2:27; + u64 partial_first:1; + u64 rsvd1:4; +#endif +}; + +struct cqe_rx_tcp_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cqe_type:4; /* W0 */ + u64 rsvd0:52; + u64 cq_tcp_status:8; + + u64 rsvd1:32; /* W1 */ + u64 tcp_cntx_bytes:8; + u64 rsvd2:8; + u64 tcp_err_bytes:16; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 cq_tcp_status:8; + u64 rsvd0:52; + u64 cqe_type:4; /* W0 */ + + u64 tcp_err_bytes:16; + u64 rsvd2:8; + u64 tcp_cntx_bytes:8; + u64 rsvd1:32; /* W1 */ +#endif +}; + +struct cqe_send_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cqe_type:4; /* W0 */ + u64 rsvd0:4; + u64 sqe_ptr:16; + u64 rsvd1:4; + u64 rsvd2:10; + u64 sq_qs:7; + u64 sq_idx:3; + u64 rsvd3:8; + u64 send_status:8; + + u64 ptp_timestamp:64; /* W1 */ +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 send_status:8; + u64 rsvd3:8; + u64 sq_idx:3; + u64 sq_qs:7; + u64 rsvd2:10; + u64 rsvd1:4; + u64 sqe_ptr:16; + u64 rsvd0:4; + u64 cqe_type:4; /* W0 */ + + u64 ptp_timestamp:64; /* W1 */ +#endif +}; + +union cq_desc_t { + u64 u[64]; + 
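/* A CQE is 512 bytes: view it either as raw 64-bit words or through + * one of the type-specific headers below; the cqe_type field in the + * first word selects which view is valid. + */ + 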
struct cqe_send_t snd_hdr; + struct cqe_rx_t rx_hdr; + struct cqe_rx_tcp_t rx_tcp_hdr; + struct cqe_rx_tcp_err_t rx_tcp_err_hdr; +}; + +struct rbdr_entry_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd0:15; + u64 buf_addr:42; + u64 cache_align:7; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 cache_align:7; + u64 buf_addr:42; + u64 rsvd0:15; +#endif +}; + +/* TCP reassembly context */ +struct rbe_tcp_cnxt_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 tcp_pkt_cnt:12; + u64 rsvd1:4; + u64 align_hdr_bytes:4; + u64 align_ptr_bytes:4; + u64 ptr_bytes:16; + u64 rsvd2:24; + u64 cqe_type:4; + u64 rsvd0:54; + u64 tcp_end_reason:2; + u64 tcp_status:4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 tcp_status:4; + u64 tcp_end_reason:2; + u64 rsvd0:54; + u64 cqe_type:4; + u64 rsvd2:24; + u64 ptr_bytes:16; + u64 align_ptr_bytes:4; + u64 align_hdr_bytes:4; + u64 rsvd1:4; + u64 tcp_pkt_cnt:12; +#endif +}; + +/* Always Big endian */ +struct rx_hdr_t { + u64 opaque:32; + u64 rss_flow:8; + u64 skip_length:6; + u64 disable_rss:1; + u64 disable_tcp_reassembly:1; + u64 nodrop:1; + u64 dest_alg:2; + u64 rsvd0:2; + u64 dest_rq:11; +}; + +enum send_l4_csum_type { + SEND_L4_CSUM_DISABLE = 0x00, + SEND_L4_CSUM_UDP = 0x01, + SEND_L4_CSUM_TCP = 0x02, + SEND_L4_CSUM_SCTP = 0x03, +}; + +enum send_crc_alg { + SEND_CRCALG_CRC32 = 0x00, + SEND_CRCALG_CRC32C = 0x01, + SEND_CRCALG_ICRC = 0x02, +}; + +enum send_load_type { + SEND_LD_TYPE_LDD = 0x00, + SEND_LD_TYPE_LDT = 0x01, + SEND_LD_TYPE_LDWB = 0x02, +}; + +enum send_mem_alg_type { + SEND_MEMALG_SET = 0x00, + SEND_MEMALG_ADD = 0x08, + SEND_MEMALG_SUB = 0x09, + SEND_MEMALG_ADDLEN = 0x0A, + SEND_MEMALG_SUBLEN = 0x0B, +}; + +enum send_mem_dsz_type { + SEND_MEMDSZ_B64 = 0x00, + SEND_MEMDSZ_B32 = 0x01, + SEND_MEMDSZ_B8 = 0x03, +}; + +enum sq_subdesc_type { + SQ_DESC_TYPE_INVALID = 0x00, + SQ_DESC_TYPE_HEADER = 0x01, + SQ_DESC_TYPE_CRC = 0x02, + SQ_DESC_TYPE_IMMEDIATE = 0x03, + SQ_DESC_TYPE_GATHER = 0x04, + SQ_DESC_TYPE_MEMORY = 0x05, +}; + +struct sq_crc_subdesc { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd1:32; + u64 crc_ival:32; + u64 subdesc_type:4; + u64 crc_alg:2; + u64 rsvd0:10; + u64 crc_insert_pos:16; + u64 hdr_start:16; + u64 crc_len:16; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 crc_len:16; + u64 hdr_start:16; + u64 crc_insert_pos:16; + u64 rsvd0:10; + u64 crc_alg:2; + u64 subdesc_type:4; + u64 crc_ival:32; + u64 rsvd1:32; +#endif +}; + +struct sq_gather_subdesc { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 subdesc_type:4; /* W0 */ + u64 ld_type:2; + u64 rsvd0:42; + u64 size:16; + + u64 rsvd1:15; /* W1 */ + u64 addr:49; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 size:16; + u64 rsvd0:42; + u64 ld_type:2; + u64 subdesc_type:4; /* W0 */ + + u64 addr:49; + u64 rsvd1:15; /* W1 */ +#endif +}; + +/* SQ immediate subdescriptor */ +struct sq_imm_subdesc { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 subdesc_type:4; /* W0 */ + u64 rsvd0:46; + u64 len:14; + + u64 data:64; /* W1 */ +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 len:14; + u64 rsvd0:46; + u64 subdesc_type:4; /* W0 */ + + u64 data:64; /* W1 */ +#endif +}; + +struct sq_mem_subdesc { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 subdesc_type:4; /* W0 */ + u64 mem_alg:4; + u64 mem_dsz:2; + u64 wmem:1; + u64 rsvd0:21; + u64 offset:32; + + u64 rsvd1:15; /* W1 */ + u64 addr:49; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 offset:32; + u64 rsvd0:21; + u64 wmem:1; + u64 mem_dsz:2; + u64 mem_alg:4; + u64 subdesc_type:4; /* W0 */ + + u64 addr:49; + u64 rsvd1:15; /* W1 */ +#endif +}; + +struct sq_hdr_subdesc { +#if 
defined(__BIG_ENDIAN_BITFIELD) + u64 subdesc_type:4; + u64 tso:1; + u64 post_cqe:1; /* Post CQE on no error also */ + u64 dont_send:1; + u64 tstmp:1; + u64 subdesc_cnt:8; + u64 csum_l4:2; + u64 csum_l3:1; + u64 rsvd0:5; + u64 l4_offset:8; + u64 l3_offset:8; + u64 rsvd1:4; + u64 tot_len:20; /* W0 */ + + u64 tso_sdc_cont:8; + u64 tso_sdc_first:8; + u64 tso_l4_offset:8; + u64 tso_flags_last:12; + u64 tso_flags_first:12; + u64 rsvd2:2; + u64 tso_max_paysize:14; /* W1 */ +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 tot_len:20; + u64 rsvd1:4; + u64 l3_offset:8; + u64 l4_offset:8; + u64 rsvd0:5; + u64 csum_l3:1; + u64 csum_l4:2; + u64 subdesc_cnt:8; + u64 tstmp:1; + u64 dont_send:1; + u64 post_cqe:1; /* Post CQE on no error also */ + u64 tso:1; + u64 subdesc_type:4; /* W0 */ + + u64 tso_max_paysize:14; + u64 rsvd2:2; + u64 tso_flags_first:12; + u64 tso_flags_last:12; + u64 tso_l4_offset:8; + u64 tso_sdc_first:8; + u64 tso_sdc_cont:8; /* W1 */ +#endif +}; + +/* Queue config register formats */ +struct rq_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_2_63:62; + u64 ena:1; + u64 tcp_ena:1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 tcp_ena:1; + u64 ena:1; + u64 reserved_2_63:62; +#endif +}; + +struct cq_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_43_63:21; + u64 ena:1; + u64 reset:1; + u64 caching:1; + u64 reserved_35_39:5; + u64 qsize:3; + u64 reserved_25_31:7; + u64 avg_con:9; + u64 reserved_0_15:16; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 reserved_0_15:16; + u64 avg_con:9; + u64 reserved_25_31:7; + u64 qsize:3; + u64 reserved_35_39:5; + u64 caching:1; + u64 reset:1; + u64 ena:1; + u64 reserved_43_63:21; +#endif +}; + +struct sq_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_20_63:44; + u64 ena:1; + u64 reserved_18_18:1; + u64 reset:1; + u64 ldwb:1; + u64 reserved_11_15:5; + u64 qsize:3; + u64 reserved_3_7:5; + u64 tstmp_bgx_intf:3; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 tstmp_bgx_intf:3; + u64 reserved_3_7:5; + u64 qsize:3; + u64 reserved_11_15:5; + u64 ldwb:1; + u64 reset:1; + u64 reserved_18_18:1; + u64 ena:1; + u64 reserved_20_63:44; +#endif +}; + +struct rbdr_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_45_63:19; + u64 ena:1; + u64 reset:1; + u64 ldwb:1; + u64 reserved_36_41:6; + u64 qsize:4; + u64 reserved_25_31:7; + u64 avg_con:9; + u64 reserved_12_15:4; + u64 lines:12; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 lines:12; + u64 reserved_12_15:4; + u64 avg_con:9; + u64 reserved_25_31:7; + u64 qsize:4; + u64 reserved_36_41:6; + u64 ldwb:1; + u64 reset:1; + u64 ena: 1; + u64 reserved_45_63:19; +#endif +}; + +struct qs_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_32_63:32; + u64 ena:1; + u64 reserved_27_30:4; + u64 sq_ins_ena:1; + u64 sq_ins_pos:6; + u64 lock_ena:1; + u64 lock_viol_cqe_ena:1; + u64 send_tstmp_ena:1; + u64 be:1; + u64 reserved_7_15:9; + u64 vnic:7; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 vnic:7; + u64 reserved_7_15:9; + u64 be:1; + u64 send_tstmp_ena:1; + u64 lock_viol_cqe_ena:1; + u64 lock_ena:1; + u64 sq_ins_pos:6; + u64 sq_ins_ena:1; + u64 reserved_27_30:4; + u64 ena:1; + u64 reserved_32_63:32; +#endif +}; + +#endif /* Q_STRUCT_H */ diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c new file mode 100644 index 000000000000..633ec05dfe05 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -0,0 +1,966 @@ +/* + * Copyright (C) 2015 Cavium, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/phy.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> + +#include "nic_reg.h" +#include "nic.h" +#include "thunder_bgx.h" + +#define DRV_NAME "thunder-BGX" +#define DRV_VERSION "1.0" + +struct lmac { + struct bgx *bgx; + int dmac; + unsigned char mac[ETH_ALEN]; + bool link_up; + int lmacid; /* ID within BGX */ + int lmacid_bd; /* ID on board */ + struct net_device netdev; + struct phy_device *phydev; + unsigned int last_duplex; + unsigned int last_link; + unsigned int last_speed; + bool is_sgmii; + struct delayed_work dwork; + struct workqueue_struct *check_link; +}; + +struct bgx { + u8 bgx_id; + u8 qlm_mode; + struct lmac lmac[MAX_LMAC_PER_BGX]; + int lmac_count; + int lmac_type; + int lane_to_sds; + int use_training; + void __iomem *reg_base; + struct pci_dev *pdev; +}; + +static struct bgx *bgx_vnic[MAX_BGX_THUNDER]; +static int lmac_count; /* Total no of LMACs in system */ + +static int bgx_xaui_check_link(struct lmac *lmac); + +/* Supported devices */ +static const struct pci_device_id bgx_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) }, + { 0, } /* end of table */ +}; + +MODULE_AUTHOR("Cavium Inc"); +MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, bgx_id_table); + +/* The Cavium ThunderX network controller can *only* be found in SoCs + * containing the ThunderX ARM64 CPU implementation. All accesses to the device + * registers on this platform are implicitly strongly ordered with respect + * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use + * with no memory barriers in this driver. The readq()/writeq() functions add + * explicit ordering operations which in this case are redundant, and only + * add overhead. 
+ */ + +/* Register read/write APIs */ +static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset) +{ + void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; + + return readq_relaxed(addr); +} + +static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val) +{ + void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; + + writeq_relaxed(val, addr); +} + +static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val) +{ + void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; + + writeq_relaxed(val | readq_relaxed(addr), addr); +} + +static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero) +{ + int timeout = 100; + u64 reg_val; + + while (timeout) { + reg_val = bgx_reg_read(bgx, lmac, reg); + if (zero && !(reg_val & mask)) + return 0; + if (!zero && (reg_val & mask)) + return 0; + usleep_range(1000, 2000); + timeout--; + } + return 1; +} + +/* Return a bitmap of the BGX blocks present in HW */ +unsigned bgx_get_map(int node) +{ + int i; + unsigned map = 0; + + for (i = 0; i < MAX_BGX_PER_CN88XX; i++) { + if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i]) + map |= (1 << i); + } + + return map; +} +EXPORT_SYMBOL(bgx_get_map); + +/* Return number of LMACs configured for this BGX */ +int bgx_get_lmac_count(int node, int bgx_idx) +{ + struct bgx *bgx; + + bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + if (bgx) + return bgx->lmac_count; + + return 0; +} +EXPORT_SYMBOL(bgx_get_lmac_count); + +/* Returns the current link status of LMAC */ +void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status) +{ + struct bgx_link_status *link = (struct bgx_link_status *)status; + struct bgx *bgx; + struct lmac *lmac; + + bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + if (!bgx) + return; + + lmac = &bgx->lmac[lmacid]; + link->link_up = lmac->link_up; + link->duplex = lmac->last_duplex; + link->speed = lmac->last_speed; +} +EXPORT_SYMBOL(bgx_get_lmac_link_state); + +const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) +{ + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + + if (bgx) + return bgx->lmac[lmacid].mac; + + return NULL; +} +EXPORT_SYMBOL(bgx_get_lmac_mac); + +void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac) +{ + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + + if (!bgx) + return; + + ether_addr_copy(bgx->lmac[lmacid].mac, mac); +} +EXPORT_SYMBOL(bgx_set_lmac_mac); + +static void bgx_sgmii_change_link_state(struct lmac *lmac) +{ + struct bgx *bgx = lmac->bgx; + u64 cmr_cfg; + u64 port_cfg = 0; + u64 misc_ctl = 0; + + cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG); + cmr_cfg &= ~CMR_EN; + bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); + + port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); + misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL); + + if (lmac->link_up) { + misc_ctl &= ~PCS_MISC_CTL_GMX_ENO; + port_cfg &= ~GMI_PORT_CFG_DUPLEX; + port_cfg |= (lmac->last_duplex << 2); + } else { + misc_ctl |= PCS_MISC_CTL_GMX_ENO; + } + + switch (lmac->last_speed) { + case 10: + port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */ + port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */ + port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */ + misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; + misc_ctl |= 50; /* samp_pt */ + bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64); + bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0); + break; + case 100: + port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 
*/ + port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */ + port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */ + misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; + misc_ctl |= 5; /* samp_pt */ + bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64); + bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0); + break; + case 1000: + port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */ + port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */ + port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */ + misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; + misc_ctl |= 1; /* samp_pt */ + bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512); + if (lmac->last_duplex) + bgx_reg_write(bgx, lmac->lmacid, + BGX_GMP_GMI_TXX_BURST, 0); + else + bgx_reg_write(bgx, lmac->lmacid, + BGX_GMP_GMI_TXX_BURST, 8192); + break; + default: + break; + } + bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl); + bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg); + + port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); + + /* re-enable lmac */ + cmr_cfg |= CMR_EN; + bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); +} + +static void bgx_lmac_handler(struct net_device *netdev) +{ + struct lmac *lmac = container_of(netdev, struct lmac, netdev); + struct phy_device *phydev = lmac->phydev; + int link_changed = 0; + + if (!lmac) + return; + + if (!phydev->link && lmac->last_link) + link_changed = -1; + + if (phydev->link && + (lmac->last_duplex != phydev->duplex || + lmac->last_link != phydev->link || + lmac->last_speed != phydev->speed)) { + link_changed = 1; + } + + lmac->last_link = phydev->link; + lmac->last_speed = phydev->speed; + lmac->last_duplex = phydev->duplex; + + if (!link_changed) + return; + + if (link_changed > 0) + lmac->link_up = true; + else + lmac->link_up = false; + + if (lmac->is_sgmii) + bgx_sgmii_change_link_state(lmac); + else + bgx_xaui_check_link(lmac); +} + +u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx) +{ + struct bgx *bgx; + + bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + if (!bgx) + return 0; + + if (idx > 8) + lmac = 0; + return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)); +} +EXPORT_SYMBOL(bgx_get_rx_stats); + +u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx) +{ + struct bgx *bgx; + + bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + if (!bgx) + return 0; + + return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)); +} +EXPORT_SYMBOL(bgx_get_tx_stats); + +static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac) +{ + u64 offset; + + while (bgx->lmac[lmac].dmac > 0) { + offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) + + (lmac * MAX_DMAC_PER_LMAC * sizeof(u64)); + bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0); + bgx->lmac[lmac].dmac--; + } +} + +static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid) +{ + u64 cfg; + + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30); + /* max packet size */ + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE); + + /* Disable frame alignment if using preamble */ + cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND); + if (cfg & 1) + bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0); + + /* Enable lmac */ + bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); + + /* PCS reset */ + bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET); + if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, + PCS_MRX_CTL_RESET, true)) { + dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n"); + return -1; + } + + 
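/* PCS_MRX_CTL_RESET is self-clearing: the bgx_poll_reg() call just + * above (zero == true) has confirmed the bit reads back as zero, so + * the PCS is out of reset and can be reconfigured below. + */ + + 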
/* power down, reset autoneg, autoneg enable */ + cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL); + cfg &= ~PCS_MRX_CTL_PWR_DN; + cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN); + bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); + + if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, + PCS_MRX_STATUS_AN_CPT, false)) { + dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); + return -1; + } + + return 0; +} + +static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type) +{ + u64 cfg; + + /* Reset SPU */ + bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET); + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) { + dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n"); + return -1; + } + + /* Disable LMAC */ + cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); + cfg &= ~CMR_EN; + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); + + bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER); + /* Set interleaved running disparity for RXAUI */ + if (bgx->lmac_type != BGX_MODE_RXAUI) + bgx_reg_modify(bgx, lmacid, + BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); + else + bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, + SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP); + + /* clear all interrupts */ + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT); + bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg); + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT); + bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg); + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); + bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); + + if (bgx->use_training) { + bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00); + bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00); + bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00); + /* training enable */ + bgx_reg_modify(bgx, lmacid, + BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN); + } + + /* Append FCS to each packet */ + bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D); + + /* Disable forward error correction */ + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL); + cfg &= ~SPU_FEC_CTL_FEC_EN; + bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg); + + /* Disable autoneg */ + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL); + cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN); + bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg); + + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV); + if (bgx->lmac_type == BGX_MODE_10G_KR) + cfg |= (1 << 23); + else if (bgx->lmac_type == BGX_MODE_40G_KR) + cfg |= (1 << 24); + else + cfg &= ~((1 << 23) | (1 << 24)); + cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12))); + bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg); + + cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL); + cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN; + bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg); + + /* Enable lmac */ + bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); + + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1); + cfg &= ~SPU_CTL_LOW_POWER; + bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg); + + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL); + cfg &= ~SMU_TX_CTL_UNI_EN; + cfg |= SMU_TX_CTL_DIC_EN; + bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg); + + /* take lmac_count into account */ + bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1)); + /* max packet size */ + bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE); + + return 0; +} + +static int bgx_xaui_check_link(struct lmac *lmac) +{ + struct bgx *bgx = lmac->bgx; + int 
lmacid = lmac->lmacid; + int lmac_type = bgx->lmac_type; + u64 cfg; + + bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); + if (bgx->use_training) { + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); + if (!(cfg & (1ull << 13))) { + cfg = (1ull << 13) | (1ull << 14); + bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL); + cfg |= (1ull << 0); + bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg); + return -1; + } + } + + /* wait for PCS to come out of reset */ + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) { + dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n"); + return -1; + } + + if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) || + (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) { + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1, + SPU_BR_STATUS_BLK_LOCK, false)) { + dev_err(&bgx->pdev->dev, + "SPU_BR_STATUS_BLK_LOCK not completed\n"); + return -1; + } + } else { + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS, + SPU_BX_STATUS_RX_ALIGN, false)) { + dev_err(&bgx->pdev->dev, + "SPU_BX_STATUS_RX_ALIGN not completed\n"); + return -1; + } + } + + /* Clear rcvflt bit (latching high) and read it back */ + bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT); + if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { + dev_err(&bgx->pdev->dev, "Receive fault, retry training\n"); + if (bgx->use_training) { + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); + if (!(cfg & (1ull << 13))) { + cfg = (1ull << 13) | (1ull << 14); + bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); + cfg = bgx_reg_read(bgx, lmacid, + BGX_SPUX_BR_PMD_CRTL); + cfg |= (1ull << 0); + bgx_reg_write(bgx, lmacid, + BGX_SPUX_BR_PMD_CRTL, cfg); + return -1; + } + } + return -1; + } + + /* Wait for MAC RX to be ready */ + if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL, + SMU_RX_CTL_STATUS, true)) { + dev_err(&bgx->pdev->dev, "SMU RX link not okay\n"); + return -1; + } + + /* Wait for BGX RX to be idle */ + if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) { + dev_err(&bgx->pdev->dev, "SMU RX not idle\n"); + return -1; + } + + /* Wait for BGX TX to be idle */ + if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) { + dev_err(&bgx->pdev->dev, "SMU TX not idle\n"); + return -1; + } + + if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { + dev_err(&bgx->pdev->dev, "Receive fault\n"); + return -1; + } + + /* Receive link is latching low. Force it high and verify it */ + bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK); + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1, + SPU_STATUS1_RCV_LNK, false)) { + dev_err(&bgx->pdev->dev, "SPU receive link down\n"); + return -1; + } + + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL); + cfg &= ~SPU_MISC_CTL_RX_DIS; + bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg); + return 0; +} + +static void bgx_poll_for_link(struct work_struct *work) +{ + struct lmac *lmac; + u64 link; + + lmac = container_of(work, struct lmac, dwork.work); + + /* Receive link is latching low. 
Force it high and verify it */ + bgx_reg_modify(lmac->bgx, lmac->lmacid, + BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK); + bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1, + SPU_STATUS1_RCV_LNK, false); + + link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1); + if (link & SPU_STATUS1_RCV_LNK) { + lmac->link_up = 1; + if (lmac->bgx->lmac_type == BGX_MODE_XLAUI) + lmac->last_speed = 40000; + else + lmac->last_speed = 10000; + lmac->last_duplex = 1; + } else { + lmac->link_up = 0; + } + + if (lmac->last_link != lmac->link_up) { + lmac->last_link = lmac->link_up; + if (lmac->link_up) + bgx_xaui_check_link(lmac); + } + + queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2); +} + +static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) +{ + struct lmac *lmac; + u64 cfg; + + lmac = &bgx->lmac[lmacid]; + lmac->bgx = bgx; + + if (bgx->lmac_type == BGX_MODE_SGMII) { + lmac->is_sgmii = 1; + if (bgx_lmac_sgmii_init(bgx, lmacid)) + return -1; + } else { + lmac->is_sgmii = 0; + if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type)) + return -1; + } + + if (lmac->is_sgmii) { + cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND); + cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */ + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg); + bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1); + } else { + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND); + cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */ + bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg); + bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4); + } + + /* Enable lmac */ + bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, + CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN); + + /* Restore default cfg, in case low level firmware changed it */ + bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); + + if ((bgx->lmac_type != BGX_MODE_XFI) && + (bgx->lmac_type != BGX_MODE_XLAUI) && + (bgx->lmac_type != BGX_MODE_40G_KR) && + (bgx->lmac_type != BGX_MODE_10G_KR)) { + if (!lmac->phydev) + return -ENODEV; + + lmac->phydev->dev_flags = 0; + + if (phy_connect_direct(&lmac->netdev, lmac->phydev, + bgx_lmac_handler, + PHY_INTERFACE_MODE_SGMII)) + return -ENODEV; + + phy_start_aneg(lmac->phydev); + } else { + lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | + WQ_MEM_RECLAIM, 1); + if (!lmac->check_link) + return -ENOMEM; + INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link); + queue_delayed_work(lmac->check_link, &lmac->dwork, 0); + } + + return 0; +} + +static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) +{ + struct lmac *lmac; + u64 cmrx_cfg; + + lmac = &bgx->lmac[lmacid]; + if (lmac->check_link) { + /* Destroy work queue */ + cancel_delayed_work(&lmac->dwork); + flush_workqueue(lmac->check_link); + destroy_workqueue(lmac->check_link); + } + + cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); + cmrx_cfg &= ~(1 << 15); + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); + bgx_flush_dmac_addrs(bgx, lmacid); + + if (lmac->phydev) + phy_disconnect(lmac->phydev); + + lmac->phydev = NULL; +} + +static void bgx_set_num_ports(struct bgx *bgx) +{ + u64 lmac_count; + + switch (bgx->qlm_mode) { + case QLM_MODE_SGMII: + bgx->lmac_count = 4; + bgx->lmac_type = BGX_MODE_SGMII; + bgx->lane_to_sds = 0; + break; + case QLM_MODE_XAUI_1X4: + bgx->lmac_count = 1; + bgx->lmac_type = BGX_MODE_XAUI; + bgx->lane_to_sds = 0xE4; + break; + case QLM_MODE_RXAUI_2X2: + bgx->lmac_count = 2; + bgx->lmac_type = BGX_MODE_RXAUI; + bgx->lane_to_sds = 0xE4; + break; + case QLM_MODE_XFI_4X1: + bgx->lmac_count = 4; + bgx->lmac_type = 
BGX_MODE_XFI; + bgx->lane_to_sds = 0; + break; + case QLM_MODE_XLAUI_1X4: + bgx->lmac_count = 1; + bgx->lmac_type = BGX_MODE_XLAUI; + bgx->lane_to_sds = 0xE4; + break; + case QLM_MODE_10G_KR_4X1: + bgx->lmac_count = 4; + bgx->lmac_type = BGX_MODE_10G_KR; + bgx->lane_to_sds = 0; + bgx->use_training = 1; + break; + case QLM_MODE_40G_KR4_1X4: + bgx->lmac_count = 1; + bgx->lmac_type = BGX_MODE_40G_KR; + bgx->lane_to_sds = 0xE4; + bgx->use_training = 1; + break; + default: + bgx->lmac_count = 0; + break; + } + + /* Check if low level firmware has programmed the LMAC count + * based on board type; if so, use it, otherwise fall back + * to the default static values. + */ + lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7; + if (lmac_count != 4) + bgx->lmac_count = lmac_count; +} + +static void bgx_init_hw(struct bgx *bgx) +{ + int i; + + bgx_set_num_ports(bgx); + + bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP); + if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS)) + dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id); + + /* Set lmac type and lane2serdes mapping */ + for (i = 0; i < bgx->lmac_count; i++) { + if (bgx->lmac_type == BGX_MODE_RXAUI) { + if (i) + bgx->lane_to_sds = 0x0e; + else + bgx->lane_to_sds = 0x04; + bgx_reg_write(bgx, i, BGX_CMRX_CFG, + (bgx->lmac_type << 8) | bgx->lane_to_sds); + continue; + } + bgx_reg_write(bgx, i, BGX_CMRX_CFG, + (bgx->lmac_type << 8) | (bgx->lane_to_sds + i)); + bgx->lmac[i].lmacid_bd = lmac_count; + lmac_count++; + } + + bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count); + bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count); + + /* Set the backpressure AND mask */ + for (i = 0; i < bgx->lmac_count; i++) + bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND, + ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) << + (i * MAX_BGX_CHANS_PER_LMAC)); + + /* Disable all MAC filtering */ + for (i = 0; i < RX_DMAC_COUNT; i++) + bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00); + + /* Disable MAC steering (NCSI traffic) */ + for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) + bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); +} + +static void bgx_get_qlm_mode(struct bgx *bgx) +{ + struct device *dev = &bgx->pdev->dev; + int lmac_type; + int train_en; + + /* Read LMAC0 type to figure out QLM mode + * This is configured by low level firmware + */ + lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG); + lmac_type = (lmac_type >> 8) & 0x07; + + train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) & + SPU_PMD_CRTL_TRAIN_EN; + + switch (lmac_type) { + case BGX_MODE_SGMII: + bgx->qlm_mode = QLM_MODE_SGMII; + dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id); + break; + case BGX_MODE_XAUI: + bgx->qlm_mode = QLM_MODE_XAUI_1X4; + dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id); + break; + case BGX_MODE_RXAUI: + bgx->qlm_mode = QLM_MODE_RXAUI_2X2; + dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id); + break; + case BGX_MODE_XFI: + if (!train_en) { + bgx->qlm_mode = QLM_MODE_XFI_4X1; + dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id); + } else { + bgx->qlm_mode = QLM_MODE_10G_KR_4X1; + dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id); + } + break; + case BGX_MODE_XLAUI: + if (!train_en) { + bgx->qlm_mode = QLM_MODE_XLAUI_1X4; + dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id); + } else { + bgx->qlm_mode = QLM_MODE_40G_KR4_1X4; + dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id); + } + break; + default: + bgx->qlm_mode = QLM_MODE_SGMII; + dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id); + } +} + 
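As a usage sketch (illustrative only, not part of this patch): consumers such as the VNIC PF driver are expected to discover LMACs through the exports above. The helper below is hypothetical, but it only uses bgx_get_map(), bgx_get_lmac_count() and MAX_BGX_PER_CN88XX exactly as they are declared in this patch.

/* Hypothetical consumer sketch: count every LMAC on a node by walking
 * the bitmap of probed BGX blocks.
 */
static int count_all_lmacs(int node)
{
	unsigned int bgx_map = bgx_get_map(node);
	int bgx_idx, total = 0;

	for (bgx_idx = 0; bgx_idx < MAX_BGX_PER_CN88XX; bgx_idx++)
		if (bgx_map & BIT(bgx_idx))
			total += bgx_get_lmac_count(node, bgx_idx);

	return total;
}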
+static void bgx_init_of(struct bgx *bgx, struct device_node *np) +{ + struct device_node *np_child; + u8 lmac = 0; + + for_each_child_of_node(np, np_child) { + struct device_node *phy_np; + const char *mac; + + phy_np = of_parse_phandle(np_child, "phy-handle", 0); + if (phy_np) + bgx->lmac[lmac].phydev = of_phy_find_device(phy_np); + + mac = of_get_mac_address(np_child); + if (mac) + ether_addr_copy(bgx->lmac[lmac].mac, mac); + + SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); + bgx->lmac[lmac].lmacid = lmac; + lmac++; + if (lmac == MAX_LMAC_PER_BGX) + break; + } +} + +static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err; + struct device *dev = &pdev->dev; + struct bgx *bgx = NULL; + struct device_node *np; + char bgx_sel[5]; + u8 lmac; + + bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); + if (!bgx) + return -ENOMEM; + bgx->pdev = pdev; + + pci_set_drvdata(pdev, bgx); + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + pci_set_drvdata(pdev, NULL); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + /* MAP configuration registers */ + bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!bgx->reg_base) { + dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n"); + err = -ENOMEM; + goto err_release_regions; + } + bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; + bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX; + + bgx_vnic[bgx->bgx_id] = bgx; + bgx_get_qlm_mode(bgx); + + snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id); + np = of_find_node_by_name(NULL, bgx_sel); + if (np) + bgx_init_of(bgx, np); + + bgx_init_hw(bgx); + + /* Enable all LMACs */ + for (lmac = 0; lmac < bgx->lmac_count; lmac++) { + err = bgx_lmac_enable(bgx, lmac); + if (err) { + dev_err(dev, "BGX%d failed to enable lmac%d\n", + bgx->bgx_id, lmac); + goto err_enable; + } + } + + return 0; + +err_enable: + bgx_vnic[bgx->bgx_id] = NULL; +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void bgx_remove(struct pci_dev *pdev) +{ + struct bgx *bgx = pci_get_drvdata(pdev); + u8 lmac; + + /* Disable all LMACs */ + for (lmac = 0; lmac < bgx->lmac_count; lmac++) + bgx_lmac_disable(bgx, lmac); + + bgx_vnic[bgx->bgx_id] = NULL; + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver bgx_driver = { + .name = DRV_NAME, + .id_table = bgx_id_table, + .probe = bgx_probe, + .remove = bgx_remove, +}; + +static int __init bgx_init_module(void) +{ + pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); + + return pci_register_driver(&bgx_driver); +} + +static void __exit bgx_cleanup_module(void) +{ + pci_unregister_driver(&bgx_driver); +} + +module_init(bgx_init_module); +module_exit(bgx_cleanup_module); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h new file mode 100644 index 000000000000..ba4f53b7cc2c --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -0,0 +1,220 @@ +/* + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ */ + +#ifndef THUNDER_BGX_H +#define THUNDER_BGX_H + +#define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */ +#define MAX_BGX_PER_CN88XX 2 +#define MAX_LMAC_PER_BGX 4 +#define MAX_BGX_CHANS_PER_LMAC 16 +#define MAX_DMAC_PER_LMAC 8 +#define MAX_FRAME_SIZE 9216 + +#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2 + +#define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX) + +/* Registers */ +#define BGX_CMRX_CFG 0x00 +#define CMR_PKT_TX_EN BIT_ULL(13) +#define CMR_PKT_RX_EN BIT_ULL(14) +#define CMR_EN BIT_ULL(15) +#define BGX_CMR_GLOBAL_CFG 0x08 +#define CMR_GLOBAL_CFG_FCS_STRIP BIT_ULL(6) +#define BGX_CMRX_RX_ID_MAP 0x60 +#define BGX_CMRX_RX_STAT0 0x70 +#define BGX_CMRX_RX_STAT1 0x78 +#define BGX_CMRX_RX_STAT2 0x80 +#define BGX_CMRX_RX_STAT3 0x88 +#define BGX_CMRX_RX_STAT4 0x90 +#define BGX_CMRX_RX_STAT5 0x98 +#define BGX_CMRX_RX_STAT6 0xA0 +#define BGX_CMRX_RX_STAT7 0xA8 +#define BGX_CMRX_RX_STAT8 0xB0 +#define BGX_CMRX_RX_STAT9 0xB8 +#define BGX_CMRX_RX_STAT10 0xC0 +#define BGX_CMRX_RX_BP_DROP 0xC8 +#define BGX_CMRX_RX_DMAC_CTL 0x0E8 +#define BGX_CMR_RX_DMACX_CAM 0x200 +#define RX_DMACX_CAM_EN BIT_ULL(48) +#define RX_DMACX_CAM_LMACID(x) (x << 49) +#define RX_DMAC_COUNT 32 +#define BGX_CMR_RX_STREERING 0x300 +#define RX_TRAFFIC_STEER_RULE_COUNT 8 +#define BGX_CMR_CHAN_MSK_AND 0x450 +#define BGX_CMR_BIST_STATUS 0x460 +#define BGX_CMR_RX_LMACS 0x468 +#define BGX_CMRX_TX_STAT0 0x600 +#define BGX_CMRX_TX_STAT1 0x608 +#define BGX_CMRX_TX_STAT2 0x610 +#define BGX_CMRX_TX_STAT3 0x618 +#define BGX_CMRX_TX_STAT4 0x620 +#define BGX_CMRX_TX_STAT5 0x628 +#define BGX_CMRX_TX_STAT6 0x630 +#define BGX_CMRX_TX_STAT7 0x638 +#define BGX_CMRX_TX_STAT8 0x640 +#define BGX_CMRX_TX_STAT9 0x648 +#define BGX_CMRX_TX_STAT10 0x650 +#define BGX_CMRX_TX_STAT11 0x658 +#define BGX_CMRX_TX_STAT12 0x660 +#define BGX_CMRX_TX_STAT13 0x668 +#define BGX_CMRX_TX_STAT14 0x670 +#define BGX_CMRX_TX_STAT15 0x678 +#define BGX_CMRX_TX_STAT16 0x680 +#define BGX_CMRX_TX_STAT17 0x688 +#define BGX_CMR_TX_LMACS 0x1000 + +#define BGX_SPUX_CONTROL1 0x10000 +#define SPU_CTL_LOW_POWER BIT_ULL(11) +#define SPU_CTL_RESET BIT_ULL(15) +#define BGX_SPUX_STATUS1 0x10008 +#define SPU_STATUS1_RCV_LNK BIT_ULL(2) +#define BGX_SPUX_STATUS2 0x10020 +#define SPU_STATUS2_RCVFLT BIT_ULL(10) +#define BGX_SPUX_BX_STATUS 0x10028 +#define SPU_BX_STATUS_RX_ALIGN BIT_ULL(12) +#define BGX_SPUX_BR_STATUS1 0x10030 +#define SPU_BR_STATUS_BLK_LOCK BIT_ULL(0) +#define SPU_BR_STATUS_RCV_LNK BIT_ULL(12) +#define BGX_SPUX_BR_PMD_CRTL 0x10068 +#define SPU_PMD_CRTL_TRAIN_EN BIT_ULL(1) +#define BGX_SPUX_BR_PMD_LP_CUP 0x10078 +#define BGX_SPUX_BR_PMD_LD_CUP 0x10088 +#define BGX_SPUX_BR_PMD_LD_REP 0x10090 +#define BGX_SPUX_FEC_CONTROL 0x100A0 +#define SPU_FEC_CTL_FEC_EN BIT_ULL(0) +#define SPU_FEC_CTL_ERR_EN BIT_ULL(1) +#define BGX_SPUX_AN_CONTROL 0x100C8 +#define SPU_AN_CTL_AN_EN BIT_ULL(12) +#define SPU_AN_CTL_XNP_EN BIT_ULL(13) +#define BGX_SPUX_AN_ADV 0x100D8 +#define BGX_SPUX_MISC_CONTROL 0x10218 +#define SPU_MISC_CTL_INTLV_RDISP BIT_ULL(10) +#define SPU_MISC_CTL_RX_DIS BIT_ULL(12) +#define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */ +#define BGX_SPUX_INT_W1S 0x10228 +#define BGX_SPUX_INT_ENA_W1C 0x10230 +#define BGX_SPUX_INT_ENA_W1S 0x10238 +#define BGX_SPU_DBG_CONTROL 0x10300 +#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN BIT_ULL(18) +#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29) + +#define BGX_SMUX_RX_INT 0x20000 +#define BGX_SMUX_RX_JABBER 0x20030 +#define BGX_SMUX_RX_CTL 0x20048 +#define SMU_RX_CTL_STATUS (3ull << 0) +#define BGX_SMUX_TX_APPEND 0x20100 +#define 
SMU_TX_APPEND_FCS_D BIT_ULL(2) +#define BGX_SMUX_TX_MIN_PKT 0x20118 +#define BGX_SMUX_TX_INT 0x20140 +#define BGX_SMUX_TX_CTL 0x20178 +#define SMU_TX_CTL_DIC_EN BIT_ULL(0) +#define SMU_TX_CTL_UNI_EN BIT_ULL(1) +#define SMU_TX_CTL_LNK_STATUS (3ull << 4) +#define BGX_SMUX_TX_THRESH 0x20180 +#define BGX_SMUX_CTL 0x20200 +#define SMU_CTL_RX_IDLE BIT_ULL(0) +#define SMU_CTL_TX_IDLE BIT_ULL(1) + +#define BGX_GMP_PCS_MRX_CTL 0x30000 +#define PCS_MRX_CTL_RST_AN BIT_ULL(9) +#define PCS_MRX_CTL_PWR_DN BIT_ULL(11) +#define PCS_MRX_CTL_AN_EN BIT_ULL(12) +#define PCS_MRX_CTL_RESET BIT_ULL(15) +#define BGX_GMP_PCS_MRX_STATUS 0x30008 +#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5) +#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020 +#define BGX_GMP_PCS_SGM_AN_ADV 0x30068 +#define BGX_GMP_PCS_MISCX_CTL 0x30078 +#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11) +#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full +#define BGX_GMP_GMI_PRTX_CFG 0x38020 +#define GMI_PORT_CFG_SPEED BIT_ULL(1) +#define GMI_PORT_CFG_DUPLEX BIT_ULL(2) +#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3) +#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8) +#define BGX_GMP_GMI_RXX_JABBER 0x38038 +#define BGX_GMP_GMI_TXX_THRESH 0x38210 +#define BGX_GMP_GMI_TXX_APPEND 0x38218 +#define BGX_GMP_GMI_TXX_SLOT 0x38220 +#define BGX_GMP_GMI_TXX_BURST 0x38228 +#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240 +#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300 + +#define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */ +#define BGX_MSIX_VEC_0_29_CTL 0x400008 +#define BGX_MSIX_PBA_0 0x4F0000 + +/* MSI-X interrupts */ +#define BGX_MSIX_VECTORS 30 +#define BGX_LMAC_VEC_OFFSET 7 +#define BGX_MSIX_VEC_SHIFT 4 + +#define CMRX_INT 0 +#define SPUX_INT 1 +#define SMUX_RX_INT 2 +#define SMUX_TX_INT 3 +#define GMPX_PCS_INT 4 +#define GMPX_GMI_RX_INT 5 +#define GMPX_GMI_TX_INT 6 +#define CMR_MEM_INT 28 +#define SPU_MEM_INT 29 + +#define LMAC_INTR_LINK_UP BIT(0) +#define LMAC_INTR_LINK_DOWN BIT(1) + +/* RX_DMAC_CTL configuration*/ +enum MCAST_MODE { + MCAST_MODE_REJECT, + MCAST_MODE_ACCEPT, + MCAST_MODE_CAM_FILTER, + RSVD +}; + +#define BCAST_ACCEPT 1 +#define CAM_ACCEPT 1 + +void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); +unsigned bgx_get_map(int node); +int bgx_get_lmac_count(int node, int bgx); +const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid); +void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac); +void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status); +u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx); +u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx); +#define BGX_RX_STATS_COUNT 11 +#define BGX_TX_STATS_COUNT 18 + +struct bgx_stats { + u64 rx_stats[BGX_RX_STATS_COUNT]; + u64 tx_stats[BGX_TX_STATS_COUNT]; +}; + +enum LMAC_TYPE { + BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */ + BGX_MODE_XAUI = 1, /* 4 lanes, 3.125 Gbaud */ + BGX_MODE_DXAUI = 1, /* 4 lanes, 6.250 Gbaud */ + BGX_MODE_RXAUI = 2, /* 2 lanes, 6.250 Gbaud */ + BGX_MODE_XFI = 3, /* 1 lane, 10.3125 Gbaud */ + BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */ + BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */ + BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */ +}; + +enum qlm_mode { + QLM_MODE_SGMII, /* SGMII, each lane independent */ + QLM_MODE_XAUI_1X4, /* 1 XAUI or DXAUI, 4 lanes */ + QLM_MODE_RXAUI_2X2, /* 2 RXAUI, 2 lanes each */ + QLM_MODE_XFI_4X1, /* 4 XFI, 1 lane each */ + QLM_MODE_XLAUI_1X4, /* 1 XLAUI, 4 lanes each */ + QLM_MODE_10G_KR_4X1, /* 4 10GBASE-KR, 1 lane each */ + QLM_MODE_40G_KR4_1X4, /* 1 40GBASE-KR4, 4 lanes each */ +}; + +#endif /* 
THUNDER_BGX_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 3c109d115365..4d627a8f04b0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -46,17 +46,19 @@ #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/vmalloc.h> +#include <linux/etherdevice.h> #include <asm/io.h> #include "cxgb4_uld.h" #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) enum { - MAX_NPORTS = 4, /* max # of ports */ - SERNUM_LEN = 24, /* Serial # length */ - EC_LEN = 16, /* E/C length */ - ID_LEN = 16, /* ID length */ - PN_LEN = 16, /* Part Number length */ + MAX_NPORTS = 4, /* max # of ports */ + SERNUM_LEN = 24, /* Serial # length */ + EC_LEN = 16, /* E/C length */ + ID_LEN = 16, /* ID length */ + PN_LEN = 16, /* Part Number length */ + MACADDR_LEN = 12, /* MAC Address length */ }; enum { @@ -198,23 +200,45 @@ struct lb_port_stats { }; struct tp_tcp_stats { - u32 tcpOutRsts; - u64 tcpInSegs; - u64 tcpOutSegs; - u64 tcpRetransSegs; + u32 tcp_out_rsts; + u64 tcp_in_segs; + u64 tcp_out_segs; + u64 tcp_retrans_segs; +}; + +struct tp_usm_stats { + u32 frames; + u32 drops; + u64 octets; +}; + +struct tp_fcoe_stats { + u32 frames_ddp; + u32 frames_drop; + u64 octets_ddp; }; struct tp_err_stats { - u32 macInErrs[4]; - u32 hdrInErrs[4]; - u32 tcpInErrs[4]; - u32 tnlCongDrops[4]; - u32 ofldChanDrops[4]; - u32 tnlTxDrops[4]; - u32 ofldVlanDrops[4]; - u32 tcp6InErrs[4]; - u32 ofldNoNeigh; - u32 ofldCongDefer; + u32 mac_in_errs[4]; + u32 hdr_in_errs[4]; + u32 tcp_in_errs[4]; + u32 tnl_cong_drops[4]; + u32 ofld_chan_drops[4]; + u32 tnl_tx_drops[4]; + u32 ofld_vlan_drops[4]; + u32 tcp6_in_errs[4]; + u32 ofld_no_neigh; + u32 ofld_cong_defer; +}; + +struct tp_cpl_stats { + u32 req[4]; + u32 rsp[4]; +}; + +struct tp_rdma_stats { + u32 rqe_dfr_pkt; + u32 rqe_dfr_mod; }; struct sge_params { @@ -224,7 +248,6 @@ struct sge_params { }; struct tp_params { - unsigned int ntxchan; /* # of Tx channels */ unsigned int tre; /* log2 of core clocks per TP tick */ unsigned int la_mask; /* what events are recorded by TP LA */ unsigned short tx_modq_map; /* TX modulation scheduler queue to */ @@ -259,6 +282,7 @@ struct vpd_params { u8 sn[SERNUM_LEN + 1]; u8 id[ID_LEN + 1]; u8 pn[PN_LEN + 1]; + u8 na[MACADDR_LEN + 1]; }; struct pci_params { @@ -273,6 +297,7 @@ struct pci_params { #define CHELSIO_T4 0x4 #define CHELSIO_T5 0x5 +#define CHELSIO_T6 0x6 enum chip_type { T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), @@ -284,6 +309,10 @@ enum chip_type { T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), T5_FIRST_REV = T5_A0, T5_LAST_REV = T5_A1, + + T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0), + T6_FIRST_REV = T6_A0, + T6_LAST_REV = T6_A0, }; struct devlog_params { @@ -292,6 +321,15 @@ struct devlog_params { u32 size; /* size of log */ }; +/* Stores chip specific parameters */ +struct arch_specific_params { + u8 nchan; + u16 mps_rplc_size; + u16 vfcount; + u32 sge_fl_db; + u16 mps_tcam_size; +}; + struct adapter_params { struct sge_params sge; struct tp_params tp; @@ -317,6 +355,7 @@ struct adapter_params { unsigned char nports; /* # of ethernet ports */ unsigned char portvec; enum chip_type chip; /* chip code */ + struct arch_specific_params arch; /* chip specific params */ unsigned char offload; unsigned char bypass; @@ -432,6 +471,7 @@ struct port_info { u8 rss_mode; struct link_config link_cfg; u16 *rss; + struct port_stats stats_base; #ifdef CONFIG_CHELSIO_T4_DCB struct port_dcb_info dcb; /* Data Center Bridging support */ #endif @@ -650,6 +690,7 @@ struct sge { struct sge_rspq **ingr_map; 
/* qid->queue ingress queue map */ unsigned long *starving_fl; unsigned long *txq_maperr; + unsigned long *blocked_fl; struct timer_list rx_timer; /* refills starving FLs */ struct timer_list tx_timer; /* checks Tx queues */ }; @@ -671,6 +712,12 @@ struct l2t_data; #endif +struct doorbell_stats { + u32 db_drop; + u32 db_empty; + u32 db_full; +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -678,7 +725,7 @@ struct adapter { struct pci_dev *pdev; struct device *pdev_dev; unsigned int mbox; - unsigned int fn; + unsigned int pf; unsigned int flags; enum chip_type chip; @@ -688,13 +735,12 @@ struct adapter { struct cxgb4_virt_res vres; unsigned int swintr; - unsigned int wol; - struct { unsigned short vec; char desc[IFNAMSIZ + 10]; } msix_info[MAX_INGQ + 1]; + struct doorbell_stats db_stats; struct sge sge; struct net_device *port[MAX_NPORTS]; @@ -849,6 +895,16 @@ enum { VLAN_REWRITE }; +static inline int is_offload(const struct adapter *adap) +{ + return adap->params.offload; +} + +static inline int is_t6(enum chip_type chip) +{ + return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6; +} + static inline int is_t5(enum chip_type chip) { return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5; @@ -892,6 +948,22 @@ static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val) writeq(val, adap->regs + reg_addr); } +/** + * t4_set_hw_addr - store a port's MAC address in SW + * @adapter: the adapter + * @port_idx: the port index + * @hw_addr: the Ethernet address + * + * Store the Ethernet address of the given port in SW. Called by the common + * code when it retrieves a port's Ethernet address from EEPROM. + */ +static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx, + u8 hw_addr[]) +{ + ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr); + ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr); +} + /** * netdev2pinfo - return the port_info structure associated with a net_device * @dev: the netdev @@ -1176,7 +1248,7 @@ void t4_intr_disable(struct adapter *adapter); int t4_slow_intr_handler(struct adapter *adapter); int t4_wait_dev_ready(void __iomem *regs); -int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, +int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc); int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); @@ -1198,7 +1270,8 @@ unsigned int t4_get_regs_len(struct adapter *adapter); void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size); int t4_seeprom_wp(struct adapter *adapter, bool enable); -int get_vpd_params(struct adapter *adapter, struct vpd_params *p); +int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p); +int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); @@ -1220,7 +1293,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, int t4_prep_adapter(struct adapter *adapter); enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; -int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, +int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, enum t4_bar2_qtype qtype, u64 *pbar2_qoffset, @@ -1267,13 +1340,23 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr); void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres); 
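An illustrative aside (not part of the patch): t4_set_hw_addr() above is intended to be called by the common code once a port's MAC address has been read back from VPD/EEPROM; all names below come from this header, only the call site itself is hypothetical.

	u8 hw_addr[ETH_ALEN];

	/* ... fill hw_addr, e.g. from the VPD "na" field added above ... */
	t4_set_hw_addr(adapter, port_idx, hw_addr);

This mirrors the address into both dev_addr and perm_addr of the port's net_device, keeping the two in sync.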
const char *t4_get_port_type_description(enum fw_port_type port_type); void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); +void t4_get_port_stats_offset(struct adapter *adap, int idx, + struct port_stats *stats, + struct port_stats *offset); +void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p); void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]); void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, unsigned int mask, unsigned int val); void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr); +void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st); +void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st); +void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st); +void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st); void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, struct tp_tcp_stats *v6); +void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, + struct tp_fcoe_stats *st); void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, const unsigned short *alpha, const unsigned short *beta); @@ -1315,6 +1398,9 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, unsigned int *rss_size); +int t4_free_vi(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned int viid); int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, int mtu, int promisc, int all_multi, int bcast, int vlanex, bool sleep_ok); @@ -1344,6 +1430,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); +int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox); int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); void t4_db_full(struct adapter *adapter); void t4_db_dropped(struct adapter *adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 371f75e782e5..3719807efddd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -1084,41 +1084,89 @@ static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask) static int mps_tcam_show(struct seq_file *seq, void *v) { - if (v == SEQ_START_TOKEN) - seq_puts(seq, "Idx Ethernet address Mask Vld Ports PF" - " VF Replication " - "P0 P1 P2 P3 ML\n"); - else { + struct adapter *adap = seq->private; + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); + + if (v == SEQ_START_TOKEN) { + if (adap->params.arch.mps_rplc_size > 128) + seq_puts(seq, "Idx Ethernet address Mask " + "Vld Ports PF VF " + "Replication " + " P0 P1 P2 P3 ML\n"); + else + seq_puts(seq, "Idx Ethernet address Mask " + "Vld Ports PF VF Replication" + " P0 P1 P2 P3 ML\n"); + } else { u64 mask; u8 addr[ETH_ALEN]; - struct adapter *adap = seq->private; + bool replicate; unsigned int idx = (uintptr_t)v - 2; - u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx)); - u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx)); - u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx)); - u32 cls_hi 
= t4_read_reg(adap, MPS_CLS_SRAM_H(idx)); - u32 rplc[4] = {0, 0, 0, 0}; + u64 tcamy, tcamx, val; + u32 cls_lo, cls_hi, ctl; + u32 rplc[8] = {0}; + + if (chip_ver > CHELSIO_T5) { + /* CtlCmdType - 0: Read, 1: Write + * CtlTcamSel - 0: TCAM0, 1: TCAM1 + * CtlXYBitSel- 0: Y bit, 1: X bit + */ + + /* Read tcamy */ + ctl = CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0); + if (idx < 256) + ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0); + else + ctl |= CTLTCAMINDEX_V(idx - 256) | + CTLTCAMSEL_V(1); + t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl); + val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A); + tcamy = DMACH_G(val) << 32; + tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A); + + /* Read tcamx. Change the control param */ + ctl |= CTLXYBITSEL_V(1); + t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl); + val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A); + tcamx = DMACH_G(val) << 32; + tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A); + } else { + tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx)); + tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx)); + } + + cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx)); + cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx)); if (tcamx & tcamy) { seq_printf(seq, "%3u -\n", idx); goto out; } - if (cls_lo & REPLICATE_F) { + rplc[0] = rplc[1] = rplc[2] = rplc[3] = 0; + if (chip_ver > CHELSIO_T5) + replicate = (cls_lo & T6_REPLICATE_F); + else + replicate = (cls_lo & REPLICATE_F); + + if (replicate) { struct fw_ldst_cmd ldst_cmd; int ret; + struct fw_ldst_mps_rplc mps_rplc; + u32 ldst_addrspc; memset(&ldst_cmd, 0, sizeof(ldst_cmd)); + ldst_addrspc = + FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS); ldst_cmd.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | FW_CMD_READ_F | - FW_LDST_CMD_ADDRSPACE_V( - FW_LDST_ADDRSPC_MPS)); + ldst_addrspc); ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd)); - ldst_cmd.u.mps.fid_ctl = + ldst_cmd.u.mps.rplc.fid_idx = htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) | - FW_LDST_CMD_CTL_V(idx)); + FW_LDST_CMD_IDX_V(idx)); ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); if (ret) @@ -1126,30 +1174,69 @@ static int mps_tcam_show(struct seq_file *seq, void *v) "replication map for idx %d: %d\n", idx, -ret); else { - rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0); - rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32); - rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64); - rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96); + mps_rplc = ldst_cmd.u.mps.rplc; + rplc[0] = ntohl(mps_rplc.rplc31_0); + rplc[1] = ntohl(mps_rplc.rplc63_32); + rplc[2] = ntohl(mps_rplc.rplc95_64); + rplc[3] = ntohl(mps_rplc.rplc127_96); + if (adap->params.arch.mps_rplc_size > 128) { + rplc[4] = ntohl(mps_rplc.rplc159_128); + rplc[5] = ntohl(mps_rplc.rplc191_160); + rplc[6] = ntohl(mps_rplc.rplc223_192); + rplc[7] = ntohl(mps_rplc.rplc255_224); + } } } tcamxy2valmask(tcamx, tcamy, addr, &mask); - seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx" - "%3c %#x%4u%4d", - idx, addr[0], addr[1], addr[2], addr[3], addr[4], - addr[5], (unsigned long long)mask, - (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi), - PF_G(cls_lo), - (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1); - if (cls_lo & REPLICATE_F) - seq_printf(seq, " %08x %08x %08x %08x", - rplc[3], rplc[2], rplc[1], rplc[0]); + if (chip_ver > CHELSIO_T5) + seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x " + "%012llx%3c %#x%4u%4d", + idx, addr[0], addr[1], addr[2], addr[3], + addr[4], addr[5], (unsigned long long)mask, + (cls_lo & T6_SRAM_VLD_F) ? 
'Y' : 'N', + PORTMAP_G(cls_hi), + T6_PF_G(cls_lo), + (cls_lo & T6_VF_VALID_F) ? + T6_VF_G(cls_lo) : -1); else - seq_printf(seq, "%36c", ' '); - seq_printf(seq, "%4u%3u%3u%3u %#x\n", - SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo), - SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo), - (cls_lo >> MULTILISTEN0_S) & 0xf); + seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x " + "%012llx%3c %#x%4u%4d", + idx, addr[0], addr[1], addr[2], addr[3], + addr[4], addr[5], (unsigned long long)mask, + (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', + PORTMAP_G(cls_hi), + PF_G(cls_lo), + (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1); + + if (replicate) { + if (adap->params.arch.mps_rplc_size > 128) + seq_printf(seq, " %08x %08x %08x %08x " + "%08x %08x %08x %08x", + rplc[7], rplc[6], rplc[5], rplc[4], + rplc[3], rplc[2], rplc[1], rplc[0]); + else + seq_printf(seq, " %08x %08x %08x %08x", + rplc[3], rplc[2], rplc[1], rplc[0]); + } else { + if (adap->params.arch.mps_rplc_size > 128) + seq_printf(seq, "%72c", ' '); + else + seq_printf(seq, "%36c", ' '); + } + + if (chip_ver > CHELSIO_T5) + seq_printf(seq, "%4u%3u%3u%3u %#x\n", + T6_SRAM_PRIO0_G(cls_lo), + T6_SRAM_PRIO1_G(cls_lo), + T6_SRAM_PRIO2_G(cls_lo), + T6_SRAM_PRIO3_G(cls_lo), + (cls_lo >> T6_MULTILISTEN0_S) & 0xf); + else + seq_printf(seq, "%4u%3u%3u%3u %#x\n", + SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo), + SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo), + (cls_lo >> MULTILISTEN0_S) & 0xf); } out: return 0; } @@ -1222,7 +1309,7 @@ static int sensors_show(struct seq_file *seq, void *v) param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) | FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD)); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, param, val); if (ret < 0 || val[0] == 0) @@ -1416,6 +1503,9 @@ static int rss_config_show(struct seq_file *seq, void *v) seq_printf(seq, " HashDelay: %3d\n", HASHDELAY_G(rssconf)); if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) seq_printf(seq, " VfWrAddr: %3d\n", VFWRADDR_G(rssconf)); + else + seq_printf(seq, " VfWrAddr: %3d\n", + T6_VFWRADDR_G(rssconf)); seq_printf(seq, " KeyMode: %s\n", keymode[KEYMODE_G(rssconf)]); seq_printf(seq, " VfWrEn: %3s\n", yesno(rssconf & VFWREN_F)); seq_printf(seq, " KeyWrEn: %3s\n", yesno(rssconf & KEYWREN_F)); @@ -1634,14 +1724,14 @@ static int rss_vf_config_open(struct inode *inode, struct file *file) struct adapter *adapter = inode->i_private; struct seq_tab *p; struct rss_vf_conf *vfconf; - int vf; + int vf, vfcount = adapter->params.arch.vfcount; - p = seq_open_tab(file, 128, sizeof(*vfconf), 1, rss_vf_config_show); + p = seq_open_tab(file, vfcount, sizeof(*vfconf), 1, rss_vf_config_show); if (!p) return -ENOMEM; vfconf = (struct rss_vf_conf *)p->data; - for (vf = 0; vf < 128; vf++) { + for (vf = 0; vf < vfcount; vf++) { t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl, &vfconf[vf].rss_vf_vfh); } @@ -1959,6 +2049,61 @@ static void add_debugfs_mem(struct adapter *adap, const char *name, size_mb << 20); } +static int blocked_fl_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int len; + const struct adapter *adap = filp->private_data; + char *buf; + ssize_t size = (adap->sge.egr_sz + 3) / 4 + + adap->sge.egr_sz / 32 + 2; /* includes ,/\n/\0 */ + + buf = kzalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len = 
snprintf(buf, size - 1, "%*pb\n", + adap->sge.egr_sz, adap->sge.blocked_fl); + len += sprintf(buf + len, "\n"); + size = simple_read_from_buffer(ubuf, count, ppos, buf, len); + t4_free_mem(buf); + return size; +} + +static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + int err; + unsigned long *t; + struct adapter *adap = filp->private_data; + + t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL); + if (!t) + return -ENOMEM; + + err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz); + if (err) + return err; + + bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz); + t4_free_mem(t); + return count; +} + +static const struct file_operations blocked_fl_fops = { + .owner = THIS_MODULE, + .open = blocked_fl_open, + .read = blocked_fl_read, + .write = blocked_fl_write, + .llseek = generic_file_llseek, +}; + /* Add an array of Debug FS files. */ void add_debugfs_files(struct adapter *adap, @@ -1978,7 +2123,7 @@ void add_debugfs_files(struct adapter *adap, int t4_setup_debugfs(struct adapter *adap) { int i; - u32 size; + u32 size = 0; struct dentry *de; static struct t4_debugfs_entry t4_debugfs_files[] = { @@ -2022,6 +2167,7 @@ int t4_setup_debugfs(struct adapter *adap) #if IS_ENABLED(CONFIG_IPV6) { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 }, #endif + { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 }, }; /* Debug FS nodes common to all T5 and later adapters. @@ -2048,12 +2194,7 @@ int t4_setup_debugfs(struct adapter *adap) size = t4_read_reg(adap, MA_EDRAM1_BAR_A); add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size)); } - if (is_t4(adap->params.chip)) { - size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); - if (i & EXT_MEM_ENABLE_F) - add_debugfs_mem(adap, "mc", MEM_MC, - EXT_MEM_SIZE_G(size)); - } else { + if (is_t5(adap->params.chip)) { if (i & EXT_MEM0_ENABLE_F) { size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); add_debugfs_mem(adap, "mc0", MEM_MC0, @@ -2064,6 +2205,11 @@ int t4_setup_debugfs(struct adapter *adap) add_debugfs_mem(adap, "mc1", MEM_MC1, EXT_MEM1_SIZE_G(size)); } + } else { + if (i & EXT_MEM_ENABLE_F) + size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); + add_debugfs_mem(adap, "mc", MEM_MC, + EXT_MEM_SIZE_G(size)); } de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 401272a2691e..687acf71fa15 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -108,15 +108,82 @@ static const char stats_strings[][ETH_GSTRING_LEN] = { "VLANinsertions ", "GROpackets ", "GROmerged ", - "WriteCoalSuccess ", - "WriteCoalFail ", +}; + +static char adapter_stats_strings[][ETH_GSTRING_LEN] = { + "db_drop ", + "db_full ", + "db_empty ", + "tcp_ipv4_out_rsts ", + "tcp_ipv4_in_segs ", + "tcp_ipv4_out_segs ", + "tcp_ipv4_retrans_segs ", + "tcp_ipv6_out_rsts ", + "tcp_ipv6_in_segs ", + "tcp_ipv6_out_segs ", + "tcp_ipv6_retrans_segs ", + "usm_ddp_frames ", + "usm_ddp_octets ", + "usm_ddp_drops ", + "rdma_no_rqe_mod_defer ", + "rdma_no_rqe_pkt_defer ", + "tp_err_ofld_no_neigh ", + "tp_err_ofld_cong_defer ", + "write_coal_success ", + "write_coal_fail ", +}; + +static char channel_stats_strings[][ETH_GSTRING_LEN] = { + "--------Channel--------- ", + "tp_cpl_requests ", + "tp_cpl_responses ", + "tp_mac_in_errs ", + "tp_hdr_in_errs ", + "tp_tcp_in_errs ", + "tp_tcp6_in_errs ", + "tp_tnl_cong_drops ", + 
"tp_tnl_tx_drops ", + "tp_ofld_vlan_drops ", + "tp_ofld_chan_drops ", + "fcoe_octets_ddp ", + "fcoe_frames_ddp ", + "fcoe_frames_drop ", +}; + +static char loopback_stats_strings[][ETH_GSTRING_LEN] = { + "-------Loopback----------- ", + "octets_ok ", + "frames_ok ", + "bcast_frames ", + "mcast_frames ", + "ucast_frames ", + "error_frames ", + "frames_64 ", + "frames_65_to_127 ", + "frames_128_to_255 ", + "frames_256_to_511 ", + "frames_512_to_1023 ", + "frames_1024_to_1518 ", + "frames_1519_to_max ", + "frames_dropped ", + "bg0_frames_dropped ", + "bg1_frames_dropped ", + "bg2_frames_dropped ", + "bg3_frames_dropped ", + "bg0_frames_trunc ", + "bg1_frames_trunc ", + "bg2_frames_trunc ", + "bg3_frames_trunc ", }; static int get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: - return ARRAY_SIZE(stats_strings); + return ARRAY_SIZE(stats_strings) + + ARRAY_SIZE(adapter_stats_strings) + + ARRAY_SIZE(channel_stats_strings) + + ARRAY_SIZE(loopback_stats_strings); default: return -EOPNOTSUPP; } @@ -168,8 +235,18 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) static void get_strings(struct net_device *dev, u32 stringset, u8 *data) { - if (stringset == ETH_SS_STATS) + if (stringset == ETH_SS_STATS) { memcpy(data, stats_strings, sizeof(stats_strings)); + data += sizeof(stats_strings); + memcpy(data, adapter_stats_strings, + sizeof(adapter_stats_strings)); + data += sizeof(adapter_stats_strings); + memcpy(data, channel_stats_strings, + sizeof(channel_stats_strings)); + data += sizeof(channel_stats_strings); + memcpy(data, loopback_stats_strings, + sizeof(loopback_stats_strings)); + } } /* port stats maintained per queue of the port. They should be in the same @@ -185,6 +262,45 @@ struct queue_port_stats { u64 gro_merged; }; +struct adapter_stats { + u64 db_drop; + u64 db_full; + u64 db_empty; + u64 tcp_v4_out_rsts; + u64 tcp_v4_in_segs; + u64 tcp_v4_out_segs; + u64 tcp_v4_retrans_segs; + u64 tcp_v6_out_rsts; + u64 tcp_v6_in_segs; + u64 tcp_v6_out_segs; + u64 tcp_v6_retrans_segs; + u64 frames; + u64 octets; + u64 drops; + u64 rqe_dfr_mod; + u64 rqe_dfr_pkt; + u64 ofld_no_neigh; + u64 ofld_cong_defer; + u64 wc_success; + u64 wc_fail; +}; + +struct channel_stats { + u64 cpl_req; + u64 cpl_rsp; + u64 mac_in_errs; + u64 hdr_in_errs; + u64 tcp_in_errs; + u64 tcp6_in_errs; + u64 tnl_cong_drops; + u64 tnl_tx_drops; + u64 ofld_vlan_drops; + u64 ofld_chan_drops; + u64 octets_ddp; + u64 frames_ddp; + u64 frames_drop; +}; + static void collect_sge_port_stats(const struct adapter *adap, const struct port_info *p, struct queue_port_stats *s) @@ -205,30 +321,121 @@ static void collect_sge_port_stats(const struct adapter *adap, } } +static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s) +{ + struct tp_tcp_stats v4, v6; + struct tp_rdma_stats rdma_stats; + struct tp_err_stats err_stats; + struct tp_usm_stats usm_stats; + u64 val1, val2; + + memset(s, 0, sizeof(*s)); + + spin_lock(&adap->stats_lock); + t4_tp_get_tcp_stats(adap, &v4, &v6); + t4_tp_get_rdma_stats(adap, &rdma_stats); + t4_get_usm_stats(adap, &usm_stats); + t4_tp_get_err_stats(adap, &err_stats); + spin_unlock(&adap->stats_lock); + + s->db_drop = adap->db_stats.db_drop; + s->db_full = adap->db_stats.db_full; + s->db_empty = adap->db_stats.db_empty; + + s->tcp_v4_out_rsts = v4.tcp_out_rsts; + s->tcp_v4_in_segs = v4.tcp_in_segs; + s->tcp_v4_out_segs = v4.tcp_out_segs; + s->tcp_v4_retrans_segs = v4.tcp_retrans_segs; + s->tcp_v6_out_rsts = v6.tcp_out_rsts; + 
s->tcp_v6_in_segs = v6.tcp_in_segs; + s->tcp_v6_out_segs = v6.tcp_out_segs; + s->tcp_v6_retrans_segs = v6.tcp_retrans_segs; + + if (is_offload(adap)) { + s->frames = usm_stats.frames; + s->octets = usm_stats.octets; + s->drops = usm_stats.drops; + s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod; + s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt; + } + + s->ofld_no_neigh = err_stats.ofld_no_neigh; + s->ofld_cong_defer = err_stats.ofld_cong_defer; + + if (!is_t4(adap->params.chip)) { + int v; + + v = t4_read_reg(adap, SGE_STAT_CFG_A); + if (STATSOURCE_T5_G(v) == 7) { + val2 = t4_read_reg(adap, SGE_STAT_MATCH_A); + val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A); + s->wc_success = val1 - val2; + s->wc_fail = val2; + } + } +} + +static void collect_channel_stats(struct adapter *adap, struct channel_stats *s, + u8 i) +{ + struct tp_cpl_stats cpl_stats; + struct tp_err_stats err_stats; + struct tp_fcoe_stats fcoe_stats; + + memset(s, 0, sizeof(*s)); + + spin_lock(&adap->stats_lock); + t4_tp_get_cpl_stats(adap, &cpl_stats); + t4_tp_get_err_stats(adap, &err_stats); + t4_get_fcoe_stats(adap, i, &fcoe_stats); + spin_unlock(&adap->stats_lock); + + s->cpl_req = cpl_stats.req[i]; + s->cpl_rsp = cpl_stats.rsp[i]; + s->mac_in_errs = err_stats.mac_in_errs[i]; + s->hdr_in_errs = err_stats.hdr_in_errs[i]; + s->tcp_in_errs = err_stats.tcp_in_errs[i]; + s->tcp6_in_errs = err_stats.tcp6_in_errs[i]; + s->tnl_cong_drops = err_stats.tnl_cong_drops[i]; + s->tnl_tx_drops = err_stats.tnl_tx_drops[i]; + s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i]; + s->ofld_chan_drops = err_stats.ofld_chan_drops[i]; + s->octets_ddp = fcoe_stats.octets_ddp; + s->frames_ddp = fcoe_stats.frames_ddp; + s->frames_drop = fcoe_stats.frames_drop; +} + static void get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; - u32 val1, val2; + struct lb_port_stats s; + int i; + u64 *p0; - t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); + t4_get_port_stats_offset(adapter, pi->tx_chan, + (struct port_stats *)data, + &pi->stats_base); data += sizeof(struct port_stats) / sizeof(u64); collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); data += sizeof(struct queue_port_stats) / sizeof(u64); - if (!is_t4(adapter->params.chip)) { - t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7)); - val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A); - val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A); - *data = val1 - val2; - data++; - *data = val2; - data++; - } else { - memset(data, 0, 2 * sizeof(u64)); - *data += 2; - } + collect_adapter_stats(adapter, (struct adapter_stats *)data); + data += sizeof(struct adapter_stats) / sizeof(u64); + + *data++ = (u64)pi->port_id; + collect_channel_stats(adapter, (struct channel_stats *)data, + pi->port_id); + data += sizeof(struct channel_stats) / sizeof(u64); + + *data++ = (u64)pi->port_id; + memset(&s, 0, sizeof(s)); + t4_get_lb_stats(adapter, pi->port_id, &s); + + p0 = &s.octets; + for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++) + *data++ = (unsigned long long)*p0++; } static void get_regs(struct net_device *dev, struct ethtool_regs *regs, @@ -250,7 +457,7 @@ static int restart_autoneg(struct net_device *dev) return -EAGAIN; if (p->link_cfg.autoneg != AUTONEG_ENABLE) return -EINVAL; - t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan); + t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan); return 0; } @@ -267,7 +474,7 @@ static int identify_port(struct net_device *dev, else return 
-EINVAL; - return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val); + return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val); } static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps) @@ -439,7 +646,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) lc->autoneg = cmd->autoneg; if (netif_running(dev)) - return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, + return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan, lc); return 0; } @@ -472,7 +679,7 @@ static int set_pauseparam(struct net_device *dev, if (epause->tx_pause) lc->requested_fc |= PAUSE_TX; if (netif_running(dev)) - return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, + return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan, lc); return 0; } @@ -617,7 +824,7 @@ static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) */ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) { - int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); + int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); if (vaddr >= 0) vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); @@ -626,7 +833,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) { - int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); + int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); if (vaddr >= 0) vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); @@ -669,8 +876,8 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, aligned_offset = eeprom->offset & ~3; aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; - if (adapter->fn > 0) { - u32 start = 1024 + adapter->fn * EEPROMPFSIZE; + if (adapter->pf > 0) { + u32 start = 1024 + adapter->pf * EEPROMPFSIZE; if (aligned_offset < start || aligned_offset + aligned_len > start + EEPROMPFSIZE) @@ -740,37 +947,6 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) return ret; } -#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC) -#define BCAST_CRC 0xa0ccc1a6 - -static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - wol->supported = WAKE_BCAST | WAKE_MAGIC; - wol->wolopts = netdev2adap(dev)->wol; - memset(&wol->sopass, 0, sizeof(wol->sopass)); -} - -static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - int err = 0; - struct port_info *pi = netdev_priv(dev); - - if (wol->wolopts & ~WOL_SUPPORTED) - return -EINVAL; - t4_wol_magic_enable(pi->adapter, pi->tx_chan, - (wol->wolopts & WAKE_MAGIC) ? 
dev->dev_addr : NULL); - if (wol->wolopts & WAKE_BCAST) { - err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL, - ~0ULL, 0, false); - if (!err) - err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1, - ~6ULL, ~0ULL, BCAST_CRC, true); - } else { - t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false); - } - return err; -} - static u32 get_rss_table_size(struct net_device *dev) { const struct port_info *pi = netdev_priv(dev); @@ -900,8 +1076,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .get_ethtool_stats = get_stats, .get_regs_len = get_regs_len, .get_regs = get_regs, - .get_wol = get_wol, - .set_wol = set_wol, .get_rxnfc = get_rxnfc, .get_rxfh_indir_size = get_rss_table_size, .get_rxfh = get_rss_table, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 73ac153795b6..0e27f2266e6b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -135,8 +135,10 @@ struct filter_entry { #define FW4_FNAME "cxgb4/t4fw.bin" #define FW5_FNAME "cxgb4/t5fw.bin" +#define FW6_FNAME "cxgb4/t6fw.bin" #define FW4_CFNAME "cxgb4/t4-config.txt" #define FW5_CFNAME "cxgb4/t5-config.txt" +#define FW6_CFNAME "cxgb4/t6-config.txt" #define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld" #define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin" #define PHY_AQ1202_DEVICEID 0x4409 @@ -322,7 +324,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable) * level") we need to issue the Set Parameters Commannd * without sleeping (timeout < 0). */ - err = t4_set_params_timeout(adap, adap->mbox, adap->fn, 0, 1, + err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, &name, &value, -FW_CMD_MAX_TIMEOUT); @@ -387,7 +389,7 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) int uc_cnt = netdev_uc_count(dev); int mc_cnt = netdev_mc_count(dev); const struct port_info *pi = netdev_priv(dev); - unsigned int mb = pi->adapter->fn; + unsigned int mb = pi->adapter->pf; /* first do the secondary unicast addresses */ netdev_for_each_uc_addr(ha, dev) { @@ -444,7 +446,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) ret = set_addr_filters(dev, sleep_ok); if (ret == 0) - ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu, + ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, (dev->flags & IFF_ALLMULTI) ? 
1 : 0, 1, -1, sleep_ok); @@ -461,7 +463,7 @@ static int link_start(struct net_device *dev) { int ret; struct port_info *pi = netdev_priv(dev); - unsigned int mb = pi->adapter->fn; + unsigned int mb = pi->adapter->pf; /* * We do not set address filters and promiscuity here, the stack does @@ -479,7 +481,7 @@ static int link_start(struct net_device *dev) } } if (ret == 0) - ret = t4_link_start(pi->adapter, mb, pi->tx_chan, + ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan, &pi->link_cfg); if (ret == 0) { local_bh_disable(); @@ -879,7 +881,7 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) for (i = 0; i < pi->rss_size; i++, queues++) rss[i] = rxq[*queues].rspq.abs_id; - err = t4_config_rss_range(adapter, adapter->fn, pi->viid, 0, + err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0, pi->rss_size, rss, pi->rss_size); /* If Tunnel All Lookup isn't specified in the global RSS * Configuration, then we need to specify a default Ingress @@ -1351,11 +1353,6 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, return fallback(dev, skb) % dev->real_num_tx_queues; } -static inline int is_offload(const struct adapter *adap) -{ - return adap->params.offload; -} - static int closest_timer(const struct sge *s, int time) { int i, delta, match = 0, min_delta = INT_MAX; @@ -1416,8 +1413,8 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, FW_PARAMS_PARAM_X_V( FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | FW_PARAMS_PARAM_YZ_V(q->cntxt_id); - err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v, - &new_idx); + err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, + &v, &new_idx); if (err) return err; } @@ -1438,7 +1435,7 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features) if (!(changed & NETIF_F_HW_VLAN_CTAG_RX)) return 0; - err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, + err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1, -1, -1, -1, !!(features & NETIF_F_HW_VLAN_CTAG_RX), true); if (unlikely(err)) @@ -1721,7 +1718,7 @@ static int tid_init(struct tid_info *t) bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); /* Reserve stid 0 for T4/T5 adapters */ if (!t->stid_base && - (is_t4(adap->params.chip) || is_t5(adap->params.chip))) + (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)) __set_bit(0, t->stid_bmap); return 0; @@ -2010,11 +2007,8 @@ EXPORT_SYMBOL(cxgb4_iscsi_init); int cxgb4_flush_eq_cache(struct net_device *dev) { struct adapter *adap = netdev2adap(dev); - int ret; - ret = t4_fwaddrspace_write(adap, adap->mbox, - 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000); - return ret; + return t4_sge_ctxt_flush(adap, adap->mbox); } EXPORT_SYMBOL(cxgb4_flush_eq_cache); @@ -2069,25 +2063,6 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, } EXPORT_SYMBOL(cxgb4_sync_txq_pidx); -void cxgb4_disable_db_coalescing(struct net_device *dev) -{ - struct adapter *adap; - - adap = netdev2adap(dev); - t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, - NOCOALESCE_F); -} -EXPORT_SYMBOL(cxgb4_disable_db_coalescing); - -void cxgb4_enable_db_coalescing(struct net_device *dev) -{ - struct adapter *adap; - - adap = netdev2adap(dev); - t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0); -} -EXPORT_SYMBOL(cxgb4_enable_db_coalescing); - int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) { struct adapter *adap; @@ -2127,10 +2102,7 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) if (offset < mc0_end) { memtype = MEM_MC0; memaddr = offset - 
edc1_end; - } else if (is_t4(adap->params.chip)) { - /* T4 only has a single memory channel */ - goto err; - } else { + } else if (is_t5(adap->params.chip)) { size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); mc1_size = EXT_MEM1_SIZE_G(size) << 20; mc1_end = mc0_end + mc1_size; @@ -2141,6 +2113,9 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) /* offset beyond the end of any memory */ goto err; } + } else { + /* T4/T6 only has a single memory channel */ + goto err; } } @@ -2175,7 +2150,7 @@ int cxgb4_bar2_sge_qregs(struct net_device *dev, u64 *pbar2_qoffset, unsigned int *pbar2_qid) { - return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev), + return t4_bar2_sge_qregs(netdev2adap(dev), qid, (qtype == CXGB4_BAR2_QTYPE_EGRESS ? T4_BAR2_QTYPE_EGRESS @@ -2305,9 +2280,13 @@ static void process_db_full(struct work_struct *work) drain_db_fifo(adap, dbfifo_drain_delay); enable_dbs(adap); notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); - t4_set_reg_field(adap, SGE_INT_ENABLE3_A, - DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, - DBFIFO_HP_INT_F | DBFIFO_LP_INT_F); + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) + t4_set_reg_field(adap, SGE_INT_ENABLE3_A, + DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, + DBFIFO_HP_INT_F | DBFIFO_LP_INT_F); + else + t4_set_reg_field(adap, SGE_INT_ENABLE3_A, + DBFIFO_LP_INT_F, DBFIFO_LP_INT_F); } static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) @@ -2369,7 +2348,7 @@ static void process_db_drop(struct work_struct *work) drain_db_fifo(adap, dbfifo_drain_delay); enable_dbs(adap); notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); - } else { + } else if (is_t5(adap->params.chip)) { u32 dropped_db = t4_read_reg(adap, 0x010ac); u16 qid = (dropped_db >> 15) & 0x1ffff; u16 pidx_inc = dropped_db & 0x1fff; @@ -2377,7 +2356,7 @@ static void process_db_drop(struct work_struct *work) unsigned int bar2_qid; int ret; - ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, + ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, &bar2_qoffset, &bar2_qid); if (ret) dev_err(adap->pdev_dev, "doorbell drop recovery: " @@ -2390,7 +2369,8 @@ static void process_db_drop(struct work_struct *work) t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); } - t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0); + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) + t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0); } void t4_db_full(struct adapter *adap) @@ -2420,7 +2400,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) unsigned short i; lli.pdev = adap->pdev; - lli.pf = adap->fn; + lli.pf = adap->pf; lli.l2t = adap->l2t; lli.tids = &adap->tids; lli.ports = adap->port; @@ -2757,7 +2737,7 @@ static int cxgb_close(struct net_device *dev) netif_tx_stop_all_queues(dev); netif_carrier_off(dev); - return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false); + return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false); } /* Return an error number if the indicated filter isn't writable ... 
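The cxgb4_read_tpte() hunk above reworks how a flat offset into the adapter's memory space is resolved to a (memory type, in-region address) pair, with the second external-memory channel now reachable only on T5. For reference, here is a compact standalone sketch of that cascade; the helper name, parameter list and error value are illustrative only, and the enum mirrors the driver's existing memory-type constants rather than adding new ones:

/* Kernel-style sketch (not driver code): resolve a flat offset into the
 * adapter memory space to (memtype, memaddr).  Layout follows the code
 * above: EDC0, then EDC1, then external memory; only T5 has a second
 * external-memory channel.  All sizes are in bytes.
 */
enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };

static int decode_mem_offset(bool chip_is_t5, u32 offset,
			     u32 edc0_size, u32 edc1_size,
			     u32 mc0_size, u32 mc1_size,
			     int *memtype, u32 *memaddr)
{
	u32 edc0_end = edc0_size;
	u32 edc1_end = edc0_end + edc1_size;
	u32 mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		*memtype = MEM_EDC0;
		*memaddr = offset;
	} else if (offset < edc1_end) {
		*memtype = MEM_EDC1;
		*memaddr = offset - edc0_end;
	} else if (offset < mc0_end) {
		*memtype = MEM_MC0;
		*memaddr = offset - edc1_end;
	} else if (chip_is_t5 && offset < mc0_end + mc1_size) {
		*memtype = MEM_MC1;
		*memaddr = offset - mc0_end;
	} else {
		/* T4/T6 only have a single external memory channel, so
		 * anything past mc0_end is beyond the end of any memory.
		 */
		return -EINVAL;
	}
	return 0;
}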
@@ -2901,7 +2881,8 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, spin_unlock(&adapter->stats_lock); return ns; } - t4_get_port_stats(adapter, p->tx_chan, &stats); + t4_get_port_stats_offset(adapter, p->tx_chan, &stats, + &p->stats_base); spin_unlock(&adapter->stats_lock); ns->tx_bytes = stats.tx_octets; @@ -2960,7 +2941,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) } else return -EINVAL; - mbox = pi->adapter->fn; + mbox = pi->adapter->pf; if (cmd == SIOCGMIIREG) ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad, data->reg_num, &data->val_out); @@ -2987,7 +2968,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu) if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ return -EINVAL; - ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1, + ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1, -1, -1, -1, true); if (!ret) dev->mtu = new_mtu; @@ -3003,7 +2984,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid, + ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid, pi->xact_addr_filt, addr->sa_data, true, true); if (ret < 0) return ret; @@ -3100,7 +3081,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | FW_CMD_REQUEST_F | FW_CMD_READ_F); c->cfvalid_to_len16 = htonl(FW_LEN16(*c)); - ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c); + ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c); if (ret < 0) return ret; @@ -3116,18 +3097,18 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) } c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F); - ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL); + ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); if (ret < 0) return ret; - ret = t4_config_glbl_rss(adap, adap->fn, + ret = t4_config_glbl_rss(adap, adap->pf, FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F | FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F); if (ret < 0) return ret; - ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64, + ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64, MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF); if (ret < 0) @@ -3171,7 +3152,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) } /* get basic stuff going */ - return t4_early_init(adap, adap->fn); + return t4_early_init(adap, adap->pf); } /* @@ -3409,6 +3390,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) case CHELSIO_T5: fw_config_file = FW5_CFNAME; break; + case CHELSIO_T6: + fw_config_file = FW6_CFNAME; + break; default: dev_err(adapter->pdev_dev, "Device %d is not supported\n", adapter->pdev->device); @@ -3434,7 +3418,7 @@ static int adap_init0_config(struct adapter *adapter, int reset) params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); ret = t4_query_params(adapter, adapter->mbox, - adapter->fn, 0, 1, params, val); + adapter->pf, 0, 1, params, val); if (ret == 0) { /* * For t4_memory_rw() below addresses and @@ -3605,7 +3589,24 @@ static struct fw_info fw_info_array[] = { .intfver_iscsi = FW_INTFVER(T5, ISCSI), .intfver_fcoe = FW_INTFVER(T5, FCOE), }, + }, { + .chip = CHELSIO_T6, + .fs_name = FW6_CFNAME, + .fw_mod_name = FW6_FNAME, 
+ .fw_hdr = { + .chip = FW_HDR_CHIP_T6, + .fw_ver = __cpu_to_be32(FW_VERSION(T6)), + .intfver_nic = FW_INTFVER(T6, NIC), + .intfver_vnic = FW_INTFVER(T6, VNIC), + .intfver_ofld = FW_INTFVER(T6, OFLD), + .intfver_ri = FW_INTFVER(T6, RI), + .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), + .intfver_iscsi = FW_INTFVER(T6, ISCSI), + .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), + .intfver_fcoe = FW_INTFVER(T6, FCOE), + }, } + }; static struct fw_info *find_fw_info(int chip) @@ -3711,7 +3712,7 @@ static int adap_init0(struct adapter *adap) * the firmware. On the other hand, we need these fairly early on * so we do this right after getting ahold of the firmware. */ - ret = get_vpd_params(adap, &adap->params.vpd); + ret = t4_get_vpd_params(adap, &adap->params.vpd); if (ret < 0) goto bye; @@ -3723,7 +3724,7 @@ static int adap_init0(struct adapter *adap) v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); if (ret < 0) goto bye; @@ -3746,7 +3747,7 @@ static int adap_init0(struct adapter *adap) */ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val); /* If the firmware doesn't support Configuration Files, @@ -3805,7 +3806,7 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(FILTER_START); params[4] = FW_PARAM_PFVF(FILTER_END); params[5] = FW_PARAM_PFVF(IQFLINT_START); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); if (ret < 0) goto bye; adap->sge.egr_start = val[0]; @@ -3823,7 +3824,7 @@ static int adap_init0(struct adapter *adap) */ params[0] = FW_PARAM_PFVF(EQ_END); params[1] = FW_PARAM_PFVF(IQFLINT_END); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); if (ret < 0) goto bye; adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; @@ -3844,7 +3845,7 @@ static int adap_init0(struct adapter *adap) } /* Allocate the memory for the vaious egress queue bitmaps - * ie starving_fl and txq_maperr. + * ie starving_fl, txq_maperr and blocked_fl. 
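+ * (blocked_fl backs the new debugfs "blocked_fl" file added above and
+ * is only allocated when CONFIG_DEBUG_FS is enabled; see the #ifdef
+ * just below.)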
*/ adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL); @@ -3860,9 +3861,18 @@ static int adap_init0(struct adapter *adap) goto bye; } +#ifdef CONFIG_DEBUG_FS + adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), + sizeof(long), GFP_KERNEL); + if (!adap->sge.blocked_fl) { + ret = -ENOMEM; + goto bye; + } +#endif + params[0] = FW_PARAM_PFVF(CLIP_START); params[1] = FW_PARAM_PFVF(CLIP_END); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); if (ret < 0) goto bye; adap->clipt_start = val[0]; @@ -3871,7 +3881,7 @@ static int adap_init0(struct adapter *adap) /* query params related to active filter region */ params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); /* If Active filter size is set we enable establishing * offload connection through firmware work request */ @@ -3888,7 +3898,7 @@ static int adap_init0(struct adapter *adap) */ params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); val[0] = 1; - (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val); + (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); /* * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL @@ -3900,7 +3910,7 @@ static int adap_init0(struct adapter *adap) adap->params.ulptx_memwrite_dsgl = false; } else { params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val); adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); } @@ -3926,7 +3936,7 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(TDDP_START); params[4] = FW_PARAM_PFVF(TDDP_END); params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); if (ret < 0) goto bye; @@ -3964,7 +3974,7 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(RQ_END); params[4] = FW_PARAM_PFVF(PBL_START); params[5] = FW_PARAM_PFVF(PBL_END); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); if (ret < 0) goto bye; @@ -3981,7 +3991,7 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(CQ_END); params[4] = FW_PARAM_PFVF(OCQ_START); params[5] = FW_PARAM_PFVF(OCQ_END); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); if (ret < 0) goto bye; @@ -3994,7 +4004,7 @@ static int adap_init0(struct adapter *adap) params[0] = FW_PARAM_DEV(MAXORDIRD_QP); params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); if (ret < 0) { adap->params.max_ordird_qp = 8; @@ -4012,7 +4022,7 @@ static int adap_init0(struct adapter *adap) if (caps_cmd.iscsicaps) { params[0] = FW_PARAM_PFVF(ISCSI_START); params[1] = FW_PARAM_PFVF(ISCSI_END); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); if (ret < 0) goto bye; @@ -4058,8 +4068,8 @@ static int adap_init0(struct adapter *adap) adap->params.b_wnd); } 
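/* With all firmware parameters in hand, derive the driver's cached SGE
 * and TP soft state from the hardware and firmware state read above.
 */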
t4_init_sge_params(adap); - t4_init_tp_params(adap); adap->flags |= FW_OK; + t4_init_tp_params(adap); return 0; /* @@ -4072,6 +4082,9 @@ static int adap_init0(struct adapter *adap) kfree(adap->sge.ingr_map); kfree(adap->sge.starving_fl); kfree(adap->sge.txq_maperr); +#ifdef CONFIG_DEBUG_FS + kfree(adap->sge.blocked_fl); +#endif if (ret != -ETIMEDOUT && ret != -EIO) t4_fw_bye(adap, adap->mbox); return ret; @@ -4139,7 +4152,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) if (t4_wait_dev_ready(adap->regs) < 0) return PCI_ERS_RESULT_DISCONNECT; - if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0) + if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) return PCI_ERS_RESULT_DISCONNECT; adap->flags |= FW_OK; if (adap_init1(adap, &c)) @@ -4148,7 +4161,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) for_each_port(adap, i) { struct port_info *p = adap2pinfo(adap, i); - ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1, + ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1, NULL, NULL); if (ret < 0) return PCI_ERS_RESULT_DISCONNECT; @@ -4515,15 +4528,23 @@ static void free_some_resources(struct adapter *adapter) kfree(adapter->sge.ingr_map); kfree(adapter->sge.starving_fl); kfree(adapter->sge.txq_maperr); +#ifdef CONFIG_DEBUG_FS + kfree(adapter->sge.blocked_fl); +#endif disable_msi(adapter); for_each_port(adapter, i) if (adapter->port[i]) { + struct port_info *pi = adap2pinfo(adapter, i); + + if (pi->viid != 0) + t4_free_vi(adapter, adapter->mbox, adapter->pf, + 0, pi->viid); kfree(adap2pinfo(adapter, i)->rss); free_netdev(adapter->port[i]); } if (adapter->flags & FW_OK) - t4_fw_bye(adapter, adapter->fn); + t4_fw_bye(adapter, adapter->pf); } #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) @@ -4614,7 +4635,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->pdev = pdev; adapter->pdev_dev = &pdev->dev; adapter->mbox = func; - adapter->fn = func; + adapter->pf = func; adapter->msg_enable = dflt_msg_enable; memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); @@ -4634,7 +4655,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!is_t4(adapter->params.chip)) { s_qpp = (QUEUESPERPAGEPF0_S + (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * - adapter->fn); + adapter->pf); qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp); num_seg = PAGE_SIZE / SEGMENT_SIZE; @@ -4657,10 +4678,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENOMEM; goto out_free_adapter; } + t4_write_reg(adapter, SGE_STAT_CFG_A, + STATSOURCE_T5_V(7) | STATMODE_V(0)); } setup_memwin(adapter); err = adap_init0(adapter); +#ifdef CONFIG_DEBUG_FS + bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz); +#endif setup_memwin_rdma(adapter); if (err) goto out_unmap_bar; @@ -4709,10 +4735,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = t4_port_init(adapter, func, func, 0); if (err) goto out_free_dev; + } else if (adapter->params.nports == 1) { + /* If we don't have a connection to the firmware -- possibly + * because of an error -- grab the raw VPD parameters so we + * can set the proper MAC Address on the debug network + * interface that we've created. 
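+ * The VPD "NA" keyword holds the MAC address as ASCII hex digits,
+ * which is why the loop below consumes two characters per byte.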
+ */ + u8 hw_addr[ETH_ALEN]; + u8 *na = adapter->params.vpd.na; + + err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd); + if (!err) { + for (i = 0; i < ETH_ALEN; i++) + hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 + + hex2val(na[2 * i + 1])); + t4_set_hw_addr(adapter, 0, hw_addr); + } } - /* - * Configure queues and allocate tables now, they can be needed as + /* Configure queues and allocate tables now, they can be needed as * soon as the first register_netdev completes. */ cfg_queues(adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index df34293f35e8..14e8110b5dbb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -298,8 +298,6 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, unsigned int skb_len, unsigned int pull_len); int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size); int cxgb4_flush_eq_cache(struct net_device *dev); -void cxgb4_disable_db_coalescing(struct net_device *dev); -void cxgb4_enable_db_coalescing(struct net_device *dev); int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte); u64 cxgb4_read_sge_timestamp(struct net_device *dev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index dd18fcb644f9..6b7c37fd0252 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -522,14 +522,13 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) { - u32 val; if (q->pend_cred >= 8) { + u32 val = adap->params.arch.sge_fl_db; + if (is_t4(adap->params.chip)) - val = PIDX_V(q->pend_cred / 8); + val |= PIDX_V(q->pend_cred / 8); else - val = PIDX_T5_V(q->pend_cred / 8) | - DBTYPE_F; - val |= DBPRIO_F; + val |= PIDX_T5_V(q->pend_cred / 8); /* Make sure all memory writes to the Free List queue are * committed before we tell the hardware about them. @@ -588,6 +587,11 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, struct rx_sw_desc *sd = &q->sdesc[q->pidx]; int node; +#ifdef CONFIG_DEBUG_FS + if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl)) + goto out; +#endif + gfp |= __GFP_NOWARN; node = dev_to_node(adap->pdev_dev); @@ -1029,7 +1033,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, * Figure out what HW csum a packet wants and return the appropriate control * bits. 
*/ -static u64 hwcsum(const struct sk_buff *skb) +static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb) { int csum_type; const struct iphdr *iph = ip_hdr(skb); @@ -1060,11 +1064,16 @@ static u64 hwcsum(const struct sk_buff *skb) goto nocsum; } - if (likely(csum_type >= TX_CSUM_TCPIP)) - return TXPKT_CSUM_TYPE_V(csum_type) | - TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)) | - TXPKT_ETHHDR_LEN_V(skb_network_offset(skb) - ETH_HLEN); - else { + if (likely(csum_type >= TX_CSUM_TCPIP)) { + u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)); + int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; + + if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5) + hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len); + else + hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len); + return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len; + } else { int start = skb_transport_offset(skb); return TXPKT_CSUM_TYPE_V(csum_type) | @@ -1232,9 +1241,15 @@ out_free: dev_kfree_skb_any(skb); else lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); cpl = (void *)(lso + 1); - cntrl = TXPKT_CSUM_TYPE_V(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | - TXPKT_IPHDR_LEN_V(l3hdr_len) | - TXPKT_ETHHDR_LEN_V(eth_xtra_len); + + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) + cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); + else + cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); + + cntrl |= TXPKT_CSUM_TYPE_V(v6 ? + TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN_V(l3hdr_len); q->tso++; q->tx_cso += ssi->gso_segs; } else { @@ -1243,7 +1258,8 @@ out_free: dev_kfree_skb_any(skb); FW_WR_IMMDLEN_V(len)); cpl = (void *)(wr + 1); if (skb->ip_summed == CHECKSUM_PARTIAL) { - cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS_F; + cntrl = hwcsum(adap->params.chip, skb) | + TXPKT_IPCSUM_DIS_F; q->tx_cso++; } } @@ -1260,7 +1276,7 @@ out_free: dev_kfree_skb_any(skb); cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | - TXPKT_PF_V(adap->fn)); + TXPKT_PF_V(adap->pf)); cpl->pack = htons(0); cpl->len = htons(skb->len); cpl->ctrl1 = cpu_to_be64(cntrl); @@ -2385,7 +2401,7 @@ static void __iomem *bar2_address(struct adapter *adapter, u64 bar2_qoffset; int ret; - ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype, + ret = t4_bar2_sge_qregs(adapter, qid, qtype, &bar2_qoffset, pbar2_qid); if (ret) return NULL; @@ -2416,7 +2432,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F | FW_CMD_EXEC_F | - FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0)); + FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0)); c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F | FW_LEN16(c)); c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | @@ -2435,6 +2451,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F); if (fl) { + enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); + /* Allocate the ring for the hardware free list (with space * for its status page) along with the associated software * descriptor ring. The free list size needs to be a multiple @@ -2463,12 +2481,14 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, FW_IQ_CMD_FL0CONGEN_F); c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) | - FW_IQ_CMD_FL0FBMAX_V(FETCHBURSTMAX_512B_X)); + FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? 
+ FETCHBURSTMAX_512B_X : + FETCHBURSTMAX_256B_X)); c.fl0size = htons(flsz); c.fl0addr = cpu_to_be64(fl->addr); } - ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); if (ret) goto err; @@ -2536,7 +2556,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, CONMCTXT_CNGCHMAP_V(1 << (i << 2)); } } - ret = t4_set_params(adap, adap->mbox, adap->fn, 0, 1, + ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); if (ret) dev_warn(adap->pdev_dev, "Failed to set Congestion" @@ -2601,7 +2621,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F | FW_CMD_EXEC_F | - FW_EQ_ETH_CMD_PFN_V(adap->fn) | + FW_EQ_ETH_CMD_PFN_V(adap->pf) | FW_EQ_ETH_CMD_VFN_V(0)); c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); @@ -2618,7 +2638,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, FW_EQ_ETH_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); - ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); if (ret) { kfree(txq->q.sdesc); txq->q.sdesc = NULL; @@ -2656,7 +2676,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F | FW_CMD_EXEC_F | - FW_EQ_CTRL_CMD_PFN_V(adap->fn) | + FW_EQ_CTRL_CMD_PFN_V(adap->pf) | FW_EQ_CTRL_CMD_VFN_V(0)); c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F | FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); @@ -2673,7 +2693,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); - ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); if (ret) { dma_free_coherent(adap->pdev_dev, nentries * sizeof(struct tx_desc), @@ -2711,7 +2731,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F | FW_CMD_EXEC_F | - FW_EQ_OFLD_CMD_PFN_V(adap->fn) | + FW_EQ_OFLD_CMD_PFN_V(adap->pf) | FW_EQ_OFLD_CMD_VFN_V(0)); c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); @@ -2726,7 +2746,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); - ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); if (ret) { kfree(txq->q.sdesc); txq->q.sdesc = NULL; @@ -2765,7 +2785,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; - t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP, + t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id, 0xffff); dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, rq->desc, rq->phys_addr); @@ -2820,7 +2840,7 @@ void t4_free_sge_resources(struct adapter *adap) free_rspq_fl(adap, &eq->rspq, eq->fl.size ? 
&eq->fl : NULL); if (etq->q.desc) { - t4_eth_eq_free(adap, adap->fn, adap->fn, 0, + t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, etq->q.cntxt_id); free_tx_desc(adap, &etq->q, etq->q.in_use, true); kfree(etq->q.sdesc); @@ -2839,7 +2859,7 @@ void t4_free_sge_resources(struct adapter *adap) if (q->q.desc) { tasklet_kill(&q->qresume_tsk); - t4_ofld_eq_free(adap, adap->fn, adap->fn, 0, + t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, q->q.cntxt_id); free_tx_desc(adap, &q->q, q->q.in_use, false); kfree(q->q.sdesc); @@ -2854,7 +2874,7 @@ void t4_free_sge_resources(struct adapter *adap) if (cq->q.desc) { tasklet_kill(&cq->qresume_tsk); - t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0, + t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, cq->q.cntxt_id); __skb_queue_purge(&cq->sendq); free_txq(adap, &cq->q); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 7b92f0f86205..fdda0f8c5a19 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -150,7 +150,12 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, */ void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val) { - u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg); + u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg); + + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) + req |= ENABLE_F; + else + req |= T6_ENABLE_F; if (is_t4(adap->params.chip)) req |= LOCALCFG_F; @@ -381,9 +386,8 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, /* Offset into the region of memory which is being accessed * MEM_EDC0 = 0 * MEM_EDC1 = 1 - * MEM_MC = 2 -- T4 - * MEM_MC0 = 2 -- For T5 - * MEM_MC1 = 3 -- For T5 + * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller + * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5) */ edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A)); if (mtype != MEM_MC1) @@ -412,7 +416,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X; if (is_t4(adap->params.chip)) mem_base -= adap->t4_bar0; - win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn); + win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf); /* Calculate our initial PCI-E Memory Window Position and Offset into * that Window. 
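As a companion to the decode sketch earlier in this section, the memory-map comment just above implies a simple base-offset rule per region. The helper below is hypothetical -- its name, MB units and the equal-sized-EDC assumption are mine (mirroring how edc_size is read from MA_EDRAM0_BAR_A alone) -- and is not the driver's elided arithmetic:

/* Hypothetical: base of each memory region in MB.  Both EDC ranks are
 * assumed to have the size reported by MA_EDRAM0_BAR_A, matching the
 * single edc_size read above.  MEM_MC1 exists only on chips with two
 * external-memory controllers (e.g. T5).
 */
static u32 mem_region_base_mb(int mtype, u32 edc_size_mb, u32 mc0_size_mb)
{
	if (mtype != MEM_MC1)
		/* MEM_EDC0 = 0, MEM_EDC1 = 1, MEM_MC = 2 */
		return mtype * edc_size_mb;
	return MEM_MC0 * edc_size_mb + mc0_size_mb;
}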
@@ -547,7 +551,7 @@ u32 t4_read_pcie_cfg4(struct adapter *adap, int reg) ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd)); ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1); ldst_cmd.u.pcie.ctrl_to_fn = - (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn)); + (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf)); ldst_cmd.u.pcie.r = reg; /* If the LDST Command succeeds, return the result, otherwise @@ -634,6 +638,7 @@ unsigned int t4_get_regs_len(struct adapter *adapter) return T4_REGMAP_SIZE; case CHELSIO_T5: + case CHELSIO_T6: return T5_REGMAP_SIZE; } @@ -1316,6 +1321,344 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x51300, 0x51308, }; + static const unsigned int t6_reg_ranges[] = { + 0x1008, 0x114c, + 0x1180, 0x11b4, + 0x11fc, 0x1250, + 0x1280, 0x133c, + 0x1800, 0x18fc, + 0x3000, 0x302c, + 0x3060, 0x30d8, + 0x30e0, 0x30fc, + 0x3140, 0x357c, + 0x35a8, 0x35cc, + 0x35ec, 0x35ec, + 0x3600, 0x5624, + 0x56cc, 0x575c, + 0x580c, 0x5814, + 0x5890, 0x58bc, + 0x5940, 0x595c, + 0x5980, 0x598c, + 0x59b0, 0x59dc, + 0x59fc, 0x5a18, + 0x5a60, 0x5a6c, + 0x5a80, 0x5a9c, + 0x5b94, 0x5bfc, + 0x5c10, 0x5ec0, + 0x5ec8, 0x5ec8, + 0x6000, 0x6040, + 0x6058, 0x6154, + 0x7700, 0x7798, + 0x77c0, 0x7880, + 0x78cc, 0x78fc, + 0x7b00, 0x7c54, + 0x7d00, 0x7efc, + 0x8dc0, 0x8de0, + 0x8df8, 0x8e84, + 0x8ea0, 0x8f88, + 0x8fb8, 0x911c, + 0x9400, 0x9470, + 0x9600, 0x971c, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0xa020, + 0xd004, 0xd03c, + 0xdfc0, 0xdfe0, + 0xe000, 0xf008, + 0x11000, 0x11014, + 0x11048, 0x11110, + 0x11118, 0x1117c, + 0x11190, 0x11260, + 0x11300, 0x1130c, + 0x12000, 0x1205c, + 0x19040, 0x1906c, + 0x19078, 0x19080, + 0x1908c, 0x19124, + 0x19150, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x192b8, + 0x193f8, 0x19474, + 0x19490, 0x194cc, + 0x194f0, 0x194f8, + 0x19c00, 0x19c80, + 0x19c94, 0x19cbc, + 0x19ce4, 0x19d28, + 0x19d50, 0x19d78, + 0x19d94, 0x19dc8, + 0x19df0, 0x19e10, + 0x19e50, 0x19e6c, + 0x19ea0, 0x19f34, + 0x19f40, 0x19f50, + 0x19f90, 0x19fac, + 0x19fc4, 0x19fe4, + 0x1a000, 0x1a06c, + 0x1a0b0, 0x1a120, + 0x1a128, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e008, 0x1e00c, + 0x1e040, 0x1e04c, + 0x1e284, 0x1e290, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e408, 0x1e40c, + 0x1e440, 0x1e44c, + 0x1e684, 0x1e690, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e808, 0x1e80c, + 0x1e840, 0x1e84c, + 0x1ea84, 0x1ea90, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec08, 0x1ec0c, + 0x1ec40, 0x1ec4c, + 0x1ee84, 0x1ee90, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f008, 0x1f00c, + 0x1f040, 0x1f04c, + 0x1f284, 0x1f290, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f408, 0x1f40c, + 0x1f440, 0x1f44c, + 0x1f684, 0x1f690, + 0x1f6c0, 0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f808, 0x1f80c, + 0x1f840, 0x1f84c, + 0x1fa84, 0x1fa90, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc08, 0x1fc0c, + 0x1fc40, 0x1fc4c, + 0x1fe84, 0x1fe90, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x30000, 0x30070, + 0x30100, 0x3015c, + 0x30190, 0x301d0, + 0x30200, 0x30318, + 0x30400, 0x3052c, + 0x30540, 0x3061c, + 0x30800, 0x3088c, + 0x308c0, 0x30908, + 
0x30910, 0x309b8, + 0x30a00, 0x30a04, + 0x30a0c, 0x30a2c, + 0x30a44, 0x30a50, + 0x30a74, 0x30c24, + 0x30d00, 0x30d3c, + 0x30d44, 0x30d7c, + 0x30de0, 0x30de0, + 0x30e00, 0x30ed4, + 0x30f00, 0x30fa4, + 0x30fc0, 0x30fc4, + 0x31000, 0x31004, + 0x31080, 0x310fc, + 0x31208, 0x31220, + 0x3123c, 0x31254, + 0x31300, 0x31300, + 0x31308, 0x3131c, + 0x31338, 0x3133c, + 0x31380, 0x31380, + 0x31388, 0x313a8, + 0x313b4, 0x313b4, + 0x31400, 0x31420, + 0x31438, 0x3143c, + 0x31480, 0x31480, + 0x314a8, 0x314a8, + 0x314b0, 0x314b4, + 0x314c8, 0x314d4, + 0x31a40, 0x31a4c, + 0x31af0, 0x31b20, + 0x31b38, 0x31b3c, + 0x31b80, 0x31b80, + 0x31ba8, 0x31ba8, + 0x31bb0, 0x31bb4, + 0x31bc8, 0x31bd4, + 0x32140, 0x3218c, + 0x321f0, 0x32200, + 0x32218, 0x32218, + 0x32400, 0x32400, + 0x32408, 0x3241c, + 0x32618, 0x32620, + 0x32664, 0x32664, + 0x326a8, 0x326a8, + 0x326ec, 0x326ec, + 0x32a00, 0x32abc, + 0x32b00, 0x32b78, + 0x32c00, 0x32c00, + 0x32c08, 0x32c3c, + 0x32e00, 0x32e2c, + 0x32f00, 0x32f2c, + 0x33000, 0x330ac, + 0x330c0, 0x331ac, + 0x331c0, 0x332c4, + 0x332e4, 0x333c4, + 0x333e4, 0x334ac, + 0x334c0, 0x335ac, + 0x335c0, 0x336c4, + 0x336e4, 0x337c4, + 0x337e4, 0x337fc, + 0x33814, 0x33814, + 0x33854, 0x33868, + 0x33880, 0x3388c, + 0x338c0, 0x338d0, + 0x338e8, 0x338ec, + 0x33900, 0x339ac, + 0x339c0, 0x33ac4, + 0x33ae4, 0x33b10, + 0x33b24, 0x33b50, + 0x33bf0, 0x33c10, + 0x33c24, 0x33c50, + 0x33cf0, 0x33cfc, + 0x34000, 0x34070, + 0x34100, 0x3415c, + 0x34190, 0x341d0, + 0x34200, 0x34318, + 0x34400, 0x3452c, + 0x34540, 0x3461c, + 0x34800, 0x3488c, + 0x348c0, 0x34908, + 0x34910, 0x349b8, + 0x34a00, 0x34a04, + 0x34a0c, 0x34a2c, + 0x34a44, 0x34a50, + 0x34a74, 0x34c24, + 0x34d00, 0x34d3c, + 0x34d44, 0x34d7c, + 0x34de0, 0x34de0, + 0x34e00, 0x34ed4, + 0x34f00, 0x34fa4, + 0x34fc0, 0x34fc4, + 0x35000, 0x35004, + 0x35080, 0x350fc, + 0x35208, 0x35220, + 0x3523c, 0x35254, + 0x35300, 0x35300, + 0x35308, 0x3531c, + 0x35338, 0x3533c, + 0x35380, 0x35380, + 0x35388, 0x353a8, + 0x353b4, 0x353b4, + 0x35400, 0x35420, + 0x35438, 0x3543c, + 0x35480, 0x35480, + 0x354a8, 0x354a8, + 0x354b0, 0x354b4, + 0x354c8, 0x354d4, + 0x35a40, 0x35a4c, + 0x35af0, 0x35b20, + 0x35b38, 0x35b3c, + 0x35b80, 0x35b80, + 0x35ba8, 0x35ba8, + 0x35bb0, 0x35bb4, + 0x35bc8, 0x35bd4, + 0x36140, 0x3618c, + 0x361f0, 0x36200, + 0x36218, 0x36218, + 0x36400, 0x36400, + 0x36408, 0x3641c, + 0x36618, 0x36620, + 0x36664, 0x36664, + 0x366a8, 0x366a8, + 0x366ec, 0x366ec, + 0x36a00, 0x36abc, + 0x36b00, 0x36b78, + 0x36c00, 0x36c00, + 0x36c08, 0x36c3c, + 0x36e00, 0x36e2c, + 0x36f00, 0x36f2c, + 0x37000, 0x370ac, + 0x370c0, 0x371ac, + 0x371c0, 0x372c4, + 0x372e4, 0x373c4, + 0x373e4, 0x374ac, + 0x374c0, 0x375ac, + 0x375c0, 0x376c4, + 0x376e4, 0x377c4, + 0x377e4, 0x377fc, + 0x37814, 0x37814, + 0x37854, 0x37868, + 0x37880, 0x3788c, + 0x378c0, 0x378d0, + 0x378e8, 0x378ec, + 0x37900, 0x379ac, + 0x379c0, 0x37ac4, + 0x37ae4, 0x37b10, + 0x37b24, 0x37b50, + 0x37bf0, 0x37c10, + 0x37c24, 0x37c50, + 0x37cf0, 0x37cfc, + 0x40040, 0x40040, + 0x40080, 0x40084, + 0x40100, 0x40100, + 0x40140, 0x401bc, + 0x40200, 0x40214, + 0x40228, 0x40228, + 0x40240, 0x40258, + 0x40280, 0x40280, + 0x40304, 0x40304, + 0x40330, 0x4033c, + 0x41304, 0x413dc, + 0x41400, 0x4141c, + 0x41480, 0x414d0, + 0x44000, 0x4407c, + 0x440c0, 0x4427c, + 0x442c0, 0x4447c, + 0x444c0, 0x4467c, + 0x446c0, 0x4487c, + 0x448c0, 0x44a7c, + 0x44ac0, 0x44c7c, + 0x44cc0, 0x44e7c, + 0x44ec0, 0x4507c, + 0x450c0, 0x451fc, + 0x45800, 0x45868, + 0x45880, 0x45884, + 0x458a0, 0x458b0, + 0x45a00, 0x45a68, + 0x45a80, 0x45a84, + 0x45aa0, 0x45ab0, + 0x460c0, 
0x460e4, + 0x47000, 0x4708c, + 0x47200, 0x47250, + 0x47400, 0x47420, + 0x47600, 0x47618, + 0x47800, 0x4782c, + 0x50000, 0x500cc, + 0x50400, 0x50400, + 0x50800, 0x508cc, + 0x50c00, 0x50c00, + 0x51000, 0x510b0, + 0x51300, 0x51324, + }; + u32 *buf_end = (u32 *)((char *)buf + buf_size); const unsigned int *reg_ranges; int reg_ranges_size, range; @@ -1335,6 +1678,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) reg_ranges_size = ARRAY_SIZE(t5_reg_ranges); break; + case CHELSIO_T6: + reg_ranges = t6_reg_ranges; + reg_ranges_size = ARRAY_SIZE(t6_reg_ranges); + break; + default: dev_err(adap->pdev_dev, "Unsupported chip version %d\n", chip_version); @@ -1381,17 +1729,16 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable) } /** - * get_vpd_params - read VPD parameters from VPD EEPROM + * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM * @adapter: adapter to read * @p: where to store the parameters * * Reads card parameters stored in VPD EEPROM. */ -int get_vpd_params(struct adapter *adapter, struct vpd_params *p) +int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p) { - u32 cclk_param, cclk_val; - int i, ret, addr; - int ec, sn, pn; + int i, ret = 0, addr; + int ec, sn, pn, na; u8 *vpd, csum; unsigned int vpdr_len, kw_offset, id_len; @@ -1399,6 +1746,9 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) if (!vpd) return -ENOMEM; + /* Card information normally starts at VPD_BASE but early cards had + * it at 0. + */ ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); if (ret < 0) goto out; @@ -1464,6 +1814,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) FIND_VPD_KW(ec, "EC"); FIND_VPD_KW(sn, "SN"); FIND_VPD_KW(pn, "PN"); + FIND_VPD_KW(na, "NA"); #undef FIND_VPD_KW memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len); @@ -1476,18 +1827,42 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE); memcpy(p->pn, vpd + pn, min(i, PN_LEN)); strim(p->pn); + memcpy(p->na, vpd + na, min(i, MACADDR_LEN)); + strim((char *)p->na); - /* - * Ask firmware for the Core Clock since it knows how to translate the +out: + vfree(vpd); + return ret; +} + +/** + * t4_get_vpd_params - read VPD parameters & retrieve Core Clock + * @adapter: adapter to read + * @p: where to store the parameters + * + * Reads card parameters stored in VPD EEPROM and retrieves the Core + * Clock. This can only be called after a connection to the firmware + * is established. + */ +int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p) +{ + u32 cclk_param, cclk_val; + int ret; + + /* Grab the raw VPD parameters. + */ + ret = t4_get_raw_vpd_params(adapter, p); + if (ret) + return ret; + + /* Ask firmware for the Core Clock since it knows how to translate the * Reference Clock ('V2') VPD field into a Core Clock value ... */ cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK)); - ret = t4_query_params(adapter, adapter->mbox, 0, 0, + ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1, &cclk_param, &cclk_val); -out: - vfree(vpd); if (ret) return ret; p->cclk = cclk_val; @@ -1948,7 +2323,8 @@ static bool t4_fw_matches_chip(const struct adapter *adap, * which will keep us "honest" in the future ... 
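A note on the t6_reg_ranges[] table above: like the T4/T5 tables it is a flat array of inclusive {start, end} register-address pairs, and t4_get_regs() walks it pair by pair when dumping the chip. A minimal userspace sketch of that walk, with a few sample ranges and a hypothetical read_reg() stub standing in for real MMIO:

#include <stdio.h>

/* A few sample {start, end} pairs in the style of t6_reg_ranges above;
 * the real table is much longer and chip-specific. */
static const unsigned int reg_ranges[] = {
	0x1008, 0x114c,
	0x1180, 0x11b4,
	0x3000, 0x302c,
};

/* Hypothetical stand-in for a 32-bit MMIO register read. */
static unsigned int read_reg(unsigned int addr)
{
	return addr;		/* dummy value */
}

int main(void)
{
	unsigned int buf[256];
	size_t n = 0;
	size_t range;

	/* Ranges come in pairs, both ends inclusive, registers 4 bytes
	 * apart - the same walk t4_get_regs() performs. */
	for (range = 0; range < sizeof(reg_ranges) / sizeof(reg_ranges[0]);
	     range += 2) {
		unsigned int addr;

		for (addr = reg_ranges[range];
		     addr <= reg_ranges[range + 1]; addr += 4)
			buf[n++] = read_reg(addr);
	}
	printf("dumped %zu registers (%zu bytes)\n", n, n * 4);
	return 0;
}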
*/ if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) || - (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5)) + (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) || + (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6)) return true; dev_err(adap->pdev_dev, @@ -2062,7 +2438,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver) FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) | FW_PARAMS_PARAM_Y_V(adap->params.portvec) | FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION)); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); if (ret < 0) return ret; @@ -2134,7 +2510,7 @@ int t4_load_phy_fw(struct adapter *adap, FW_PARAMS_PARAM_Y_V(adap->params.portvec) | FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD)); val = phy_fw_size; - ret = t4_query_params_rw(adap, adap->mbox, adap->fn, 0, 1, + ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val, 1); if (ret < 0) return ret; @@ -2163,7 +2539,7 @@ int t4_load_phy_fw(struct adapter *adap, FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) | FW_PARAMS_PARAM_Y_V(adap->params.portvec) | FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD)); - ret = t4_set_params_timeout(adap, adap->mbox, adap->fn, 0, 1, + ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val, 30000); /* If we have version number support, then check to see that the new @@ -2199,7 +2575,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op) c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F | - FW_PARAMS_CMD_PFN_V(adap->fn) | + FW_PARAMS_CMD_PFN_V(adap->pf) | FW_PARAMS_CMD_VFN_V(0)); c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c.param[0].mnem = @@ -2230,7 +2606,7 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) FW_PORT_CAP_ANEG) /** - * t4_link_start - apply link configuration to MAC/PHY + * t4_link_l1cfg - apply link configuration to MAC/PHY * @phy: the PHY to setup * @mac: the MAC to setup * @lc: the requested link configuration @@ -2242,7 +2618,7 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, * otherwise do it later based on the outcome of auto-negotiation. 
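The t4_fw_matches_chip() hunk above just adds a third accepted pairing: a T6 adapter only accepts firmware whose header advertises FW_HDR_CHIP_T6. A standalone sketch of the check, with the chip identifiers simplified (the driver derives them from adap->params.chip via its is_t4()/is_t5()/is_t6() macros):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors enum fw_hdr_chip from t4fw_api.h: T4 = 0, T5 = 1, T6 = 2. */
enum fw_hdr_chip { FW_HDR_CHIP_T4, FW_HDR_CHIP_T5, FW_HDR_CHIP_T6 };

/* Simplified chip identifiers, illustrative only. */
enum chip_ver { CHIP_T4 = 4, CHIP_T5 = 5, CHIP_T6 = 6 };

static bool fw_matches_chip(enum chip_ver chip, unsigned int hdr_chip)
{
	return (chip == CHIP_T4 && hdr_chip == FW_HDR_CHIP_T4) ||
	       (chip == CHIP_T5 && hdr_chip == FW_HDR_CHIP_T5) ||
	       (chip == CHIP_T6 && hdr_chip == FW_HDR_CHIP_T6);
}

int main(void)
{
	printf("T6 + T6 fw: %d\n", fw_matches_chip(CHIP_T6, FW_HDR_CHIP_T6));
	printf("T6 + T5 fw: %d\n", fw_matches_chip(CHIP_T6, FW_HDR_CHIP_T5));
	return 0;
}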
*/ -int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, +int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc) { struct fw_port_cmd c; @@ -2488,6 +2864,7 @@ static void tp_intr_handler(struct adapter *adapter) static void sge_intr_handler(struct adapter *adapter) { u64 v; + u32 err; static const struct intr_info sge_intr_info[] = { { ERR_CPL_EXCEED_IQE_SIZE_F, @@ -2496,8 +2873,6 @@ static void sge_intr_handler(struct adapter *adapter) "SGE GTS CIDX increment too large", -1, 0 }, { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 }, { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full }, - { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full }, - { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped }, { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F, "SGE IQID > 1023 received CPL for FL", -1, 0 }, { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1, @@ -2510,13 +2885,19 @@ static void sge_intr_handler(struct adapter *adapter) 0 }, { ERR_ING_CTXT_PRIO_F, "SGE too many priority ingress contexts", -1, 0 }, - { ERR_EGR_CTXT_PRIO_F, - "SGE too many priority egress contexts", -1, 0 }, { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 }, { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 }, { 0 } }; + static struct intr_info t4t5_sge_intr_info[] = { + { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped }, + { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full }, + { ERR_EGR_CTXT_PRIO_F, + "SGE too many priority egress contexts", -1, 0 }, + { 0 } + }; + v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) | ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32); if (v) { @@ -2526,8 +2907,23 @@ static void sge_intr_handler(struct adapter *adapter) t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32); } - if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) || - v != 0) + v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info); + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, + t4t5_sge_intr_info); + + err = t4_read_reg(adapter, SGE_ERROR_STATS_A); + if (err & ERROR_QID_VALID_F) { + dev_err(adapter->pdev_dev, "SGE error for queue %u\n", + ERROR_QID_G(err)); + if (err & UNCAPTURED_ERROR_F) + dev_err(adapter->pdev_dev, + "SGE UNCAPTURED_ERROR set (clearing)\n"); + t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F | + UNCAPTURED_ERROR_F); + } + + if (v != 0) t4_fatal_err(adapter); } @@ -2700,6 +3096,7 @@ static void cplsw_intr_handler(struct adapter *adapter) */ static void le_intr_handler(struct adapter *adap) { + enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); static const struct intr_info le_intr_info[] = { { LIPMISS_F, "LE LIP miss", -1, 0 }, { LIP0_F, "LE 0 LIP error", -1, 0 }, @@ -2709,7 +3106,18 @@ static void le_intr_handler(struct adapter *adap) { 0 } }; - if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info)) + static struct intr_info t6_le_intr_info[] = { + { T6_LIPMISS_F, "LE LIP miss", -1, 0 }, + { T6_LIP0_F, "LE 0 LIP error", -1, 0 }, + { TCAMINTPERR_F, "LE parity error", -1, 1 }, + { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 }, + { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, + (chip <= CHELSIO_T5) ? 
+				      le_intr_info : t6_le_intr_info))
 		t4_fatal_err(adap);
 }
 
@@ -2978,7 +3386,7 @@ int t4_slow_intr_handler(struct adapter *adapter)
 		pcie_intr_handler(adapter);
 	if (cause & MC_F)
 		mem_intr_handler(adapter, MEM_MC);
-	if (!is_t4(adapter->params.chip) && (cause & MC1_S))
+	if (is_t5(adapter->params.chip) && (cause & MC1_F))
 		mem_intr_handler(adapter, MEM_MC1);
 	if (cause & EDC0_F)
 		mem_intr_handler(adapter, MEM_EDC0);
@@ -3024,17 +3432,18 @@
  */
 void t4_intr_enable(struct adapter *adapter)
 {
+	u32 val = 0;
 	u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
 
+	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+		val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
 	t4_write_reg(adapter, SGE_INT_ENABLE3_A,
 		     ERR_CPL_EXCEED_IQE_SIZE_F | ERR_INVALID_CIDX_INC_F |
 		     ERR_CPL_OPCODE_0_F |
-		     ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
+		     ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
 		     ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
 		     ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
 		     ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
-		     ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
-		     DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
-		     EGRESS_SIZE_ERR_F);
+		     DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
 	t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
 }
@@ -3223,6 +3632,40 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
 	return 0;
 }
 
+/**
+ * t4_fw_tp_pio_rw - Access TP PIO through LDST
+ * @adap: the adapter
+ * @vals: where the indirect register values are stored/written
+ * @nregs: how many indirect registers to read/write
+ * @start_index: index of first indirect register to read/write
+ * @rw: Read (1) or Write (0)
+ *
+ * Access TP PIO registers through LDST
+ */
+static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+			    unsigned int start_index, unsigned int rw)
+{
+	int ret, i;
+	int cmd = FW_LDST_ADDRSPC_TP_PIO;
+	struct fw_ldst_cmd c;
+
+	for (i = 0 ; i < nregs; i++) {
+		memset(&c, 0, sizeof(c));
+		c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+						FW_CMD_REQUEST_F |
+						(rw ? FW_CMD_READ_F :
+						      FW_CMD_WRITE_F) |
+						FW_LDST_CMD_ADDRSPACE_V(cmd));
+		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+
+		c.u.addrval.addr = cpu_to_be32(start_index + i);
+		c.u.addrval.val = rw ?
0 : cpu_to_be32(vals[i]); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (!ret && rw) + vals[i] = be32_to_cpu(c.u.addrval.val); + } +} + /** * t4_read_rss_key - read the global RSS key * @adap: the adapter @@ -3232,8 +3675,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map) */ void t4_read_rss_key(struct adapter *adap, u32 *key) { - t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, - TP_RSS_SECRET_KEY0_A); + if (adap->flags & FW_OK) + t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1); + else + t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, + TP_RSS_SECRET_KEY0_A); } /** @@ -3248,11 +3694,32 @@ void t4_read_rss_key(struct adapter *adap, u32 *key) */ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) { - t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, - TP_RSS_SECRET_KEY0_A); - if (idx >= 0 && idx < 16) - t4_write_reg(adap, TP_RSS_CONFIG_VRT_A, - KEYWRADDR_V(idx) | KEYWREN_F); + u8 rss_key_addr_cnt = 16; + u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A); + + /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble), + * allows access to key addresses 16-63 by using KeyWrAddrX + * as index[5:4](upper 2) into key table + */ + if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) && + (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3)) + rss_key_addr_cnt = 32; + + if (adap->flags & FW_OK) + t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0); + else + t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, + TP_RSS_SECRET_KEY0_A); + + if (idx >= 0 && idx < rss_key_addr_cnt) { + if (rss_key_addr_cnt > 16) + t4_write_reg(adap, TP_RSS_CONFIG_VRT_A, + KEYWRADDRX_V(idx >> 4) | + T6_VFWRADDR_V(idx) | KEYWREN_F); + else + t4_write_reg(adap, TP_RSS_CONFIG_VRT_A, + KEYWRADDR_V(idx) | KEYWREN_F); + } } /** @@ -3267,8 +3734,12 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp) { - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - valp, 1, TP_RSS_PF0_CONFIG_A + index); + if (adapter->flags & FW_OK) + t4_fw_tp_pio_rw(adapter, valp, 1, + TP_RSS_PF0_CONFIG_A + index, 1); + else + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + valp, 1, TP_RSS_PF0_CONFIG_A + index); } /** @@ -3286,8 +3757,13 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, { u32 vrt, mask, data; - mask = VFWRADDR_V(VFWRADDR_M); - data = VFWRADDR_V(index); + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) { + mask = VFWRADDR_V(VFWRADDR_M); + data = VFWRADDR_V(index); + } else { + mask = T6_VFWRADDR_V(T6_VFWRADDR_M); + data = T6_VFWRADDR_V(index); + } /* Request that the index'th VF Table values be read into VFL/VFH. */ @@ -3298,10 +3774,15 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, /* Grab the VFL/VFH values ... 
*/ - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - vfl, 1, TP_RSS_VFL_CONFIG_A); - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - vfh, 1, TP_RSS_VFH_CONFIG_A); + if (adapter->flags & FW_OK) { + t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1); + t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1); + } else { + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + vfl, 1, TP_RSS_VFL_CONFIG_A); + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + vfh, 1, TP_RSS_VFH_CONFIG_A); + } } /** @@ -3314,8 +3795,11 @@ u32 t4_read_rss_pf_map(struct adapter *adapter) { u32 pfmap; - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - &pfmap, 1, TP_RSS_PF_MAP_A); + if (adapter->flags & FW_OK) + t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1); + else + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + &pfmap, 1, TP_RSS_PF_MAP_A); return pfmap; } @@ -3329,8 +3813,11 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter) { u32 pfmask; - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - &pfmask, 1, TP_RSS_PF_MSK_A); + if (adapter->flags & FW_OK) + t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1); + else + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + &pfmask, 1, TP_RSS_PF_MSK_A); return pfmask; } @@ -3355,24 +3842,148 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, if (v4) { t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A); - v4->tcpOutRsts = STAT(OUT_RST); - v4->tcpInSegs = STAT64(IN_SEG); - v4->tcpOutSegs = STAT64(OUT_SEG); - v4->tcpRetransSegs = STAT64(RXT_SEG); + v4->tcp_out_rsts = STAT(OUT_RST); + v4->tcp_in_segs = STAT64(IN_SEG); + v4->tcp_out_segs = STAT64(OUT_SEG); + v4->tcp_retrans_segs = STAT64(RXT_SEG); } if (v6) { t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A); - v6->tcpOutRsts = STAT(OUT_RST); - v6->tcpInSegs = STAT64(IN_SEG); - v6->tcpOutSegs = STAT64(OUT_SEG); - v6->tcpRetransSegs = STAT64(RXT_SEG); + v6->tcp_out_rsts = STAT(OUT_RST); + v6->tcp_in_segs = STAT64(IN_SEG); + v6->tcp_out_segs = STAT64(OUT_SEG); + v6->tcp_retrans_segs = STAT64(RXT_SEG); } #undef STAT64 #undef STAT #undef STAT_IDX } +/** + * t4_tp_get_err_stats - read TP's error MIB counters + * @adap: the adapter + * @st: holds the counter values + * + * Returns the values of TP's error counters. 
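A pattern worth calling out in the RSS hunks above: every TP_PIO accessor now checks adap->flags & FW_OK and routes the access through the firmware's LDST mailbox command (t4_fw_tp_pio_rw) once a firmware connection exists, falling back to the direct TP_PIO_ADDR/TP_PIO_DATA backdoor only before that. A toy sketch of the dispatch; fake_adapter, read_via_firmware() and read_via_backdoor() are all hypothetical stand-ins:

#include <stdio.h>

#define FW_OK 0x1		/* illustrative flag bit */

struct fake_adapter {
	unsigned int flags;
};

/* Hypothetical stand-ins for the two access paths. */
static unsigned int read_via_firmware(unsigned int addr)
{
	return 0xf3000000u | addr;	/* pretend LDST mailbox result */
}

static unsigned int read_via_backdoor(unsigned int addr)
{
	return 0xba000000u | addr;	/* pretend TP_PIO_ADDR/DATA result */
}

/* Same decision the patched t4_read_rss_key() and friends make:
 * prefer the firmware path once a connection exists, since the
 * firmware can serialize TP_PIO access with its own use of it. */
static unsigned int read_tp_pio(struct fake_adapter *adap, unsigned int addr)
{
	if (adap->flags & FW_OK)
		return read_via_firmware(addr);
	return read_via_backdoor(addr);
}

int main(void)
{
	struct fake_adapter early = { 0 };
	struct fake_adapter up = { FW_OK };

	printf("0x%08x\n", read_tp_pio(&early, 0x38));
	printf("0x%08x\n", read_tp_pio(&up, 0x38));
	return 0;
}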
+ */ +void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) +{ + /* T6 and later has 2 channels */ + if (adap->params.arch.nchan == NCHAN) { + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tnl_cong_drops, 8, + TP_MIB_TNL_CNG_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tnl_tx_drops, 4, + TP_MIB_TNL_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->ofld_vlan_drops, 4, + TP_MIB_OFD_VLN_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tcp6_in_errs, 4, + TP_MIB_TCP_V6IN_ERR_0_A); + } else { + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tnl_cong_drops, 2, + TP_MIB_TNL_CNG_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->ofld_chan_drops, 2, + TP_MIB_OFD_CHN_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->ofld_vlan_drops, 2, + TP_MIB_OFD_VLN_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A); + } + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A); +} + +/** + * t4_tp_get_cpl_stats - read TP's CPL MIB counters + * @adap: the adapter + * @st: holds the counter values + * + * Returns the values of TP's CPL counters. + */ +void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st) +{ + /* T6 and later has 2 channels */ + if (adap->params.arch.nchan == NCHAN) { + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req, + 8, TP_MIB_CPL_IN_REQ_0_A); + } else { + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req, + 2, TP_MIB_CPL_IN_REQ_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp, + 2, TP_MIB_CPL_OUT_RSP_0_A); + } +} + +/** + * t4_tp_get_rdma_stats - read TP's RDMA MIB counters + * @adap: the adapter + * @st: holds the counter values + * + * Returns the values of TP's RDMA counters. + */ +void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st) +{ + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt, + 2, TP_MIB_RQE_DFR_PKT_A); +} + +/** + * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port + * @adap: the adapter + * @idx: the port index + * @st: holds the counter values + * + * Returns the values of TP's FCoE counters for the selected port. + */ +void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, + struct tp_fcoe_stats *st) +{ + u32 val[2]; + + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp, + 1, TP_MIB_FCOE_DDP_0_A + idx); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop, + 1, TP_MIB_FCOE_DROP_0_A + idx); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, + 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx); + st->octets_ddp = ((u64)val[0] << 32) | val[1]; +} + +/** + * t4_get_usm_stats - read TP's non-TCP DDP MIB counters + * @adap: the adapter + * @st: holds the counter values + * + * Returns the values of TP's counters for non-TCP directly-placed packets. 
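t4_tp_get_err_stats() above exploits the TP MIB layout: on 4-channel chips (T4/T5) the per-channel MAC, header and TCP error counters sit back to back, so one 12-word read fills three arrays at once, while on 2-channel T6 each field needs its own short read. A small model of the 2-channel case; the mib[] array and addresses are made up, the kernel reads them through TP_MIB_INDEX/TP_MIB_DATA:

#include <stdio.h>

#define MAX_NCHAN 4	/* T4/T5 have 4 channels, T6 only 2 */

struct err_stats {
	unsigned int mac_in_errs[MAX_NCHAN];
	unsigned int hdr_in_errs[MAX_NCHAN];
};

/* Pretend MIB address space: one counter per word. */
static unsigned int mib[64];

/* Models t4_read_indirect(): copy @n words starting at @addr. */
static void read_mib(unsigned int *dst, unsigned int n, unsigned int addr)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		dst[i] = mib[addr + i];
}

int main(void)
{
	struct err_stats st = { { 0 }, { 0 } };
	unsigned int nchan = 2;		/* T6-style */
	unsigned int i;

	for (i = 0; i < 64; i++)
		mib[i] = i;

	/* With only 2 of every 4 per-channel slots valid, the counters
	 * are no longer contiguous across fields, so each field gets
	 * its own short read - the "else" branch above.  On a 4-channel
	 * chip one 12-word read covers mac, hdr and tcp errors at once. */
	read_mib(st.mac_in_errs, nchan, 0x0);	/* TP_MIB_MAC_IN_ERR_0 */
	read_mib(st.hdr_in_errs, nchan, 0x4);	/* TP_MIB_HDR_IN_ERR_0 */

	printf("mac[0]=%u hdr[1]=%u\n", st.mac_in_errs[0], st.hdr_in_errs[1]);
	return 0;
}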
+ */ +void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st) +{ + u32 val[4]; + + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4, + TP_MIB_USM_PKTS_A); + st->frames = val[0]; + st->drops = val[1]; + st->octets = ((u64)val[2] << 32) | val[3]; +} + /** * t4_read_mtu_tbl - returns the values in the HW path MTU table * @adap: the adapter @@ -3629,6 +4240,28 @@ const char *t4_get_port_type_description(enum fw_port_type port_type) return "UNKNOWN"; } +/** + * t4_get_port_stats_offset - collect port stats relative to a previous + * snapshot + * @adap: The adapter + * @idx: The port + * @stats: Current stats to fill + * @offset: Previous stats snapshot + */ +void t4_get_port_stats_offset(struct adapter *adap, int idx, + struct port_stats *stats, + struct port_stats *offset) +{ + u64 *s, *o; + int i; + + t4_get_port_stats(adap, idx, stats); + for (i = 0, s = (u64 *)stats, o = (u64 *)offset; + i < (sizeof(struct port_stats) / sizeof(u64)); + i++, s++, o++) + *s -= *o; +} + /** * t4_get_port_stats - collect port statistics * @adap: the adapter @@ -3713,103 +4346,51 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) } /** - * t4_wol_magic_enable - enable/disable magic packet WoL - * @adap: the adapter - * @port: the physical port index - * @addr: MAC address expected in magic packets, %NULL to disable - * - * Enables/disables magic packet wake-on-LAN for the selected port. - */ -void t4_wol_magic_enable(struct adapter *adap, unsigned int port, - const u8 *addr) -{ - u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; - - if (is_t4(adap->params.chip)) { - mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); - mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); - port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A); - } else { - mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO); - mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI); - port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A); - } - - if (addr) { - t4_write_reg(adap, mag_id_reg_l, - (addr[2] << 24) | (addr[3] << 16) | - (addr[4] << 8) | addr[5]); - t4_write_reg(adap, mag_id_reg_h, - (addr[0] << 8) | addr[1]); - } - t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F, - addr ? MAGICEN_F : 0); -} - -/** - * t4_wol_pat_enable - enable/disable pattern-based WoL + * t4_get_lb_stats - collect loopback port statistics * @adap: the adapter - * @port: the physical port index - * @map: bitmap of which HW pattern filters to set - * @mask0: byte mask for bytes 0-63 of a packet - * @mask1: byte mask for bytes 64-127 of a packet - * @crc: Ethernet CRC for selected bytes - * @enable: enable/disable switch + * @idx: the loopback port index + * @p: the stats structure to fill * - * Sets the pattern filters indicated in @map to mask out the bytes - * specified in @mask0/@mask1 in received packets and compare the CRC of - * the resulting packet against @crc. If @enable is %true pattern-based - * WoL is enabled, otherwise disabled. + * Return HW statistics for the given loopback port. 
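t4_get_port_stats_offset() above relies on struct port_stats consisting of nothing but u64 counters, so the live stats and a saved snapshot can both be walked as flat u64 arrays and subtracted element-wise. The same trick in miniature, with illustrative field names:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Cut-down stand-in for struct port_stats: the trick requires that
 * every member be a u64 counter. */
struct port_stats {
	uint64_t tx_octets;
	uint64_t tx_frames;
	uint64_t rx_octets;
	uint64_t rx_frames;
};

/* Same idea as t4_get_port_stats_offset(): walk both structs as flat
 * u64 arrays and subtract the snapshot from the live values. */
static void stats_minus_offset(struct port_stats *stats,
			       const struct port_stats *offset)
{
	uint64_t *s = (uint64_t *)stats;
	const uint64_t *o = (const uint64_t *)offset;
	size_t i;

	for (i = 0; i < sizeof(*stats) / sizeof(uint64_t); i++)
		s[i] -= o[i];
}

int main(void)
{
	struct port_stats live = { 1000, 10, 5000, 50 };
	struct port_stats snap = { 400, 4, 1000, 10 };

	stats_minus_offset(&live, &snap);
	printf("%llu %llu\n", (unsigned long long)live.tx_octets,
	       (unsigned long long)live.rx_frames);	/* 600 40 */
	return 0;
}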
*/ -int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, - u64 mask0, u64 mask1, unsigned int crc, bool enable) +void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) { - int i; - u32 port_cfg_reg; - - if (is_t4(adap->params.chip)) - port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A); - else - port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A); - - if (!enable) { - t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0); - return 0; - } - if (map > 0xff) - return -EINVAL; + u32 bgmap = t4_get_mps_bg_map(adap, idx); -#define EPIO_REG(name) \ +#define GET_STAT(name) \ + t4_read_reg64(adap, \ (is_t4(adap->params.chip) ? \ - PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \ - T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A)) - - t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); - t4_write_reg(adap, EPIO_REG(DATA2), mask1); - t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); - - for (i = 0; i < NWOL_PAT; i++, map >>= 1) { - if (!(map & 1)) - continue; + PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \ + T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))) +#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) - /* write byte masks */ - t4_write_reg(adap, EPIO_REG(DATA0), mask0); - t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F); - t4_read_reg(adap, EPIO_REG(OP)); /* flush */ - if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F) - return -ETIMEDOUT; - - /* write CRC */ - t4_write_reg(adap, EPIO_REG(DATA0), crc); - t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F); - t4_read_reg(adap, EPIO_REG(OP)); /* flush */ - if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F) - return -ETIMEDOUT; - } -#undef EPIO_REG + p->octets = GET_STAT(BYTES); + p->frames = GET_STAT(FRAMES); + p->bcast_frames = GET_STAT(BCAST); + p->mcast_frames = GET_STAT(MCAST); + p->ucast_frames = GET_STAT(UCAST); + p->error_frames = GET_STAT(ERROR); + + p->frames_64 = GET_STAT(64B); + p->frames_65_127 = GET_STAT(65B_127B); + p->frames_128_255 = GET_STAT(128B_255B); + p->frames_256_511 = GET_STAT(256B_511B); + p->frames_512_1023 = GET_STAT(512B_1023B); + p->frames_1024_1518 = GET_STAT(1024B_1518B); + p->frames_1519_max = GET_STAT(1519B_MAX); + p->drop = GET_STAT(DROP_FRAMES); + + p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0; + p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0; + p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0; + p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0; + p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0; + p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0; + p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0; + p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0; - t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F); - return 0; +#undef GET_STAT +#undef GET_STAT_COM } /* t4_mk_filtdelwr - create a delete filter WR @@ -4030,6 +4611,32 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state) sge_regs[i], t4_read_reg(adapter, sge_regs[i])); } +/** + * t4_sge_ctxt_flush - flush the SGE context cache + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * + * Issues a FW command through the given mailbox to flush the + * SGE context cache. 
+ */
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+{
+	int ret;
+	u32 ldst_addrspace;
+	struct fw_ldst_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+					FW_CMD_REQUEST_F | FW_CMD_READ_F |
+					ldst_addrspace);
+	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	return ret;
+}
+
 /**
  * t4_fw_hello - establish communication with FW
  * @adap: the adapter
@@ -4726,6 +5333,33 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
 	return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
 }
 
+/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifier
+ *
+ * Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int viid)
+{
+	struct fw_vi_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+				  FW_CMD_REQUEST_F |
+				  FW_CMD_EXEC_F |
+				  FW_VI_CMD_PFN_V(pf) |
+				  FW_VI_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
+	c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+}
+
 /**
  * t4_set_rxmode - set Rx properties of a virtual interface
  * @adap: the adapter
@@ -4798,45 +5432,71 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
 		      unsigned int viid, bool free, unsigned int naddr,
 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
 {
-	int i, ret;
+	int offset, ret = 0;
 	struct fw_vi_mac_cmd c;
-	struct fw_vi_mac_exact *p;
-	unsigned int max_naddr = is_t4(adap->params.chip) ?
-				       NUM_MPS_CLS_SRAM_L_INSTANCES :
-				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+	unsigned int nfilters = 0;
+	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
+	unsigned int rem = naddr;
 
-	if (naddr > 7)
+	if (naddr > max_naddr)
 		return -EINVAL;
 
-	memset(&c, 0, sizeof(c));
-	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
-				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
-				   (free ? FW_CMD_EXEC_F : 0) |
-				   FW_VI_MAC_CMD_VIID_V(viid));
-	c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
-					  FW_CMD_LEN16_V((naddr + 2) / 2));
-
-	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
-		p->valid_to_idx =
-			cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
-				    FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
-		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
-	}
+	for (offset = 0; offset < naddr ; /**/) {
+		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
+ rem : ARRAY_SIZE(c.u.exact)); + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[fw_naddr]), 16); + struct fw_vi_mac_exact *p; + int i; - ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); - if (ret) - return ret; + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_V(free) | + FW_VI_MAC_CMD_VIID_V(viid)); + c.freemacs_to_len16 = + cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) | + FW_CMD_LEN16_V(len16)); + + for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { + p->valid_to_idx = + cpu_to_be16(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V( + FW_VI_MAC_ADD_MAC)); + memcpy(p->macaddr, addr[offset + i], + sizeof(p->macaddr)); + } - for (i = 0, p = c.u.exact; i < naddr; i++, p++) { - u16 index = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx)); + /* It's okay if we run out of space in our MAC address arena. + * Some of the addresses we submit may get stored so we need + * to run through the reply to see what the results were ... + */ + ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); + if (ret && ret != -FW_ENOMEM) + break; + + for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_G( + be16_to_cpu(p->valid_to_idx)); + + if (idx) + idx[offset + i] = (index >= max_naddr ? + 0xffff : index); + if (index < max_naddr) + nfilters++; + else if (hash) + *hash |= (1ULL << + hash_mac_addr(addr[offset + i])); + } - if (idx) - idx[i] = index >= max_naddr ? 0xffff : index; - if (index < max_naddr) - ret++; - else if (hash) - *hash |= (1ULL << hash_mac_addr(addr[i])); + free = false; + offset += fw_naddr; + rem -= fw_naddr; } + + if (ret == 0 || ret == -FW_ENOMEM) + ret = nfilters; return ret; } @@ -4865,9 +5525,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, int ret, mode; struct fw_vi_mac_cmd c; struct fw_vi_mac_exact *p = c.u.exact; - unsigned int max_mac_addr = is_t4(adap->params.chip) ? - NUM_MPS_CLS_SRAM_L_INSTANCES : - NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + unsigned int max_mac_addr = adap->params.arch.mps_tcam_size; if (idx < 0) /* new allocation */ idx = persist ? 
FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; @@ -5244,6 +5902,22 @@ static int get_flash_params(struct adapter *adap) return 0; } +static void set_pcie_completion_timeout(struct adapter *adapter, u8 range) +{ + u16 val; + u32 pcie_cap; + + pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); + if (pcie_cap) { + pci_read_config_word(adapter->pdev, + pcie_cap + PCI_EXP_DEVCTL2, &val); + val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT; + val |= range; + pci_write_config_word(adapter->pdev, + pcie_cap + PCI_EXP_DEVCTL2, val); + } +} + /** * t4_prep_adapter - prepare SW and HW for operation * @adapter: the adapter @@ -5276,9 +5950,30 @@ int t4_prep_adapter(struct adapter *adapter) switch (ver) { case CHELSIO_T4: adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); + adapter->params.arch.sge_fl_db = DBPRIO_F; + adapter->params.arch.mps_tcam_size = + NUM_MPS_CLS_SRAM_L_INSTANCES; + adapter->params.arch.mps_rplc_size = 128; + adapter->params.arch.nchan = NCHAN; + adapter->params.arch.vfcount = 128; break; case CHELSIO_T5: adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); + adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + adapter->params.arch.mps_rplc_size = 128; + adapter->params.arch.nchan = NCHAN; + adapter->params.arch.vfcount = 128; + break; + case CHELSIO_T6: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev); + adapter->params.arch.sge_fl_db = 0; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + adapter->params.arch.mps_rplc_size = 256; + adapter->params.arch.nchan = 2; + adapter->params.arch.vfcount = 256; break; default: dev_err(adapter->pdev_dev, "Device %d is not supported\n", @@ -5295,11 +5990,14 @@ int t4_prep_adapter(struct adapter *adapter) adapter->params.nports = 1; adapter->params.portvec = 1; adapter->params.vpd.cclk = 50000; + + /* Set pci completion timeout value to 4 seconds. */ + set_pcie_completion_timeout(adapter, 0xd); return 0; } /** - * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information + * t4_bar2_sge_qregs - return BAR2 SGE Queue register information * @adapter: the adapter * @qid: the Queue ID * @qtype: the Ingress or Egress type for @qid @@ -5323,7 +6021,7 @@ int t4_prep_adapter(struct adapter *adapter) * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, * then these "Inferred Queue ID" register may not be used. */ -int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, +int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, enum t4_bar2_qtype qtype, u64 *pbar2_qoffset, @@ -5355,7 +6053,7 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, * o The BAR2 Queue ID. * o The BAR2 Queue ID Offset into the BAR2 page. */ - bar2_page_offset = ((qid >> qpp_shift) << page_shift); + bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift); bar2_qid = qid & qpp_mask; bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; @@ -5457,13 +6155,13 @@ int t4_init_sge_params(struct adapter *adapter) */ hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A); s_hps = (HOSTPAGESIZEPF0_S + - (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn); + (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf); sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M); /* Extract the SGE Egress and Ingess Queues Per Page for our PF. 
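One subtle fix a few hunks above: bar2_page_offset is now computed as ((u64)(qid >> qpp_shift) << page_shift). Without the cast the shift is evaluated in 32-bit arithmetic and silently wraps for large queue IDs combined with large BAR2 page sizes. A standalone illustration of the wrap, with made-up but plausible parameter values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int qid = 70000;	/* a large SGE queue ID */
	unsigned int qpp_shift = 0;	/* one queue per page */
	unsigned int page_shift = 16;	/* 64KB BAR2 pages */

	/* Without the (u64) cast the shift wraps modulo 2^32 - the
	 * overflow the t4_bar2_sge_qregs() hunk above fixes. */
	unsigned int narrow = (qid >> qpp_shift) << page_shift;
	uint64_t wide = (uint64_t)(qid >> qpp_shift) << page_shift;

	printf("32-bit: 0x%08x\n", narrow);	/* 0x11700000 - wrong   */
	printf("64-bit: 0x%llx\n",		/* 0x111700000 - right  */
	       (unsigned long long)wide);
	return 0;
}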
*/ s_qpp = (QUEUESPERPAGEPF0_S + - (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn); + (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf); qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A); sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M); qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A); @@ -5494,12 +6192,19 @@ int t4_init_tp_params(struct adapter *adap) /* Cache the adapter's Compressed Filter Mode and global Incress * Configuration. */ - t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, - &adap->params.tp.vlan_pri_map, 1, - TP_VLAN_PRI_MAP_A); - t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, - &adap->params.tp.ingress_config, 1, - TP_INGRESS_CONFIG_A); + if (adap->flags & FW_OK) { + t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1, + TP_VLAN_PRI_MAP_A, 1); + t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1, + TP_INGRESS_CONFIG_A, 1); + } else { + t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, + &adap->params.tp.vlan_pri_map, 1, + TP_VLAN_PRI_MAP_A); + t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, + &adap->params.tp.ingress_config, 1, + TP_INGRESS_CONFIG_A); + } /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field * shift positions of several elements of the Compressed Filter Tuple diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index 88067d90121c..f9a2cb164737 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -52,8 +52,6 @@ enum { MBOX_LEN = 64, /* mailbox size in bytes */ TRACE_LEN = 112, /* length of trace data and mask */ FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ - NWOL_PAT = 8, /* # of WoL patterns */ - WOL_PAT_LEN = 128, /* length of WoL patterns */ }; enum { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index d90f8a03e378..132cb8fc0bf7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -686,6 +686,9 @@ struct cpl_tx_pkt { #define TXPKT_ETHHDR_LEN_S 34 #define TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << TXPKT_ETHHDR_LEN_S) +#define T6_TXPKT_ETHHDR_LEN_S 32 +#define T6_TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << T6_TXPKT_ETHHDR_LEN_S) + #define TXPKT_CSUM_TYPE_S 40 #define TXPKT_CSUM_TYPE_V(x) ((__u64)(x) << TXPKT_CSUM_TYPE_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 326674b19983..af3462db5adb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -418,6 +418,20 @@ #define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4 #define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8 +#define SGE_ERROR_STATS_A 0x1100 + +#define UNCAPTURED_ERROR_S 18 +#define UNCAPTURED_ERROR_V(x) ((x) << UNCAPTURED_ERROR_S) +#define UNCAPTURED_ERROR_F UNCAPTURED_ERROR_V(1U) + +#define ERROR_QID_VALID_S 17 +#define ERROR_QID_VALID_V(x) ((x) << ERROR_QID_VALID_S) +#define ERROR_QID_VALID_F ERROR_QID_VALID_V(1U) + +#define ERROR_QID_S 0 +#define ERROR_QID_M 0x1ffffU +#define ERROR_QID_G(x) (((x) >> ERROR_QID_S) & ERROR_QID_M) + #define HP_INT_THRESH_S 28 #define HP_INT_THRESH_M 0xfU #define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S) @@ -448,8 +462,13 @@ #define SGE_STAT_MATCH_A 0x10e8 #define SGE_STAT_CFG_A 0x10ec +#define STATMODE_S 2 +#define STATMODE_V(x) ((x) << STATMODE_S) + #define STATSOURCE_T5_S 9 +#define STATSOURCE_T5_M 0xfU #define STATSOURCE_T5_V(x) ((x) 
<< STATSOURCE_T5_S) +#define STATSOURCE_T5_G(x) (((x) >> STATSOURCE_T5_S) & STATSOURCE_T5_M) #define SGE_DBFIFO_STATUS2_A 0x1118 @@ -705,6 +724,10 @@ #define REGISTER_S 0 #define REGISTER_V(x) ((x) << REGISTER_S) +#define T6_ENABLE_S 31 +#define T6_ENABLE_V(x) ((x) << T6_ENABLE_S) +#define T6_ENABLE_F T6_ENABLE_V(1U) + #define PFNUM_S 0 #define PFNUM_V(x) ((x) << PFNUM_S) @@ -1399,6 +1422,8 @@ #define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U) #define TP_MIB_MAC_IN_ERR_0_A 0x0 +#define TP_MIB_HDR_IN_ERR_0_A 0x4 +#define TP_MIB_TCP_IN_ERR_0_A 0x8 #define TP_MIB_TCP_OUT_RST_A 0xc #define TP_MIB_TCP_IN_SEG_HI_A 0x10 #define TP_MIB_TCP_IN_SEG_LO_A 0x11 @@ -1407,11 +1432,19 @@ #define TP_MIB_TCP_RXT_SEG_HI_A 0x14 #define TP_MIB_TCP_RXT_SEG_LO_A 0x15 #define TP_MIB_TNL_CNG_DROP_0_A 0x18 +#define TP_MIB_OFD_CHN_DROP_0_A 0x1c #define TP_MIB_TCP_V6IN_ERR_0_A 0x28 #define TP_MIB_TCP_V6OUT_RST_A 0x2c #define TP_MIB_OFD_ARP_DROP_A 0x36 +#define TP_MIB_CPL_IN_REQ_0_A 0x38 +#define TP_MIB_CPL_OUT_RSP_0_A 0x3c #define TP_MIB_TNL_DROP_0_A 0x44 +#define TP_MIB_FCOE_DDP_0_A 0x48 +#define TP_MIB_FCOE_DROP_0_A 0x4c +#define TP_MIB_FCOE_BYTE_0_HI_A 0x50 #define TP_MIB_OFD_VLN_DROP_0_A 0x58 +#define TP_MIB_USM_PKTS_A 0x5c +#define TP_MIB_RQE_DFR_PKT_A 0x64 #define ULP_TX_INT_CAUSE_A 0x8dcc @@ -1572,6 +1605,7 @@ #define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520 #define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524 #define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528 +#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528 #define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540 #define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544 #define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548 @@ -2054,6 +2088,11 @@ #define VFLKPIDX_M 0xffU #define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M) +#define T6_VFWRADDR_S 8 +#define T6_VFWRADDR_M 0xffU +#define T6_VFWRADDR_V(x) ((x) << T6_VFWRADDR_S) +#define T6_VFWRADDR_G(x) (((x) >> T6_VFWRADDR_S) & T6_VFWRADDR_M) + #define TP_RSS_CONFIG_CNG_A 0x7e04 #define TP_RSS_SECRET_KEY0_A 0x40 #define TP_RSS_PF0_CONFIG_A 0x30 @@ -2175,7 +2214,28 @@ #define MPS_RX_PERR_INT_CAUSE_A 0x11074 #define MPS_CLS_TCAM_Y_L_A 0xf000 +#define MPS_CLS_TCAM_DATA0_A 0xf000 +#define MPS_CLS_TCAM_DATA1_A 0xf004 + +#define DMACH_S 0 +#define DMACH_M 0xffffU +#define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M) + #define MPS_CLS_TCAM_X_L_A 0xf008 +#define MPS_CLS_TCAM_DATA2_CTL_A 0xf008 + +#define CTLCMDTYPE_S 31 +#define CTLCMDTYPE_V(x) ((x) << CTLCMDTYPE_S) +#define CTLCMDTYPE_F CTLCMDTYPE_V(1U) + +#define CTLTCAMSEL_S 25 +#define CTLTCAMSEL_V(x) ((x) << CTLTCAMSEL_S) + +#define CTLTCAMINDEX_S 17 +#define CTLTCAMINDEX_V(x) ((x) << CTLTCAMINDEX_S) + +#define CTLXYBITSEL_S 16 +#define CTLXYBITSEL_V(x) ((x) << CTLXYBITSEL_S) #define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16) #define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512 @@ -2184,6 +2244,45 @@ #define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512 #define MPS_CLS_SRAM_L_A 0xe000 + +#define T6_MULTILISTEN0_S 26 + +#define T6_SRAM_PRIO3_S 23 +#define T6_SRAM_PRIO3_M 0x7U +#define T6_SRAM_PRIO3_G(x) (((x) >> T6_SRAM_PRIO3_S) & T6_SRAM_PRIO3_M) + +#define T6_SRAM_PRIO2_S 20 +#define T6_SRAM_PRIO2_M 0x7U +#define T6_SRAM_PRIO2_G(x) (((x) >> T6_SRAM_PRIO2_S) & T6_SRAM_PRIO2_M) + +#define T6_SRAM_PRIO1_S 17 +#define T6_SRAM_PRIO1_M 0x7U +#define T6_SRAM_PRIO1_G(x) (((x) >> T6_SRAM_PRIO1_S) & T6_SRAM_PRIO1_M) + +#define T6_SRAM_PRIO0_S 14 +#define T6_SRAM_PRIO0_M 0x7U +#define T6_SRAM_PRIO0_G(x) (((x) >> T6_SRAM_PRIO0_S) & T6_SRAM_PRIO0_M) + +#define T6_SRAM_VLD_S 13 +#define T6_SRAM_VLD_V(x) ((x) << 
T6_SRAM_VLD_S) +#define T6_SRAM_VLD_F T6_SRAM_VLD_V(1U) + +#define T6_REPLICATE_S 12 +#define T6_REPLICATE_V(x) ((x) << T6_REPLICATE_S) +#define T6_REPLICATE_F T6_REPLICATE_V(1U) + +#define T6_PF_S 9 +#define T6_PF_M 0x7U +#define T6_PF_G(x) (((x) >> T6_PF_S) & T6_PF_M) + +#define T6_VF_VALID_S 8 +#define T6_VF_VALID_V(x) ((x) << T6_VF_VALID_S) +#define T6_VF_VALID_F T6_VF_VALID_V(1U) + +#define T6_VF_S 0 +#define T6_VF_M 0xffU +#define T6_VF_G(x) (((x) >> T6_VF_S) & T6_VF_M) + #define MPS_CLS_SRAM_H_A 0xe004 #define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8) @@ -2433,6 +2532,8 @@ #define CIM_F CIM_V(1U) #define MC1_S 31 +#define MC1_V(x) ((x) << MC1_S) +#define MC1_F MC1_V(1U) #define PL_INT_ENABLE_A 0x19410 #define PL_INT_MAP0_A 0x19414 @@ -2463,6 +2564,18 @@ #define REV_V(x) ((x) << REV_S) #define REV_G(x) (((x) >> REV_S) & REV_M) +#define T6_UNKNOWNCMD_S 3 +#define T6_UNKNOWNCMD_V(x) ((x) << T6_UNKNOWNCMD_S) +#define T6_UNKNOWNCMD_F T6_UNKNOWNCMD_V(1U) + +#define T6_LIP0_S 2 +#define T6_LIP0_V(x) ((x) << T6_LIP0_S) +#define T6_LIP0_F T6_LIP0_V(1U) + +#define T6_LIPMISS_S 1 +#define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S) +#define T6_LIPMISS_F T6_LIPMISS_V(1U) + #define LE_DB_INT_CAUSE_A 0x19c3c #define REQQPARERR_S 16 @@ -2485,6 +2598,14 @@ #define LIP0_V(x) ((x) << LIP0_S) #define LIP0_F LIP0_V(1U) +#define TCAMINTPERR_S 13 +#define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S) +#define TCAMINTPERR_F TCAMINTPERR_V(1U) + +#define SSRAMINTPERR_S 10 +#define SSRAMINTPERR_V(x) ((x) << SSRAMINTPERR_S) +#define SSRAMINTPERR_F SSRAMINTPERR_V(1U) + #define NCSI_INT_CAUSE_A 0x1a0d8 #define CIM_DM_PRTY_ERR_S 8 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h index 72ec1f91d29f..7bdee3bf75ec 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h @@ -63,6 +63,7 @@ #define FETCHBURSTMIN_64B_X 2 +#define FETCHBURSTMAX_256B_X 2 #define FETCHBURSTMAX_512B_X 3 #define HOSTFCMODE_STATUS_PAGE_X 2 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 0848317e5c4f..ab4674684acc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -772,7 +772,7 @@ struct fw_ldst_cmd { } addrval; struct fw_ldst_idctxt { __be32 physid; - __be32 msg_pkd; + __be32 msg_ctxtflush; __be32 ctxt_data7; __be32 ctxt_data6; __be32 ctxt_data5; @@ -788,15 +788,27 @@ struct fw_ldst_cmd { __be16 vctl; __be16 rval; } mdio; - struct fw_ldst_mps { - __be16 fid_ctl; - __be16 rplcpf_pkd; - __be32 rplc127_96; - __be32 rplc95_64; - __be32 rplc63_32; - __be32 rplc31_0; - __be32 atrb; - __be16 vlan[16]; + union fw_ldst_mps { + struct fw_ldst_mps_rplc { + __be16 fid_idx; + __be16 rplcpf_pkd; + __be32 rplc255_224; + __be32 rplc223_192; + __be32 rplc191_160; + __be32 rplc159_128; + __be32 rplc127_96; + __be32 rplc95_64; + __be32 rplc63_32; + __be32 rplc31_0; + } rplc; + struct fw_ldst_mps_atrb { + __be16 fid_mpsid; + __be16 r2[3]; + __be32 r3[2]; + __be32 r4; + __be32 atrb; + __be16 vlan[16]; + } atrb; } mps; struct fw_ldst_func { u8 access_ctl; @@ -822,6 +834,10 @@ struct fw_ldst_cmd { #define FW_LDST_CMD_MSG_S 31 #define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S) +#define FW_LDST_CMD_CTXTFLUSH_S 30 +#define FW_LDST_CMD_CTXTFLUSH_V(x) ((x) << FW_LDST_CMD_CTXTFLUSH_S) +#define FW_LDST_CMD_CTXTFLUSH_F FW_LDST_CMD_CTXTFLUSH_V(1U) + #define FW_LDST_CMD_PADDR_S 8 #define FW_LDST_CMD_PADDR_V(x) ((x) << 
FW_LDST_CMD_PADDR_S) @@ -831,8 +847,8 @@ struct fw_ldst_cmd { #define FW_LDST_CMD_FID_S 15 #define FW_LDST_CMD_FID_V(x) ((x) << FW_LDST_CMD_FID_S) -#define FW_LDST_CMD_CTL_S 0 -#define FW_LDST_CMD_CTL_V(x) ((x) << FW_LDST_CMD_CTL_S) +#define FW_LDST_CMD_IDX_S 0 +#define FW_LDST_CMD_IDX_V(x) ((x) << FW_LDST_CMD_IDX_S) #define FW_LDST_CMD_RPLCPF_S 0 #define FW_LDST_CMD_RPLCPF_V(x) ((x) << FW_LDST_CMD_RPLCPF_S) @@ -2536,13 +2552,8 @@ enum fw_port_mod_sub_type { FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC, }; -/* port stats */ -#define FW_NUM_PORT_STATS 50 -#define FW_NUM_PORT_TX_STATS 23 -#define FW_NUM_PORT_RX_STATS 27 - enum fw_port_stats_tx_index { - FW_STAT_TX_PORT_BYTES_IX, + FW_STAT_TX_PORT_BYTES_IX = 0, FW_STAT_TX_PORT_FRAMES_IX, FW_STAT_TX_PORT_BCAST_IX, FW_STAT_TX_PORT_MCAST_IX, @@ -2564,11 +2575,12 @@ enum fw_port_stats_tx_index { FW_STAT_TX_PORT_PPP4_IX, FW_STAT_TX_PORT_PPP5_IX, FW_STAT_TX_PORT_PPP6_IX, - FW_STAT_TX_PORT_PPP7_IX + FW_STAT_TX_PORT_PPP7_IX, + FW_NUM_PORT_TX_STATS }; enum fw_port_stat_rx_index { - FW_STAT_RX_PORT_BYTES_IX, + FW_STAT_RX_PORT_BYTES_IX = 0, FW_STAT_RX_PORT_FRAMES_IX, FW_STAT_RX_PORT_BCAST_IX, FW_STAT_RX_PORT_MCAST_IX, @@ -2594,9 +2606,14 @@ enum fw_port_stat_rx_index { FW_STAT_RX_PORT_PPP5_IX, FW_STAT_RX_PORT_PPP6_IX, FW_STAT_RX_PORT_PPP7_IX, - FW_STAT_RX_PORT_LESS_64B_IX + FW_STAT_RX_PORT_LESS_64B_IX, + FW_STAT_RX_PORT_MAC_ERROR_IX, + FW_NUM_PORT_RX_STATS }; +/* port stats */ +#define FW_NUM_PORT_STATS (FW_NUM_PORT_TX_STATS + FW_NUM_PORT_RX_STATS) + struct fw_port_stats_cmd { __be32 op_to_portid; __be32 retval_len16; @@ -3025,7 +3042,8 @@ struct fw_hdr { enum fw_hdr_chip { FW_HDR_CHIP_T4, - FW_HDR_CHIP_T5 + FW_HDR_CHIP_T5, + FW_HDR_CHIP_T6 }; #define FW_HDR_FW_VER_MAJOR_S 24 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index b9d1cbac0eee..32b213559b02 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -45,4 +45,9 @@ #define T5FW_VERSION_MICRO 0x20 #define T5FW_VERSION_BUILD 0x00 +#define T6FW_VERSION_MAJOR 0x01 +#define T6FW_VERSION_MINOR 0x0D +#define T6FW_VERSION_MICRO 0x2D +#define T6FW_VERSION_BUILD 0x00 + #endif diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 2e41d1541d73..ad53e5ad2acd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -524,7 +524,7 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) */ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) { - u32 val; + u32 val = adapter->params.arch.sge_fl_db; /* The SGE keeps track of its Producer and Consumer Indices in terms * of Egress Queue Units so we can only tell it about integral numbers @@ -532,11 +532,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) */ if (fl->pend_cred >= FL_PER_EQ_UNIT) { if (is_t4(adapter->params.chip)) - val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT); + val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT); else - val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) | - DBTYPE_F; - val |= DBPRIO_F; + val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT); /* Make sure all memory writes to the Free List queue are * committed before we tell the hardware about them. @@ -1084,7 +1082,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq, * Figure out what HW csum a packet wants and return the appropriate control * bits. 
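The fw_port_stats enum rework above replaces three hand-maintained #define counts with enumerators: FW_NUM_PORT_TX_STATS and FW_NUM_PORT_RX_STATS are now simply the last members of their enums, so adding a counter (such as the new FW_STAT_RX_PORT_MAC_ERROR_IX) updates the count automatically. The pattern in isolation, with illustrative names:

#include <stdio.h>

/* Same trick as the t4fw_api.h hunk above: make the count the last
 * enumerator so it can never drift out of sync with the list. */
enum tx_stat_index {
	STAT_TX_BYTES = 0,
	STAT_TX_FRAMES,
	STAT_TX_BCAST,
	NUM_TX_STATS		/* == number of entries above */
};

int main(void)
{
	printf("%d tx stats\n", NUM_TX_STATS);	/* prints "3 tx stats" */
	return 0;
}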
*/ -static u64 hwcsum(const struct sk_buff *skb) +static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb) { int csum_type; const struct iphdr *iph = ip_hdr(skb); @@ -1116,11 +1114,16 @@ static u64 hwcsum(const struct sk_buff *skb) goto nocsum; } - if (likely(csum_type >= TX_CSUM_TCPIP)) - return TXPKT_CSUM_TYPE_V(csum_type) | - TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)) | - TXPKT_ETHHDR_LEN_V(skb_network_offset(skb) - ETH_HLEN); - else { + if (likely(csum_type >= TX_CSUM_TCPIP)) { + u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)); + int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; + + if (chip <= CHELSIO_T5) + hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len); + else + hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len); + return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len; + } else { int start = skb_transport_offset(skb); return TXPKT_CSUM_TYPE_V(csum_type) | @@ -1308,10 +1311,15 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) * accounting. */ cpl = (void *)(lso + 1); - cntrl = (TXPKT_CSUM_TYPE_V(v6 ? + + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); + else + cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); + + cntrl |= TXPKT_CSUM_TYPE_V(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | - TXPKT_IPHDR_LEN_V(l3hdr_len) | - TXPKT_ETHHDR_LEN_V(eth_xtra_len)); + TXPKT_IPHDR_LEN_V(l3hdr_len); txq->tso++; txq->tx_cso += ssi->gso_segs; } else { @@ -1328,7 +1336,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) */ cpl = (void *)(wr + 1); if (skb->ip_summed == CHECKSUM_PARTIAL) { - cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS_F; + cntrl = hwcsum(adapter->params.chip, skb) | + TXPKT_IPCSUM_DIS_F; txq->tx_cso++; } else cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; @@ -2162,8 +2171,8 @@ static void __iomem *bar2_address(struct adapter *adapter, u64 bar2_qoffset; int ret; - ret = t4_bar2_sge_qregs(adapter, qid, qtype, - &bar2_qoffset, pbar2_qid); + ret = t4vf_bar2_sge_qregs(adapter, qid, qtype, + &bar2_qoffset, pbar2_qid); if (ret) return NULL; @@ -2247,6 +2256,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, cmd.iqaddr = cpu_to_be64(rspq->phys_addr); if (fl) { + enum chip_type chip = + CHELSIO_CHIP_VERSION(adapter->params.chip); /* * Allocate the ring for the hardware free list (with space * for its status page) along with the associated software @@ -2286,7 +2297,9 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, cmd.fl0dcaen_to_fl0cidxfthresh = cpu_to_be16( FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) | - FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B)); + FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? + FETCHBURSTMAX_512B_X : + FETCHBURSTMAX_256B_X)); cmd.fl0size = cpu_to_be16(flsz); cmd.fl0addr = cpu_to_be64(fl->addr); } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index b9debb4f29a3..88b8981b4751 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -51,6 +51,7 @@ */ #define CHELSIO_T4 0x4 #define CHELSIO_T5 0x5 +#define CHELSIO_T6 0x6 enum chip_type { T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), @@ -156,6 +157,12 @@ struct vpd_params { u32 cclk; /* Core Clock (KHz) */ }; +/* Stores chip specific parameters */ +struct arch_specific_params { + u32 sge_fl_db; + u16 mps_tcam_size; +}; + /* * Global Receive Side Scaling (RSS) parameters in host-native format. 
*/ @@ -215,6 +222,7 @@ struct adapter_params { struct vpd_params vpd; /* Vital Product Data */ struct rss_params rss; /* Receive Side Scaling */ struct vf_resources vfres; /* Virtual Function Resource limits */ + struct arch_specific_params arch; /* chip specific params */ enum chip_type chip; /* chip code */ u8 nports; /* # of Ethernet "ports" */ }; @@ -284,11 +292,11 @@ int t4vf_fw_reset(struct adapter *); int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; -int t4_bar2_sge_qregs(struct adapter *adapter, - unsigned int qid, - enum t4_bar2_qtype qtype, - u64 *pbar2_qoffset, - unsigned int *pbar2_qid); +int t4vf_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid); int t4vf_get_sge_params(struct adapter *); int t4vf_get_vpd_params(struct adapter *); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 966ee900ed00..0db6dc9e9ed2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -428,7 +428,7 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams, } /** - * t4_bar2_sge_qregs - return BAR2 SGE Queue register information + * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information * @adapter: the adapter * @qid: the Queue ID * @qtype: the Ingress or Egress type for @qid @@ -452,11 +452,11 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams, * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, * then these "Inferred Queue ID" register may not be used. */ -int t4_bar2_sge_qregs(struct adapter *adapter, - unsigned int qid, - enum t4_bar2_qtype qtype, - u64 *pbar2_qoffset, - unsigned int *pbar2_qid) +int t4vf_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid) { unsigned int page_shift, page_size, qpp_shift, qpp_mask; u64 bar2_page_offset, bar2_qoffset; @@ -1191,9 +1191,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, unsigned nfilters = 0; unsigned int rem = naddr; struct fw_vi_mac_cmd cmd, rpl; - unsigned int max_naddr = is_t4(adapter->params.chip) ? - NUM_MPS_CLS_SRAM_L_INSTANCES : - NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + unsigned int max_naddr = adapter->params.arch.mps_tcam_size; if (naddr > max_naddr) return -EINVAL; @@ -1285,9 +1283,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid, struct fw_vi_mac_exact *p = &cmd.u.exact[0]; size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, u.exact[1]), 16); - unsigned int max_naddr = is_t4(adapter->params.chip) ? 
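struct arch_specific_params, added above for both the PF and VF drivers, is what lets much of this series drop open-coded is_t4()/is_t5() ladders: per-generation constants are resolved once at probe time and callers just read the struct. A sketch of the idea; the TCAM sizes follow NUM_MPS_CLS_SRAM_L_INSTANCES (336) and the shared T5/T6 value (512), while the doorbell flags are shown as placeholder bits rather than the real DBPRIO_F/DBTYPE_F values:

#include <stdio.h>

enum chip_ver { CHIP_T4, CHIP_T5, CHIP_T6 };

/* Mirrors struct arch_specific_params: per-generation constants are
 * resolved once, so hot paths stop branching on the chip type. */
struct arch_params {
	unsigned int sge_fl_db;
	unsigned int mps_tcam_size;
};

static void init_arch_params(enum chip_ver chip, struct arch_params *p)
{
	switch (chip) {
	case CHIP_T4:
		p->sge_fl_db = 0x1;		/* "DBPRIO" placeholder */
		p->mps_tcam_size = 336;
		break;
	case CHIP_T5:
		p->sge_fl_db = 0x1 | 0x2;	/* "DBPRIO | DBTYPE" */
		p->mps_tcam_size = 512;
		break;
	case CHIP_T6:
		p->sge_fl_db = 0;		/* T6 needs no extra flags */
		p->mps_tcam_size = 512;
		break;
	}
}

int main(void)
{
	struct arch_params p;

	init_arch_params(CHIP_T6, &p);
	/* Callers such as t4vf_change_mac() now read p.mps_tcam_size
	 * instead of testing is_t4()/is_t5(). */
	printf("tcam=%u fl_db=0x%x\n", p.mps_tcam_size, p.sge_fl_db);
	return 0;
}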
- NUM_MPS_CLS_SRAM_L_INSTANCES : - NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size; /* * If this is a new allocation, determine whether it should be @@ -1310,7 +1306,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid, if (ret == 0) { p = &rpl.u.exact[0]; ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx)); - if (ret >= max_naddr) + if (ret >= max_mac_addr) ret = -ENOMEM; } return ret; @@ -1590,11 +1586,25 @@ int t4vf_prep_adapter(struct adapter *adapter) switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) { case CHELSIO_T4: adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0); + adapter->params.arch.sge_fl_db = DBPRIO_F; + adapter->params.arch.mps_tcam_size = + NUM_MPS_CLS_SRAM_L_INSTANCES; break; case CHELSIO_T5: chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A)); adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); + adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + break; + + case CHELSIO_T6: + chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A)); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid); + adapter->params.arch.sge_fl_db = 0; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; break; } diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c index a31b57adcb37..d106186f4f4a 100644 --- a/drivers/net/ethernet/cisco/enic/enic_clsf.c +++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c @@ -33,8 +33,8 @@ int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq) return -EPROTONOSUPPORT; }; data.type = FILTER_IPV4_5TUPLE; - data.u.ipv4.src_addr = ntohl(keys->addrs.src); - data.u.ipv4.dst_addr = ntohl(keys->addrs.dst); + data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src); + data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst); data.u.ipv4.src_port = ntohs(keys->ports.src); data.u.ipv4.dst_port = ntohs(keys->ports.dst); data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE; @@ -158,8 +158,8 @@ static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h, struct enic_rfs_fltr_node *tpos; hlist_for_each_entry(tpos, h, node) - if (tpos->keys.addrs.src == k->addrs.src && - tpos->keys.addrs.dst == k->addrs.dst && + if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src && + tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst && tpos->keys.ports.ports == k->ports.ports && tpos->keys.basic.ip_proto == k->basic.ip_proto && tpos->keys.basic.n_proto == k->basic.n_proto) diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 117c0968dd0b..f3f1601a76f3 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -131,8 +131,15 @@ static void enic_get_drvinfo(struct net_device *netdev, { struct enic *enic = netdev_priv(netdev); struct vnic_devcmd_fw_info *fw_info; + int err; - enic_dev_fw_info(enic, &fw_info); + err = enic_dev_fw_info(enic, &fw_info); + /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info + * For other failures, like devcmd failure, we return previously + * recorded info. 
+ */ + if (err == -ENOMEM) + return; strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); @@ -181,8 +188,15 @@ static void enic_get_ethtool_stats(struct net_device *netdev, struct enic *enic = netdev_priv(netdev); struct vnic_stats *vstats; unsigned int i; + int err; - enic_dev_stats_dump(enic, &vstats); + err = enic_dev_stats_dump(enic, &vstats); + /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump + * For other failures, like devcmd failure, we return previously + * recorded stats. + */ + if (err == -ENOMEM) + return; for (i = 0; i < enic_n_tx_stats; i++) *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; @@ -346,10 +360,10 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd) break; } - fsp->h_u.tcp_ip4_spec.ip4src = n->keys.addrs.src; + fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys); fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0; - fsp->h_u.tcp_ip4_spec.ip4dst = n->keys.addrs.dst; + fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys); fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0; fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 204bd182473b..eadae1b412c6 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -615,8 +615,15 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, { struct enic *enic = netdev_priv(netdev); struct vnic_stats *stats; + int err; - enic_dev_stats_dump(enic, &stats); + err = enic_dev_stats_dump(enic, &stats); + /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump + * For other failures, like devcmd failure, we return previously + * recorded stats. 
+ */ + if (err == -ENOMEM) + return net_stats; net_stats->tx_packets = stats->tx.tx_frames_ok; net_stats->tx_bytes = stats->tx.tx_bytes_ok; @@ -1407,6 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) */ enic_calc_int_moderation(enic, &enic->rq[rq]); + enic_poll_unlock_napi(&enic->rq[rq]); if (work_done < work_to_do) { /* Some work done, but not enough to stay in polling, @@ -1418,7 +1426,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) enic_set_int_moderation(enic, &enic->rq[rq]); vnic_intr_unmask(&enic->intr[intr]); } - enic_poll_unlock_napi(&enic->rq[rq]); return work_done; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index 36a2ed606c91..c4b2183bf352 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c @@ -188,16 +188,15 @@ void vnic_rq_clean(struct vnic_rq *rq, struct vnic_rq_buf *buf; u32 fetch_index; unsigned int count = rq->ring.desc_count; + int i; buf = rq->to_clean; - while (vnic_rq_desc_used(rq) > 0) { - + for (i = 0; i < rq->ring.desc_count; i++) { (*buf_clean)(rq, buf); - - buf = rq->to_clean = buf->next; - rq->ring.desc_avail++; + buf = buf->next; } + rq->ring.desc_avail = rq->ring.desc_count - 1; /* Use current fetch_index as the ring starting point */ fetch_index = ioread32(&rq->ctrl->fetch_index); diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index badff181e719..8966f3159bb2 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -5189,16 +5189,16 @@ de4x5_parse_params(struct net_device *dev) if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true; if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) { - if (strstr(p, "TP")) { - lp->params.autosense = TP; - } else if (strstr(p, "TP_NW")) { + if (strstr(p, "TP_NW")) { lp->params.autosense = TP_NW; + } else if (strstr(p, "TP")) { + lp->params.autosense = TP; + } else if (strstr(p, "BNC_AUI")) { + lp->params.autosense = BNC; } else if (strstr(p, "BNC")) { lp->params.autosense = BNC; } else if (strstr(p, "AUI")) { lp->params.autosense = AUI; - } else if (strstr(p, "BNC_AUI")) { - lp->params.autosense = BNC; } else if (strstr(p, "10Mb")) { lp->params.autosense = _10Mb; } else if (strstr(p, "100Mb")) { diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index 2c30c0c83f98..447d09272ab7 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -1115,7 +1115,7 @@ static void uli526x_timer(unsigned long data) netif_carrier_off(dev); } } - db->init=0; + db->init = 0; /* Timer active again */ db->timer.expires = ULI526X_TIMER_WUT; diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 1274b6fdac8a..cf0a5fcdaaaf 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -463,10 +463,8 @@ rio_open (struct net_device *dev) dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging); } - init_timer (&np->timer); + setup_timer(&np->timer, rio_timer, (unsigned long)dev); np->timer.expires = jiffies + 1*HZ; - np->timer.data = (unsigned long) dev; - np->timer.function = rio_timer; add_timer (&np->timer); /* Start Tx/Rx */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 41150543906a..9eac3227d2ca 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ 
b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1742,9 +1742,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) total_size = buf_len; get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; - get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, - get_fat_cmd.size, - &get_fat_cmd.dma); + get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + get_fat_cmd.size, + &get_fat_cmd.dma, GFP_ATOMIC); if (!get_fat_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure while reading FAT data\n"); @@ -1789,8 +1789,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) log_offset += buf_size; } err: - pci_free_consistent(adapter->pdev, get_fat_cmd.size, - get_fat_cmd.va, get_fat_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size, + get_fat_cmd.va, get_fat_cmd.dma); spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2237,12 +2237,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, return -EINVAL; cmd.size = sizeof(struct be_cmd_resp_port_type); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); return -ENOMEM; } - memset(cmd.va, 0, cmd.size); spin_lock_bh(&adapter->mcc_lock); @@ -2267,7 +2267,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, } err: spin_unlock_bh(&adapter->mcc_lock); - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); return status; } @@ -2742,7 +2742,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) goto err; } cmd.size = sizeof(struct be_cmd_req_get_phy_info); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); status = -ENOMEM; @@ -2776,7 +2777,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) BE_SUPPORTED_SPEED_1GBPS; } } - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); err: spin_unlock_bh(&adapter->mcc_lock); return status; @@ -2827,8 +2828,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); - attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, - &attribs_cmd.dma); + attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + attribs_cmd.size, + &attribs_cmd.dma, GFP_ATOMIC); if (!attribs_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); status = -ENOMEM; @@ -2855,8 +2857,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) err: mutex_unlock(&adapter->mbox_lock); if (attribs_cmd.va) - pci_free_consistent(adapter->pdev, attribs_cmd.size, - attribs_cmd.va, attribs_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size, + attribs_cmd.va, attribs_cmd.dma); return status; } @@ -2994,9 +2996,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); - get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, - get_mac_list_cmd.size, - &get_mac_list_cmd.dma); + get_mac_list_cmd.va = 
dma_zalloc_coherent(&adapter->pdev->dev, + get_mac_list_cmd.size, + &get_mac_list_cmd.dma, + GFP_ATOMIC); if (!get_mac_list_cmd.va) { dev_err(&adapter->pdev->dev, @@ -3069,8 +3072,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, out: spin_unlock_bh(&adapter->mcc_lock); - pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, - get_mac_list_cmd.va, get_mac_list_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size, + get_mac_list_cmd.va, get_mac_list_cmd.dma); return status; } @@ -3123,8 +3126,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_req_set_mac_list); - cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, - &cmd.dma, GFP_KERNEL); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_KERNEL); if (!cmd.va) return -ENOMEM; @@ -3325,7 +3328,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); status = -ENOMEM; @@ -3360,7 +3364,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) err: mutex_unlock(&adapter->mbox_lock); if (cmd.va) - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); return status; } @@ -3374,8 +3379,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); - extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, - &extfat_cmd.dma); + extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + extfat_cmd.size, &extfat_cmd.dma, + GFP_ATOMIC); if (!extfat_cmd.va) return -ENOMEM; @@ -3397,8 +3403,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); err: - pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, - extfat_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, + extfat_cmd.dma); return status; } @@ -3411,8 +3417,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); - extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, - &extfat_cmd.dma); + extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + extfat_cmd.size, &extfat_cmd.dma, + GFP_ATOMIC); if (!extfat_cmd.va) { dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", @@ -3430,8 +3437,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) level = cfgs->module[0].trace_lvl[j].dbg_lvl; } } - pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, - extfat_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, + extfat_cmd.dma); err: return level; } @@ -3629,7 +3636,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_resp_get_func_config); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = 
dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); status = -ENOMEM; @@ -3669,7 +3677,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) err: mutex_unlock(&adapter->mbox_lock); if (cmd.va) - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); return status; } @@ -3690,7 +3699,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_resp_get_profile_config); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) return -ENOMEM; @@ -3736,7 +3746,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, res->vf_if_cap_flags = vf_res->cap_flags; err: if (cmd.va) - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); return status; } @@ -3751,7 +3762,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_req_set_profile_config); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) return -ENOMEM; @@ -3767,7 +3779,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, status = be_cmd_notify_wait(adapter, &wrb); if (cmd.va) - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); return status; } diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 675cbacef772..b2476dbfd103 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -263,8 +263,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, int status = 0; read_cmd.size = LANCER_READ_FILE_CHUNK; - read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, - &read_cmd.dma); + read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size, + &read_cmd.dma, GFP_ATOMIC); if (!read_cmd.va) { dev_err(&adapter->pdev->dev, @@ -288,8 +288,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, break; } } - pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, - read_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va, + read_cmd.dma); return status; } @@ -825,8 +825,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter) }; ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); - ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, - &ddrdma_cmd.dma, GFP_KERNEL); + ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + ddrdma_cmd.size, &ddrdma_cmd.dma, + GFP_KERNEL); if (!ddrdma_cmd.va) return -ENOMEM; @@ -948,8 +949,9 @@ static int be_read_eeprom(struct net_device *netdev, memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); - eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, - &eeprom_cmd.dma, GFP_KERNEL); + eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + eeprom_cmd.size, &eeprom_cmd.dma, + GFP_KERNEL); if 
(!eeprom_cmd.va) return -ENOMEM; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 75696d4f9ed0..c0f34845cf59 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2593,11 +2593,11 @@ static int be_evt_queues_create(struct be_adapter *adapter) adapter->cfg_num_qs); for_all_evt_queues(adapter, eqo, i) { + int numa_node = dev_to_node(&adapter->pdev->dev); if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) return -ENOMEM; - cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev), - eqo->affinity_mask); - + cpumask_set_cpu(cpumask_local_spread(i, numa_node), + eqo->affinity_mask); netif_napi_add(adapter->netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT); napi_hash_add(&eqo->napi); @@ -4855,8 +4855,8 @@ static int lancer_fw_download(struct be_adapter *adapter, flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) + LANCER_FW_DOWNLOAD_CHUNK; - flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, - &flash_cmd.dma, GFP_KERNEL); + flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, + &flash_cmd.dma, GFP_KERNEL); if (!flash_cmd.va) return -ENOMEM; @@ -4965,8 +4965,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) } flash_cmd.size = sizeof(struct be_cmd_write_flashrom); - flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, - GFP_KERNEL); + flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, + GFP_KERNEL); if (!flash_cmd.va) return -ENOMEM; @@ -5521,16 +5521,15 @@ static int be_drv_init(struct be_adapter *adapter) int status = 0; mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; - mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, - &mbox_mem_alloc->dma, - GFP_KERNEL); + mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size, + &mbox_mem_alloc->dma, + GFP_KERNEL); if (!mbox_mem_alloc->va) return -ENOMEM; mbox_mem_align->size = sizeof(struct be_mcc_mailbox); mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); - memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); rx_filter->size = sizeof(struct be_cmd_req_rx_filter); rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index a583d89b13c4..a15663ad7f5e 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -353,6 +353,7 @@ static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK; tmp |= corr_ns << FEC_T_INC_CORR_OFFSET; writel(tmp, fep->hwp + FEC_ATIME_INC); + corr_period = corr_period > 1 ? corr_period - 1 : corr_period; writel(corr_period, fep->hwp + FEC_ATIME_CORR); /* dummy read to update the timer. 
*/ timecounter_read(&fep->tc); diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index de7919322190..b9df0cbd0a38 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2084,12 +2084,8 @@ static void emac_ethtool_get_pauseparam(struct net_device *ndev, static int emac_get_regs_len(struct emac_instance *dev) { - if (emac_has_feature(dev, EMAC_FTR_EMAC4)) - return sizeof(struct emac_ethtool_regs_subhdr) + - EMAC4_ETHTOOL_REGS_SIZE(dev); - else return sizeof(struct emac_ethtool_regs_subhdr) + - EMAC_ETHTOOL_REGS_SIZE(dev); + sizeof(struct emac_regs); } static int emac_ethtool_get_regs_len(struct net_device *ndev) @@ -2114,15 +2110,15 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf) struct emac_ethtool_regs_subhdr *hdr = buf; hdr->index = dev->cell_index; - if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { + if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) { + hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER; + } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { hdr->version = EMAC4_ETHTOOL_REGS_VER; - memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev)); - return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev); } else { hdr->version = EMAC_ETHTOOL_REGS_VER; - memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev)); - return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev); } + memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs)); + return (void *)(hdr + 1) + sizeof(struct emac_regs); } static void emac_ethtool_get_regs(struct net_device *ndev, diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 67f342a9f65e..28df37420da9 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -461,10 +461,7 @@ struct emac_ethtool_regs_subhdr { }; #define EMAC_ETHTOOL_REGS_VER 0 -#define EMAC_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \ - (dev)->rsrc_regs.start + 1) -#define EMAC4_ETHTOOL_REGS_VER 1 -#define EMAC4_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \ - (dev)->rsrc_regs.start + 1) +#define EMAC4_ETHTOOL_REGS_VER 1 +#define EMAC4SYNC_ETHTOOL_REGS_VER 2 #endif /* __IBM_NEWEMAC_CORE_H */ diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 35357ae2fe75..d2657a412768 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2922,9 +2922,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - init_timer(&nic->watchdog); - nic->watchdog.function = e100_watchdog; - nic->watchdog.data = (unsigned long)nic; + setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic); INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 08f22f348800..2af603f3e418 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h index 535a9430976d..a2162e11673e 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 32e77755a9c6..5f7016442ec4 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h index 2e758f796d60..abc6a9abff98 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.h +++ b/drivers/net/ethernet/intel/e1000e/82571.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 0570c668ec3d..133d4074dbe4 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 0abc942c966e..0b748d1959d9 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -98,6 +98,8 @@ struct e1000_info; #define DEFAULT_RADV 8 #define BURST_RDTR 0x20 #define BURST_RADV 0x20 +#define PCICFG_DESC_RING_STATUS 0xe4 +#define FLUSH_DESC_REQUIRED 0x100 /* in the case of WTHRESH, it appears at least the 82571/2 hardware * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when @@ -384,6 +386,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); #define INCVALUE_SHIFT_25MHz 18 #define INCPERIOD_25MHz 1 +#define INCVALUE_24MHz 125 +#define INCVALUE_SHIFT_24MHz 14 +#define INCPERIOD_24MHz 3 + /* Another drawback of scaling the incvalue by a large factor is the * 64-bit SYSTIM register overflows more quickly. This is dealt with * by simply reading the clock before it overflows. 
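The 24MHz constants just added to e1000.h encode exact tick arithmetic: at 24 MHz, INCPERIOD_24MHz (3) clock cycles span exactly 125 ns, so SYSTIM can be advanced by INCVALUE_24MHz (125) once per period with no rounding error, the shift only scaling the increment for fractional-nanosecond resolution. A minimal standalone C sketch of that arithmetic, assuming the usual e1000e convention that SYSTIM counts in units of 2^-shift ns (systim_ns_per_second() is an illustrative helper, not a driver function):

#include <stdint.h>
#include <stdio.h>

/* Constants as defined in the e1000.h hunk above. */
#define INCVALUE_24MHz		125
#define INCVALUE_SHIFT_24MHz	14
#define INCPERIOD_24MHz		3

/* Nanoseconds accumulated by SYSTIM over one second of a 24 MHz clock:
 * every INCPERIOD_24MHz cycles the counter gains
 * INCVALUE_24MHz << INCVALUE_SHIFT_24MHz units, one unit being
 * 2^-INCVALUE_SHIFT_24MHz ns.
 */
static uint64_t systim_ns_per_second(void)
{
	uint64_t increments = 24000000 / INCPERIOD_24MHz;  /* 8,000,000 */
	uint64_t units = increments *
			 ((uint64_t)INCVALUE_24MHz << INCVALUE_SHIFT_24MHz);

	return units >> INCVALUE_SHIFT_24MHz;  /* exactly 1,000,000,000 */
}

int main(void)
{
	printf("%llu ns\n", (unsigned long long)systim_ns_per_second());
	return 0;
}

The netdev.c hunk further below relies on these values for e1000_pch_spt and returns -EINVAL when the 24MHz clock is not indicated by TSYNCRXCTL.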
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 11f486e4ff7b..ad6daa656d3e 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -1516,8 +1516,19 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) static int e1000_setup_loopback_test(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - u32 rctl; - + u32 rctl, fext_nvm11, tarc0; + + if (hw->mac.type == e1000_pch_spt) { + fext_nvm11 = er32(FEXTNVM11); + fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; + ew32(FEXTNVM11, fext_nvm11); + tarc0 = er32(TARC(0)); + /* clear bits 28 & 29 (control of MULR concurrent requests) */ + tarc0 &= 0xcfffffff; + /* set bit 29 (value of MULR requests is now 2) */ + tarc0 |= 0x20000000; + ew32(TARC(0), tarc0); + } if (hw->phy.media_type == e1000_media_type_fiber || hw->phy.media_type == e1000_media_type_internal_serdes) { switch (hw->mac.type) { @@ -1542,7 +1553,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter) static void e1000_loopback_cleanup(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - u32 rctl; + u32 rctl, fext_nvm11, tarc0; u16 phy_reg; rctl = er32(RCTL); @@ -1550,6 +1561,16 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) ew32(RCTL, rctl); switch (hw->mac.type) { + case e1000_pch_spt: + fext_nvm11 = er32(FEXTNVM11); + fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX; + ew32(FEXTNVM11, fext_nvm11); + tarc0 = er32(TARC(0)); + /* clear bits 28 & 29 (control of MULR concurrent requests) */ + /* set bit 29 (value of MULR requests is now 0) */ + tarc0 &= 0xcfffffff; + ew32(TARC(0), tarc0); + /* fall through */ case e1000_80003es2lan: if (hw->phy.media_type == e1000_media_type_fiber || hw->phy.media_type == e1000_media_type_internal_serdes) { diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 19e8c487db06..c9da4654e9ca 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index e18443a00bdb..b074b9a667b3 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -1014,7 +1014,6 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) u16 speed, duplex, scale = 0; u16 max_snoop, max_nosnoop; u16 max_ltr_enc; /* max LTR latency encoded */ - s64 lat_ns; /* latency (ns) */ u64 value; u32 rxa; @@ -1040,14 +1039,10 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, * 1=2^5ns, 2=2^10ns,...5=2^25ns. */ - lat_ns = ((s64)rxa * 1024 - - (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000; - if (lat_ns < 0) { - value = 0; - } else { - value = lat_ns; - do_div(value, speed); - } + rxa *= 512; + value = (rxa > hw->adapter->max_frame_size) ? + (rxa - hw->adapter->max_frame_size) * (16000 / speed) : + 0; while (value > PCI_LTR_VALUE_MASK) { scale++; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 770a573b9eea..26459853c6be 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -98,8 +98,15 @@ #define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000 /* bit for disabling packet buffer read */ #define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 - +#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004 #define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 +#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800 +#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000 +#define E1000_FEXTNVM11_DISABLE_PB_READ 0x00000200 +#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000 + +/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */ +#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000 #define K1_ENTRY_LATENCY 0 #define K1_MIN_TIME 1 diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index 30b74d590bee..e59d7c283cd4 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h index 0513d90cdeea..8284618af9ff 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.h +++ b/drivers/net/ethernet/intel/e1000e/mac.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c index 06edfca1a35e..cc9b3befc2bc 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.c +++ b/drivers/net/ethernet/intel/e1000e/manage.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h index a8c27f98f7b0..0b9ea5952b07 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.h +++ b/drivers/net/ethernet/intel/e1000e/manage.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 7dd2c11c3f61..e62b9dcb91fe 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -48,7 +48,7 @@ #define DRV_EXTRAVERSION "-k" -#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION +#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -3525,22 +3525,30 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) switch (hw->mac.type) { case e1000_pch2lan: case e1000_pch_lpt: - case e1000_pch_spt: - /* On I217, I218 and I219, the clock frequency is 25MHz - * or 96MHz as indicated by the System Clock Frequency - * Indication - */ - if (((hw->mac.type != e1000_pch_lpt) && - (hw->mac.type != e1000_pch_spt)) || - (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 96MHz frequency */ incperiod = INCPERIOD_96MHz; incvalue = INCVALUE_96MHz; shift = INCVALUE_SHIFT_96MHz; adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; + } else { + /* Stable 25MHz frequency */ + incperiod = INCPERIOD_25MHz; + incvalue = INCVALUE_25MHz; + shift = INCVALUE_SHIFT_25MHz; + adapter->cc.shift = shift; + } + break; + case e1000_pch_spt: + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { + /* Stable 24MHz frequency */ + incperiod = INCPERIOD_24MHz; + incvalue = INCVALUE_24MHz; + shift = INCVALUE_SHIFT_24MHz; + adapter->cc.shift = shift; break; } - /* fall-through */ + return -EINVAL; case e1000_82574: case e1000_82583: /* Stable 25MHz frequency */ @@ -3787,6 +3795,108 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) adapter->hw.phy.ops.power_down(&adapter->hw); } +/** + * e1000_flush_tx_ring - remove all descriptors from the tx_ring + * + * We want to clear all pending descriptors from the TX ring. + * Zeroing happens when the HW reads the regs. We assign the ring itself as + * the data of the next descriptor. We don't care about the data since we are + * about to reset the HW.
+ */ +static void e1000_flush_tx_ring(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc = NULL; + u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS; + u16 size = 512; + + tctl = er32(TCTL); + ew32(TCTL, tctl | E1000_TCTL_EN); + tdt = er32(TDT(0)); + BUG_ON(tdt != tx_ring->next_to_use); + tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use); + tx_desc->buffer_addr = tx_ring->dma; + + tx_desc->lower.data = cpu_to_le32(txd_lower | size); + tx_desc->upper.data = 0; + /* flush descriptors to memory before notifying the HW */ + wmb(); + tx_ring->next_to_use++; + if (tx_ring->next_to_use == tx_ring->count) + tx_ring->next_to_use = 0; + ew32(TDT(0), tx_ring->next_to_use); + mmiowb(); + usleep_range(200, 250); +} + +/** + * e1000_flush_rx_ring - remove all descriptors from the rx_ring + * + * Mark all descriptors in the RX ring as consumed and disable the rx ring + */ +static void e1000_flush_rx_ring(struct e1000_adapter *adapter) +{ + u32 rctl, rxdctl; + struct e1000_hw *hw = &adapter->hw; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e1e_flush(); + usleep_range(100, 150); + + rxdctl = er32(RXDCTL(0)); + /* zero the lower 14 bits (prefetch and host thresholds) */ + rxdctl &= 0xffffc000; + + /* update thresholds: prefetch threshold to 31, host threshold to 1 + * and make sure the granularity is "descriptors" and not "cache lines" + */ + rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC); + + ew32(RXDCTL(0), rxdctl); + /* momentarily enable the RX ring for the changes to take effect */ + ew32(RCTL, rctl | E1000_RCTL_EN); + e1e_flush(); + usleep_range(100, 150); + ew32(RCTL, rctl & ~E1000_RCTL_EN); +} + +/** + * e1000_flush_desc_rings - remove all descriptors from the descriptor rings + * + * In i219, the descriptor rings must be emptied before resetting the HW + * or before changing the device state to D3 during runtime (runtime PM). 
+ * + * Failure to do this will cause the HW to enter a unit hang state which can + * only be released by a PCI reset of the device + * + */ + +static void e1000_flush_desc_rings(struct e1000_adapter *adapter) +{ + u16 hang_state; + u32 fext_nvm11, tdlen; + struct e1000_hw *hw = &adapter->hw; + + /* First, disable MULR fix in FEXTNVM11 */ + fext_nvm11 = er32(FEXTNVM11); + fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; + ew32(FEXTNVM11, fext_nvm11); + /* do nothing if we're not in faulty state, or if the queue is empty */ + tdlen = er32(TDLEN(0)); + pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, + &hang_state); + if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen) + return; + e1000_flush_tx_ring(adapter); + /* recheck, maybe the fault is caused by the rx ring */ + pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, + &hang_state); + if (hang_state & FLUSH_DESC_REQUIRED) + e1000_flush_rx_ring(adapter); +} + /** * e1000e_reset - bring the hardware into a known good state * @@ -3943,6 +4053,8 @@ void e1000e_reset(struct e1000_adapter *adapter) } } + if (hw->mac.type == e1000_pch_spt) + e1000_flush_desc_rings(adapter); /* Allow time for pending master requests to run */ mac->ops.reset_hw(hw); @@ -4016,6 +4128,20 @@ void e1000e_reset(struct e1000_adapter *adapter) phy_data &= ~IGP02E1000_PM_SPD; e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); } + if (hw->mac.type == e1000_pch_spt && adapter->int_mode == 0) { + u32 reg; + + /* Fextnvm7 @ 0xe4[2] = 1 */ + reg = er32(FEXTNVM7); + reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE; + ew32(FEXTNVM7, reg); + /* Fextnvm9 @ 0x5bb4[13:12] = 11 */ + reg = er32(FEXTNVM9); + reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS | + E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS; + ew32(FEXTNVM9, reg); + } + } int e1000e_up(struct e1000_adapter *adapter) @@ -4115,8 +4241,6 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) spin_unlock(&adapter->stats64_lock); e1000e_flush_descriptors(adapter); - e1000_clean_tx_ring(adapter->tx_ring); - e1000_clean_rx_ring(adapter->rx_ring); adapter->link_speed = 0; adapter->link_duplex = 0; @@ -4127,8 +4251,14 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) e1000_lv_jumbo_workaround_ich8lan(hw, false)) e_dbg("failed to disable jumbo frame workaround mode\n"); - if (reset && !pci_channel_offline(adapter->pdev)) - e1000e_reset(adapter); + if (!pci_channel_offline(adapter->pdev)) { + if (reset) + e1000e_reset(adapter); + else if (hw->mac.type == e1000_pch_spt) + e1000_flush_desc_rings(adapter); + } + e1000_clean_tx_ring(adapter->tx_ring); + e1000_clean_rx_ring(adapter->rx_ring); } void e1000e_reinit_locked(struct e1000_adapter *adapter) @@ -4151,9 +4281,16 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) cc); struct e1000_hw *hw = &adapter->hw; cycle_t systim, systim_next; + /* SYSTIMH latching upon SYSTIML read does not work well. To fix that + * we don't want to allow overflow of SYSTIML and a change to SYSTIMH + * to occur between reads, so if we read a value close to overflow, we + * wait for overflow to occur and read both registers when it's safe.
+ */ + u32 systim_overflow_latch_fix = 0x3FFFFFFF; - /* latch SYSTIMH on read of SYSTIML */ - systim = (cycle_t)er32(SYSTIML); + do { + systim = (cycle_t)er32(SYSTIML); + } while (systim > systim_overflow_latch_fix); systim |= (cycle_t)er32(SYSTIMH) << 32; if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { @@ -7301,7 +7438,7 @@ static int __init e1000_init_module(void) pr_info("Intel(R) PRO/1000 Network Driver - %s\n", e1000e_driver_version); - pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n"); + pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n"); ret = pci_register_driver(&e1000_driver); return ret; diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index fa6b1036a327..49f205c023bf 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h index 342bf69efab5..5d46967e0d1f 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.h +++ b/drivers/net/ethernet/intel/e1000e/nvm.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index aa1923f7ebdd..6d8c39abee16 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index b2005e13fb01..de13aeacae97 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h index 537d2780b408..55bfe473514d 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.h +++ b/drivers/net/ethernet/intel/e1000e/phy.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 8d7b21dc7e19..25a0ad5102d6 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. 
+ * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 85eefc4832ba..b24e5fee17f2 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -38,6 +38,8 @@ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */ +#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */ #define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 33c35d3b7420..ec76c3fa3a04 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -182,6 +182,7 @@ struct i40e_lump_tracking { enum i40e_fd_stat_idx { I40E_FD_STAT_ATR, I40E_FD_STAT_SB, + I40E_FD_STAT_ATR_TUNNEL, I40E_FD_STAT_PF_COUNT }; #define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT) @@ -189,6 +190,8 @@ enum i40e_fd_stat_idx { (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR) #define I40E_FD_SB_STAT_IDX(pf_id) \ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB) +#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \ + (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL) struct i40e_fdir_filter { struct hlist_node fdir_node; @@ -263,8 +266,6 @@ struct i40e_pf { struct hlist_head fdir_filter_list; u16 fdir_pf_active_filters; - u16 fd_sb_cnt_idx; - u16 fd_atr_cnt_idx; unsigned long fd_flush_timestamp; u32 fd_flush_cnt; u32 fd_add_err; @@ -317,6 +318,7 @@ struct i40e_pf { #endif #define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) #define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) +#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 34170eabca7d..da0faf478af0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } + /* By default we are in VEPA mode; if this is the first VF/VMDq + * VSI to be added, switch to VEB mode.
+ */ + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, + BIT_ULL(__I40E_PF_RESET_REQUESTED)); + } + vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); if (vsi) dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 4cbaaeb902c4..9a68c65b17ea 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -147,6 +147,7 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), + I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match), I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match), /* LPI stats */ @@ -1548,6 +1549,17 @@ static int i40e_loopback_test(struct net_device *netdev, u64 *data) return *data; } +static inline bool i40e_active_vfs(struct i40e_pf *pf) +{ + struct i40e_vf *vfs = pf->vf; + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++) + if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE) + return true; + return false; +} + static void i40e_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { @@ -1560,6 +1572,20 @@ static void i40e_diag_test(struct net_device *netdev, netif_info(pf, drv, netdev, "offline testing starting\n"); set_bit(__I40E_TESTING, &pf->state); + + if (i40e_active_vfs(pf)) { + dev_warn(&pf->pdev->dev, + "Please take active VFS offline and restart the adapter before running NIC diagnostics\n"); + data[I40E_ETH_TEST_REG] = 1; + data[I40E_ETH_TEST_EEPROM] = 1; + data[I40E_ETH_TEST_INTR] = 1; + data[I40E_ETH_TEST_LOOPBACK] = 1; + data[I40E_ETH_TEST_LINK] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__I40E_TESTING, &pf->state); + goto skip_ol_tests; + } + /* If the device is online then take it offline */ if (if_running) /* indicate we're in test mode */ @@ -1605,6 +1631,8 @@ static void i40e_diag_test(struct net_device *netdev, data[I40E_ETH_TEST_LOOPBACK] = 0; } +skip_ol_tests: + netif_info(pf, drv, netdev, "testing finished\n"); } @@ -2265,7 +2293,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, input->pctype = 0; input->dest_vsi = vsi->id; input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; - input->cnt_index = pf->fd_sb_cnt_idx; + input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); input->flow_type = fsp->flow_type; input->ip4_proto = fsp->h_u.usr_ip4_spec.proto; diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 1803afeef23e..c8b621e0e7cd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -118,7 +118,7 @@ static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof) * * The FC EOF is converted to the value understood by HW for descriptor * programming. Never call this w/o calling i40e_fcoe_eof_is_supported() - * first. + * first and that already checks for all supported valid eof values. **/ static inline u32 i40e_fcoe_ctxt_eof(u8 eof) { @@ -132,9 +132,12 @@ static inline u32 i40e_fcoe_ctxt_eof(u8 eof) case FC_EOF_A: return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A; default: - /* FIXME: still returns 0 */ - pr_err("Unrecognized EOF %x\n", eof); - return 0; + /* Supported valid eof shall be already checked by + * calling i40e_fcoe_eof_is_supported() first, + * therefore this default case shall never hit. 
+ */ + WARN_ON(1); + return -EINVAL; } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a54c14491e3b..52d7d8b8f1f9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -39,7 +39,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 2 +#define DRV_VERSION_BUILD 4 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -772,9 +772,8 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) dcb_cfg = &hw->local_dcbx_config; - /* See if DCB enabled with PFC TC */ - if (!(pf->flags & I40E_FLAG_DCB_ENABLED) || - !(dcb_cfg->pfc.pfcenable)) { + /* Collect Link XOFF stats when PFC is disabled */ + if (!dcb_cfg->pfc.pfcenable) { i40e_update_link_xoff_rx(pf); return; } @@ -1097,12 +1096,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) &osd->rx_jabber, &nsd->rx_jabber); /* FDIR stats */ - i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx), + i40e_stat_update32(hw, + I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)), pf->stat_offsets_loaded, &osd->fd_atr_match, &nsd->fd_atr_match); - i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx), + i40e_stat_update32(hw, + I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)), pf->stat_offsets_loaded, &osd->fd_sb_match, &nsd->fd_sb_match); + i40e_stat_update32(hw, + I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)), + pf->stat_offsets_loaded, + &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match); val = rd32(hw, I40E_PRTPM_EEE_STAT); nsd->tx_lpi_status = @@ -4739,7 +4744,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi) pf->fd_add_err = pf->fd_atr_cnt = 0; if (pf->fd_tcp_rule > 0) { pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; - dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); pf->fd_tcp_rule = 0; } i40e_fdir_filter_restore(vsi); @@ -5428,7 +5434,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; - dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); } } /* Wait for some more space to be available to turn on ATR */ @@ -5436,7 +5443,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); } } } @@ -5469,7 +5477,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) if (!(time_after(jiffies, min_flush_time)) && (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { - dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); disable_atr = true; } @@ -5496,7 
+5505,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) if (!disable_atr) pf->flags |= I40E_FLAG_FD_ATR_ENABLED; clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); - dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } } } @@ -6097,6 +6107,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (ret) goto end_reconstitute; + if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) + veb->bridge_mode = BRIDGE_MODE_VEB; + else + veb->bridge_mode = BRIDGE_MODE_VEPA; i40e_config_bridge_mode(veb); /* create the remaining VSIs attached to this VEB */ @@ -7676,12 +7690,8 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.func_caps.fd_filters_best_effort > 0)) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; - /* Setup a counter for fd_atr per PF */ - pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id); if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { pf->flags |= I40E_FLAG_FD_SB_ENABLED; - /* Setup a counter for fd_sb per PF */ - pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); } else { dev_info(&pf->pdev->dev, "Flow Director Sideband mode Disabled in MFP mode\n"); @@ -7771,7 +7781,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; pf->fdir_pf_active_filters = 0; pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); /* if ATR was auto disabled it can be re-enabled. */ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) @@ -8031,7 +8042,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, } else if (mode != veb->bridge_mode) { /* Existing HW bridge but different mode needs reset */ veb->bridge_mode = mode; - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ + if (mode == BRIDGE_MODE_VEB) + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + else + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); break; } } @@ -8343,11 +8359,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_PF; - if (i40e_is_vsi_uplink_mode_veb(vsi)) { + if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && + (i40e_is_vsi_uplink_mode_veb(vsi))) { ctxt.info.valid_sections |= - cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = - cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; @@ -8746,6 +8763,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, __func__); return NULL; } + /* We come up by default in VEPA mode if SRIOV is not + * already enabled, in which case we can't force VEPA + * mode. 
+ */ + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + veb->bridge_mode = BRIDGE_MODE_VEPA; + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + } i40e_config_bridge_mode(veb); } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { @@ -9856,6 +9881,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_switch_setup; } +#ifdef CONFIG_PCI_IOV + /* prep for VF support */ + if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && + (pf->flags & I40E_FLAG_MSIX_ENABLED) && + !test_bit(__I40E_BAD_EEPROM, &pf->state)) { + if (pci_num_vf(pdev)) + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + } +#endif err = i40e_setup_pf_switch(pf, false); if (err) { dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 0b4a7be2c7d2..9a4f2bc70cd2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -165,9 +165,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0); - /* set the timestamp */ - tx_buf->time_stamp = jiffies; - /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. */ @@ -283,7 +280,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, if (add) { pf->fd_tcp_rule++; if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { - dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; } } else { @@ -291,7 +289,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, (pf->fd_tcp_rule - 1) : 0; if (pf->fd_tcp_rule == 0) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n"); } } @@ -501,7 +500,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { - dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED; } @@ -807,10 +807,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->vsi->seid, tx_ring->queue_index, tx_ring->next_to_use, i); - dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n" - " time_stamp <%lx>\n" - " jiffies <%lx>\n", - tx_ring->tx_bi[i].time_stamp, jiffies); netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); @@ -1653,9 +1649,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) /* ERR_MASK will only have valid bits if EOP set */ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { dev_kfree_skb_any(skb); - /* TODO: shouldn't we increment a counter indicating the - * drop? 
- */ continue; } @@ -1923,11 +1916,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * i40e_atr - Add a Flow Director ATR filter * @tx_ring: ring to add programming descriptor to * @skb: send buffer - * @flags: send flags + * @tx_flags: send tx flags * @protocol: wire protocol **/ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, - u32 flags, __be16 protocol) + u32 tx_flags, __be16 protocol) { struct i40e_filter_program_desc *fdir_desc; struct i40e_pf *pf = tx_ring->vsi->back; @@ -1952,25 +1945,38 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!tx_ring->atr_sample_rate) return; - /* snag network header to get L4 type and address */ - hdr.network = skb_network_header(skb); + if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) + return; - /* Currently only IPv4/IPv6 with TCP is supported */ - if (protocol == htons(ETH_P_IP)) { - if (hdr.ipv4->protocol != IPPROTO_TCP) - return; + if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) { + /* snag network header to get L4 type and address */ + hdr.network = skb_network_header(skb); - /* access ihl as a u8 to avoid unaligned access on ia64 */ - hlen = (hdr.network[0] & 0x0F) << 2; - } else if (protocol == htons(ETH_P_IPV6)) { - if (hdr.ipv6->nexthdr != IPPROTO_TCP) + /* Currently only IPv4/IPv6 with TCP is supported + * access ihl as u8 to avoid unaligned access on ia64 + */ + if (tx_flags & I40E_TX_FLAGS_IPV4) + hlen = (hdr.network[0] & 0x0F) << 2; + else if (protocol == htons(ETH_P_IPV6)) + hlen = sizeof(struct ipv6hdr); + else return; - - hlen = sizeof(struct ipv6hdr); } else { - return; + hdr.network = skb_inner_network_header(skb); + hlen = skb_inner_network_header_len(skb); } + /* Currently only IPv4/IPv6 with TCP is supported + * Note: tx_flags gets modified to reflect inner protocols in + * tx_enable_csum function if encap is enabled. + */ + if ((tx_flags & I40E_TX_FLAGS_IPV4) && + (hdr.ipv4->protocol != IPPROTO_TCP)) + return; + else if ((tx_flags & I40E_TX_FLAGS_IPV6) && + (hdr.ipv6->nexthdr != IPPROTO_TCP)) + return; + th = (struct tcphdr *)(hdr.network + hlen); /* Due to lack of space, no more new filters can be programmed */ @@ -2020,9 +2026,16 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; - dtype_cmd |= - ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & - I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) + dtype_cmd |= + ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + else + dtype_cmd |= + ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK; fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); fdir_desc->rsvd = cpu_to_le32(0); @@ -2043,13 +2056,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, * otherwise returns 0 to indicate the flags has been set properly. 
**/ #ifdef I40E_FCOE -int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, - struct i40e_ring *tx_ring, - u32 *flags) -#else -static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, +inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, struct i40e_ring *tx_ring, u32 *flags) +#else +static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, + struct i40e_ring *tx_ring, + u32 *flags) #endif { __be16 protocol = skb->protocol; @@ -2117,16 +2130,14 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, * i40e_tso - set up the tso context descriptor * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending - * @tx_flags: the collected send information - * @protocol: the send protocol * @hdr_len: ptr to the size of the packet header * @cd_tunneling: ptr to context descriptor bits * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, __be16 protocol, u8 *hdr_len, - u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) + u8 *hdr_len, u64 *cd_type_cmd_tso_mss, + u32 *cd_tunneling) { u32 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; @@ -2218,12 +2229,12 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, /** * i40e_tx_enable_csum - Enable Tx checksum offloads * @skb: send buffer - * @tx_flags: Tx flags currently set + * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set * @td_offset: Tx descriptor header offsets to set * @cd_tunneling: ptr to context desc bits **/ -static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, +static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, u32 *td_cmd, u32 *td_offset, struct i40e_ring *tx_ring, u32 *cd_tunneling) @@ -2239,6 +2250,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; default: return; @@ -2248,18 +2260,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, this_ipv6_hdr = inner_ipv6_hdr(skb); this_tcp_hdrlen = inner_tcp_hdrlen(skb); - if (tx_flags & I40E_TX_FLAGS_IPV4) { - - if (tx_flags & I40E_TX_FLAGS_TSO) { + if (*tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & I40E_TX_FLAGS_TSO) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; ip_hdr(skb)->check = 0; } else { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; } - } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; - if (tx_flags & I40E_TX_FLAGS_TSO) + if (*tx_flags & I40E_TX_FLAGS_TSO) ip_hdr(skb)->check = 0; } @@ -2271,8 +2282,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; if (this_ip_hdr->version == 6) { - tx_flags &= ~I40E_TX_FLAGS_IPV4; - tx_flags |= I40E_TX_FLAGS_IPV6; + *tx_flags &= ~I40E_TX_FLAGS_IPV4; + *tx_flags |= I40E_TX_FLAGS_IPV6; } } else { network_hdr_len = skb_network_header_len(skb); @@ -2282,12 +2293,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, } /* Enable IP checksum offloads */ - if (tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & I40E_TX_FLAGS_IPV4) { l4_hdr = this_ip_hdr->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. 
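* Note also that tx_flags now arrives by pointer: i40e_tx_enable_csum() records the UDP tunnel in the flags and flips IPV4 to IPV6 when the inner header is IPv6, and i40e_atr() depends on seeing those updates.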
*/ - if (tx_flags & I40E_TX_FLAGS_TSO) { + if (*tx_flags & I40E_TX_FLAGS_TSO) { *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; this_ip_hdr->check = 0; } else { @@ -2296,7 +2307,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, /* Now set the td_offset for IP header length */ *td_offset = (network_hdr_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; - } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { l4_hdr = this_ipv6_hdr->nexthdr; *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; /* Now set the td_offset for IP header length */ @@ -2394,9 +2405,9 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) * Returns 0 if stop is not needed **/ #ifdef I40E_FCOE -int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) #else -static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) #endif { if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) @@ -2408,14 +2419,12 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) * i40e_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer * @tx_flags: collected send information - * @hdr_len: size of the packet header * * Note: Our HW can't scatter-gather more than 8 fragments to build * a packet on the wire and so we need to figure out the cases where we * need to linearize the skb. **/ -static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, - const u8 hdr_len) +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) { struct skb_frag_struct *frag; bool linearize = false; @@ -2427,7 +2436,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, gso_segs = skb_shinfo(skb)->gso_segs; if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { - u16 j = 1; + u16 j = 0; if (num_frags < (I40E_MAX_BUFFER_TXD)) goto linearize_chk_done; @@ -2438,21 +2447,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, goto linearize_chk_done; } frag = &skb_shinfo(skb)->frags[0]; - size = hdr_len; /* we might still have more fragments per segment */ do { size += skb_frag_size(frag); frag++; j++; + if ((size >= skb_shinfo(skb)->gso_size) && + (j < I40E_MAX_BUFFER_TXD)) { + size = (size % skb_shinfo(skb)->gso_size); + j = (size) ? 
1 : 0; + } if (j == I40E_MAX_BUFFER_TXD) { - if (size < skb_shinfo(skb)->gso_size) { - linearize = true; - break; - } - j = 1; - size -= skb_shinfo(skb)->gso_size; - if (size) - j++; - size += hdr_len; + linearize = true; + break; } num_frags--; } while (num_frags); @@ -2476,13 +2482,13 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, * @td_offset: offset for checksum or crc **/ #ifdef I40E_FCOE -void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, - struct i40e_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset) -#else -static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, +inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, struct i40e_tx_buffer *first, u32 tx_flags, const u8 hdr_len, u32 td_cmd, u32 td_offset) +#else +static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) #endif { unsigned int data_len = skb->data_len; @@ -2588,9 +2594,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->queue_index), first->bytecount); - /* set the timestamp */ - first->time_stamp = jiffies; - /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -2643,11 +2646,11 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, * one descriptor. **/ #ifdef I40E_FCOE -int i40e_xmit_descriptor_count(struct sk_buff *skb, - struct i40e_ring *tx_ring) -#else -static int i40e_xmit_descriptor_count(struct sk_buff *skb, +inline int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring) +#else +static inline int i40e_xmit_descriptor_count(struct sk_buff *skb, + struct i40e_ring *tx_ring) #endif { unsigned int f; @@ -2709,7 +2712,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, + tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss, &cd_tunneling); if (tso < 0) @@ -2722,7 +2725,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (tsyn) tx_flags |= I40E_TX_FLAGS_TSYN; - if (i40e_chk_linearize(skb, tx_flags, hdr_len)) + if (i40e_chk_linearize(skb, tx_flags)) if (skb_linearize(skb)) goto out_drop; @@ -2735,7 +2738,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_PARTIAL) { tx_flags |= I40E_TX_FLAGS_CSUM; - i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, + i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, tx_ring, &cd_tunneling); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 4b0b8102cdc3..0dc48dc9ca61 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -139,6 +139,7 @@ enum i40e_dyn_idx_t { #define I40E_TX_FLAGS_FSO (u32)(1 << 7) #define I40E_TX_FLAGS_TSYN (u32)(1 << 8) #define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) +#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -146,7 +147,6 @@ enum i40e_dyn_idx_t { struct i40e_tx_buffer { struct i40e_tx_desc *next_to_watch; - unsigned long time_stamp; union { struct sk_buff *skb; void *raw_buf; diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 568e855da0f3..9a5a75b1e2bc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1133,6 +1133,7 @@ struct i40e_hw_port_stats { /* flow director stats */ u64 fd_atr_match; u64 fd_sb_match; + u64 fd_atr_tunnel_match; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 78d1c4ff565e..23f95cdbdfcc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -980,6 +980,13 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) int pre_existing_vfs = pci_num_vf(pdev); int err = 0; + if (pf->state & __I40E_TESTING) { + dev_warn(&pdev->dev, + "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n"); + err = -EPERM; + goto err_out; + } + dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); if (pre_existing_vfs && pre_existing_vfs != num_vfs) i40e_free_vfs(pf); @@ -1018,11 +1025,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct i40e_pf *pf = pci_get_drvdata(pdev); - if (num_vfs) + if (num_vfs) { + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, + BIT_ULL(__I40E_PF_RESET_REQUESTED)); + } return i40e_pci_sriov_enable(pdev, num_vfs); + } if (!pci_vfs_assigned(pf->pdev)) { i40e_free_vfs(pf); + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); return -EINVAL; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 3ef23091439f..f54996f19629 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -322,10 +322,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->vsi->seid, tx_ring->queue_index, tx_ring->next_to_use, i); - dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n" - " time_stamp <%lx>\n" - " jiffies <%lx>\n", - tx_ring->tx_bi[i].time_stamp, jiffies); netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); @@ -1128,9 +1124,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) /* ERR_MASK will only have valid bits if EOP set */ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { dev_kfree_skb_any(skb); - /* TODO: shouldn't we increment a counter indicating the - * drop? - */ continue; } @@ -1350,7 +1343,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) } /** - * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW + * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW * @skb: send buffer * @tx_ring: ring to send buffer on * @flags: the tx flags to be set @@ -1361,9 +1354,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) * Returns error code indicate the frame should be dropped upon error and the * otherwise returns 0 to indicate the flags has been set properly. 
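* (The VF driver's copies of these helpers are renamed with the i40evf_ prefix below; i40evf_tx_map(), i40evf_maybe_stop_tx() and i40evf_xmit_descriptor_count() follow the same pattern.)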
**/ -static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, - struct i40e_ring *tx_ring, - u32 *flags) +static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb, + struct i40e_ring *tx_ring, + u32 *flags) { __be16 protocol = skb->protocol; u32 tx_flags = 0; @@ -1406,16 +1399,14 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, * i40e_tso - set up the tso context descriptor * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending - * @tx_flags: the collected send information - * @protocol: the send protocol * @hdr_len: ptr to the size of the packet header * @cd_tunneling: ptr to context descriptor bits * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, __be16 protocol, u8 *hdr_len, - u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) + u8 *hdr_len, u64 *cd_type_cmd_tso_mss, + u32 *cd_tunneling) { u32 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; @@ -1466,12 +1457,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, /** * i40e_tx_enable_csum - Enable Tx checksum offloads * @skb: send buffer - * @tx_flags: Tx flags currently set + * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set * @td_offset: Tx descriptor header offsets to set * @cd_tunneling: ptr to context desc bits **/ -static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, +static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, u32 *td_cmd, u32 *td_offset, struct i40e_ring *tx_ring, u32 *cd_tunneling) @@ -1487,6 +1478,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; default: return; @@ -1496,18 +1488,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, this_ipv6_hdr = inner_ipv6_hdr(skb); this_tcp_hdrlen = inner_tcp_hdrlen(skb); - if (tx_flags & I40E_TX_FLAGS_IPV4) { - - if (tx_flags & I40E_TX_FLAGS_TSO) { + if (*tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & I40E_TX_FLAGS_TSO) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; ip_hdr(skb)->check = 0; } else { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; } - } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; - if (tx_flags & I40E_TX_FLAGS_TSO) + if (*tx_flags & I40E_TX_FLAGS_TSO) ip_hdr(skb)->check = 0; } @@ -1519,8 +1510,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; if (this_ip_hdr->version == 6) { - tx_flags &= ~I40E_TX_FLAGS_IPV4; - tx_flags |= I40E_TX_FLAGS_IPV6; + *tx_flags &= ~I40E_TX_FLAGS_IPV4; + *tx_flags |= I40E_TX_FLAGS_IPV6; } @@ -1532,12 +1523,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, } /* Enable IP checksum offloads */ - if (tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & I40E_TX_FLAGS_IPV4) { l4_hdr = this_ip_hdr->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. 
*/ - if (tx_flags & I40E_TX_FLAGS_TSO) { + if (*tx_flags & I40E_TX_FLAGS_TSO) { *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; this_ip_hdr->check = 0; } else { @@ -1546,7 +1537,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, /* Now set the td_offset for IP header length */ *td_offset = (network_hdr_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; - } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { l4_hdr = this_ipv6_hdr->nexthdr; *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; /* Now set the td_offset for IP header length */ @@ -1617,14 +1608,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, * i40e_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer * @tx_flags: collected send information - * @hdr_len: size of the packet header * * Note: Our HW can't scatter-gather more than 8 fragments to build * a packet on the wire and so we need to figure out the cases where we * need to linearize the skb. **/ -static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, - const u8 hdr_len) +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) { struct skb_frag_struct *frag; bool linearize = false; @@ -1636,7 +1625,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, gso_segs = skb_shinfo(skb)->gso_segs; if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { - u16 j = 1; + u16 j = 0; if (num_frags < (I40E_MAX_BUFFER_TXD)) goto linearize_chk_done; @@ -1647,21 +1636,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, goto linearize_chk_done; } frag = &skb_shinfo(skb)->frags[0]; - size = hdr_len; /* we might still have more fragments per segment */ do { size += skb_frag_size(frag); frag++; j++; + if ((size >= skb_shinfo(skb)->gso_size) && + (j < I40E_MAX_BUFFER_TXD)) { + size = (size % skb_shinfo(skb)->gso_size); + j = (size) ? 1 : 0; + } if (j == I40E_MAX_BUFFER_TXD) { - if (size < skb_shinfo(skb)->gso_size) { - linearize = true; - break; - } - j = 1; - size -= skb_shinfo(skb)->gso_size; - if (size) - j++; - size += hdr_len; + linearize = true; + break; } num_frags--; } while (num_frags); @@ -1675,7 +1661,44 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, } /** - * i40e_tx_map - Build the Tx descriptor + * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns -EBUSY if a stop is needed, else 0 + **/ +static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available. */ + if (likely(I40E_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +/** + * i40evf_maybe_stop_tx - 1st level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns 0 if stop is not needed + **/ +static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __i40evf_maybe_stop_tx(tx_ring, size); +} + +/** + * i40evf_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on * @skb: send buffer * @first: first buffer info buffer to use @@ -1684,9 +1707,9 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, * @td_cmd: the command field in the descriptor * @td_offset: offset for checksum or crc **/ -static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, - struct i40e_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset) +static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); @@ -1792,9 +1815,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->queue_index), first->bytecount); - /* set the timestamp */ - first->time_stamp = jiffies; - /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -1811,8 +1831,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->next_to_use = i; + i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED); /* notify HW of packet */ - writel(i, tx_ring->tail); + if (!skb->xmit_more || + netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index))) + writel(i, tx_ring->tail); return; @@ -1834,44 +1858,7 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, } /** - * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns -EBUSY if a stop is needed, else 0 - **/ -static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -{ - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - /* Memory barrier before checking head and tail */ - smp_mb(); - - /* Check again in a case another CPU has just made room available. */ - if (likely(I40E_DESC_UNUSED(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! 
- use start_queue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); - ++tx_ring->tx_stats.restart_queue; - return 0; -} - -/** - * i40e_maybe_stop_tx - 1st level check for tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns 0 if stop is not needed - **/ -static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -{ - if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) - return 0; - return __i40e_maybe_stop_tx(tx_ring, size); -} - -/** - * i40e_xmit_descriptor_count - calculate number of tx descriptors needed + * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed * @skb: send buffer * @tx_ring: ring to send buffer on * @@ -1879,8 +1866,8 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) * there is not enough descriptors available in this ring since we need at least * one descriptor. **/ -static int i40e_xmit_descriptor_count(struct sk_buff *skb, - struct i40e_ring *tx_ring) +static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb, + struct i40e_ring *tx_ring) { unsigned int f; int count = 0; @@ -1895,7 +1882,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb, count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); count += TXD_USE_COUNT(skb_headlen(skb)); - if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { + if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) { tx_ring->tx_stats.tx_busy++; return 0; } @@ -1921,11 +1908,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, u32 td_cmd = 0; u8 hdr_len = 0; int tso; - if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) + if (0 == i40evf_xmit_descriptor_count(skb, tx_ring)) return NETDEV_TX_BUSY; /* prepare the xmit flags */ - if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) + if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) goto out_drop; /* obtain protocol of skb */ @@ -1940,7 +1927,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, + tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss, &cd_tunneling); if (tso < 0) @@ -1948,7 +1935,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (tso) tx_flags |= I40E_TX_FLAGS_TSO; - if (i40e_chk_linearize(skb, tx_flags, hdr_len)) + if (i40e_chk_linearize(skb, tx_flags)) if (skb_linearize(skb)) goto out_drop; @@ -1961,17 +1948,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_PARTIAL) { tx_flags |= I40E_TX_FLAGS_CSUM; - i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, + i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, tx_ring, &cd_tunneling); } i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); - i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, - td_cmd, td_offset); - - i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); + i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, + td_cmd, td_offset); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 1e49bb1fbac1..e7a34f899f2c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -138,6 +138,7 @@ enum i40e_dyn_idx_t { #define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) #define I40E_TX_FLAGS_FSO (u32)(1 << 7) #define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) 
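The reworked i40e_chk_linearize() above, duplicated in the PF and VF drivers, no longer charges the header length against the first segment. It walks the fragment list, resetting the running byte and buffer counts each time a full gso_size of payload has been consumed, and asks for skb_linearize() only when a single segment would span all I40E_MAX_BUFFER_TXD (8) scatter-gather buffers. A minimal standalone model of that loop; MAX_BUFFER_TXD mirrors I40E_MAX_BUFFER_TXD, and the fragment sizes and gso_size are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_TXD 8	/* HW limit: 8 scatter-gather buffers per packet */

/* Hypothetical standalone model of the reworked check */
static bool needs_linearize(const unsigned int *frag, int nfrags,
			    unsigned int gso_size)
{
	unsigned int size = 0;
	int j = 0;

	for (int i = 0; i < nfrags; i++) {
		size += frag[i];
		j++;
		/* a full segment has been consumed: restart the count */
		if (size >= gso_size && j < MAX_BUFFER_TXD) {
			size %= gso_size;
			j = size ? 1 : 0;
		}
		if (j == MAX_BUFFER_TXD)
			return true;
	}
	return false;
}

int main(void)
{
	unsigned int tiny[12] = {100, 100, 100, 100, 100, 100,
				 100, 100, 100, 100, 100, 100};
	unsigned int page[4] = {4096, 4096, 4096, 4096};

	/* one 1448-byte segment would span more than 8 tiny frags: linearize */
	printf("%d\n", needs_linearize(tiny, 12, 1448));	/* prints 1 */
	/* large frags never let a segment reach 8 buffers: leave as-is */
	printf("%d\n", needs_linearize(page, 4, 1448));		/* prints 0 */
	return 0;
}

The I40E_TX_FLAGS_VXLAN_TUNNEL bit defined next is what ties the tunnel-aware paths together: i40e_tx_enable_csum() sets it when it finds an outer UDP tunnel header, and i40e_atr() then bases the flow filter on the inner headers and charges it to the tunnel ATR statistics index.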
+#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -145,7 +146,6 @@ enum i40e_dyn_idx_t { struct i40e_tx_buffer { struct i40e_tx_desc *next_to_watch; - unsigned long time_stamp; union { struct sk_buff *skb; void *raw_buf; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index ec9d83a93379..c463ec41579c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1108,6 +1108,7 @@ struct i40e_hw_port_stats { /* flow director stats */ u64 fd_atr_match; u64 fd_sb_match; + u64 fd_atr_tunnel_match; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index e3b9b63ad010..c3a9392cbc19 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -538,8 +538,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, igb->perout[i].start.tv_nsec = rq->perout.start.nsec; igb->perout[i].period.tv_sec = ts.tv_sec; igb->perout[i].period.tv_nsec = ts.tv_nsec; - wr32(trgttiml, rq->perout.start.sec); - wr32(trgttimh, rq->perout.start.nsec); + wr32(trgttimh, rq->perout.start.sec); + wr32(trgttiml, rq->perout.start.nsec); tsauxc |= tsauxc_mask; tsim |= tsim_mask; } else { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 9f6fb19062a0..9a1d0f142b09 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2594,18 +2594,35 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_fdir_filter *input; union ixgbe_atr_input mask; + u8 queue; int err; if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) return -EOPNOTSUPP; - /* - * Don't allow programming if the action is a queue greater than - * the number of online Rx queues. + /* ring_cookie is a masked into a set of queues and ixgbe pools or + * we use the drop index. */ - if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && - (fsp->ring_cookie >= adapter->num_rx_queues)) - return -EINVAL; + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + queue = IXGBE_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + + if (!vf && (ring >= adapter->num_rx_queues)) + return -EINVAL; + else if (vf && + ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) + return -EINVAL; + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * + adapter->num_rx_queues_per_pool) + ring; + } /* Don't allow indexes to exist outside of available space */ if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { @@ -2683,10 +2700,7 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, /* program filters to filter memory */ err = ixgbe_fdir_write_perfect_filter_82599(hw, - &input->filter, input->sw_idx, - (input->action == IXGBE_FDIR_DROP_QUEUE) ? 
- IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[input->action]->reg_idx); + &input->filter, input->sw_idx, queue); if (err) goto err_out_w_lock; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 1c75829eb166..d52639bc491f 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -3125,9 +3125,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) mib_counters_clear(mp); - init_timer(&mp->mib_counters_timer); - mp->mib_counters_timer.data = (unsigned long)mp; - mp->mib_counters_timer.function = mib_counters_timer_wrapper; + setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper, + (unsigned long)mp); mp->mib_counters_timer.expires = jiffies + 30 * HZ; spin_lock_init(&mp->mib_counters_lock); @@ -3136,9 +3135,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT); - init_timer(&mp->rx_oom); - mp->rx_oom.data = (unsigned long)mp; - mp->rx_oom.function = oom_timer_wrapper; + setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp); res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 77610452358b..68ae765873a9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -714,8 +714,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, msecs_to_jiffies(timeout))) { mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", op); - err = -EIO; - goto out_reset; + if (op == MLX4_CMD_NOP) { + err = -EBUSY; + goto out; + } else { + err = -EIO; + goto out_reset; + } } err = context->result; @@ -2912,7 +2917,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) port = mlx4_slaves_closest_port(dev, slave, port); s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; s_info->mac = mac; - mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", + mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n", vf, port, s_info->mac); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index e71f31387ac6..3348e646db70 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -292,7 +292,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, u64 mtt_addr; int err; - if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool) + if (vector >= dev->caps.num_comp_vectors) return -EINVAL; cq->vector = vector; @@ -319,7 +319,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, cq_context->flags |= cpu_to_be32(1 << 19); cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); - cq_context->comp_eqn = priv->eq_table.eq[vector].eqn; + cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn; cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; mtt_addr = mlx4_mtt_addr(dev, mtt); @@ -339,11 +339,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, init_completion(&cq->free); cq->comp = mlx4_add_cq_to_tasklet; cq->tasklet_ctx.priv = - &priv->eq_table.eq[cq->vector].tasklet_ctx; + &priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx; INIT_LIST_HEAD(&cq->tasklet_ctx.list); - cq->irq = priv->eq_table.eq[cq->vector].irq; + cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq; 
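Every per-vector lookup in mlx4_cq_alloc() above (the EQN, the tasklet context and the IRQ) now goes through MLX4_CQ_TO_EQ_VECTOR(). Completion vectors are numbered 0 to num_comp_vectors - 1 from the caller's point of view, while slot MLX4_EQ_ASYNC (0) of eq_table holds the asynchronous-event EQ, so the table index is the completion vector shifted by one; the macros themselves are added in the mlx4.h hunk at the end of this series, with a worked round-trip example there.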
return 0; err_radix: @@ -368,7 +368,10 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) if (err) mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); - synchronize_irq(priv->eq_table.eq[cq->vector].irq); + synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq); + if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq != + priv->eq_table.eq[MLX4_EQ_ASYNC].irq) + synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq); spin_lock_irq(&cq_table->lock); radix_tree_delete(&cq_table->tree, cq->cqn); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 22da4d0d0f05..63769df872a4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -66,6 +66,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, cq->ring = ring; cq->is_tx = mode; + cq->vector = mdev->dev->caps.num_comp_vectors; /* Allocate HW buffers on provided NUMA node. * dev->numa_node is used in mtt range allocation flow. @@ -101,12 +102,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, int err = 0; char name[25]; int timestamp_en = 0; - struct cpu_rmap *rmap = -#ifdef CONFIG_RFS_ACCEL - priv->dev->rx_cpu_rmap; -#else - NULL; -#endif + bool assigned_eq = false; cq->dev = mdev->pndev[priv->port]; cq->mcq.set_ci_db = cq->wqres.db.db; @@ -116,23 +112,19 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, memset(cq->buf, 0, cq->buf_size); if (cq->is_tx == RX) { - if (mdev->dev->caps.comp_pool) { - if (!cq->vector) { - sprintf(name, "%s-%d", priv->dev->name, - cq->ring); - /* Set IRQ for specific name (per ring) */ - if (mlx4_assign_eq(mdev->dev, name, rmap, - &cq->vector)) { - cq->vector = (cq->ring + 1 + priv->port) - % mdev->dev->caps.num_comp_vectors; - mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n", - name); - } - + if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port, + cq->vector)) { + cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask); + + err = mlx4_assign_eq(mdev->dev, priv->port, + &cq->vector); + if (err) { + mlx4_err(mdev, "Failed assigning an EQ to %s\n", + name); + goto free_eq; } - } else { - cq->vector = (cq->ring + 1 + priv->port) % - mdev->dev->caps.num_comp_vectors; + + assigned_eq = true; } cq->irq_desc = @@ -159,7 +151,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq, cq->vector, 0, timestamp_en); if (err) - return err; + goto free_eq; cq->mcq.comp = cq->is_tx ? 
mlx4_en_tx_irq : mlx4_en_rx_irq; cq->mcq.event = mlx4_en_cq_event; @@ -168,13 +160,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, NAPI_POLL_WEIGHT); } else { - struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; - - err = irq_set_affinity_hint(cq->mcq.irq, - ring->affinity_mask); - if (err) - mlx4_warn(mdev, "Failed setting affinity hint\n"); - netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); napi_hash_add(&cq->napi); } @@ -182,6 +167,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, napi_enable(&cq->napi); return 0; + +free_eq: + if (assigned_eq) + mlx4_release_eq(mdev->dev, cq->vector); + cq->vector = mdev->dev->caps.num_comp_vectors; + return err; } void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) @@ -191,9 +182,9 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) mlx4_en_unmap_buffer(&cq->wqres.buf); mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); - if (priv->mdev->dev->caps.comp_pool && cq->vector) { + if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) && + cq->is_tx == RX) mlx4_release_eq(priv->mdev->dev, cq->vector); - } cq->vector = 0; cq->buf_size = 0; cq->buf = NULL; @@ -207,7 +198,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) if (!cq->is_tx) { napi_hash_del(&cq->napi); synchronize_rcu(); - irq_set_affinity_hint(cq->mcq.irq, NULL); } netif_napi_del(&cq->napi); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 32f5ec737472..98efb5842fca 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1501,17 +1501,13 @@ static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) { struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; int numa_node = priv->mdev->dev->numa_node; - int ret = 0; if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) return -ENOMEM; - ret = cpumask_set_cpu_local_first(ring_idx, numa_node, - ring->affinity_mask); - if (ret) - free_cpumask_var(ring->affinity_mask); - - return ret; + cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node), + ring->affinity_mask); + return 0; } static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) @@ -1958,7 +1954,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv) int i; #ifdef CONFIG_RFS_ACCEL - free_irq_cpu_rmap(priv->dev->rx_cpu_rmap); priv->dev->rx_cpu_rmap = NULL; #endif @@ -2016,11 +2011,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) } #ifdef CONFIG_RFS_ACCEL - if (priv->mdev->dev->caps.comp_pool) { - priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); - if (!priv->dev->rx_cpu_rmap) - goto err; - } + priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port); #endif return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 2a77a6b19121..35f726c17e48 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -337,15 +337,10 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) struct mlx4_dev *dev = mdev->dev; mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { - if (!dev->caps.comp_pool) - num_of_eqs = max_t(int, MIN_RX_RINGS, - min_t(int, - dev->caps.num_comp_vectors, - DEF_RX_RINGS)); - else - num_of_eqs = min_t(int, MAX_MSIX_P_PORT, - 
dev->caps.comp_pool/ - dev->caps.num_ports) - 1; + num_of_eqs = max_t(int, MIN_RX_RINGS, + min_t(int, + mlx4_get_eqs_per_port(mdev->dev, i), + DEF_RX_RINGS)); num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS : min_t(int, num_of_eqs, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f7bf312fb443..7bed3a88579f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -144,9 +144,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->queue_index = queue_index; if (queue_index < priv->num_tx_rings_p_up) - cpumask_set_cpu_local_first(queue_index, - priv->mdev->dev->numa_node, - &ring->affinity_mask); + cpumask_set_cpu(cpumask_local_spread(queue_index, + priv->mdev->dev->numa_node), + &ring->affinity_mask); *pring = ring; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 80bcd648c5e0..aae13adfb492 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -221,6 +221,22 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave, slave_event(dev, slave, eqe); } +#if defined(CONFIG_SMP) +static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec) +{ + int hint_err; + struct mlx4_dev *dev = &priv->dev; + struct mlx4_eq *eq = &priv->eq_table.eq[vec]; + + if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask)) + return; + + hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask); + if (hint_err) + mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err); +} +#endif + int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port) { struct mlx4_eqe eqe; @@ -895,8 +911,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev) * we need to map, take the difference of highest index and * the lowest index we'll use and add 1. 
*/ - return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs + - dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1; + return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 - + dev->caps.reserved_eqs / 4 + 1; } static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) @@ -1085,32 +1101,21 @@ static void mlx4_free_eq(struct mlx4_dev *dev, static void mlx4_free_irqs(struct mlx4_dev *dev) { struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; - struct mlx4_priv *priv = mlx4_priv(dev); - int i, vec; + int i; if (eq_table->have_irq) free_irq(dev->persist->pdev->irq, dev); for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) if (eq_table->eq[i].have_irq) { + free_cpumask_var(eq_table->eq[i].affinity_mask); +#if defined(CONFIG_SMP) + irq_set_affinity_hint(eq_table->eq[i].irq, NULL); +#endif free_irq(eq_table->eq[i].irq, eq_table->eq + i); eq_table->eq[i].have_irq = 0; } - for (i = 0; i < dev->caps.comp_pool; i++) { - /* - * Freeing the assigned irq's - * all bits should be 0, but we need to validate - */ - if (priv->msix_ctl.pool_bm & 1ULL << i) { - /* NO need protecting*/ - vec = dev->caps.num_comp_vectors + 1 + i; - free_irq(priv->eq_table.eq[vec].irq, - &priv->eq_table.eq[vec]); - } - } - - kfree(eq_table->irq_names); } @@ -1191,76 +1196,73 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) } priv->eq_table.irq_names = - kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 + - dev->caps.comp_pool), + kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1), GFP_KERNEL); if (!priv->eq_table.irq_names) { err = -ENOMEM; - goto err_out_bitmap; + goto err_out_clr_int; } - for (i = 0; i < dev->caps.num_comp_vectors; ++i) { - err = mlx4_create_eq(dev, dev->caps.num_cqs - - dev->caps.reserved_cqs + - MLX4_NUM_SPARE_EQE, - (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, - &priv->eq_table.eq[i]); - if (err) { - --i; - goto err_out_unmap; - } - } - - err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, - (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0, - &priv->eq_table.eq[dev->caps.num_comp_vectors]); - if (err) - goto err_out_comp; - - /*if additional completion vectors poolsize is 0 this loop will not run*/ - for (i = dev->caps.num_comp_vectors + 1; - i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) { + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { + if (i == MLX4_EQ_ASYNC) { + err = mlx4_create_eq(dev, + MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, + 0, &priv->eq_table.eq[MLX4_EQ_ASYNC]); + } else { + struct mlx4_eq *eq = &priv->eq_table.eq[i]; +#ifdef CONFIG_RFS_ACCEL + int port = find_first_bit(eq->actv_ports.ports, + dev->caps.num_ports) + 1; + + if (port <= dev->caps.num_ports) { + struct mlx4_port_info *info = + &mlx4_priv(dev)->port[port]; + + if (!info->rmap) { + info->rmap = alloc_irq_cpu_rmap( + mlx4_get_eqs_per_port(dev, port)); + if (!info->rmap) { + mlx4_warn(dev, "Failed to allocate cpu rmap\n"); + err = -ENOMEM; + goto err_out_unmap; + } + } - err = mlx4_create_eq(dev, dev->caps.num_cqs - - dev->caps.reserved_cqs + - MLX4_NUM_SPARE_EQE, - (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, - &priv->eq_table.eq[i]); - if (err) { - --i; - goto err_out_unmap; + err = irq_cpu_rmap_add( + info->rmap, eq->irq); + if (err) + mlx4_warn(dev, "Failed adding irq rmap\n"); + } +#endif + err = mlx4_create_eq(dev, dev->caps.num_cqs - + dev->caps.reserved_cqs + + MLX4_NUM_SPARE_EQE, + (dev->flags & MLX4_FLAG_MSI_X) ? 
+ i + 1 - !!(i > MLX4_EQ_ASYNC) : 0, + eq); } + if (err) + goto err_out_unmap; } - if (dev->flags & MLX4_FLAG_MSI_X) { const char *eq_name; - for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { - if (i < dev->caps.num_comp_vectors) { - snprintf(priv->eq_table.irq_names + - i * MLX4_IRQNAME_SIZE, - MLX4_IRQNAME_SIZE, - "mlx4-comp-%d@pci:%s", i, - pci_name(dev->persist->pdev)); - } else { - snprintf(priv->eq_table.irq_names + - i * MLX4_IRQNAME_SIZE, - MLX4_IRQNAME_SIZE, - "mlx4-async@pci:%s", - pci_name(dev->persist->pdev)); - } + snprintf(priv->eq_table.irq_names + + MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE, + MLX4_IRQNAME_SIZE, + "mlx4-async@pci:%s", + pci_name(dev->persist->pdev)); + eq_name = priv->eq_table.irq_names + + MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE; - eq_name = priv->eq_table.irq_names + - i * MLX4_IRQNAME_SIZE; - err = request_irq(priv->eq_table.eq[i].irq, - mlx4_msi_x_interrupt, 0, eq_name, - priv->eq_table.eq + i); - if (err) - goto err_out_async; + err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq, + mlx4_msi_x_interrupt, 0, eq_name, + priv->eq_table.eq + MLX4_EQ_ASYNC); + if (err) + goto err_out_unmap; - priv->eq_table.eq[i].have_irq = 1; - } + priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1; } else { snprintf(priv->eq_table.irq_names, MLX4_IRQNAME_SIZE, @@ -1269,36 +1271,38 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) err = request_irq(dev->persist->pdev->irq, mlx4_interrupt, IRQF_SHARED, priv->eq_table.irq_names, dev); if (err) - goto err_out_async; + goto err_out_unmap; priv->eq_table.have_irq = 1; } err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, - priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); if (err) mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", - priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err); + priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err); - for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) - eq_set_ci(&priv->eq_table.eq[i], 1); + /* arm ASYNC eq */ + eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1); return 0; -err_out_async: - mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]); - -err_out_comp: - i = dev->caps.num_comp_vectors - 1; - err_out_unmap: - while (i >= 0) { - mlx4_free_eq(dev, &priv->eq_table.eq[i]); - --i; + while (i >= 0) + mlx4_free_eq(dev, &priv->eq_table.eq[i--]); +#ifdef CONFIG_RFS_ACCEL + for (i = 1; i <= dev->caps.num_ports; i++) { + if (mlx4_priv(dev)->port[i].rmap) { + free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap); + mlx4_priv(dev)->port[i].rmap = NULL; + } } +#endif + mlx4_free_irqs(dev); + +err_out_clr_int: if (!mlx4_is_slave(dev)) mlx4_unmap_clr_int(dev); - mlx4_free_irqs(dev); err_out_bitmap: mlx4_unmap_uar(dev); @@ -1316,11 +1320,19 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) int i; mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1, - priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); +#ifdef CONFIG_RFS_ACCEL + for (i = 1; i <= dev->caps.num_ports; i++) { + if (mlx4_priv(dev)->port[i].rmap) { + free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap); + mlx4_priv(dev)->port[i].rmap = NULL; + } + } +#endif mlx4_free_irqs(dev); - for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) mlx4_free_eq(dev, &priv->eq_table.eq[i]); if (!mlx4_is_slave(dev)) @@ -1371,87 +1383,169 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) /* Return to default */ mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, - priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + 
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); return err; } EXPORT_SYMBOL(mlx4_test_interrupts); -int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, - int *vector) +bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + vector = MLX4_CQ_TO_EQ_VECTOR(vector); + if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) || + (vector == MLX4_EQ_ASYNC)) + return false; + + return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports); +} +EXPORT_SYMBOL(mlx4_is_eq_vector_valid); + +u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + unsigned int i; + unsigned int sum = 0; + + for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) + sum += !!test_bit(port - 1, + priv->eq_table.eq[i].actv_ports.ports); + + return sum; +} +EXPORT_SYMBOL(mlx4_get_eqs_per_port); + +int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + vector = MLX4_CQ_TO_EQ_VECTOR(vector); + if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1)) + return -EINVAL; + + return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports, + dev->caps.num_ports) > 1); +} +EXPORT_SYMBOL(mlx4_is_eq_shared); + +struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port) { + return mlx4_priv(dev)->port[port].rmap; +} +EXPORT_SYMBOL(mlx4_get_cpu_rmap); +int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector) +{ struct mlx4_priv *priv = mlx4_priv(dev); - int vec = 0, err = 0, i; + int err = 0, i = 0; + u32 min_ref_count_val = (u32)-1; + int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector); + int *prequested_vector = NULL; + mutex_lock(&priv->msix_ctl.pool_lock); - for (i = 0; !vec && i < dev->caps.comp_pool; i++) { - if (~priv->msix_ctl.pool_bm & 1ULL << i) { - priv->msix_ctl.pool_bm |= 1ULL << i; - vec = dev->caps.num_comp_vectors + 1 + i; - snprintf(priv->eq_table.irq_names + - vec * MLX4_IRQNAME_SIZE, - MLX4_IRQNAME_SIZE, "%s", name); -#ifdef CONFIG_RFS_ACCEL - if (rmap) { - err = irq_cpu_rmap_add(rmap, - priv->eq_table.eq[vec].irq); - if (err) - mlx4_warn(dev, "Failed adding irq rmap\n"); + if (requested_vector < (dev->caps.num_comp_vectors + 1) && + (requested_vector >= 0) && + (requested_vector != MLX4_EQ_ASYNC)) { + if (test_bit(port - 1, + priv->eq_table.eq[requested_vector].actv_ports.ports)) { + prequested_vector = &requested_vector; + } else { + struct mlx4_eq *eq; + + for (i = 1; i < port; + requested_vector += mlx4_get_eqs_per_port(dev, i++)) + ; + + eq = &priv->eq_table.eq[requested_vector]; + if (requested_vector < dev->caps.num_comp_vectors + 1 && + test_bit(port - 1, eq->actv_ports.ports)) { + prequested_vector = &requested_vector; } -#endif - err = request_irq(priv->eq_table.eq[vec].irq, - mlx4_msi_x_interrupt, 0, - &priv->eq_table.irq_names[vec<<5], - priv->eq_table.eq + vec); - if (err) { - /*zero out bit by fliping it*/ - priv->msix_ctl.pool_bm ^= 1 << i; - vec = 0; - continue; - /*we dont want to break here*/ + } + } + + if (!prequested_vector) { + requested_vector = -1; + for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1; + i++) { + struct mlx4_eq *eq = &priv->eq_table.eq[i]; + + if (min_ref_count_val > eq->ref_count && + test_bit(port - 1, eq->actv_ports.ports)) { + min_ref_count_val = eq->ref_count; + requested_vector = i; } + } + + if (requested_vector < 0) { + err = -ENOSPC; + goto err_unlock; + } + + prequested_vector = &requested_vector; + } + + if (!test_bit(*prequested_vector, 
priv->msix_ctl.pool_bm) && + dev->flags & MLX4_FLAG_MSI_X) { + set_bit(*prequested_vector, priv->msix_ctl.pool_bm); + snprintf(priv->eq_table.irq_names + + *prequested_vector * MLX4_IRQNAME_SIZE, + MLX4_IRQNAME_SIZE, "mlx4-%d@%s", + *prequested_vector, dev_name(&dev->persist->pdev->dev)); + + err = request_irq(priv->eq_table.eq[*prequested_vector].irq, + mlx4_msi_x_interrupt, 0, + &priv->eq_table.irq_names[*prequested_vector << 5], + priv->eq_table.eq + *prequested_vector); - eq_set_ci(&priv->eq_table.eq[vec], 1); + if (err) { + clear_bit(*prequested_vector, priv->msix_ctl.pool_bm); + *prequested_vector = -1; + } else { +#if defined(CONFIG_SMP) + mlx4_set_eq_affinity_hint(priv, *prequested_vector); +#endif + eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1); + priv->eq_table.eq[*prequested_vector].have_irq = 1; } } + + if (!err && *prequested_vector >= 0) + priv->eq_table.eq[*prequested_vector].ref_count++; + +err_unlock: mutex_unlock(&priv->msix_ctl.pool_lock); - if (vec) { - *vector = vec; - } else { + if (!err && *prequested_vector >= 0) + *vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector); + else *vector = 0; - err = (i == dev->caps.comp_pool) ? -ENOSPC : err; - } + return err; } EXPORT_SYMBOL(mlx4_assign_eq); -int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec) +int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec) { struct mlx4_priv *priv = mlx4_priv(dev); - return priv->eq_table.eq[vec].irq; + return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq; } EXPORT_SYMBOL(mlx4_eq_get_irq); void mlx4_release_eq(struct mlx4_dev *dev, int vec) { struct mlx4_priv *priv = mlx4_priv(dev); - /*bm index*/ - int i = vec - dev->caps.num_comp_vectors - 1; - - if (likely(i >= 0)) { - /*sanity check , making sure were not trying to free irq's - Belonging to a legacy EQ*/ - mutex_lock(&priv->msix_ctl.pool_lock); - if (priv->msix_ctl.pool_bm & 1ULL << i) { - free_irq(priv->eq_table.eq[vec].irq, - &priv->eq_table.eq[vec]); - priv->msix_ctl.pool_bm &= ~(1ULL << i); - } - mutex_unlock(&priv->msix_ctl.pool_lock); - } + int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec); + mutex_lock(&priv->msix_ctl.pool_lock); + priv->eq_table.eq[eq_vec].ref_count--; + + /* once we have allocated an EQ, we don't release it because it might be bound * to cpu_rmap.
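+ * The ref_count dropped above is the real gate: mlx4_assign_eq() picks the least-referenced EQ that serves the port and takes a reference, and the IRQ itself stays requested until mlx4_free_irqs() tears the table down.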
+	 */
+	mutex_unlock(&priv->msix_ctl.pool_lock);
 }
 EXPORT_SYMBOL(mlx4_release_eq);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 70d33f6e2a41..7d57777e65c5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2364,11 +2364,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 	if (err) {
 		if (dev->flags & MLX4_FLAG_MSI_X) {
 			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
-				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+				  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
 			mlx4_warn(dev, "Trying again without MSI-X\n");
 		} else {
 			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
-				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+				 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
 			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
 		}
@@ -2481,14 +2481,45 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 	return err;
 }
+static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
+{
+	int requested_cpu = 0;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_eq *eq;
+	int off = 0;
+	int i;
+
+	if (eqn > dev->caps.num_comp_vectors)
+		return -EINVAL;
+
+	for (i = 1; i < port; i++)
+		off += mlx4_get_eqs_per_port(dev, i);
+
+	requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
+
+	/* Meaning EQs are shared, and this call comes from the second port */
+	if (requested_cpu < 0)
+		return 0;
+
+	eq = &priv->eq_table.eq[eqn];
+
+	if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpumask_set_cpu(requested_cpu, eq->affinity_mask);
+
+	return 0;
+}
+
 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct msix_entry *entries;
 	int i;
+	int port = 0;
 	if (msi_x) {
-		int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;
+		int nreq = dev->caps.num_ports * num_online_cpus() + 1;
 		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
 			     nreq);
@@ -2503,20 +2534,55 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 		nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
 					     nreq);
-		if (nreq < 0) {
+		if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
 			kfree(entries);
 			goto no_msi;
-		} else if (nreq < MSIX_LEGACY_SZ +
-			   dev->caps.num_ports * MIN_MSIX_P_PORT) {
-			/*Working in legacy mode , all EQ's shared*/
-			dev->caps.comp_pool = 0;
-			dev->caps.num_comp_vectors = nreq - 1;
-		} else {
-			dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
-			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
 		}
-		for (i = 0; i < nreq; ++i)
-			priv->eq_table.eq[i].irq = entries[i].vector;
+		/* 1 is reserved for events (asynchronous EQ) */
+		dev->caps.num_comp_vectors = nreq - 1;
+
+		priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
+		bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
+			    dev->caps.num_ports);
+
+		for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
+			if (i == MLX4_EQ_ASYNC)
+				continue;
+
+			priv->eq_table.eq[i].irq =
+				entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
+
+			if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
+				bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+					    dev->caps.num_ports);
+				/* We don't set affinity hint when there
+				 * aren't enough EQs
+				 */
+			} else {
+				set_bit(port,
+					priv->eq_table.eq[i].actv_ports.ports);
+				if (mlx4_init_affinity_hint(dev, port + 1, i))
+					mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
+						  i);
+			}
+			/* We divide the Eqs evenly between the two ports.
+ * (dev->caps.num_comp_vectors / dev->caps.num_ports) + * refers to the number of Eqs per port + * (i.e eqs_per_port). Theoretically, we would like to + * write something like (i + 1) % eqs_per_port == 0. + * However, since there's an asynchronous Eq, we have + * to skip over it by comparing this condition to + * !!((i + 1) > MLX4_EQ_ASYNC). + */ + if ((dev->caps.num_comp_vectors > dev->caps.num_ports) && + ((i + 1) % + (dev->caps.num_comp_vectors / dev->caps.num_ports)) == + !!((i + 1) > MLX4_EQ_ASYNC)) + /* If dev->caps.num_comp_vectors < dev->caps.num_ports, + * everything is shared anyway. + */ + port++; + } dev->flags |= MLX4_FLAG_MSI_X; @@ -2526,10 +2592,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) no_msi: dev->caps.num_comp_vectors = 1; - dev->caps.comp_pool = 0; - for (i = 0; i < 2; ++i) + BUG_ON(MLX4_EQ_ASYNC >= 2); + for (i = 0; i < 2; ++i) { priv->eq_table.eq[i].irq = dev->persist->pdev->irq; + if (i != MLX4_EQ_ASYNC) { + bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, + dev->caps.num_ports); + } + } } static int mlx4_init_port_info(struct mlx4_dev *dev, int port) @@ -2594,6 +2665,10 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info) device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); device_remove_file(&info->dev->persist->pdev->dev, &info->port_mtu_attr); +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(info->rmap); + info->rmap = NULL; +#endif } static int mlx4_init_steering(struct mlx4_dev *dev) @@ -2749,6 +2824,7 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, free_mem: dev->persist->num_vfs = 0; kfree(dev->dev_vfs); + dev->dev_vfs = NULL; return dev_flags & ~MLX4_FLAG_MASTER; } @@ -2900,6 +2976,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, existing_vfs, reset_flow); + mlx4_close_fw(dev); mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); dev->flags = dev_flags; if (!SRIOV_VALID_STATE(dev->flags)) { @@ -3024,7 +3101,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, if (err) goto err_master_mfunc; - priv->msix_ctl.pool_bm = 0; + bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX); mutex_init(&priv->msix_ctl.pool_lock); mlx4_enable_msi_x(dev); @@ -3046,7 +3123,6 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, !mlx4_is_mfunc(dev)) { dev->flags &= ~MLX4_FLAG_MSI_X; dev->caps.num_comp_vectors = 1; - dev->caps.comp_pool = 0; pci_disable_msix(pdev); err = mlx4_setup_hca(dev); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 502d3dd2c888..f424900d23a6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -287,6 +287,12 @@ struct mlx4_icm_table { #define MLX4_CQE_SIZE_MASK_STRIDE 0x3 #define MLX4_EQE_SIZE_MASK_STRIDE 0x30 +#define MLX4_EQ_ASYNC 0 +#define MLX4_EQ_TO_CQ_VECTOR(vector) ((vector) - \ + !!((int)(vector) >= MLX4_EQ_ASYNC)) +#define MLX4_CQ_TO_EQ_VECTOR(vector) ((vector) + \ + !!((int)(vector) >= MLX4_EQ_ASYNC)) + /* * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 
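The MLX4_EQ_TO_CQ_VECTOR()/MLX4_CQ_TO_EQ_VECTOR() pair just added to mlx4.h encodes the new vector numbering: slot 0 (MLX4_EQ_ASYNC) holds the asynchronous event EQ, and the completion ("CQ") vector numbers handed to consumers simply skip that slot. A standalone userspace sketch of the round trip (illustrative only, not part of the patch; the macros are copied from the hunk above):

#include <assert.h>
#include <stdio.h>

#define MLX4_EQ_ASYNC 0
#define MLX4_EQ_TO_CQ_VECTOR(vector) ((vector) - \
				      !!((int)(vector) >= MLX4_EQ_ASYNC))
#define MLX4_CQ_TO_EQ_VECTOR(vector) ((vector) + \
				      !!((int)(vector) >= MLX4_EQ_ASYNC))

int main(void)
{
	int eq;

	/* completion EQs occupy slots 1..n; CQ vectors are 0..n-1 */
	for (eq = 1; eq <= 4; eq++) {
		int cq = MLX4_EQ_TO_CQ_VECTOR(eq);

		printf("EQ slot %d <-> CQ vector %d\n", eq, cq);
		assert(MLX4_CQ_TO_EQ_VECTOR(cq) == eq);	/* round trip holds */
	}
	return 0;
}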
*/ @@ -391,6 +397,9 @@ struct mlx4_eq { struct mlx4_buf_list *page_list; struct mlx4_mtt mtt; struct mlx4_eq_tasklet tasklet_ctx; + struct mlx4_active_ports actv_ports; + u32 ref_count; + cpumask_var_t affinity_mask; }; struct mlx4_slave_eqe { @@ -808,6 +817,7 @@ struct mlx4_port_info { struct mlx4_vlan_table vlan_table; struct mlx4_roce_gid_table gid_table; int base_qpn; + struct cpu_rmap *rmap; }; struct mlx4_sense { @@ -818,7 +828,7 @@ struct mlx4_sense { }; struct mlx4_msix_ctl { - u64 pool_bm; + DECLARE_BITMAP(pool_bm, MAX_MSIX); struct mutex pool_lock; }; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index d021f079f181..edd8fd69ec9a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -338,7 +338,7 @@ struct mlx4_en_cq { struct napi_struct napi; int size; int buf_size; - unsigned vector; + int vector; enum cq_type is_tx; u16 moder_time; u16 moder_cnt; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 15ec08181658..ab48386bfefc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -3973,6 +3973,22 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header, return 0; } +static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl, + struct _rule_hw *eth_header) +{ + if (is_multicast_ether_addr(eth_header->eth.dst_mac) || + is_broadcast_ether_addr(eth_header->eth.dst_mac)) { + struct mlx4_net_trans_rule_hw_eth *eth = + (struct mlx4_net_trans_rule_hw_eth *)eth_header; + struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1); + bool last_rule = next_rule->size == 0 && next_rule->id == 0 && + next_rule->rsvd == 0; + + if (last_rule) + ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC); + } +} + /* * In case of missing eth header, append eth header with a MAC address * assigned to the VF. @@ -4125,6 +4141,12 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, rule_header = (struct _rule_hw *)(ctrl + 1); header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id)); + if (header_id == MLX4_NET_TRANS_RULE_ID_ETH) + handle_eth_header_mcast_prio(ctrl, rule_header); + + if (slave == dev->caps.function) + goto execute; + switch (header_id) { case MLX4_NET_TRANS_RULE_ID_ETH: if (validate_eth_header_mac(slave, rule_header, rlist)) { @@ -4151,6 +4173,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, goto err_put; } +execute: err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param, vhcr->in_modifier, 0, MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 8ff57e8e3e91..158c88c69ef9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -3,6 +3,18 @@ # config MLX5_CORE - tristate + tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver" depends on PCI default n + ---help--- + Core driver for low level functionality of the ConnectX-4 and + Connect-IB cards by Mellanox Technologies. + +config MLX5_CORE_EN + bool "Mellanox Technologies ConnectX-4 Ethernet support" + depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE + default n + ---help--- + Ethernet support in Mellanox Technologies ConnectX-4 NIC. 
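The mlx5_buf_alloc() rewrite in the alloc.c hunks below keeps only the physically contiguous path, so its page accounting reduces to one loop: start with a single page large enough for the whole buffer, then halve page_shift (doubling npages) until the DMA address is aligned to that page size, since the HCA is later given npages pages of 2^page_shift bytes each via mlx5_fill_page_array(). A self-contained sketch of that arithmetic (userspace C; PAGE_SHIFT, the buffer size and the DMA address are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4K pages */

/* userspace stand-in for the kernel's get_order() */
static int get_order(size_t size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	size_t size = 16 * 1024;	/* one 16K buffer */
	uint64_t map = 0x1f2e2000;	/* example DMA address, only 8K-aligned */
	int page_shift = get_order(size) + PAGE_SHIFT;	/* 14: one 16K page */
	int npages = 1;

	while (map & ((1ULL << page_shift) - 1)) {
		--page_shift;
		npages *= 2;
	}
	/* prints page_shift=13 npages=2: two 8K pages cover the buffer */
	printf("page_shift=%d npages=%d\n", page_shift, npages);
	return 0;
}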
+ Ethernet and Infiniband support in ConnectX-4 are currently mutually + exclusive. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 105780bb980b..26a68b8af2c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -2,4 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ - mad.o + mad.o transobj.o vport.o +mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \ + en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \ + en_txrx.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index ac0f7bf4be95..0715b497511f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -42,95 +42,36 @@ #include "mlx5_core.h" /* Handling for queue buffers -- we allocate a bunch of memory and - * register it in a memory region at HCA virtual address 0. If the - * requested size is > max_direct, we split the allocation into - * multiple pages, so we don't require too much contiguous memory. + * register it in a memory region at HCA virtual address 0. */ -int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, - struct mlx5_buf *buf) +int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf) { dma_addr_t t; buf->size = size; - if (size <= max_direct) { - buf->nbufs = 1; - buf->npages = 1; - buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; - buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, - size, &t, GFP_KERNEL); - if (!buf->direct.buf) - return -ENOMEM; - - buf->direct.map = t; - - while (t & ((1 << buf->page_shift) - 1)) { - --buf->page_shift; - buf->npages *= 2; - } - } else { - int i; - - buf->direct.buf = NULL; - buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; - buf->npages = buf->nbufs; - buf->page_shift = PAGE_SHIFT; - buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), - GFP_KERNEL); - if (!buf->page_list) - return -ENOMEM; - - for (i = 0; i < buf->nbufs; i++) { - buf->page_list[i].buf = - dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE, - &t, GFP_KERNEL); - if (!buf->page_list[i].buf) - goto err_free; - - buf->page_list[i].map = t; - } - - if (BITS_PER_LONG == 64) { - struct page **pages; - pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL); - if (!pages) - goto err_free; - for (i = 0; i < buf->nbufs; i++) - pages[i] = virt_to_page(buf->page_list[i].buf); - buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); - kfree(pages); - if (!buf->direct.buf) - goto err_free; - } - } + buf->npages = 1; + buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; + buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, + size, &t, GFP_KERNEL); + if (!buf->direct.buf) + return -ENOMEM; - return 0; + buf->direct.map = t; -err_free: - mlx5_buf_free(dev, buf); + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } - return -ENOMEM; + return 0; } EXPORT_SYMBOL_GPL(mlx5_buf_alloc); void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) { - int i; - - if (buf->nbufs == 1) - dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, - buf->direct.map); - else { - if (BITS_PER_LONG == 64) - vunmap(buf->direct.buf); - - for (i = 0; i < buf->nbufs; i++) - if (buf->page_list[i].buf) - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, - 
buf->page_list[i].buf, - buf->page_list[i].map); - kfree(buf->page_list); - } + dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, + buf->direct.map); } EXPORT_SYMBOL_GPL(mlx5_buf_free); @@ -230,10 +171,7 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas) int i; for (i = 0; i < buf->npages; i++) { - if (buf->nbufs == 1) - addr = buf->direct.map + (i << buf->page_shift); - else - addr = buf->page_list[i].map; + addr = buf->direct.map + (i << buf->page_shift); pas[i] = cpu_to_be64(addr); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index e3273faf4568..75ff58dc1ff5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -75,25 +75,6 @@ enum { MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, }; -enum { - MLX5_CMD_STAT_OK = 0x0, - MLX5_CMD_STAT_INT_ERR = 0x1, - MLX5_CMD_STAT_BAD_OP_ERR = 0x2, - MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, - MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, - MLX5_CMD_STAT_BAD_RES_ERR = 0x5, - MLX5_CMD_STAT_RES_BUSY = 0x6, - MLX5_CMD_STAT_LIM_ERR = 0x8, - MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, - MLX5_CMD_STAT_IX_ERR = 0xa, - MLX5_CMD_STAT_NO_RES_ERR = 0xf, - MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, - MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, - MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, - MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, - MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, -}; - static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, struct mlx5_cmd_msg *out, @@ -390,8 +371,17 @@ const char *mlx5_command_str(int command) case MLX5_CMD_OP_ARM_RQ: return "ARM_RQ"; - case MLX5_CMD_OP_RESIZE_SRQ: - return "RESIZE_SRQ"; + case MLX5_CMD_OP_CREATE_XRC_SRQ: + return "CREATE_XRC_SRQ"; + + case MLX5_CMD_OP_DESTROY_XRC_SRQ: + return "DESTROY_XRC_SRQ"; + + case MLX5_CMD_OP_QUERY_XRC_SRQ: + return "QUERY_XRC_SRQ"; + + case MLX5_CMD_OP_ARM_XRC_SRQ: + return "ARM_XRC_SRQ"; case MLX5_CMD_OP_ALLOC_PD: return "ALLOC_PD"; @@ -408,8 +398,8 @@ const char *mlx5_command_str(int command) case MLX5_CMD_OP_ATTACH_TO_MCG: return "ATTACH_TO_MCG"; - case MLX5_CMD_OP_DETACH_FROM_MCG: - return "DETACH_FROM_MCG"; + case MLX5_CMD_OP_DETTACH_FROM_MCG: + return "DETTACH_FROM_MCG"; case MLX5_CMD_OP_ALLOC_XRCD: return "ALLOC_XRCD"; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index eb0cf81f5f45..04ab7e445eae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -219,6 +219,24 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, } EXPORT_SYMBOL(mlx5_core_modify_cq); +int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, + struct mlx5_core_cq *cq, + u16 cq_period, + u16 cq_max_count) +{ + struct mlx5_modify_cq_mbox_in in; + + memset(&in, 0, sizeof(in)); + + in.cqn = cpu_to_be32(cq->cqn); + in.ctx.cq_period = cpu_to_be16(cq_period); + in.ctx.cq_max_count = cpu_to_be16(cq_max_count); + in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD | + MLX5_CQ_MODIFY_COUNT); + + return mlx5_core_modify_cq(dev, cq, &in, sizeof(in)); +} + int mlx5_init_cq_table(struct mlx5_core_dev *dev) { struct mlx5_cq_table *table = &dev->priv.cq_table; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h new file mode 100644 index 000000000000..e9edb7210de1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -0,0 +1,520 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. 
All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include "wq.h" +#include "transobj.h" +#include "mlx5_core.h" + +#define MLX5E_MAX_NUM_TC 8 + +#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7 +#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa +#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd + +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7 +#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa +#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd + +#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (16 * 1024) +#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 +#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 +#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 +#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 +#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80 +#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7 +#define MLX5E_PARAMS_MIN_MTU 46 + +#define MLX5E_TX_CQ_POLL_BUDGET 128 +#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ + +static const char vport_strings[][ETH_GSTRING_LEN] = { + /* vport statistics */ + "rx_packets", + "rx_bytes", + "tx_packets", + "tx_bytes", + "rx_error_packets", + "rx_error_bytes", + "tx_error_packets", + "tx_error_bytes", + "rx_unicast_packets", + "rx_unicast_bytes", + "tx_unicast_packets", + "tx_unicast_bytes", + "rx_multicast_packets", + "rx_multicast_bytes", + "tx_multicast_packets", + "tx_multicast_bytes", + "rx_broadcast_packets", + "rx_broadcast_bytes", + "tx_broadcast_packets", + "tx_broadcast_bytes", + + /* SW counters */ + "tso_packets", + "tso_bytes", + "lro_packets", + "lro_bytes", + "rx_csum_good", + "rx_csum_none", + "tx_csum_offload", + "tx_queue_stopped", + "tx_queue_wake", + "tx_queue_dropped", + "rx_wqe_err", +}; + +struct mlx5e_vport_stats { + /* HW counters */ + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 rx_error_packets; + u64 rx_error_bytes; + u64 tx_error_packets; + u64 tx_error_bytes; + u64 rx_unicast_packets; + u64 rx_unicast_bytes; + u64 tx_unicast_packets; + u64 tx_unicast_bytes; + u64 rx_multicast_packets; + u64 rx_multicast_bytes; + u64 tx_multicast_packets; + u64 tx_multicast_bytes; + u64 rx_broadcast_packets; + u64 rx_broadcast_bytes; + u64 tx_broadcast_packets; + u64 
tx_broadcast_bytes; + + /* SW counters */ + u64 tso_packets; + u64 tso_bytes; + u64 lro_packets; + u64 lro_bytes; + u64 rx_csum_good; + u64 rx_csum_none; + u64 tx_csum_offload; + u64 tx_queue_stopped; + u64 tx_queue_wake; + u64 tx_queue_dropped; + u64 rx_wqe_err; + +#define NUM_VPORT_COUNTERS 31 +}; + +static const char rq_stats_strings[][ETH_GSTRING_LEN] = { + "packets", + "csum_none", + "lro_packets", + "lro_bytes", + "wqe_err" +}; + +struct mlx5e_rq_stats { + u64 packets; + u64 csum_none; + u64 lro_packets; + u64 lro_bytes; + u64 wqe_err; +#define NUM_RQ_STATS 5 +}; + +static const char sq_stats_strings[][ETH_GSTRING_LEN] = { + "packets", + "tso_packets", + "tso_bytes", + "csum_offload_none", + "stopped", + "wake", + "dropped", + "nop" +}; + +struct mlx5e_sq_stats { + u64 packets; + u64 tso_packets; + u64 tso_bytes; + u64 csum_offload_none; + u64 stopped; + u64 wake; + u64 dropped; + u64 nop; +#define NUM_SQ_STATS 8 +}; + +struct mlx5e_stats { + struct mlx5e_vport_stats vport; +}; + +struct mlx5e_params { + u8 log_sq_size; + u8 log_rq_size; + u16 num_channels; + u8 default_vlan_prio; + u8 num_tc; + u16 rx_cq_moderation_usec; + u16 rx_cq_moderation_pkts; + u16 tx_cq_moderation_usec; + u16 tx_cq_moderation_pkts; + u16 min_rx_wqes; + u16 rx_hash_log_tbl_sz; + bool lro_en; + u32 lro_wqe_sz; +}; + +enum { + MLX5E_RQ_STATE_POST_WQES_ENABLE, +}; + +enum cq_flags { + MLX5E_CQ_HAS_CQES = 1, +}; + +struct mlx5e_cq { + /* data path - accessed per cqe */ + struct mlx5_cqwq wq; + void *sqrq; + unsigned long flags; + + /* data path - accessed per napi poll */ + struct napi_struct *napi; + struct mlx5_core_cq mcq; + struct mlx5e_channel *channel; + + /* control */ + struct mlx5_wq_ctrl wq_ctrl; +} ____cacheline_aligned_in_smp; + +struct mlx5e_rq { + /* data path */ + struct mlx5_wq_ll wq; + u32 wqe_sz; + struct sk_buff **skb; + + struct device *pdev; + struct net_device *netdev; + struct mlx5e_rq_stats stats; + struct mlx5e_cq cq; + + unsigned long state; + int ix; + + /* control */ + struct mlx5_wq_ctrl wq_ctrl; + u32 rqn; + struct mlx5e_channel *channel; +} ____cacheline_aligned_in_smp; + +struct mlx5e_tx_skb_cb { + u32 num_bytes; + u8 num_wqebbs; + u8 num_dma; +}; + +#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb) + +struct mlx5e_sq_dma { + dma_addr_t addr; + u32 size; +}; + +enum { + MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, +}; + +struct mlx5e_sq { + /* data path */ + + /* dirtied @completion */ + u16 cc; + u32 dma_fifo_cc; + + /* dirtied @xmit */ + u16 pc ____cacheline_aligned_in_smp; + u32 dma_fifo_pc; + u32 bf_offset; + struct mlx5e_sq_stats stats; + + struct mlx5e_cq cq; + + /* pointers to per packet info: write@xmit, read@completion */ + struct sk_buff **skb; + struct mlx5e_sq_dma *dma_fifo; + + /* read only */ + struct mlx5_wq_cyc wq; + u32 dma_fifo_mask; + void __iomem *uar_map; + struct netdev_queue *txq; + u32 sqn; + u32 bf_buf_size; + struct device *pdev; + __be32 mkey_be; + unsigned long state; + + /* control path */ + struct mlx5_wq_ctrl wq_ctrl; + struct mlx5_uar uar; + struct mlx5e_channel *channel; + int tc; +} ____cacheline_aligned_in_smp; + +static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n) +{ + return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) || + (sq->cc == sq->pc)); +} + +enum channel_flags { + MLX5E_CHANNEL_NAPI_SCHED = 1, +}; + +struct mlx5e_channel { + /* data path */ + struct mlx5e_rq rq; + struct mlx5e_sq sq[MLX5E_MAX_NUM_TC]; + struct napi_struct napi; + struct device *pdev; + struct net_device *netdev; + __be32 mkey_be; + u8 num_tc; + 
unsigned long flags; + + /* control */ + struct mlx5e_priv *priv; + int ix; + int cpu; +}; + +enum mlx5e_traffic_types { + MLX5E_TT_IPV4_TCP = 0, + MLX5E_TT_IPV6_TCP = 1, + MLX5E_TT_IPV4_UDP = 2, + MLX5E_TT_IPV6_UDP = 3, + MLX5E_TT_IPV4 = 4, + MLX5E_TT_IPV6 = 5, + MLX5E_TT_ANY = 6, + MLX5E_NUM_TT = 7, +}; + +enum { + MLX5E_RQT_SPREADING = 0, + MLX5E_RQT_DEFAULT_RQ = 1, + MLX5E_NUM_RQT = 2, +}; + +struct mlx5e_eth_addr_info { + u8 addr[ETH_ALEN + 2]; + u32 tt_vec; + u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */ +}; + +#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE) + +struct mlx5e_eth_addr_db { + struct hlist_head netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE]; + struct hlist_head netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE]; + struct mlx5e_eth_addr_info broadcast; + struct mlx5e_eth_addr_info allmulti; + struct mlx5e_eth_addr_info promisc; + bool broadcast_enabled; + bool allmulti_enabled; + bool promisc_enabled; +}; + +enum { + MLX5E_STATE_ASYNC_EVENTS_ENABLE, + MLX5E_STATE_OPENED, +}; + +struct mlx5e_vlan_db { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u32 active_vlans_ft_ix[VLAN_N_VID]; + u32 untagged_rule_ft_ix; + u32 any_vlan_rule_ft_ix; + bool filter_disabled; +}; + +struct mlx5e_flow_table { + void *vlan; + void *main; +}; + +struct mlx5e_priv { + /* priv data path fields - start */ + int order_base_2_num_channels; + int queue_mapping_channel_mask; + int num_tc; + int default_vlan_prio; + /* priv data path fields - end */ + + unsigned long state; + struct mutex state_lock; /* Protects Interface state */ + struct mlx5_uar cq_uar; + u32 pdn; + struct mlx5_core_mr mr; + + struct mlx5e_channel **channel; + u32 tisn[MLX5E_MAX_NUM_TC]; + u32 rqtn; + u32 tirn[MLX5E_NUM_TT]; + + struct mlx5e_flow_table ft; + struct mlx5e_eth_addr_db eth_addr; + struct mlx5e_vlan_db vlan; + + struct mlx5e_params params; + spinlock_t async_events_spinlock; /* sync hw events */ + struct work_struct update_carrier_work; + struct work_struct set_rx_mode_work; + struct delayed_work update_stats_work; + + struct mlx5_core_dev *mdev; + struct net_device *netdev; + struct mlx5e_stats stats; +}; + +#define MLX5E_NET_IP_ALIGN 2 + +struct mlx5e_tx_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_eth_seg eth; +}; + +struct mlx5e_rx_wqe { + struct mlx5_wqe_srq_next_seg next; + struct mlx5_wqe_data_seg data; +}; + +enum mlx5e_link_mode { + MLX5E_1000BASE_CX_SGMII = 0, + MLX5E_1000BASE_KX = 1, + MLX5E_10GBASE_CX4 = 2, + MLX5E_10GBASE_KX4 = 3, + MLX5E_10GBASE_KR = 4, + MLX5E_20GBASE_KR2 = 5, + MLX5E_40GBASE_CR4 = 6, + MLX5E_40GBASE_KR4 = 7, + MLX5E_56GBASE_R4 = 8, + MLX5E_10GBASE_CR = 12, + MLX5E_10GBASE_SR = 13, + MLX5E_10GBASE_ER = 14, + MLX5E_40GBASE_SR4 = 15, + MLX5E_40GBASE_LR4 = 16, + MLX5E_100GBASE_CR4 = 20, + MLX5E_100GBASE_SR4 = 21, + MLX5E_100GBASE_KR4 = 22, + MLX5E_100GBASE_LR4 = 23, + MLX5E_100BASE_TX = 24, + MLX5E_100BASE_T = 25, + MLX5E_10GBASE_T = 26, + MLX5E_25GBASE_CR = 27, + MLX5E_25GBASE_KR = 28, + MLX5E_25GBASE_SR = 29, + MLX5E_50GBASE_CR2 = 30, + MLX5E_50GBASE_KR2 = 31, + MLX5E_LINK_MODES_NUMBER, +}; + +#define MLX5E_PROT_MASK(link_mode) (1 << link_mode) + +u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback); +netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev); + +void mlx5e_completion_event(struct mlx5_core_cq *mcq); +void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); +int 
mlx5e_napi_poll(struct napi_struct *napi, int budget); +bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq); +bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); +bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); +struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); + +void mlx5e_update_stats(struct mlx5e_priv *priv); + +int mlx5e_open_flow_table(struct mlx5e_priv *priv); +void mlx5e_close_flow_table(struct mlx5e_priv *priv); +void mlx5e_init_eth_addr(struct mlx5e_priv *priv); +void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv); +void mlx5e_set_rx_mode_work(struct work_struct *work); + +int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid); +int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid); +void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); +void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); +int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv); +void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv); + +int mlx5e_open_locked(struct net_device *netdev); +int mlx5e_close_locked(struct net_device *netdev); +int mlx5e_update_priv_params(struct mlx5e_priv *priv, + struct mlx5e_params *new_params); + +static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, + struct mlx5e_tx_wqe *wqe) +{ + /* ensure wqe is visible to device before updating doorbell record */ + dma_wmb(); + + *sq->wq.db = cpu_to_be32(sq->pc); + + /* ensure doorbell record is visible to device before ringing the + * doorbell + */ + wmb(); + + mlx5_write64((__be32 *)&wqe->ctrl, + sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset, + NULL); + + sq->bf_offset ^= sq->bf_buf_size; +} + +static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) +{ + struct mlx5_core_cq *mcq; + + mcq = &cq->mcq; + mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc); +} + +extern const struct ethtool_ops mlx5e_ethtool_ops; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c new file mode 100644 index 000000000000..388938482ff9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -0,0 +1,679 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
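mlx5e_sq_has_room_for() in the en.h hunk above works on free-running 16-bit counters: pc - cc is the number of WQEBBs in flight, so the two's-complement difference cc - pc masked with sz_m1 (ring size minus one, a power of two) yields the number of free slots, and the completely empty ring, which masks to zero, is caught by the cc == pc test. A self-contained sketch with invented counter values (not the driver's code, just the same arithmetic):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors the inline helper; sz_m1 = ring size - 1, a power of two */
static bool sq_has_room_for(uint16_t sz_m1, uint16_t cc, uint16_t pc,
			    uint16_t n)
{
	return ((sz_m1 & (uint16_t)(cc - pc)) >= n) || (cc == pc);
}

int main(void)
{
	/* 64-entry ring; pc wrapped past 0xffff while cc has not:
	 * in flight = (u16)(0x0010 - 0xfff0) = 32, so 32 slots are free
	 */
	printf("%d\n", sq_has_room_for(63, 0xfff0, 0x0010, 16));	/* 1 */
	printf("%d\n", sq_has_room_for(63, 0xfff0, 0x0010, 40));	/* 0 */
	return 0;
}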
+ */ + +#include "en.h" + +static void mlx5e_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")", + sizeof(drvinfo->version)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", + fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev)); + strlcpy(drvinfo->bus_info, pci_name(mdev->pdev), + sizeof(drvinfo->bus_info)); +} + +static const struct { + u32 supported; + u32 advertised; + u32 speed; +} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = { + [MLX5E_1000BASE_CX_SGMII] = { + .supported = SUPPORTED_1000baseKX_Full, + .advertised = ADVERTISED_1000baseKX_Full, + .speed = 1000, + }, + [MLX5E_1000BASE_KX] = { + .supported = SUPPORTED_1000baseKX_Full, + .advertised = ADVERTISED_1000baseKX_Full, + .speed = 1000, + }, + [MLX5E_10GBASE_CX4] = { + .supported = SUPPORTED_10000baseKX4_Full, + .advertised = ADVERTISED_10000baseKX4_Full, + .speed = 10000, + }, + [MLX5E_10GBASE_KX4] = { + .supported = SUPPORTED_10000baseKX4_Full, + .advertised = ADVERTISED_10000baseKX4_Full, + .speed = 10000, + }, + [MLX5E_10GBASE_KR] = { + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + [MLX5E_20GBASE_KR2] = { + .supported = SUPPORTED_20000baseKR2_Full, + .advertised = ADVERTISED_20000baseKR2_Full, + .speed = 20000, + }, + [MLX5E_40GBASE_CR4] = { + .supported = SUPPORTED_40000baseCR4_Full, + .advertised = ADVERTISED_40000baseCR4_Full, + .speed = 40000, + }, + [MLX5E_40GBASE_KR4] = { + .supported = SUPPORTED_40000baseKR4_Full, + .advertised = ADVERTISED_40000baseKR4_Full, + .speed = 40000, + }, + [MLX5E_56GBASE_R4] = { + .supported = SUPPORTED_56000baseKR4_Full, + .advertised = ADVERTISED_56000baseKR4_Full, + .speed = 56000, + }, + [MLX5E_10GBASE_CR] = { + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + [MLX5E_10GBASE_SR] = { + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + [MLX5E_10GBASE_ER] = { + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + [MLX5E_40GBASE_SR4] = { + .supported = SUPPORTED_40000baseSR4_Full, + .advertised = ADVERTISED_40000baseSR4_Full, + .speed = 40000, + }, + [MLX5E_40GBASE_LR4] = { + .supported = SUPPORTED_40000baseLR4_Full, + .advertised = ADVERTISED_40000baseLR4_Full, + .speed = 40000, + }, + [MLX5E_100GBASE_CR4] = { + .speed = 100000, + }, + [MLX5E_100GBASE_SR4] = { + .speed = 100000, + }, + [MLX5E_100GBASE_KR4] = { + .speed = 100000, + }, + [MLX5E_100GBASE_LR4] = { + .speed = 100000, + }, + [MLX5E_100BASE_TX] = { + .speed = 100, + }, + [MLX5E_100BASE_T] = { + .supported = SUPPORTED_100baseT_Full, + .advertised = ADVERTISED_100baseT_Full, + .speed = 100, + }, + [MLX5E_10GBASE_T] = { + .supported = SUPPORTED_10000baseT_Full, + .advertised = ADVERTISED_10000baseT_Full, + .speed = 1000, + }, + [MLX5E_25GBASE_CR] = { + .speed = 25000, + }, + [MLX5E_25GBASE_KR] = { + .speed = 25000, + }, + [MLX5E_25GBASE_SR] = { + .speed = 25000, + }, + [MLX5E_50GBASE_CR2] = { + .speed = 50000, + }, + [MLX5E_50GBASE_KR2] = { + .speed = 50000, + }, +}; + +static int mlx5e_get_sset_count(struct net_device *dev, int sset) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + switch (sset) { + case 
ETH_SS_STATS: + return NUM_VPORT_COUNTERS + + priv->params.num_channels * NUM_RQ_STATS + + priv->params.num_channels * priv->num_tc * + NUM_SQ_STATS; + /* fallthrough */ + default: + return -EOPNOTSUPP; + } +} + +static void mlx5e_get_strings(struct net_device *dev, + uint32_t stringset, uint8_t *data) +{ + int i, j, tc, idx = 0; + struct mlx5e_priv *priv = netdev_priv(dev); + + switch (stringset) { + case ETH_SS_PRIV_FLAGS: + break; + + case ETH_SS_TEST: + break; + + case ETH_SS_STATS: + /* VPORT counters */ + for (i = 0; i < NUM_VPORT_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + vport_strings[i]); + + /* per channel counters */ + for (i = 0; i < priv->params.num_channels; i++) + for (j = 0; j < NUM_RQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + "rx%d_%s", i, rq_stats_strings[j]); + + for (i = 0; i < priv->params.num_channels; i++) + for (tc = 0; tc < priv->num_tc; tc++) + for (j = 0; j < NUM_SQ_STATS; j++) + sprintf(data + + (idx++) * ETH_GSTRING_LEN, + "tx%d_%d_%s", i, tc, + sq_stats_strings[j]); + break; + } +} + +static void mlx5e_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int i, j, tc, idx = 0; + + if (!data) + return; + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_update_stats(priv); + mutex_unlock(&priv->state_lock); + + for (i = 0; i < NUM_VPORT_COUNTERS; i++) + data[idx++] = ((u64 *)&priv->stats.vport)[i]; + + /* per channel counters */ + for (i = 0; i < priv->params.num_channels; i++) + for (j = 0; j < NUM_RQ_STATS; j++) + data[idx++] = !test_bit(MLX5E_STATE_OPENED, + &priv->state) ? 0 : + ((u64 *)&priv->channel[i]->rq.stats)[j]; + + for (i = 0; i < priv->params.num_channels; i++) + for (tc = 0; tc < priv->num_tc; tc++) + for (j = 0; j < NUM_SQ_STATS; j++) + data[idx++] = !test_bit(MLX5E_STATE_OPENED, + &priv->state) ? 
0 : + ((u64 *)&priv->channel[i]->sq[tc].stats)[j]; +} + +static void mlx5e_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE; + param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE; + param->rx_pending = 1 << priv->params.log_rq_size; + param->tx_pending = 1 << priv->params.log_sq_size; +} + +static int mlx5e_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_params new_params; + u16 min_rx_wqes; + u8 log_rq_size; + u8 log_sq_size; + int err = 0; + + if (param->rx_jumbo_pending) { + netdev_info(dev, "%s: rx_jumbo_pending not supported\n", + __func__); + return -EINVAL; + } + if (param->rx_mini_pending) { + netdev_info(dev, "%s: rx_mini_pending not supported\n", + __func__); + return -EINVAL; + } + if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) { + netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n", + __func__, param->rx_pending, + 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE); + return -EINVAL; + } + if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) { + netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n", + __func__, param->rx_pending, + 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE); + return -EINVAL; + } + if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) { + netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n", + __func__, param->tx_pending, + 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE); + return -EINVAL; + } + if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) { + netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n", + __func__, param->tx_pending, + 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE); + return -EINVAL; + } + + log_rq_size = order_base_2(param->rx_pending); + log_sq_size = order_base_2(param->tx_pending); + min_rx_wqes = min_t(u16, param->rx_pending - 1, + MLX5E_PARAMS_DEFAULT_MIN_RX_WQES); + + if (log_rq_size == priv->params.log_rq_size && + log_sq_size == priv->params.log_sq_size && + min_rx_wqes == priv->params.min_rx_wqes) + return 0; + + mutex_lock(&priv->state_lock); + new_params = priv->params; + new_params.log_rq_size = log_rq_size; + new_params.log_sq_size = log_sq_size; + new_params.min_rx_wqes = min_rx_wqes; + err = mlx5e_update_priv_params(priv, &new_params); + mutex_unlock(&priv->state_lock); + + return err; +} + +static void mlx5e_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int ncv = priv->mdev->priv.eq_table.num_comp_vectors; + + ch->max_combined = ncv; + ch->combined_count = priv->params.num_channels; +} + +static int mlx5e_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int ncv = priv->mdev->priv.eq_table.num_comp_vectors; + unsigned int count = ch->combined_count; + struct mlx5e_params new_params; + int err = 0; + + if (!count) { + netdev_info(dev, "%s: combined_count=0 not supported\n", + __func__); + return -EINVAL; + } + if (ch->rx_count || ch->tx_count) { + netdev_info(dev, "%s: separate rx/tx count not supported\n", + __func__); + return -EINVAL; + } + if (count > ncv) { + netdev_info(dev, "%s: count (%d) > max (%d)\n", + __func__, count, ncv); + return -EINVAL; + } + + if (priv->params.num_channels == count) + return 0; + + mutex_lock(&priv->state_lock); + new_params = priv->params; + new_params.num_channels = count; + err = 
mlx5e_update_priv_params(priv, &new_params); + mutex_unlock(&priv->state_lock); + + return err; +} + +static int mlx5e_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec; + coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts; + coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec; + coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts; + + return 0; +} + +static int mlx5e_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_channel *c; + int tc; + int i; + + priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs; + priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames; + priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs; + priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames; + + for (i = 0; i < priv->params.num_channels; ++i) { + c = priv->channel[i]; + + for (tc = 0; tc < c->num_tc; tc++) { + mlx5_core_modify_cq_moderation(mdev, + &c->sq[tc].cq.mcq, + coal->tx_coalesce_usecs, + coal->tx_max_coalesced_frames); + } + + mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq, + coal->rx_coalesce_usecs, + coal->rx_max_coalesced_frames); + } + + return 0; +} + +static u32 ptys2ethtool_supported_link(u32 eth_proto_cap) +{ + int i; + u32 supported_modes = 0; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (eth_proto_cap & MLX5E_PROT_MASK(i)) + supported_modes |= ptys2ethtool_table[i].supported; + } + return supported_modes; +} + +static u32 ptys2ethtool_adver_link(u32 eth_proto_cap) +{ + int i; + u32 advertising_modes = 0; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (eth_proto_cap & MLX5E_PROT_MASK(i)) + advertising_modes |= ptys2ethtool_table[i].advertised; + } + return advertising_modes; +} + +static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) +{ + if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR) + | MLX5E_PROT_MASK(MLX5E_10GBASE_SR) + | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) + | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) + | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) + | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { + return SUPPORTED_FIBRE; + } + + if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4) + | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4) + | MLX5E_PROT_MASK(MLX5E_10GBASE_KR) + | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) + | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) { + return SUPPORTED_Backplane; + } + return 0; +} + +static void get_speed_duplex(struct net_device *netdev, + u32 eth_proto_oper, + struct ethtool_cmd *cmd) +{ + int i; + u32 speed = SPEED_UNKNOWN; + u8 duplex = DUPLEX_UNKNOWN; + + if (!netif_carrier_ok(netdev)) + goto out; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (eth_proto_oper & MLX5E_PROT_MASK(i)) { + speed = ptys2ethtool_table[i].speed; + duplex = DUPLEX_FULL; + break; + } + } +out: + ethtool_cmd_speed_set(cmd, speed); + cmd->duplex = duplex; +} + +static void get_supported(u32 eth_proto_cap, u32 *supported) +{ + *supported |= ptys2ethtool_supported_port(eth_proto_cap); + *supported |= ptys2ethtool_supported_link(eth_proto_cap); + *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; +} + +static void get_advertising(u32 eth_proto_cap, u8 tx_pause, + u8 rx_pause, u32 *advertising) +{ + *advertising |= ptys2ethtool_adver_link(eth_proto_cap); + *advertising |= tx_pause ? 
ADVERTISED_Pause : 0; + *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0; +} + +static u8 get_connector_port(u32 eth_proto) +{ + if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR) + | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) + | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) + | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { + return PORT_FIBRE; + } + + if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) + | MLX5E_PROT_MASK(MLX5E_10GBASE_CR) + | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) { + return PORT_DA; + } + + if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) + | MLX5E_PROT_MASK(MLX5E_10GBASE_KR) + | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4) + | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) { + return PORT_NONE; + } + + return PORT_OTHER; +} + +static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising) +{ + *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp); +} + +static int mlx5e_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + u32 eth_proto_cap; + u32 eth_proto_admin; + u32 eth_proto_lp; + u32 eth_proto_oper; + int err; + + err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); + + if (err) { + netdev_err(netdev, "%s: query port ptys failed: %d\n", + __func__, err); + goto err_query_ptys; + } + + eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); + eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); + eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + + cmd->supported = 0; + cmd->advertising = 0; + + get_supported(eth_proto_cap, &cmd->supported); + get_advertising(eth_proto_admin, 0, 0, &cmd->advertising); + get_speed_duplex(netdev, eth_proto_oper, cmd); + + eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; + + cmd->port = get_connector_port(eth_proto_oper); + get_lp_advertising(eth_proto_lp, &cmd->lp_advertising); + + cmd->transceiver = XCVR_INTERNAL; + +err_query_ptys: + return err; +} + +static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes) +{ + u32 i, ptys_modes = 0; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (ptys2ethtool_table[i].advertised & link_modes) + ptys_modes |= MLX5E_PROT_MASK(i); + } + + return ptys_modes; +} + +static u32 mlx5e_ethtool2ptys_speed_link(u32 speed) +{ + u32 i, speed_links = 0; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (ptys2ethtool_table[i].speed == speed) + speed_links |= MLX5E_PROT_MASK(i); + } + + return speed_links; +} + +static int mlx5e_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u32 link_modes; + u32 speed; + u32 eth_proto_cap, eth_proto_admin; + u8 port_status; + int err; + + speed = ethtool_cmd_speed(cmd); + + link_modes = cmd->autoneg == AUTONEG_ENABLE ? 
+		mlx5e_ethtool2ptys_adver_link(cmd->advertising) :
+		mlx5e_ethtool2ptys_speed_link(speed);
+
+	err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
+	if (err) {
+		netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
+			   __func__, err);
+		goto out;
+	}
+
+	link_modes = link_modes & eth_proto_cap;
+	if (!link_modes) {
+		netdev_err(netdev, "%s: requested link mode(s) not supported\n",
+			   __func__);
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
+	if (err) {
+		netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
+			   __func__, err);
+		goto out;
+	}
+
+	if (link_modes == eth_proto_admin)
+		goto out;
+
+	err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+	if (err) {
+		netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
+			   __func__, err);
+		goto out;
+	}
+
+	err = mlx5_query_port_status(mdev, &port_status);
+	if (err)
+		goto out;
+
+	if (port_status == MLX5_PORT_DOWN)
+		return 0;
+
+	err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
+	if (err)
+		goto out;
+	err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
+out:
+	return err;
+}
+
+const struct ethtool_ops mlx5e_ethtool_ops = {
+	.get_drvinfo = mlx5e_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_strings = mlx5e_get_strings,
+	.get_sset_count = mlx5e_get_sset_count,
+	.get_ethtool_stats = mlx5e_get_ethtool_stats,
+	.get_ringparam = mlx5e_get_ringparam,
+	.set_ringparam = mlx5e_set_ringparam,
+	.get_channels = mlx5e_get_channels,
+	.set_channels = mlx5e_set_channels,
+	.get_coalesce = mlx5e_get_coalesce,
+	.set_coalesce = mlx5e_set_coalesce,
+	.get_settings = mlx5e_get_settings,
+	.set_settings = mlx5e_set_settings,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
new file mode 100644
index 000000000000..120db80c47aa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -0,0 +1,860 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
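The en_flow_table.c code that follows keeps the device's unicast and multicast lists in MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE, i.e. 256) buckets, hashed by nothing more than the last byte of the MAC address; colliding entries are chained on the bucket's hlist and matched with ether_addr_equal_64bits(). A minimal userspace illustration of the bucket choice (the MAC value is made up):

#include <stdint.h>
#include <stdio.h>

#define ETH_ADDR_HASH_SIZE 256	/* 1 << BITS_PER_BYTE */

/* same idea as mlx5e_hash_eth_addr(): the low MAC byte is the bucket */
static int hash_eth_addr(const uint8_t *addr)
{
	return addr[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x5e };

	/* prints "bucket 94 of 256" */
	printf("bucket %d of %d\n", hash_eth_addr(mac), ETH_ADDR_HASH_SIZE);
	return 0;
}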
+ */ + +#include +#include +#include +#include +#include +#include "en.h" + +enum { + MLX5E_FULLMATCH = 0, + MLX5E_ALLMULTI = 1, + MLX5E_PROMISC = 2, +}; + +enum { + MLX5E_UC = 0, + MLX5E_MC_IPV4 = 1, + MLX5E_MC_IPV6 = 2, + MLX5E_MC_OTHER = 3, +}; + +enum { + MLX5E_ACTION_NONE = 0, + MLX5E_ACTION_ADD = 1, + MLX5E_ACTION_DEL = 2, +}; + +struct mlx5e_eth_addr_hash_node { + struct hlist_node hlist; + u8 action; + struct mlx5e_eth_addr_info ai; +}; + +static inline int mlx5e_hash_eth_addr(u8 *addr) +{ + return addr[5]; +} + +static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr) +{ + struct mlx5e_eth_addr_hash_node *hn; + int ix = mlx5e_hash_eth_addr(addr); + int found = 0; + + hlist_for_each_entry(hn, &hash[ix], hlist) + if (ether_addr_equal_64bits(hn->ai.addr, addr)) { + found = 1; + break; + } + + if (found) { + hn->action = MLX5E_ACTION_NONE; + return; + } + + hn = kzalloc(sizeof(*hn), GFP_ATOMIC); + if (!hn) + return; + + ether_addr_copy(hn->ai.addr, addr); + hn->action = MLX5E_ACTION_ADD; + + hlist_add_head(&hn->hlist, &hash[ix]); +} + +static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn) +{ + hlist_del(&hn->hlist); + kfree(hn); +} + +static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai) +{ + void *ft = priv->ft.main; + + if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV6)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV4)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]); + + if (ai->tt_vec & (1 << MLX5E_TT_ANY)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]); +} + +static int mlx5e_get_eth_addr_type(u8 *addr) +{ + if (is_unicast_ether_addr(addr)) + return MLX5E_UC; + + if ((addr[0] == 0x01) && + (addr[1] == 0x00) && + (addr[2] == 0x5e) && + !(addr[3] & 0x80)) + return MLX5E_MC_IPV4; + + if ((addr[0] == 0x33) && + (addr[1] == 0x33)) + return MLX5E_MC_IPV6; + + return MLX5E_MC_OTHER; +} + +static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) +{ + int eth_addr_type; + u32 ret; + + switch (type) { + case MLX5E_FULLMATCH: + eth_addr_type = mlx5e_get_eth_addr_type(ai->addr); + switch (eth_addr_type) { + case MLX5E_UC: + ret = + (1 << MLX5E_TT_IPV4_TCP) | + (1 << MLX5E_TT_IPV6_TCP) | + (1 << MLX5E_TT_IPV4_UDP) | + (1 << MLX5E_TT_IPV6_UDP) | + (1 << MLX5E_TT_IPV4) | + (1 << MLX5E_TT_IPV6) | + (1 << MLX5E_TT_ANY) | + 0; + break; + + case MLX5E_MC_IPV4: + ret = + (1 << MLX5E_TT_IPV4_UDP) | + (1 << MLX5E_TT_IPV4) | + 0; + break; + + case MLX5E_MC_IPV6: + ret = + (1 << MLX5E_TT_IPV6_UDP) | + (1 << MLX5E_TT_IPV6) | + 0; + break; + + case MLX5E_MC_OTHER: + ret = + (1 << MLX5E_TT_ANY) | + 0; + break; + } + + break; + + case MLX5E_ALLMULTI: + ret = + (1 << MLX5E_TT_IPV4_UDP) | + (1 << MLX5E_TT_IPV6_UDP) | + (1 << MLX5E_TT_IPV4) | + (1 << MLX5E_TT_IPV6) | + (1 << MLX5E_TT_ANY) | + 0; + break; + + default: /* MLX5E_PROMISC */ + ret = + (1 << MLX5E_TT_IPV4_TCP) | + (1 << MLX5E_TT_IPV6_TCP) | + (1 << MLX5E_TT_IPV4_UDP) | + (1 << MLX5E_TT_IPV6_UDP) | + (1 << MLX5E_TT_IPV4) | + (1 << MLX5E_TT_IPV6) | + (1 << 
MLX5E_TT_ANY) | + 0; + break; + } + + return ret; +} + +static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai, int type, + void *flow_context, void *match_criteria) +{ + u8 match_criteria_enable = 0; + void *match_value; + void *dest; + u8 *dmac; + u8 *match_criteria_dmac; + void *ft = priv->ft.main; + u32 *tirn = priv->tirn; + u32 tt_vec; + int err; + + match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value); + dmac = MLX5_ADDR_OF(fte_match_param, match_value, + outer_headers.dmac_47_16); + match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers.dmac_47_16); + dest = MLX5_ADDR_OF(flow_context, flow_context, destination); + + MLX5_SET(flow_context, flow_context, action, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); + MLX5_SET(flow_context, flow_context, destination_list_size, 1); + MLX5_SET(dest_format_struct, dest, destination_type, + MLX5_FLOW_CONTEXT_DEST_TYPE_TIR); + + switch (type) { + case MLX5E_FULLMATCH: + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + memset(match_criteria_dmac, 0xff, ETH_ALEN); + ether_addr_copy(dmac, ai->addr); + break; + + case MLX5E_ALLMULTI: + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + match_criteria_dmac[0] = 0x01; + dmac[0] = 0x01; + break; + + case MLX5E_PROMISC: + break; + } + + tt_vec = mlx5e_get_tt_vec(ai, type); + + if (tt_vec & (1 << MLX5E_TT_ANY)) { + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_ANY]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_ANY]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_ANY); + } + + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.ethertype); + + if (tt_vec & (1 << MLX5E_TT_IPV4)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IP); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV4]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV4]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV4); + } + + if (tt_vec & (1 << MLX5E_TT_IPV6)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IPV6); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV6]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV6]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV6); + } + + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.ip_protocol); + MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, + IPPROTO_UDP); + + if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IP); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV4_UDP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV4_UDP]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP); + } + + if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IPV6); + MLX5_SET(dest_format_struct, dest, destination_id, + 
tirn[MLX5E_TT_IPV6_UDP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV6_UDP]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP); + } + + MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, + IPPROTO_TCP); + + if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IP); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV4_TCP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV4_TCP]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP); + } + + if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IPV6); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV6_TCP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV6_TCP]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP); + } + + return 0; +} + +static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai, int type) +{ + u32 *flow_context; + u32 *match_criteria; + int err; + + flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) + + MLX5_ST_SZ_BYTES(dest_format_struct)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!flow_context || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto add_eth_addr_rule_out; + } + + err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context, + match_criteria); + if (err) + netdev_err(priv->netdev, "%s: failed\n", __func__); + +add_eth_addr_rule_out: + kvfree(match_criteria); + kvfree(flow_context); + return err; +} + +enum mlx5e_vlan_rule_type { + MLX5E_VLAN_RULE_TYPE_UNTAGGED, + MLX5E_VLAN_RULE_TYPE_ANY_VID, + MLX5E_VLAN_RULE_TYPE_MATCH_VID, +}; + +static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, + enum mlx5e_vlan_rule_type rule_type, u16 vid) +{ + u8 match_criteria_enable = 0; + u32 *flow_context; + void *match_value; + void *dest; + u32 *match_criteria; + u32 *ft_ix; + int err; + + flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) + + MLX5_ST_SZ_BYTES(dest_format_struct)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!flow_context || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto add_vlan_rule_out; + } + match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value); + dest = MLX5_ADDR_OF(flow_context, flow_context, destination); + + MLX5_SET(flow_context, flow_context, action, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); + MLX5_SET(flow_context, flow_context, destination_list_size, 1); + MLX5_SET(dest_format_struct, dest, destination_type, + MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE); + MLX5_SET(dest_format_struct, dest, destination_id, + mlx5_get_flow_table_id(priv->ft.main)); + + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.vlan_tag); + + switch (rule_type) { + case MLX5E_VLAN_RULE_TYPE_UNTAGGED: + ft_ix = &priv->vlan.untagged_rule_ft_ix; + break; + case MLX5E_VLAN_RULE_TYPE_ANY_VID: + ft_ix = &priv->vlan.any_vlan_rule_ft_ix; + 
MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag, + 1); + break; + default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ + ft_ix = &priv->vlan.active_vlans_ft_ix[vid]; + MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag, + 1); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.first_vid); + MLX5_SET(fte_match_param, match_value, outer_headers.first_vid, + vid); + break; + } + + err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable, + match_criteria, flow_context, ft_ix); + if (err) + netdev_err(priv->netdev, "%s: failed\n", __func__); + +add_vlan_rule_out: + kvfree(match_criteria); + kvfree(flow_context); + return err; +} + +static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, + enum mlx5e_vlan_rule_type rule_type, u16 vid) +{ + switch (rule_type) { + case MLX5E_VLAN_RULE_TYPE_UNTAGGED: + mlx5_del_flow_table_entry(priv->ft.vlan, + priv->vlan.untagged_rule_ft_ix); + break; + case MLX5E_VLAN_RULE_TYPE_ANY_VID: + mlx5_del_flow_table_entry(priv->ft.vlan, + priv->vlan.any_vlan_rule_ft_ix); + break; + case MLX5E_VLAN_RULE_TYPE_MATCH_VID: + mlx5_del_flow_table_entry(priv->ft.vlan, + priv->vlan.active_vlans_ft_ix[vid]); + break; + } +} + +void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) +{ + WARN_ON(!mutex_is_locked(&priv->state_lock)); + + if (priv->vlan.filter_disabled) { + priv->vlan.filter_disabled = false; + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + } +} + +void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) +{ + WARN_ON(!mutex_is_locked(&priv->state_lock)); + + if (!priv->vlan.filter_disabled) { + priv->vlan.filter_disabled = true; + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + } +} + +int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int err = 0; + + mutex_lock(&priv->state_lock); + + set_bit(vid, priv->vlan.active_vlans); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, + vid); + + mutex_unlock(&priv->state_lock); + + return err; +} + +int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + mutex_lock(&priv->state_lock); + + clear_bit(vid, priv->vlan.active_vlans); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); + + mutex_unlock(&priv->state_lock); + + return 0; +} + +int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv) +{ + u16 vid; + int err; + + for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) { + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, + vid); + if (err) + return err; + } + + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + if (err) + return err; + + if (priv->vlan.filter_disabled) { + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + if (err) + return err; + } + + return 0; +} + +void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv) +{ + u16 vid; + + if (priv->vlan.filter_disabled) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + + for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); +} + +#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ + for (i 
= 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ + hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) + +static void mlx5e_execute_action(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_hash_node *hn) +{ + switch (hn->action) { + case MLX5E_ACTION_ADD: + mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH); + hn->action = MLX5E_ACTION_NONE; + break; + + case MLX5E_ACTION_DEL: + mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai); + mlx5e_del_eth_addr_from_hash(hn); + break; + } +} + +static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) +{ + struct net_device *netdev = priv->netdev; + struct netdev_hw_addr *ha; + + netif_addr_lock_bh(netdev); + + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, + priv->netdev->dev_addr); + + netdev_for_each_uc_addr(ha, netdev) + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr); + + netdev_for_each_mc_addr(ha, netdev) + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr); + + netif_addr_unlock_bh(netdev); +} + +static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_hash_node *hn; + struct hlist_node *tmp; + int i; + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) + mlx5e_execute_action(priv, hn); + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) + mlx5e_execute_action(priv, hn); +} + +static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_hash_node *hn; + struct hlist_node *tmp; + int i; + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) + hn->action = MLX5E_ACTION_DEL; + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) + hn->action = MLX5E_ACTION_DEL; + + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_sync_netdev_addr(priv); + + mlx5e_apply_netdev_addr(priv); +} + +void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_db *ea = &priv->eth_addr; + struct net_device *ndev = priv->netdev; + + bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state); + bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC); + bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); + bool broadcast_enabled = rx_mode_enable; + + bool enable_promisc = !ea->promisc_enabled && promisc_enabled; + bool disable_promisc = ea->promisc_enabled && !promisc_enabled; + bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled; + bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; + bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; + bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; + + if (enable_promisc) + mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); + if (enable_allmulti) + mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); + if (enable_broadcast) + mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); + + mlx5e_handle_netdev_addr(priv); + + if (disable_broadcast) + mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); + if (disable_allmulti) + mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); + if (disable_promisc) + mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); + + ea->promisc_enabled = promisc_enabled; + ea->allmulti_enabled = allmulti_enabled; + ea->broadcast_enabled = broadcast_enabled; +} + +void mlx5e_set_rx_mode_work(struct work_struct *work) +{ + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + set_rx_mode_work); + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + 
mlx5e_set_rx_mode_core(priv); + mutex_unlock(&priv->state_lock); +} + +void mlx5e_init_eth_addr(struct mlx5e_priv *priv) +{ + ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast); +} + +static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) +{ + struct mlx5_flow_table_group *g; + u8 *dmac; + + g = kcalloc(9, sizeof(*g), GFP_KERNEL); + if (!g) + return -ENOMEM; + + g[0].log_sz = 2; + g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, + outer_headers.ethertype); + MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, + outer_headers.ip_protocol); + + g[1].log_sz = 1; + g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria, + outer_headers.ethertype); + + g[2].log_sz = 0; + + g[3].log_sz = 14; + g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria, + outer_headers.dmac_47_16); + memset(dmac, 0xff, ETH_ALEN); + MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria, + outer_headers.ethertype); + MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria, + outer_headers.ip_protocol); + + g[4].log_sz = 13; + g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria, + outer_headers.dmac_47_16); + memset(dmac, 0xff, ETH_ALEN); + MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria, + outer_headers.ethertype); + + g[5].log_sz = 11; + g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria, + outer_headers.dmac_47_16); + memset(dmac, 0xff, ETH_ALEN); + + g[6].log_sz = 2; + g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria, + outer_headers.dmac_47_16); + dmac[0] = 0x01; + MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria, + outer_headers.ethertype); + MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria, + outer_headers.ip_protocol); + + g[7].log_sz = 1; + g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria, + outer_headers.dmac_47_16); + dmac[0] = 0x01; + MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria, + outer_headers.ethertype); + + g[8].log_sz = 0; + g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria, + outer_headers.dmac_47_16); + dmac[0] = 0x01; + priv->ft.main = mlx5_create_flow_table(priv->mdev, 1, + MLX5_FLOW_TABLE_TYPE_NIC_RCV, + 9, g); + kfree(g); + + return priv->ft.main ? 0 : -ENOMEM; +} + +static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv) +{ + mlx5_destroy_flow_table(priv->ft.main); +} + +static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) +{ + struct mlx5_flow_table_group *g; + + g = kcalloc(2, sizeof(*g), GFP_KERNEL); + if (!g) + return -ENOMEM; + + g[0].log_sz = 12; + g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, + outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, + outer_headers.first_vid); + + /* untagged + any vlan id */ + g[1].log_sz = 1; + g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria, + outer_headers.vlan_tag); + + priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0, + MLX5_FLOW_TABLE_TYPE_NIC_RCV, + 2, g); + + kfree(g); + return priv->ft.vlan ? 
0 : -ENOMEM; +} + +static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) +{ + mlx5_destroy_flow_table(priv->ft.vlan); +} + +int mlx5e_open_flow_table(struct mlx5e_priv *priv) +{ + int err; + + err = mlx5e_create_main_flow_table(priv); + if (err) + return err; + + err = mlx5e_create_vlan_flow_table(priv); + if (err) + goto err_destroy_main_flow_table; + + return 0; + +err_destroy_main_flow_table: + mlx5e_destroy_main_flow_table(priv); + + return err; +} + +void mlx5e_close_flow_table(struct mlx5e_priv *priv) +{ + mlx5e_destroy_vlan_flow_table(priv); + mlx5e_destroy_main_flow_table(priv); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c new file mode 100644 index 000000000000..7348c5173aa9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -0,0 +1,1899 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/mlx5/flow_table.h> +#include "en.h" + +struct mlx5e_rq_param { + u32 rqc[MLX5_ST_SZ_DW(rqc)]; + struct mlx5_wq_param wq; +}; + +struct mlx5e_sq_param { + u32 sqc[MLX5_ST_SZ_DW(sqc)]; + struct mlx5_wq_param wq; +}; + +struct mlx5e_cq_param { + u32 cqc[MLX5_ST_SZ_DW(cqc)]; + struct mlx5_wq_param wq; + u16 eq_ix; +}; + +struct mlx5e_channel_param { + struct mlx5e_rq_param rq; + struct mlx5e_sq_param sq; + struct mlx5e_cq_param rx_cq; + struct mlx5e_cq_param tx_cq; +}; + +static void mlx5e_update_carrier(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u8 port_state; + + port_state = mlx5_query_vport_state(mdev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT); + + if (port_state == VPORT_STATE_UP) + netif_carrier_on(priv->netdev); + else + netif_carrier_off(priv->netdev); +} + +static void mlx5e_update_carrier_work(struct work_struct *work) +{ + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + update_carrier_work); + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_update_carrier(priv); + mutex_unlock(&priv->state_lock); +} + +/* fold the per-ring SW counters and the HW vport counters into the vport stats */ +void mlx5e_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_vport_stats *s = &priv->stats.vport; + struct mlx5e_rq_stats *rq_stats; + struct mlx5e_sq_stats *sq_stats; + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); + u64 tx_offload_none; + int i, j; + + out = mlx5_vzalloc(outlen); + if (!out) + return; + + /* Collect the SW counters first and then the HW counters for consistency */ + s->tso_packets = 0; + s->tso_bytes = 0; + s->tx_queue_stopped = 0; + s->tx_queue_wake = 0; + s->tx_queue_dropped = 0; + tx_offload_none = 0; + s->lro_packets = 0; + s->lro_bytes = 0; + s->rx_csum_none = 0; + s->rx_wqe_err = 0; + for (i = 0; i < priv->params.num_channels; i++) { + rq_stats = &priv->channel[i]->rq.stats; + + s->lro_packets += rq_stats->lro_packets; + s->lro_bytes += rq_stats->lro_bytes; + s->rx_csum_none += rq_stats->csum_none; + s->rx_wqe_err += rq_stats->wqe_err; + + for (j = 0; j < priv->num_tc; j++) { + sq_stats = &priv->channel[i]->sq[j].stats; + + s->tso_packets += sq_stats->tso_packets; + s->tso_bytes += sq_stats->tso_bytes; + s->tx_queue_stopped += sq_stats->stopped; + s->tx_queue_wake += sq_stats->wake; + s->tx_queue_dropped += sq_stats->dropped; + tx_offload_none += sq_stats->csum_offload_none; + } + } + + /* HW counters */ + memset(in, 0, sizeof(in)); + + MLX5_SET(query_vport_counter_in, in, opcode, + MLX5_CMD_OP_QUERY_VPORT_COUNTER); + MLX5_SET(query_vport_counter_in, in, op_mod, 0); + MLX5_SET(query_vport_counter_in, in, other_vport, 0); + + memset(out, 0, outlen); + + if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen)) + goto free_out; + +#define MLX5_GET_CTR(p, x) \ + MLX5_GET64(query_vport_counter_out, p, x) + + s->rx_error_packets = + MLX5_GET_CTR(out, received_errors.packets); + s->rx_error_bytes = + MLX5_GET_CTR(out, received_errors.octets); + s->tx_error_packets = + MLX5_GET_CTR(out, transmit_errors.packets); + s->tx_error_bytes = + MLX5_GET_CTR(out, transmit_errors.octets); + + s->rx_unicast_packets = + MLX5_GET_CTR(out, received_eth_unicast.packets); + s->rx_unicast_bytes = + MLX5_GET_CTR(out, received_eth_unicast.octets); + s->tx_unicast_packets = + MLX5_GET_CTR(out, transmitted_eth_unicast.packets); + s->tx_unicast_bytes = + MLX5_GET_CTR(out, transmitted_eth_unicast.octets); + + s->rx_multicast_packets = + MLX5_GET_CTR(out, received_eth_multicast.packets); +
s->rx_multicast_bytes = + MLX5_GET_CTR(out, received_eth_multicast.octets); + s->tx_multicast_packets = + MLX5_GET_CTR(out, transmitted_eth_multicast.packets); + s->tx_multicast_bytes = + MLX5_GET_CTR(out, transmitted_eth_multicast.octets); + + s->rx_broadcast_packets = + MLX5_GET_CTR(out, received_eth_broadcast.packets); + s->rx_broadcast_bytes = + MLX5_GET_CTR(out, received_eth_broadcast.octets); + s->tx_broadcast_packets = + MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); + s->tx_broadcast_bytes = + MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); + + s->rx_packets = + s->rx_unicast_packets + + s->rx_multicast_packets + + s->rx_broadcast_packets; + s->rx_bytes = + s->rx_unicast_bytes + + s->rx_multicast_bytes + + s->rx_broadcast_bytes; + s->tx_packets = + s->tx_unicast_packets + + s->tx_multicast_packets + + s->tx_broadcast_packets; + s->tx_bytes = + s->tx_unicast_bytes + + s->tx_multicast_bytes + + s->tx_broadcast_bytes; + + /* Update calculated offload counters */ + s->tx_csum_offload = s->tx_packets - tx_offload_none; + s->rx_csum_good = s->rx_packets - s->rx_csum_none; + +free_out: + kvfree(out); +} + +static void mlx5e_update_stats_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv, + update_stats_work); + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { + mlx5e_update_stats(priv); + schedule_delayed_work(dwork, + msecs_to_jiffies( + MLX5E_UPDATE_STATS_INTERVAL)); + } + mutex_unlock(&priv->state_lock); +} + +static void __mlx5e_async_event(struct mlx5e_priv *priv, + enum mlx5_dev_event event) +{ + switch (event) { + case MLX5_DEV_EVENT_PORT_UP: + case MLX5_DEV_EVENT_PORT_DOWN: + schedule_work(&priv->update_carrier_work); + break; + + default: + break; + } +} + +static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, + enum mlx5_dev_event event, unsigned long param) +{ + struct mlx5e_priv *priv = vpriv; + + spin_lock(&priv->async_events_spinlock); + if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) + __mlx5e_async_event(priv, event); + spin_unlock(&priv->async_events_spinlock); +} + +static void mlx5e_enable_async_events(struct mlx5e_priv *priv) +{ + set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); +} + +static void mlx5e_disable_async_events(struct mlx5e_priv *priv) +{ + spin_lock_irq(&priv->async_events_spinlock); + clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); + spin_unlock_irq(&priv->async_events_spinlock); +} + +static void mlx5e_send_nop(struct mlx5e_sq *sq) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + + u16 pi = sq->pc & wq->sz_m1; + struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + + memset(cseg, 0, sizeof(*cseg)); + + cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP); + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01); + cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; + + sq->skb[pi] = NULL; + sq->pc++; + mlx5e_tx_notify_hw(sq, wqe); +} + +static int mlx5e_create_rq(struct mlx5e_channel *c, + struct mlx5e_rq_param *param, + struct mlx5e_rq *rq) +{ + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + void *rqc = param->rqc; + void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); + int wq_sz; + int err; + int i; + + err = mlx5_wq_ll_create(mdev, ¶m->wq, rqc_wq, &rq->wq, + &rq->wq_ctrl); + if (err) + return err; + + rq->wq.db = &rq->wq.db[MLX5_RCV_DBR]; + + wq_sz = mlx5_wq_ll_get_size(&rq->wq); + rq->skb 
= kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL, + cpu_to_node(c->cpu)); + if (!rq->skb) { + err = -ENOMEM; + goto err_rq_wq_destroy; + } + + rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz : + priv->netdev->mtu + ETH_HLEN + VLAN_HLEN; + + for (i = 0; i < wq_sz; i++) { + struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); + + wqe->data.lkey = c->mkey_be; + wqe->data.byte_count = cpu_to_be32(rq->wqe_sz); + } + + rq->pdev = c->pdev; + rq->netdev = c->netdev; + rq->channel = c; + rq->ix = c->ix; + + return 0; + +err_rq_wq_destroy: + mlx5_wq_destroy(&rq->wq_ctrl); + + return err; +} + +static void mlx5e_destroy_rq(struct mlx5e_rq *rq) +{ + kfree(rq->skb); + mlx5_wq_destroy(&rq->wq_ctrl); +} + +static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *rqc; + void *wq; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_rq_in) + + sizeof(u64) * rq->wq_ctrl.buf.npages; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); + wq = MLX5_ADDR_OF(rqc, rqc, wq); + + memcpy(rqc, param->rqc, sizeof(param->rqc)); + + MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn); + MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); + MLX5_SET(rqc, rqc, flush_in_error_en, 1); + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - + PAGE_SHIFT); + MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); + + mlx5_fill_page_array(&rq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + + err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn); + + kvfree(in); + + return err; +} + +static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *rqc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(modify_rq_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); + + MLX5_SET(modify_rq_in, in, rq_state, curr_state); + MLX5_SET(rqc, rqc, state, next_state); + + err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen); + + kvfree(in); + + return err; +} + +static void mlx5e_disable_rq(struct mlx5e_rq *rq) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + mlx5_core_destroy_rq(mdev, rq->rqn); +} + +static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_wq_ll *wq = &rq->wq; + int i; + + for (i = 0; i < 1000; i++) { + if (wq->cur_sz >= priv->params.min_rx_wqes) + return 0; + + msleep(20); + } + + return -ETIMEDOUT; +} + +static int mlx5e_open_rq(struct mlx5e_channel *c, + struct mlx5e_rq_param *param, + struct mlx5e_rq *rq) +{ + int err; + + err = mlx5e_create_rq(c, param, rq); + if (err) + return err; + + err = mlx5e_enable_rq(rq, param); + if (err) + goto err_destroy_rq; + + err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); + if (err) + goto err_disable_rq; + + set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); + mlx5e_send_nop(&c->sq[0]); /* trigger mlx5e_post_rx_wqes() */ + + return 0; + +err_disable_rq: + mlx5e_disable_rq(rq); +err_destroy_rq: + mlx5e_destroy_rq(rq); + + return err; +} + +static void mlx5e_close_rq(struct mlx5e_rq *rq) 
+{ + clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); + napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ + + mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); + while (!mlx5_wq_ll_is_empty(&rq->wq)) + msleep(20); + + /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ + napi_synchronize(&rq->channel->napi); + + mlx5e_disable_rq(rq); + mlx5e_destroy_rq(rq); +} + +static void mlx5e_free_sq_db(struct mlx5e_sq *sq) +{ + kfree(sq->dma_fifo); + kfree(sq->skb); +} + +static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa) +{ + int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); + int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS; + + sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa); + sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL, + numa); + + if (!sq->skb || !sq->dma_fifo) { + mlx5e_free_sq_db(sq); + return -ENOMEM; + } + + sq->dma_fifo_mask = df_sz - 1; + + return 0; +} + +static int mlx5e_create_sq(struct mlx5e_channel *c, + int tc, + struct mlx5e_sq_param *param, + struct mlx5e_sq *sq) +{ + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *sqc = param->sqc; + void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq); + int err; + + err = mlx5_alloc_map_uar(mdev, &sq->uar); + if (err) + return err; + + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, + &sq->wq_ctrl); + if (err) + goto err_unmap_free_uar; + + sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; + sq->uar_map = sq->uar.map; + sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; + + if (mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu))) + goto err_sq_wq_destroy; + + sq->txq = netdev_get_tx_queue(priv->netdev, + c->ix + tc * priv->params.num_channels); + + sq->pdev = c->pdev; + sq->mkey_be = c->mkey_be; + sq->channel = c; + sq->tc = tc; + + return 0; + +err_sq_wq_destroy: + mlx5_wq_destroy(&sq->wq_ctrl); + +err_unmap_free_uar: + mlx5_unmap_free_uar(mdev, &sq->uar); + + return err; +} + +static void mlx5e_destroy_sq(struct mlx5e_sq *sq) +{ + struct mlx5e_channel *c = sq->channel; + struct mlx5e_priv *priv = c->priv; + + mlx5e_free_sq_db(sq); + mlx5_wq_destroy(&sq->wq_ctrl); + mlx5_unmap_free_uar(priv->mdev, &sq->uar); +} + +static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) +{ + struct mlx5e_channel *c = sq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *sqc; + void *wq; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_sq_in) + + sizeof(u64) * sq->wq_ctrl.buf.npages; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); + wq = MLX5_ADDR_OF(sqc, sqc, wq); + + memcpy(sqc, param->sqc, sizeof(param->sqc)); + + MLX5_SET(sqc, sqc, user_index, sq->tc); + MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]); + MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn); + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); + MLX5_SET(sqc, sqc, tis_lst_sz, 1); + MLX5_SET(sqc, sqc, flush_in_error_en, 1); + + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); + MLX5_SET(wq, wq, uar_page, sq->uar.index); + MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - + PAGE_SHIFT); + MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); + + mlx5_fill_page_array(&sq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + + err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn); + + kvfree(in); + + return err; +} + +static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) +{ + struct mlx5e_channel 
*c = sq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *sqc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(modify_sq_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); + + MLX5_SET(modify_sq_in, in, sq_state, curr_state); + MLX5_SET(sqc, sqc, state, next_state); + + err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen); + + kvfree(in); + + return err; +} + +static void mlx5e_disable_sq(struct mlx5e_sq *sq) +{ + struct mlx5e_channel *c = sq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + mlx5_core_destroy_sq(mdev, sq->sqn); +} + +static int mlx5e_open_sq(struct mlx5e_channel *c, + int tc, + struct mlx5e_sq_param *param, + struct mlx5e_sq *sq) +{ + int err; + + err = mlx5e_create_sq(c, tc, param, sq); + if (err) + return err; + + err = mlx5e_enable_sq(sq, param); + if (err) + goto err_destroy_sq; + + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); + if (err) + goto err_disable_sq; + + set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); + netdev_tx_reset_queue(sq->txq); + netif_tx_start_queue(sq->txq); + + return 0; + +err_disable_sq: + mlx5e_disable_sq(sq); +err_destroy_sq: + mlx5e_destroy_sq(sq); + + return err; +} + +static inline void netif_tx_disable_queue(struct netdev_queue *txq) +{ + __netif_tx_lock_bh(txq); + netif_tx_stop_queue(txq); + __netif_tx_unlock_bh(txq); +} + +static void mlx5e_close_sq(struct mlx5e_sq *sq) +{ + clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); + napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */ + netif_tx_disable_queue(sq->txq); + + /* ensure hw is notified of all pending wqes */ + if (mlx5e_sq_has_room_for(sq, 1)) + mlx5e_send_nop(sq); + + mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); + while (sq->cc != sq->pc) /* wait till sq is empty */ + msleep(20); + + /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */ + napi_synchronize(&sq->channel->napi); + + mlx5e_disable_sq(sq); + mlx5e_destroy_sq(sq); +} + +static int mlx5e_create_cq(struct mlx5e_channel *c, + struct mlx5e_cq_param *param, + struct mlx5e_cq *cq) +{ + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_core_cq *mcq = &cq->mcq; + int eqn_not_used; + int irqn; + int err; + u32 i; + + param->wq.numa = cpu_to_node(c->cpu); + param->eq_ix = c->ix; + + err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, + &cq->wq_ctrl); + if (err) + return err; + + mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); + + cq->napi = &c->napi; + + mcq->cqe_sz = 64; + mcq->set_ci_db = cq->wq_ctrl.db.db; + mcq->arm_db = cq->wq_ctrl.db.db + 1; + *mcq->set_ci_db = 0; + *mcq->arm_db = 0; + mcq->vector = param->eq_ix; + mcq->comp = mlx5e_completion_event; + mcq->event = mlx5e_cq_error_event; + mcq->irqn = irqn; + mcq->uar = &priv->cq_uar; + + for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); + + cqe->op_own = 0xf1; + } + + cq->channel = c; + + return 0; +} + +static void mlx5e_destroy_cq(struct mlx5e_cq *cq) +{ + mlx5_wq_destroy(&cq->wq_ctrl); +} + +static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) +{ + struct mlx5e_channel *c = cq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_core_cq *mcq = &cq->mcq; + + void *in; + void *cqc; + int inlen; + int irqn_not_used; + int eqn; + int err; + + 
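+ /* Build the CREATE_CQ command: copy in the caller-prepared CQ context, map the CQ buffer pages, and bind the CQ to its completion EQ before arming it. */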
inlen = MLX5_ST_SZ_BYTES(create_cq_in) + + sizeof(u64) * cq->wq_ctrl.buf.npages; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); + + memcpy(cqc, param->cqc, sizeof(param->cqc)); + + mlx5_fill_page_array(&cq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); + + mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); + + MLX5_SET(cqc, cqc, c_eqn, eqn); + MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); + MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - + PAGE_SHIFT); + MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); + + err = mlx5_core_create_cq(mdev, mcq, in, inlen); + + kvfree(in); + + if (err) + return err; + + mlx5e_cq_arm(cq); + + return 0; +} + +static void mlx5e_disable_cq(struct mlx5e_cq *cq) +{ + struct mlx5e_channel *c = cq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + mlx5_core_destroy_cq(mdev, &cq->mcq); +} + +static int mlx5e_open_cq(struct mlx5e_channel *c, + struct mlx5e_cq_param *param, + struct mlx5e_cq *cq, + u16 moderation_usecs, + u16 moderation_frames) +{ + int err; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + err = mlx5e_create_cq(c, param, cq); + if (err) + return err; + + err = mlx5e_enable_cq(cq, param); + if (err) + goto err_destroy_cq; + + err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq, + moderation_usecs, + moderation_frames); + if (err) + goto err_destroy_cq; + + return 0; + +err_destroy_cq: + mlx5e_destroy_cq(cq); + + return err; +} + +static void mlx5e_close_cq(struct mlx5e_cq *cq) +{ + mlx5e_disable_cq(cq); + mlx5e_destroy_cq(cq); +} + +static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix) +{ + return cpumask_first(priv->mdev->priv.irq_info[ix].mask); +} + +static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, + struct mlx5e_channel_param *cparam) +{ + struct mlx5e_priv *priv = c->priv; + int err; + int tc; + + for (tc = 0; tc < c->num_tc; tc++) { + err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq, + priv->params.tx_cq_moderation_usec, + priv->params.tx_cq_moderation_pkts); + if (err) + goto err_close_tx_cqs; + + c->sq[tc].cq.sqrq = &c->sq[tc]; + } + + return 0; + +err_close_tx_cqs: + for (tc--; tc >= 0; tc--) + mlx5e_close_cq(&c->sq[tc].cq); + + return err; +} + +static void mlx5e_close_tx_cqs(struct mlx5e_channel *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_close_cq(&c->sq[tc].cq); +} + +static int mlx5e_open_sqs(struct mlx5e_channel *c, + struct mlx5e_channel_param *cparam) +{ + int err; + int tc; + + for (tc = 0; tc < c->num_tc; tc++) { + err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]); + if (err) + goto err_close_sqs; + } + + return 0; + +err_close_sqs: + for (tc--; tc >= 0; tc--) + mlx5e_close_sq(&c->sq[tc]); + + return err; +} + +static void mlx5e_close_sqs(struct mlx5e_channel *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_close_sq(&c->sq[tc]); +} + +static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, + struct mlx5e_channel_param *cparam, + struct mlx5e_channel **cp) +{ + struct net_device *netdev = priv->netdev; + int cpu = mlx5e_get_cpu(priv, ix); + struct mlx5e_channel *c; + int err; + + c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); + if (!c) + return -ENOMEM; + + c->priv = priv; + c->ix = ix; + c->cpu = cpu; + c->pdev = &priv->mdev->pdev->dev; + c->netdev = priv->netdev; + c->mkey_be = cpu_to_be32(priv->mr.key); + c->num_tc = priv->num_tc; + + netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 
64); + + err = mlx5e_open_tx_cqs(c, cparam); + if (err) + goto err_napi_del; + + err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, + priv->params.rx_cq_moderation_usec, + priv->params.rx_cq_moderation_pkts); + if (err) + goto err_close_tx_cqs; + c->rq.cq.sqrq = &c->rq; + + napi_enable(&c->napi); + + err = mlx5e_open_sqs(c, cparam); + if (err) + goto err_disable_napi; + + err = mlx5e_open_rq(c, &cparam->rq, &c->rq); + if (err) + goto err_close_sqs; + + netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix); + *cp = c; + + return 0; + +err_close_sqs: + mlx5e_close_sqs(c); + +err_disable_napi: + napi_disable(&c->napi); + mlx5e_close_cq(&c->rq.cq); + +err_close_tx_cqs: + mlx5e_close_tx_cqs(c); + +err_napi_del: + netif_napi_del(&c->napi); + kfree(c); + + return err; +} + +static void mlx5e_close_channel(struct mlx5e_channel *c) +{ + mlx5e_close_rq(&c->rq); + mlx5e_close_sqs(c); + napi_disable(&c->napi); + mlx5e_close_cq(&c->rq.cq); + mlx5e_close_tx_cqs(c); + netif_napi_del(&c->napi); + kfree(c); +} + +static void mlx5e_build_rq_param(struct mlx5e_priv *priv, + struct mlx5e_rq_param *param) +{ + void *rqc = param->rqc; + void *wq = MLX5_ADDR_OF(rqc, rqc, wq); + + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); + MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); + MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); + MLX5_SET(wq, wq, pd, priv->pdn); + + param->wq.numa = dev_to_node(&priv->mdev->pdev->dev); + param->wq.linear = 1; +} + +static void mlx5e_build_sq_param(struct mlx5e_priv *priv, + struct mlx5e_sq_param *param) +{ + void *sqc = param->sqc; + void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + + MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); + MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); + MLX5_SET(wq, wq, pd, priv->pdn); + + param->wq.numa = dev_to_node(&priv->mdev->pdev->dev); +} + +static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, + struct mlx5e_cq_param *param) +{ + void *cqc = param->cqc; + + MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); +} + +static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, + struct mlx5e_cq_param *param) +{ + void *cqc = param->cqc; + + MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size); + + mlx5e_build_common_cq_param(priv, param); +} + +static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, + struct mlx5e_cq_param *param) +{ + void *cqc = param->cqc; + + MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); + + mlx5e_build_common_cq_param(priv, param); +} + +static void mlx5e_build_channel_param(struct mlx5e_priv *priv, + struct mlx5e_channel_param *cparam) +{ + memset(cparam, 0, sizeof(*cparam)); + + mlx5e_build_rq_param(priv, &cparam->rq); + mlx5e_build_sq_param(priv, &cparam->sq); + mlx5e_build_rx_cq_param(priv, &cparam->rx_cq); + mlx5e_build_tx_cq_param(priv, &cparam->tx_cq); +} + +static int mlx5e_open_channels(struct mlx5e_priv *priv) +{ + struct mlx5e_channel_param cparam; + int err; + int i; + int j; + + priv->channel = kcalloc(priv->params.num_channels, + sizeof(struct mlx5e_channel *), GFP_KERNEL); + if (!priv->channel) + return -ENOMEM; + + mlx5e_build_channel_param(priv, &cparam); + for (i = 0; i < priv->params.num_channels; i++) { + err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]); + if (err) + goto err_close_channels; + } + + for (j = 0; j < priv->params.num_channels; j++) { + err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq); + if (err) + goto err_close_channels; + } + + return 
0; + +err_close_channels: + for (i--; i >= 0; i--) + mlx5e_close_channel(priv->channel[i]); + + kfree(priv->channel); + + return err; +} + +static void mlx5e_close_channels(struct mlx5e_priv *priv) +{ + int i; + + for (i = 0; i < priv->params.num_channels; i++) + mlx5e_close_channel(priv->channel[i]); + + kfree(priv->channel); +} + +static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(create_tis_in)]; + void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); + + memset(in, 0, sizeof(in)); + + MLX5_SET(tisc, tisc, prio, tc); + + return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); +} + +static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc) +{ + mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); +} + +static int mlx5e_open_tises(struct mlx5e_priv *priv) +{ + int num_tc = priv->num_tc; + int err; + int tc; + + for (tc = 0; tc < num_tc; tc++) { + err = mlx5e_open_tis(priv, tc); + if (err) + goto err_close_tises; + } + + return 0; + +err_close_tises: + for (tc--; tc >= 0; tc--) + mlx5e_close_tis(priv, tc); + + return err; +} + +static void mlx5e_close_tises(struct mlx5e_priv *priv) +{ + int num_tc = priv->num_tc; + int tc; + + for (tc = 0; tc < num_tc; tc++) + mlx5e_close_tis(priv, tc); +} + +static int mlx5e_open_rqt(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 *in; + u32 out[MLX5_ST_SZ_DW(create_rqt_out)]; + void *rqtc; + int inlen; + int err; + int sz; + int i; + + sz = 1 << priv->params.rx_hash_log_tbl_sz; + + inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); + + MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); + MLX5_SET(rqtc, rqtc, rqt_max_size, sz); + + for (i = 0; i < sz; i++) { + int ix = i % priv->params.num_channels; + + MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn); + } + + MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out)); + if (!err) + priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn); + + kvfree(in); + + return err; +} + +static void mlx5e_close_rqt(struct mlx5e_priv *priv) +{ + u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); + MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn); + + mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out, + sizeof(out)); +} + +static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) +{ + void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); + +#define ROUGH_MAX_L2_L3_HDR_SZ 256 + +#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP) + +#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP |\ + MLX5_HASH_FIELD_SEL_L4_SPORT |\ + MLX5_HASH_FIELD_SEL_L4_DPORT) + + if (priv->params.lro_en) { + MLX5_SET(tirc, tirc, lro_enable_mask, + MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | + MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); + MLX5_SET(tirc, tirc, lro_max_ip_payload_size, + (priv->params.lro_wqe_sz - + ROUGH_MAX_L2_L3_HDR_SZ) >> 8); + MLX5_SET(tirc, tirc, lro_timeout_period_usecs, + MLX5_CAP_ETH(priv->mdev, + lro_timer_supported_periods[3])); + } + + switch (tt) { + case MLX5E_TT_ANY: + MLX5_SET(tirc, tirc, disp_type, + MLX5_TIRC_DISP_TYPE_DIRECT); + MLX5_SET(tirc, tirc, inline_rqn, + 
priv->channel[0]->rq.rqn); + break; + default: + MLX5_SET(tirc, tirc, disp_type, + MLX5_TIRC_DISP_TYPE_INDIRECT); + MLX5_SET(tirc, tirc, indirect_table, + priv->rqtn); + MLX5_SET(tirc, tirc, rx_hash_fn, + MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ); + MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); + netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc, + rx_hash_toeplitz_key), + MLX5_FLD_SZ_BYTES(tirc, + rx_hash_toeplitz_key)); + break; + } + + switch (tt) { + case MLX5E_TT_IPV4_TCP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_TCP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_ALL); + break; + + case MLX5E_TT_IPV6_TCP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_TCP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_ALL); + break; + + case MLX5E_TT_IPV4_UDP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_UDP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_ALL); + break; + + case MLX5E_TT_IPV6_UDP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_UDP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_ALL); + break; + + case MLX5E_TT_IPV4: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP); + break; + + case MLX5E_TT_IPV6: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP); + break; + } +} + +static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 *in; + void *tirc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_tir_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + + mlx5e_build_tir_ctx(priv, tirc, tt); + + err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]); + + kvfree(in); + + return err; +} + +static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt) +{ + mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); +} + +static int mlx5e_open_tirs(struct mlx5e_priv *priv) +{ + int err; + int i; + + for (i = 0; i < MLX5E_NUM_TT; i++) { + err = mlx5e_open_tir(priv, i); + if (err) + goto err_close_tirs; + } + + return 0; + +err_close_tirs: + for (i--; i >= 0; i--) + mlx5e_close_tir(priv, i); + + return err; +} + +static void mlx5e_close_tirs(struct mlx5e_priv *priv) +{ + int i; + + for (i = 0; i < MLX5E_NUM_TT; i++) + mlx5e_close_tir(priv, i); +} + +int mlx5e_open_locked(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int actual_mtu; + int num_txqs; + int err; + + num_txqs = roundup_pow_of_two(priv->params.num_channels) * + priv->params.num_tc; + netif_set_real_num_tx_queues(netdev, num_txqs); + netif_set_real_num_rx_queues(netdev, priv->params.num_channels); + + err = mlx5_set_port_mtu(mdev, netdev->mtu); + if (err) { + netdev_err(netdev, "%s: mlx5_set_port_mtu failed %d\n", + __func__, err); + return err; + } + + err = mlx5_query_port_oper_mtu(mdev, &actual_mtu, 1); + if (err) { + netdev_err(netdev, "%s: 
mlx5_query_port_oper_mtu failed %d\n", + __func__, err); + return err; + } + + if (actual_mtu != netdev->mtu) + netdev_warn(netdev, "%s: Failed to set MTU to %d\n", + __func__, netdev->mtu); + + netdev->mtu = actual_mtu; + + err = mlx5e_open_tises(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n", + __func__, err); + return err; + } + + err = mlx5e_open_channels(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n", + __func__, err); + goto err_close_tises; + } + + err = mlx5e_open_rqt(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n", + __func__, err); + goto err_close_channels; + } + + err = mlx5e_open_tirs(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n", + __func__, err); + goto err_close_rqls; + } + + err = mlx5e_open_flow_table(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n", + __func__, err); + goto err_close_tirs; + } + + err = mlx5e_add_all_vlan_rules(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n", + __func__, err); + goto err_close_flow_table; + } + + mlx5e_init_eth_addr(priv); + + set_bit(MLX5E_STATE_OPENED, &priv->state); + + mlx5e_update_carrier(priv); + mlx5e_set_rx_mode_core(priv); + + schedule_delayed_work(&priv->update_stats_work, 0); + return 0; + +err_close_flow_table: + mlx5e_close_flow_table(priv); + +err_close_tirs: + mlx5e_close_tirs(priv); + +err_close_rqls: + mlx5e_close_rqt(priv); + +err_close_channels: + mlx5e_close_channels(priv); + +err_close_tises: + mlx5e_close_tises(priv); + + return err; +} + +static int mlx5e_open(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + err = mlx5e_open_locked(netdev); + mutex_unlock(&priv->state_lock); + + return err; +} + +int mlx5e_close_locked(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + clear_bit(MLX5E_STATE_OPENED, &priv->state); + + mlx5e_set_rx_mode_core(priv); + mlx5e_del_all_vlan_rules(priv); + netif_carrier_off(priv->netdev); + mlx5e_close_flow_table(priv); + mlx5e_close_tirs(priv); + mlx5e_close_rqt(priv); + mlx5e_close_channels(priv); + mlx5e_close_tises(priv); + + return 0; +} + +static int mlx5e_close(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + err = mlx5e_close_locked(netdev); + mutex_unlock(&priv->state_lock); + + return err; +} + +int mlx5e_update_priv_params(struct mlx5e_priv *priv, + struct mlx5e_params *new_params) +{ + int err = 0; + int was_opened; + + WARN_ON(!mutex_is_locked(&priv->state_lock)); + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(priv->netdev); + + priv->params = *new_params; + + if (was_opened) + err = mlx5e_open_locked(priv->netdev); + + return err; +} + +static struct rtnl_link_stats64 * +mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_vport_stats *vstats = &priv->stats.vport; + + stats->rx_packets = vstats->rx_packets; + stats->rx_bytes = vstats->rx_bytes; + stats->tx_packets = vstats->tx_packets; + stats->tx_bytes = vstats->tx_bytes; + stats->multicast = vstats->rx_multicast_packets + + vstats->tx_multicast_packets; + stats->tx_errors = vstats->tx_error_packets; + stats->rx_errors = vstats->rx_error_packets; + stats->tx_dropped = vstats->tx_queue_dropped; + stats->rx_crc_errors = 0; 
+ stats->rx_length_errors = 0; + + return stats; +} + +static void mlx5e_set_rx_mode(struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + schedule_work(&priv->set_rx_mode_work); +} + +static int mlx5e_set_mac(struct net_device *netdev, void *addr) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct sockaddr *saddr = addr; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + netif_addr_lock_bh(netdev); + ether_addr_copy(netdev->dev_addr, saddr->sa_data); + netif_addr_unlock_bh(netdev); + + schedule_work(&priv->set_rx_mode_work); + + return 0; +} + +static int mlx5e_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + netdev_features_t changes = features ^ netdev->features; + struct mlx5e_params new_params; + bool update_params = false; + + mutex_lock(&priv->state_lock); + new_params = priv->params; + + if (changes & NETIF_F_LRO) { + new_params.lro_en = !!(features & NETIF_F_LRO); + update_params = true; + } + + if (update_params) + mlx5e_update_priv_params(priv, &new_params); + + if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + mlx5e_enable_vlan_filter(priv); + else + mlx5e_disable_vlan_filter(priv); + } + + mutex_unlock(&priv->state_lock); + + return 0; +} + +static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int max_mtu; + int err = 0; + + err = mlx5_query_port_max_mtu(mdev, &max_mtu, 1); + if (err) + return err; + + if (new_mtu > max_mtu || new_mtu < MLX5E_PARAMS_MIN_MTU) { + netdev_err(netdev, "%s: Bad MTU size, mtu must be [%d-%d]\n", + __func__, MLX5E_PARAMS_MIN_MTU, max_mtu); + return -EINVAL; + } + + mutex_lock(&priv->state_lock); + netdev->mtu = new_mtu; + err = mlx5e_update_priv_params(priv, &priv->params); + mutex_unlock(&priv->state_lock); + + return err; +} + +static struct net_device_ops mlx5e_netdev_ops = { + .ndo_open = mlx5e_open, + .ndo_stop = mlx5e_close, + .ndo_start_xmit = mlx5e_xmit, + .ndo_get_stats64 = mlx5e_get_stats, + .ndo_set_rx_mode = mlx5e_set_rx_mode, + .ndo_set_mac_address = mlx5e_set_mac, + .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, + .ndo_set_features = mlx5e_set_features, + .ndo_change_mtu = mlx5e_change_mtu, +}; + +static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) +{ + if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return -ENOTSUPP; + if (!MLX5_CAP_GEN(mdev, eth_net_offloads) || + !MLX5_CAP_GEN(mdev, nic_flow_table) || + !MLX5_CAP_ETH(mdev, csum_cap) || + !MLX5_CAP_ETH(mdev, max_lso_cap) || + !MLX5_CAP_ETH(mdev, vlan_cap) || + !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap)) { + mlx5_core_warn(mdev, + "Not creating net device, some required device capabilities are missing\n"); + return -ENOTSUPP; + } + return 0; +} + +static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, + struct net_device *netdev, + int num_comp_vectors) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + priv->params.log_sq_size = + MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; + priv->params.log_rq_size = + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; + priv->params.rx_cq_moderation_usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + priv->params.rx_cq_moderation_pkts = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + priv->params.tx_cq_moderation_usec = + MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; + priv->params.tx_cq_moderation_pkts = 
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; + priv->params.min_rx_wqes = + MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; + priv->params.rx_hash_log_tbl_sz = + (order_base_2(num_comp_vectors) > + MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ? + order_base_2(num_comp_vectors) : + MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ; + priv->params.num_tc = 1; + priv->params.default_vlan_prio = 0; + + priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap); + priv->params.lro_wqe_sz = + MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + + priv->mdev = mdev; + priv->netdev = netdev; + priv->params.num_channels = num_comp_vectors; + priv->order_base_2_num_channels = order_base_2(num_comp_vectors); + priv->queue_mapping_channel_mask = + roundup_pow_of_two(num_comp_vectors) - 1; + priv->num_tc = priv->params.num_tc; + priv->default_vlan_prio = priv->params.default_vlan_prio; + + spin_lock_init(&priv->async_events_spinlock); + mutex_init(&priv->state_lock); + + INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); + INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); + INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); +} + +static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr); +} + +static void mlx5e_build_netdev(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + + SET_NETDEV_DEV(netdev, &mdev->pdev->dev); + + if (priv->num_tc > 1) { + mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue; + mlx5e_netdev_ops.ndo_start_xmit = mlx5e_xmit_multi_tc; + } + + netdev->netdev_ops = &mlx5e_netdev_ops; + netdev->watchdog_timeo = 15 * HZ; + + netdev->ethtool_ops = &mlx5e_ethtool_ops; + + netdev->vlan_features |= NETIF_F_IP_CSUM; + netdev->vlan_features |= NETIF_F_IPV6_CSUM; + netdev->vlan_features |= NETIF_F_GRO; + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_TSO6; + netdev->vlan_features |= NETIF_F_RXCSUM; + netdev->vlan_features |= NETIF_F_RXHASH; + + if (!!MLX5_CAP_ETH(mdev, lro_cap)) + netdev->vlan_features |= NETIF_F_LRO; + + netdev->hw_features = netdev->vlan_features; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->features = netdev->hw_features; + if (!priv->params.lro_en) + netdev->features &= ~NETIF_F_LRO; + + netdev->features |= NETIF_F_HIGHDMA; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + mlx5e_set_netdev_dev_addr(netdev); +} + +static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, + struct mlx5_core_mr *mr) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_create_mkey_mbox_in *in; + int err; + + in = mlx5_vzalloc(sizeof(*in)); + if (!in) + return -ENOMEM; + + in->seg.flags = MLX5_PERM_LOCAL_WRITE | + MLX5_PERM_LOCAL_READ | + MLX5_ACCESS_MODE_PA; + in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); + in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + + err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL, + NULL); + + kvfree(in); + + return err; +} + +static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) +{ + struct net_device *netdev; + struct mlx5e_priv *priv; + int ncv = mdev->priv.eq_table.num_comp_vectors; + int err; + + if (mlx5e_check_required_hca_cap(mdev)) + return NULL; + + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), + roundup_pow_of_two(ncv) * MLX5E_MAX_NUM_TC, + ncv); + if 
(!netdev) { + mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); + return NULL; + } + + mlx5e_build_netdev_priv(mdev, netdev, ncv); + mlx5e_build_netdev(netdev); + + netif_carrier_off(netdev); + + priv = netdev_priv(netdev); + + err = mlx5_alloc_map_uar(mdev, &priv->cq_uar); + if (err) { + netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n", + __func__, err); + goto err_free_netdev; + } + + err = mlx5_core_alloc_pd(mdev, &priv->pdn); + if (err) { + netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n", + __func__, err); + goto err_unmap_free_uar; + } + + err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); + if (err) { + netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n", + __func__, err); + goto err_dealloc_pd; + } + + err = register_netdev(netdev); + if (err) { + netdev_err(netdev, "%s: register_netdev failed, %d\n", + __func__, err); + goto err_destroy_mkey; + } + + mlx5e_enable_async_events(priv); + + return priv; + +err_destroy_mkey: + mlx5_core_destroy_mkey(mdev, &priv->mr); + +err_dealloc_pd: + mlx5_core_dealloc_pd(mdev, priv->pdn); + +err_unmap_free_uar: + mlx5_unmap_free_uar(mdev, &priv->cq_uar); + +err_free_netdev: + free_netdev(netdev); + + return NULL; +} + +static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) +{ + struct mlx5e_priv *priv = vpriv; + struct net_device *netdev = priv->netdev; + + unregister_netdev(netdev); + mlx5_core_destroy_mkey(priv->mdev, &priv->mr); + mlx5_core_dealloc_pd(priv->mdev, priv->pdn); + mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); + mlx5e_disable_async_events(priv); + flush_scheduled_work(); + free_netdev(netdev); +} + +static void *mlx5e_get_netdev(void *vpriv) +{ + struct mlx5e_priv *priv = vpriv; + + return priv->netdev; +} + +static struct mlx5_interface mlx5e_interface = { + .add = mlx5e_create_netdev, + .remove = mlx5e_destroy_netdev, + .event = mlx5e_async_event, + .protocol = MLX5_INTERFACE_PROTOCOL_ETH, + .get_dev = mlx5e_get_netdev, +}; + +void mlx5e_init(void) +{ + mlx5_register_interface(&mlx5e_interface); +} + +void mlx5e_cleanup(void) +{ + mlx5_unregister_interface(&mlx5e_interface); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c new file mode 100644 index 000000000000..ce1317cdabd7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/tcp.h> +#include "en.h" + +static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, + struct mlx5e_rx_wqe *wqe, u16 ix) +{ + struct sk_buff *skb; + dma_addr_t dma_addr; + + skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz); + if (unlikely(!skb)) + return -ENOMEM; + + skb_reserve(skb, MLX5E_NET_IP_ALIGN); + + dma_addr = dma_map_single(rq->pdev, + /* hw start padding */ + skb->data - MLX5E_NET_IP_ALIGN, + /* hw end padding */ + rq->wqe_sz, + DMA_FROM_DEVICE); + + if (unlikely(dma_mapping_error(rq->pdev, dma_addr))) + goto err_free_skb; + + *((dma_addr_t *)skb->cb) = dma_addr; + wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN); + + rq->skb[ix] = skb; + + return 0; + +err_free_skb: + dev_kfree_skb(skb); + + return -ENOMEM; +} + +bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) +{ + struct mlx5_wq_ll *wq = &rq->wq; + + if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state))) + return false; + + while (!mlx5_wq_ll_is_full(wq)) { + struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + + if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head))) + break; + + mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); + } + + /* ensure wqes are visible to device before updating doorbell record */ + dma_wmb(); + + mlx5_wq_ll_update_db_record(wq); + + return !mlx5_wq_ll_is_full(wq); +} + +static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe) +{ + struct ethhdr *eth = (struct ethhdr *)(skb->data); + struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN); + struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); + struct tcphdr *tcp; + + u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); + int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) || + (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type)); + + u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN; + + if (eth->h_proto == htons(ETH_P_IP)) { + tcp = (struct tcphdr *)(skb->data + ETH_HLEN + + sizeof(struct iphdr)); + ipv6 = NULL; + } else { + tcp = (struct tcphdr *)(skb->data + ETH_HLEN + + sizeof(struct ipv6hdr)); + ipv4 = NULL; + } + + if (get_cqe_lro_tcppsh(cqe)) + tcp->psh = 1; + + if (tcp_ack) { + tcp->ack = 1; + tcp->ack_seq = cqe->lro_ack_seq_num; + tcp->window = cqe->lro_tcp_win; + } + + if (ipv4) { + ipv4->ttl = cqe->lro_min_ttl; + ipv4->tot_len = cpu_to_be16(tot_len); + ipv4->check = 0; + ipv4->check = ip_fast_csum((unsigned char *)ipv4, + ipv4->ihl); + } else { + ipv6->hop_limit = cqe->lro_min_ttl; + ipv6->payload_len = cpu_to_be16(tot_len - + sizeof(struct ipv6hdr)); + } +} + +static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, + struct sk_buff *skb) +{ + u8 cht = cqe->rss_hash_type; + int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 : + (cht & CQE_RSS_HTYPE_IP) ? 
PKT_HASH_TYPE_L3 : + PKT_HASH_TYPE_NONE; + skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht); +} + +static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, + struct mlx5e_rq *rq, + struct sk_buff *skb) +{ + struct net_device *netdev = rq->netdev; + u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + int lro_num_seg; + + skb_put(skb, cqe_bcnt); + + lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; + if (lro_num_seg > 1) { + mlx5e_lro_update_hdr(skb, cqe); + skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + rq->stats.lro_packets++; + rq->stats.lro_bytes += cqe_bcnt; + } + + if (likely(netdev->features & NETIF_F_RXCSUM) && + (cqe->hds_ip_ext & CQE_L2_OK) && + (cqe->hds_ip_ext & CQE_L3_OK) && + (cqe->hds_ip_ext & CQE_L4_OK)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb->ip_summed = CHECKSUM_NONE; + rq->stats.csum_none++; + } + + skb->protocol = eth_type_trans(skb, netdev); + + skb_record_rx_queue(skb, rq->ix); + + if (likely(netdev->features & NETIF_F_RXHASH)) + mlx5e_skb_set_hash(cqe, skb); + + if (cqe_has_vlan(cqe)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + be16_to_cpu(cqe->vlan_info)); +} + +bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) +{ + struct mlx5e_rq *rq = cq->sqrq; + int i; + + /* avoid accessing cq (dma coherent memory) if not needed */ + if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) + return false; + + for (i = 0; i < budget; i++) { + struct mlx5e_rx_wqe *wqe; + struct mlx5_cqe64 *cqe; + struct sk_buff *skb; + __be16 wqe_counter_be; + u16 wqe_counter; + + cqe = mlx5e_get_cqe(cq); + if (!cqe) + break; + + wqe_counter_be = cqe->wqe_counter; + wqe_counter = be16_to_cpu(wqe_counter_be); + wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); + skb = rq->skb[wqe_counter]; + rq->skb[wqe_counter] = NULL; + + dma_unmap_single(rq->pdev, + *((dma_addr_t *)skb->cb), + skb_end_offset(skb), + DMA_FROM_DEVICE); + + if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { + rq->stats.wqe_err++; + dev_kfree_skb(skb); + goto wq_ll_pop; + } + + mlx5e_build_rx_skb(cqe, rq, skb); + rq->stats.packets++; + napi_gro_receive(cq->napi, skb); + +wq_ll_pop: + mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, + &wqe->next.next_wqe_index); + } + + mlx5_cqwq_update_db_record(&cq->wq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + if (i == budget) { + set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); + return true; + } + + return false; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c new file mode 100644 index 000000000000..8020986cdaf6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/tcp.h> +#include <linux/if_vlan.h> +#include "en.h" + +static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr, + u32 *size) +{ + sq->dma_fifo_pc--; + *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr; + *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size; +} + +static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb) +{ + dma_addr_t addr; + u32 size; + int i; + + for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) { + mlx5e_dma_pop_last_pushed(sq, &addr, &size); + dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE); + } +} + +static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr, + u32 size) +{ + sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr; + sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size; + sq->dma_fifo_pc++; +} + +static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr, + u32 *size) +{ + *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr; + *size = sq->dma_fifo[i & sq->dma_fifo_mask].size; +} + +u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int channel_ix = fallback(dev, skb); + int up = skb_vlan_tag_present(skb) ? 
+ skb->vlan_tci >> VLAN_PRIO_SHIFT : + priv->default_vlan_prio; + int tc = netdev_get_prio_tc_map(dev, up); + + return (tc << priv->order_base_2_num_channels) | channel_ix; +} + +static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, + struct sk_buff *skb) +{ +#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */ + return MLX5E_MIN_INLINE; +} + +static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) +{ + struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start; + int cpy1_sz = 2 * ETH_ALEN; + int cpy2_sz = ihs - cpy1_sz - VLAN_HLEN; + + skb_copy_from_linear_data(skb, vhdr, cpy1_sz); + skb_pull_inline(skb, cpy1_sz); + vhdr->h_vlan_proto = skb->vlan_proto; + vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb)); + skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto, + cpy2_sz); + skb_pull_inline(skb, cpy2_sz); +} + +static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + + u16 pi = sq->pc & wq->sz_m1; + struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + struct mlx5_wqe_eth_seg *eseg = &wqe->eth; + struct mlx5_wqe_data_seg *dseg; + + u8 opcode = MLX5_OPCODE_SEND; + dma_addr_t dma_addr = 0; + u16 headlen; + u16 ds_cnt; + u16 ihs; + int i; + + memset(wqe, 0, sizeof(*wqe)); + + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) + eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; + else + sq->stats.csum_offload_none++; + + if (skb_is_gso(skb)) { + u32 payload_len; + int num_pkts; + + eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size); + opcode = MLX5_OPCODE_LSO; + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + payload_len = skb->len - ihs; + num_pkts = (payload_len / skb_shinfo(skb)->gso_size) + + !!(payload_len % skb_shinfo(skb)->gso_size); + MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len + + (num_pkts - 1) * ihs; + sq->stats.tso_packets++; + sq->stats.tso_bytes += payload_len; + } else { + ihs = mlx5e_get_inline_hdr_size(sq, skb); + MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len, + ETH_ZLEN); + } + + if (skb_vlan_tag_present(skb)) { + mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs); + } else { + skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs); + skb_pull_inline(skb, ihs); + } + + eseg->inline_hdr_sz = cpu_to_be16(ihs); + + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; + ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start), + MLX5_SEND_WQE_DS); + dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt; + + MLX5E_TX_SKB_CB(skb)->num_dma = 0; + + headlen = skb_headlen(skb); + if (headlen) { + dma_addr = dma_map_single(sq->pdev, skb->data, headlen, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) + goto dma_unmap_wqe_err; + + dseg->addr = cpu_to_be64(dma_addr); + dseg->lkey = sq->mkey_be; + dseg->byte_count = cpu_to_be32(headlen); + + mlx5e_dma_push(sq, dma_addr, headlen); + MLX5E_TX_SKB_CB(skb)->num_dma++; + + dseg++; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + int fsz = skb_frag_size(frag); + + dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) + goto dma_unmap_wqe_err; + + dseg->addr = cpu_to_be64(dma_addr); + dseg->lkey = sq->mkey_be; + dseg->byte_count = cpu_to_be32(fsz); + + mlx5e_dma_push(sq, dma_addr, fsz); + MLX5E_TX_SKB_CB(skb)->num_dma++; + + dseg++; + } + + ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma; 
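+ /* ds_cnt now counts every 16-byte data segment of this WQE: the + * ctrl+eth segments (2), the segments taken by the inline header + * beyond inline_hdr_start (1 for the 16-byte minimum used above) and + * one per DMA mapping. Illustrative count for a linear skb with two + * frags: 2 + 1 + 3 = 6 segments, which DIV_ROUND_UP below rounds up + * to 2 WQEBBs of 4 segments each. + */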
+ + cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode); + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); + cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; + + sq->skb[pi] = skb; + + MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt, + MLX5_SEND_WQEBB_NUM_DS); + sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs; + + netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes); + + if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS))) { + netif_tx_stop_queue(sq->txq); + sq->stats.stopped++; + } + + if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) + mlx5e_tx_notify_hw(sq, wqe); + + sq->stats.packets++; + return NETDEV_TX_OK; + +dma_unmap_wqe_err: + sq->stats.dropped++; + mlx5e_dma_unmap_wqe_err(sq, skb); + + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int ix = skb->queue_mapping; + int tc = 0; + struct mlx5e_channel *c = priv->channel[ix]; + struct mlx5e_sq *sq = &c->sq[tc]; + + return mlx5e_sq_xmit(sq, skb); +} + +netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int ix = skb->queue_mapping & priv->queue_mapping_channel_mask; + int tc = skb->queue_mapping >> priv->order_base_2_num_channels; + struct mlx5e_channel *c = priv->channel[ix]; + struct mlx5e_sq *sq = &c->sq[tc]; + + return mlx5e_sq_xmit(sq, skb); +} + +bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) +{ + struct mlx5e_sq *sq; + u32 dma_fifo_cc; + u32 nbytes; + u16 npkts; + u16 sqcc; + int i; + + /* avoid accessing cq (dma coherent memory) if not needed */ + if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) + return false; + + sq = cq->sqrq; + + npkts = 0; + nbytes = 0; + + /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), + * otherwise a cq overrun may occur + */ + sqcc = sq->cc; + + /* avoid dirtying sq cache line every cqe */ + dma_fifo_cc = sq->dma_fifo_cc; + + for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) { + struct mlx5_cqe64 *cqe; + struct sk_buff *skb; + u16 ci; + int j; + + cqe = mlx5e_get_cqe(cq); + if (!cqe) + break; + + ci = sqcc & sq->wq.sz_m1; + skb = sq->skb[ci]; + + if (unlikely(!skb)) { /* nop */ + sq->stats.nop++; + sqcc++; + goto free_skb; + } + + for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) { + dma_addr_t addr; + u32 size; + + mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size); + dma_fifo_cc++; + dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE); + } + + npkts++; + nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes; + sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs; + +free_skb: + dev_kfree_skb(skb); + } + + mlx5_cqwq_update_db_record(&cq->wq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + sq->dma_fifo_cc = dma_fifo_cc; + sq->cc = sqcc; + + netdev_tx_completed_queue(sq->txq, npkts, nbytes); + + if (netif_tx_queue_stopped(sq->txq) && + mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS) && + likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) { + netif_tx_wake_queue(sq->txq); + sq->stats.wake++; + } + if (i == MLX5E_TX_CQ_POLL_BUDGET) { + set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); + return true; + } + + return false; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c new file mode 100644 index 000000000000..088bc424157c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "en.h" + +struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq) +{ + struct mlx5_cqwq *wq = &cq->wq; + u32 ci = mlx5_cqwq_get_ci(wq); + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); + int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK; + int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1; + + if (cqe_ownership_bit != sw_ownership_val) + return NULL; + + mlx5_cqwq_pop(wq); + + /* ensure cqe content is read after cqe ownership bit */ + rmb(); + + return cqe; +} + +int mlx5e_napi_poll(struct napi_struct *napi, int budget) +{ + struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, + napi); + bool busy = false; + int i; + + clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags); + + for (i = 0; i < c->num_tc; i++) + busy |= mlx5e_poll_tx_cq(&c->sq[i].cq); + + busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget); + + busy |= mlx5e_post_rx_wqes(c->rq.cq.sqrq); + + if (busy) + return budget; + + napi_complete(napi); + + /* avoid losing completion event during/after polling cqs */ + if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) { + napi_schedule(napi); + return 0; + } + + for (i = 0; i < c->num_tc; i++) + mlx5e_cq_arm(&c->sq[i].cq); + mlx5e_cq_arm(&c->rq.cq); + + return 0; +} + +void mlx5e_completion_event(struct mlx5_core_cq *mcq) +{ + struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); + + set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); + set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); + barrier(); + napi_schedule(cq->napi); +} + +void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event) +{ + struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); + struct mlx5e_channel *c = cq->channel; + struct mlx5e_priv *priv = c->priv; + struct net_device *netdev = priv->netdev; + + netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n", + __func__, mcq->cqn, event); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 58800e4f3958..a40b96d4c662 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -339,15 +339,14 @@ static void init_eq_buf(struct mlx5_eq *eq) int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq 
*eq, u8 vecidx, int nent, u64 mask, const char *name, struct mlx5_uar *uar) { - struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_priv *priv = &dev->priv; struct mlx5_create_eq_mbox_in *in; struct mlx5_create_eq_mbox_out out; int err; int inlen; eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); - err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE, - &eq->buf); + err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf); if (err) return err; @@ -378,14 +377,15 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, goto err_in; } - snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s", + snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s", name, pci_name(dev->pdev)); + eq->eqn = out.eq_number; eq->irqn = vecidx; eq->dev = dev; eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; - err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, - eq->name, eq); + err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0, + priv->irq_info[vecidx].name, eq); if (err) goto err_eq; @@ -401,7 +401,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, return 0; err_irq: - free_irq(table->msix_arr[vecidx].vector, eq); + free_irq(priv->msix_arr[vecidx].vector, eq); err_eq: mlx5_cmd_destroy_eq(dev, eq->eqn); @@ -417,16 +417,15 @@ EXPORT_SYMBOL_GPL(mlx5_create_map_eq); int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) { - struct mlx5_eq_table *table = &dev->priv.eq_table; int err; mlx5_debug_eq_remove(dev, eq); - free_irq(table->msix_arr[eq->irqn].vector, eq); + free_irq(dev->priv.msix_arr[eq->irqn].vector, eq); err = mlx5_cmd_destroy_eq(dev, eq->eqn); if (err) mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", eq->eqn); - synchronize_irq(table->msix_arr[eq->irqn].vector); + synchronize_irq(dev->priv.msix_arr[eq->irqn].vector); mlx5_buf_free(dev, &eq->buf); return err; @@ -456,7 +455,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) u32 async_event_mask = MLX5_ASYNC_EVENT_MASK; int err; - if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG) + if (MLX5_CAP_GEN(dev, pg)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT); err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, @@ -479,7 +478,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) err = mlx5_create_map_eq(dev, &table->pages_eq, MLX5_EQ_VEC_PAGES, - dev->caps.gen.max_vf + 1, + /* TODO: sriov max_vf + */ 1, 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", &dev->priv.uuari.uars[0]); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c new file mode 100644 index 000000000000..ca90b9bc3b95 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c @@ -0,0 +1,422 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/export.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/flow_table.h> +#include "mlx5_core.h" + +struct mlx5_ftg { + struct mlx5_flow_table_group g; + u32 id; + u32 start_ix; +}; + +struct mlx5_flow_table { + struct mlx5_core_dev *dev; + u8 level; + u8 type; + u32 id; + struct mutex mutex; /* sync bitmap alloc */ + u16 num_groups; + struct mlx5_ftg *group; + unsigned long *bitmap; + u32 size; +}; + +static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix, + u32 flow_index, void *flow_context) +{ + u32 out[MLX5_ST_SZ_DW(set_fte_out)]; + u32 *in; + void *in_flow_context; + int fcdls = + MLX5_GET(flow_context, flow_context, destination_list_size) * + MLX5_ST_SZ_BYTES(dest_format_struct); + int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls; + int err; + + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(ft->dev, "failed to allocate inbox\n"); + return -ENOMEM; + } + + MLX5_SET(set_fte_in, in, table_type, ft->type); + MLX5_SET(set_fte_in, in, table_id, ft->id); + MLX5_SET(set_fte_in, in, flow_index, flow_index); + MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); + + in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); + memcpy(in_flow_context, flow_context, + MLX5_ST_SZ_BYTES(flow_context) + fcdls); + + MLX5_SET(flow_context, in_flow_context, group_id, + ft->group[group_ix].id); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out, + sizeof(out)); + kvfree(in); + + return err; +} + +static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index) +{ + u32 in[MLX5_ST_SZ_DW(delete_fte_in)]; + u32 out[MLX5_ST_SZ_DW(delete_fte_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + +#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v) + MLX5_SET_DFTEI(in, table_type, ft->type); + MLX5_SET_DFTEI(in, table_id, ft->id); + MLX5_SET_DFTEI(in, flow_index, flow_index); + MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); + + mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); +} + +static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i) +{ + u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + +#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v) + MLX5_SET_DFGI(in, table_type, ft->type); + MLX5_SET_DFGI(in, table_id, ft->id); + MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP); + MLX5_SET_DFGI(in, group_id, ft->group[i].id); + mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i) +{ + u32 out[MLX5_ST_SZ_DW(create_flow_group_out)]; + u32 *in; + void *in_match_criteria; + int inlen = 
MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_table_group *g = &ft->group[i].g; + u32 start_ix = ft->group[i].start_ix; + u32 end_ix = start_ix + (1 << g->log_sz) - 1; + int err; + + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(ft->dev, "failed to allocate inbox\n"); + return -ENOMEM; + } + in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in, + match_criteria); + + memset(out, 0, sizeof(out)); + +#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v) + MLX5_SET_CFGI(in, table_type, ft->type); + MLX5_SET_CFGI(in, table_id, ft->id); + MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); + MLX5_SET_CFGI(in, start_flow_index, start_ix); + MLX5_SET_CFGI(in, end_flow_index, end_ix); + MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable); + + memcpy(in_match_criteria, g->match_criteria, + MLX5_ST_SZ_BYTES(fte_match_param)); + + err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out, + sizeof(out)); + if (!err) + ft->group[i].id = MLX5_GET(create_flow_group_out, out, + group_id); + + kvfree(in); + + return err; +} + +static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft) +{ + int i; + + for (i = 0; i < ft->num_groups; i++) + mlx5_destroy_flow_group_cmd(ft, i); +} + +static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft) +{ + int err; + int i; + + for (i = 0; i < ft->num_groups; i++) { + err = mlx5_create_flow_group_cmd(ft, i); + if (err) + goto err_destroy_flow_table_groups; + } + + return 0; + +err_destroy_flow_table_groups: + for (i--; i >= 0; i--) + mlx5_destroy_flow_group_cmd(ft, i); + + return err; +} + +static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft) +{ + u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]; + u32 out[MLX5_ST_SZ_DW(create_flow_table_out)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(create_flow_table_in, in, table_type, ft->type); + MLX5_SET(create_flow_table_in, in, level, ft->level); + MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size)); + + MLX5_SET(create_flow_table_in, in, opcode, + MLX5_CMD_OP_CREATE_FLOW_TABLE); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, + sizeof(out)); + if (err) + return err; + + ft->id = MLX5_GET(create_flow_table_out, out, table_id); + + return 0; +} + +static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft) +{ + u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + +#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v) + MLX5_SET_DFTI(in, table_type, ft->type); + MLX5_SET_DFTI(in, table_id, ft->id); + MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE); + + mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable, + u32 *match_criteria, int *group_ix) +{ + void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers); + void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria, + misc_parameters); + void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria, + inner_headers); + int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4); + int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc); + int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4); + int i; + + for (i = 0; i < ft->num_groups; i++) { + struct mlx5_flow_table_group *g = &ft->group[i].g; + void *gmc_outer = 
MLX5_ADDR_OF(fte_match_param, + g->match_criteria, + outer_headers); + void *gmc_misc = MLX5_ADDR_OF(fte_match_param, + g->match_criteria, + misc_parameters); + void *gmc_inner = MLX5_ADDR_OF(fte_match_param, + g->match_criteria, + inner_headers); + + if (g->match_criteria_enable != match_criteria_enable) + continue; + + if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) + if (memcmp(mc_outer, gmc_outer, mc_outer_sz)) + continue; + + if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS) + if (memcmp(mc_misc, gmc_misc, mc_misc_sz)) + continue; + + if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS) + if (memcmp(mc_inner, gmc_inner, mc_inner_sz)) + continue; + + *group_ix = i; + return 0; + } + + return -EINVAL; +} + +static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix) +{ + struct mlx5_ftg *g = &ft->group[group_ix]; + int err = 0; + + mutex_lock(&ft->mutex); + + *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix); + if (*ix >= (g->start_ix + (1 << g->g.log_sz))) + err = -ENOSPC; + else + __set_bit(*ix, ft->bitmap); + + mutex_unlock(&ft->mutex); + + return err; +} + +static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix) +{ + __clear_bit(ix, ft->bitmap); +} + +int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable, + void *match_criteria, void *flow_context, + u32 *flow_index) +{ + struct mlx5_flow_table *ft = flow_table; + int group_ix; + int err; + + err = mlx5_find_group(ft, match_criteria_enable, match_criteria, + &group_ix); + if (err) { + mlx5_core_warn(ft->dev, "mlx5_find_group failed\n"); + return err; + } + + err = alloc_flow_index(ft, group_ix, flow_index); + if (err) { + mlx5_core_warn(ft->dev, "alloc_flow_index failed\n"); + return err; + } + + return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context); +} +EXPORT_SYMBOL(mlx5_add_flow_table_entry); + +void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index) +{ + struct mlx5_flow_table *ft = flow_table; + + mlx5_del_flow_entry_cmd(ft, flow_index); + mlx5_free_flow_index(ft, flow_index); +} +EXPORT_SYMBOL(mlx5_del_flow_table_entry); + +void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type, + u16 num_groups, + struct mlx5_flow_table_group *group) +{ + struct mlx5_flow_table *ft; + u32 start_ix = 0; + u32 ft_size = 0; + void *gr; + void *bm; + int err; + int i; + + for (i = 0; i < num_groups; i++) + ft_size += (1 << group[i].log_sz); + + ft = kzalloc(sizeof(*ft), GFP_KERNEL); + gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL); + bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL); + if (!ft || !gr || !bm) + goto err_free_ft; + + ft->group = gr; + ft->bitmap = bm; + ft->num_groups = num_groups; + ft->level = level; + ft->type = table_type; + ft->size = ft_size; + ft->dev = dev; + mutex_init(&ft->mutex); + + for (i = 0; i < ft->num_groups; i++) { + memcpy(&ft->group[i].g, &group[i], sizeof(*group)); + ft->group[i].start_ix = start_ix; + start_ix += 1 << group[i].log_sz; + } + + err = mlx5_create_flow_table_cmd(ft); + if (err) + goto err_free_ft; + + err = mlx5_create_flow_table_groups(ft); + if (err) + goto err_destroy_flow_table_cmd; + + return ft; + +err_destroy_flow_table_cmd: + mlx5_destroy_flow_table_cmd(ft); + +err_free_ft: + mlx5_core_warn(dev, "failed to alloc flow table\n"); + kfree(bm); + kfree(gr); + kfree(ft); + + return NULL; +} +EXPORT_SYMBOL(mlx5_create_flow_table); + +void mlx5_destroy_flow_table(void *flow_table) +{ + struct mlx5_flow_table *ft = flow_table; + + 
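/* teardown mirrors creation: destroy the flow groups before their parent table */ + 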
mlx5_destroy_flow_table_groups(ft); + mlx5_destroy_flow_table_cmd(ft); + kfree(ft->bitmap); + kfree(ft->group); + kfree(ft); +} +EXPORT_SYMBOL(mlx5_destroy_flow_table); + +u32 mlx5_get_flow_table_id(void *flow_table) +{ + struct mlx5_flow_table *ft = flow_table; + + return ft->id; +} +EXPORT_SYMBOL(mlx5_get_flow_table_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 4b4cda3bcc5f..9335e5ae18cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -35,79 +35,133 @@ #include #include "mlx5_core.h" -int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev) +static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out, + int outlen) { - struct mlx5_cmd_query_adapter_mbox_out *out; - struct mlx5_cmd_query_adapter_mbox_in in; + u32 in[MLX5_ST_SZ_DW(query_adapter_in)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); +} + +int mlx5_query_board_id(struct mlx5_core_dev *dev) +{ + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_adapter_out); int err; - out = kzalloc(sizeof(*out), GFP_KERNEL); + out = kzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; - memset(&in, 0, sizeof(in)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER); - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + err = mlx5_cmd_query_adapter(dev, out, outlen); if (err) - goto out_out; - - if (out->hdr.status) { - err = mlx5_cmd_status_to_err(&out->hdr); - goto out_out; - } + goto out; - memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid)); + memcpy(dev->board_id, + MLX5_ADDR_OF(query_adapter_out, out, + query_adapter_struct.vsd_contd_psid), + MLX5_FLD_SZ_BYTES(query_adapter_out, + query_adapter_struct.vsd_contd_psid)); -out_out: +out: kfree(out); - return err; } -int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps) +int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id) { - return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR); + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_adapter_out); + int err; + + out = kzalloc(outlen, GFP_KERNEL); + if (!out) + return -ENOMEM; + + err = mlx5_cmd_query_adapter(mdev, out, outlen); + if (err) + goto out; + + *vendor_id = MLX5_GET(query_adapter_out, out, + query_adapter_struct.ieee_vendor_id); +out: + kfree(out); + return err; } +EXPORT_SYMBOL(mlx5_core_query_vendor_id); -int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps) +int mlx5_query_hca_caps(struct mlx5_core_dev *dev) { - u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; - int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); - void *out; int err; - if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)) - return -ENOTSUPP; + err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; - memset(in, 0, sizeof(in)); - out = kzalloc(out_sz, GFP_KERNEL); - if (!out) - return -ENOMEM; - MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); - MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); + err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX); if (err) - goto out; + return err; - err = mlx5_cmd_status_to_err_v2(out); - if (err) { - mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err); - goto out; + if (MLX5_CAP_GEN(dev, eth_net_offloads)) { + 
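/* each supported capability type is cached twice below: the currently + * enabled values (HCA_CAP_OPMOD_GET_CUR) and the device limits + * (HCA_CAP_OPMOD_GET_MAX) + */ + 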
err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; } - memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct), - sizeof(*caps)); + if (MLX5_CAP_GEN(dev, pg)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ODP, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ODP, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } - mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n", - be32_to_cpu(caps->per_transport_caps.rc_odp_caps), - be32_to_cpu(caps->per_transport_caps.uc_odp_caps), - be32_to_cpu(caps->per_transport_caps.ud_odp_caps)); + if (MLX5_CAP_GEN(dev, atomic)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } -out: - kfree(out); - return err; + if (MLX5_CAP_GEN(dev, roce)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } + + if (MLX5_CAP_GEN(dev, nic_flow_table)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } + return 0; } -EXPORT_SYMBOL(mlx5_query_odp_caps); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 28425e5ea91f..afad529838de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -47,10 +48,6 @@ #include #include "mlx5_core.h" -#define DRIVER_NAME "mlx5_core" -#define DRIVER_VERSION "3.0" -#define DRIVER_RELDATE "January 2015" - MODULE_AUTHOR("Eli Cohen "); MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); MODULE_LICENSE("Dual BSD/GPL"); @@ -208,24 +205,28 @@ static void release_bar(struct pci_dev *pdev) static int mlx5_enable_msix(struct mlx5_core_dev *dev) { - struct mlx5_eq_table *table = &dev->priv.eq_table; - int num_eqs = 1 << dev->caps.gen.log_max_eq; + struct mlx5_priv *priv = &dev->priv; + struct mlx5_eq_table *table = &priv->eq_table; + int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); int nvec; int i; - nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE; + nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + + MLX5_EQ_VEC_COMP_BASE; nvec = min_t(int, nvec, num_eqs); if (nvec <= MLX5_EQ_VEC_COMP_BASE) return -ENOMEM; - table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL); - if (!table->msix_arr) - return -ENOMEM; + priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL); + + priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL); + if (!priv->msix_arr || !priv->irq_info) + goto err_free_msix; for (i = 0; i < nvec; i++) - table->msix_arr[i].entry = i; + priv->msix_arr[i].entry = i; - nvec = pci_enable_msix_range(dev->pdev, table->msix_arr, + nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr, MLX5_EQ_VEC_COMP_BASE + 1, nvec); if (nvec < 0) return nvec; @@ -233,14 +234,20 @@ static int mlx5_enable_msix(struct 
mlx5_core_dev *dev) table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; return 0; + +err_free_msix: + kfree(priv->irq_info); + kfree(priv->msix_arr); + return -ENOMEM; } static void mlx5_disable_msix(struct mlx5_core_dev *dev) { - struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_priv *priv = &dev->priv; pci_disable_msix(dev->pdev); - kfree(table->msix_arr); + kfree(priv->irq_info); + kfree(priv->msix_arr); } struct mlx5_reg_host_endianess { @@ -277,98 +284,20 @@ static u16 to_fw_pkey_sz(u32 size) } } -/* selectively copy writable fields clearing any reserved area - */ -static void copy_rw_fields(void *to, struct mlx5_caps *from) -{ - __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22); - u64 v64; - - MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp); - MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp); - MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp); - MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size); - MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size)); - MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12); - v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK; - *flags_off = cpu_to_be64(v64); -} - -static u16 get_pkey_table_size(int pkey) -{ - if (pkey > MLX5_MAX_LOG_PKEY_TABLE) - return 0; - - return MLX5_MIN_PKEY_TABLE_SIZE << pkey; -} - -static void fw2drv_caps(struct mlx5_caps *caps, void *out) -{ - struct mlx5_general_caps *gen = &caps->gen; - - gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz); - gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz); - gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp); - gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz); - gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs); - gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz); - gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq); - gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz); - gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey); - gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq); - gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection); - gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz); - gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size); - gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size); - gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc); - gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc); - gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp); - gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp); - gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt); - gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size)); - gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay); - gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports); - gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg); - gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support); - gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22)); - pr_debug("flags = 0x%llx\n", gen->flags); - gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz); - gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz); - gen->bf_reg_size = 
MLX5_GET_PR(cmd_hca_cap, out, bf); - gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size); - gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq); - gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq); - gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc); - gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg); - gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd); - gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd); - gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz); -} - -static const char *caps_opmod_str(u16 opmod) -{ - switch (opmod) { - case HCA_CAP_OPMOD_GET_MAX: - return "GET_MAX"; - case HCA_CAP_OPMOD_GET_CUR: - return "GET_CUR"; - default: - return "Invalid"; - } -} - -int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, - u16 opmod) +int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, + enum mlx5_cap_mode cap_mode) { u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); - void *out; + void *out, *hca_caps; + u16 opmod = (cap_type << 1) | (cap_mode & 0x01); int err; memset(in, 0, sizeof(in)); out = kzalloc(out_sz, GFP_KERNEL); if (!out) return -ENOMEM; + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); MLX5_SET(query_hca_cap_in, in, op_mod, opmod); err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); @@ -377,12 +306,30 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, err = mlx5_cmd_status_to_err_v2(out); if (err) { - mlx5_core_warn(dev, "query max hca cap failed, %d\n", err); + mlx5_core_warn(dev, + "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n", + cap_type, cap_mode, err); goto query_ex; } - mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod)); - fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct)); + hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability); + + switch (cap_mode) { + case HCA_CAP_OPMOD_GET_MAX: + memcpy(dev->hca_caps_max[cap_type], hca_caps, + MLX5_UN_SZ_BYTES(hca_cap_union)); + break; + case HCA_CAP_OPMOD_GET_CUR: + memcpy(dev->hca_caps_cur[cap_type], hca_caps, + MLX5_UN_SZ_BYTES(hca_cap_union)); + break; + default: + mlx5_core_warn(dev, + "Tried to query dev cap type(%x) with wrong opmode(%x)\n", + cap_type, cap_mode); + err = -EINVAL; + break; + } query_ex: kfree(out); return err; @@ -409,49 +356,45 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) { void *set_ctx = NULL; struct mlx5_profile *prof = dev->profile; - struct mlx5_caps *cur_caps = NULL; - struct mlx5_caps *max_caps = NULL; int err = -ENOMEM; int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); + void *set_hca_cap; set_ctx = kzalloc(set_sz, GFP_KERNEL); if (!set_ctx) goto query_ex; - max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL); - if (!max_caps) - goto query_ex; - - cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL); - if (!cur_caps) - goto query_ex; - - err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX); + err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX); if (err) goto query_ex; - err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR); + err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR); if (err) goto query_ex; + set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, + capability); + memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL], + MLX5_ST_SZ_BYTES(cmd_hca_cap)); + + mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", + 
mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)), + 128); /* we limit the size of the pkey table to 128 entries for now */ - cur_caps->gen.pkey_table_size = 128; + MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size, + to_fw_pkey_sz(128)); if (prof->mask & MLX5_PROF_MASK_QP_SIZE) - cur_caps->gen.log_max_qp = prof->log_max_qp; + MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp, + prof->log_max_qp); - /* disable checksum */ - cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; + /* disable cmdif checksum */ + MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); - copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct), - cur_caps); err = set_caps(dev, set_ctx, set_sz); query_ex: - kfree(cur_caps); - kfree(max_caps); kfree(set_ctx); - return err; } @@ -507,6 +450,74 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev) return 0; } +static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; + struct msix_entry *msix = priv->msix_arr; + int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; + int numa_node = dev_to_node(&mdev->pdev->dev); + int err; + + if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { + mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); + return -ENOMEM; + } + + cpumask_set_cpu(cpumask_local_spread(i, numa_node), + priv->irq_info[i].mask); + + err = irq_set_affinity_hint(irq, priv->irq_info[i].mask); + if (err) { + mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x", + irq); + goto err_clear_mask; + } + + return 0; + +err_clear_mask: + free_cpumask_var(priv->irq_info[i].mask); + return err; +} + +static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; + struct msix_entry *msix = priv->msix_arr; + int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; + + irq_set_affinity_hint(irq, NULL); + free_cpumask_var(priv->irq_info[i].mask); +} + +static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev) +{ + int err; + int i; + + for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) { + err = mlx5_irq_set_affinity_hint(mdev, i); + if (err) + goto err_out; + } + + return 0; + +err_out: + for (i--; i >= 0; i--) + mlx5_irq_clear_affinity_hint(mdev, i); + + return err; +} + +static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev) +{ + int i; + + for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) + mlx5_irq_clear_affinity_hint(mdev, i); +} + int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn) { struct mlx5_eq_table *table = &dev->priv.eq_table; @@ -549,7 +560,7 @@ static void free_comp_eqs(struct mlx5_core_dev *dev) static int alloc_comp_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = &dev->priv.eq_table; - char name[MLX5_MAX_EQ_NAME]; + char name[MLX5_MAX_IRQ_NAME]; struct mlx5_eq *eq; int ncomp_vec; int nent; @@ -566,7 +577,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev) goto clean; } - snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); + snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); err = mlx5_create_map_eq(dev, eq, i + MLX5_EQ_VEC_COMP_BASE, nent, 0, name, &dev->priv.uuari.uars[0]); @@ -588,6 +599,61 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev) return err; } +#ifdef CONFIG_MLX5_CORE_EN +static int mlx5_core_set_issi(struct mlx5_core_dev *dev) +{ + u32 query_in[MLX5_ST_SZ_DW(query_issi_in)]; + u32 query_out[MLX5_ST_SZ_DW(query_issi_out)]; + u32 set_in[MLX5_ST_SZ_DW(set_issi_in)]; + u32 
set_out[MLX5_ST_SZ_DW(set_issi_out)]; + int err; + u32 sup_issi; + + memset(query_in, 0, sizeof(query_in)); + memset(query_out, 0, sizeof(query_out)); + + MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI); + + err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in), + query_out, sizeof(query_out)); + if (err) { + if (((struct mlx5_outbox_hdr *)query_out)->status == + MLX5_CMD_STAT_BAD_OP_ERR) { + pr_debug("Only ISSI 0 is supported\n"); + return 0; + } + + pr_err("failed to query ISSI\n"); + return err; + } + + sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0); + + if (sup_issi & (1 << 1)) { + memset(set_in, 0, sizeof(set_in)); + memset(set_out, 0, sizeof(set_out)); + + MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI); + MLX5_SET(set_issi_in, set_in, current_issi, 1); + + err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in), + set_out, sizeof(set_out)); + if (err) { + pr_err("failed to set ISSI=1\n"); + return err; + } + + dev->issi = 1; + + return 0; + } else if (sup_issi & (1 << 0) || !sup_issi) { + return 0; + } + + return -ENOTSUPP; +} +#endif + static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) { struct mlx5_priv *priv = &dev->priv; @@ -650,6 +716,14 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) goto err_pagealloc_cleanup; } +#ifdef CONFIG_MLX5_CORE_EN + err = mlx5_core_set_issi(dev); + if (err) { + dev_err(&pdev->dev, "failed to set issi\n"); + goto err_disable_hca; + } +#endif + err = mlx5_satisfy_startup_pages(dev, 1); if (err) { dev_err(&pdev->dev, "failed to allocate boot pages\n"); @@ -688,15 +762,15 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) mlx5_start_health_poll(dev); - err = mlx5_cmd_query_hca_cap(dev, &dev->caps); + err = mlx5_query_hca_caps(dev); if (err) { dev_err(&pdev->dev, "query hca failed\n"); goto err_stop_poll; } - err = mlx5_cmd_query_adapter(dev); + err = mlx5_query_board_id(dev); if (err) { - dev_err(&pdev->dev, "query adapter failed\n"); + dev_err(&pdev->dev, "query board id failed\n"); goto err_stop_poll; } @@ -730,6 +804,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) goto err_stop_eqs; } + err = mlx5_irq_set_affinity_hints(dev); + if (err) { + dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n"); + goto err_free_comp_eqs; + } + MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); mlx5_init_cq_table(dev); @@ -739,6 +819,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) return 0; +err_free_comp_eqs: + free_comp_eqs(dev); + err_stop_eqs: mlx5_stop_eqs(dev); @@ -793,6 +876,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev) mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); mlx5_cleanup_cq_table(dev); + mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); mlx5_free_uuars(dev, &priv->uuari); @@ -1048,6 +1132,10 @@ static int __init init(void) if (err) goto err_health; +#ifdef CONFIG_MLX5_CORE_EN + mlx5e_init(); +#endif + return 0; err_health: @@ -1060,6 +1148,9 @@ static int __init init(void) static void __exit cleanup(void) { +#ifdef CONFIG_MLX5_CORE_EN + mlx5e_cleanup(); +#endif pci_unregister_driver(&mlx5_core_driver); mlx5_health_cleanup(); destroy_workqueue(mlx5_core_wq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c index d79fd85d1dd5..d5a0c2d61a18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c @@ -91,7 +91,7 @@ int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG); memcpy(in.gid, mgid, sizeof(*mgid)); in.qpn = cpu_to_be32(qpn); err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index a051b906afdf..fc88ecaecb4b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -37,6 +37,10 @@ #include #include +#define DRIVER_NAME "mlx5_core" +#define DRIVER_VERSION "3.0-1" +#define DRIVER_RELDATE "January 2015" + extern int mlx5_core_debug_mask; #define mlx5_core_dbg(dev, format, ...) \ @@ -65,11 +69,25 @@ enum { MLX5_CMD_TIME, /* print command execution time */ }; +static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in, + int in_size, u32 *out, + int out_size) +{ + int err; + + err = mlx5_cmd_exec(dev, in, in_size, out, out_size); + if (err) + return err; + + return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out); +} -int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, - struct mlx5_caps *caps); -int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev); +int mlx5_query_hca_caps(struct mlx5_core_dev *dev); +int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); +void mlx5e_init(void); +void mlx5e_cleanup(void); + #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 49e90f2612d8..619d3baf19ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -102,3 +102,235 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps) return err; } EXPORT_SYMBOL_GPL(mlx5_set_port_caps); + +int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, + int ptys_size, int proto_mask, u8 local_port) +{ + u32 in[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(ptys_reg, in, local_port, local_port); + MLX5_SET(ptys_reg, in, proto_mask, proto_mask); + + err = mlx5_core_access_reg(dev, in, sizeof(in), ptys, + ptys_size, MLX5_REG_PTYS, 0, 0); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_ptys); + +int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, + u32 *proto_cap, int proto_mask) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1); + if (err) + return err; + + if (proto_mask == MLX5_PTYS_EN) + *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); + else + *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap); + +int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, + u32 *proto_admin, int proto_mask) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1); + if (err) + 
return err; + + if (proto_mask == MLX5_PTYS_EN) + *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); + else + *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin); + +int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, + u8 *link_width_oper, u8 local_port) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port); + if (err) + return err; + + *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper); + +int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, + u8 *proto_oper, int proto_mask, + u8 local_port) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port); + if (err) + return err; + + if (proto_mask == MLX5_PTYS_EN) + *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + else + *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper); + +int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, + int proto_mask) +{ + u32 in[MLX5_ST_SZ_DW(ptys_reg)]; + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(ptys_reg, in, local_port, 1); + MLX5_SET(ptys_reg, in, proto_mask, proto_mask); + if (proto_mask == MLX5_PTYS_EN) + MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin); + else + MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PTYS, 0, 1); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_set_port_proto); + +int mlx5_set_port_status(struct mlx5_core_dev *dev, + enum mlx5_port_status status) +{ + u32 in[MLX5_ST_SZ_DW(paos_reg)]; + u32 out[MLX5_ST_SZ_DW(paos_reg)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(paos_reg, in, admin_status, status); + MLX5_SET(paos_reg, in, ase, 1); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PAOS, 0, 1); +} + +int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status) +{ + u32 in[MLX5_ST_SZ_DW(paos_reg)]; + u32 out[MLX5_ST_SZ_DW(paos_reg)]; + int err; + + memset(in, 0, sizeof(in)); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PAOS, 0, 0); + if (err) + return err; + + *status = MLX5_GET(paos_reg, out, oper_status); + return err; +} + +static int mlx5_query_port_mtu(struct mlx5_core_dev *dev, + int *admin_mtu, int *max_mtu, int *oper_mtu, + u8 local_port) +{ + u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; + u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(pmtu_reg, in, local_port, local_port); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PMTU, 0, 0); + if (err) + return err; + + if (max_mtu) + *max_mtu = MLX5_GET(pmtu_reg, out, max_mtu); + if (oper_mtu) + *oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu); + if (admin_mtu) + *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu); + + return 0; +} + +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu) +{ + u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; + u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(pmtu_reg, in, admin_mtu, mtu); + MLX5_SET(pmtu_reg, in, local_port, 1); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_PMTU, 0, 1); +} +EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); + +int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int 
*max_mtu, + u8 local_port) +{ + return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, local_port); +} +EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu); + +int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, + u8 local_port) +{ + return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, local_port); +} +EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu); + +static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc, + int pvlc_size, u8 local_port) +{ + u32 in[MLX5_ST_SZ_DW(pvlc_reg)]; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(pvlc_reg, in, local_port, local_port); + + err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc, + pvlc_size, MLX5_REG_PVLC, 0, 0); + + return err; +} + +int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, + u8 *vl_hw_cap, u8 local_port) +{ + u32 out[MLX5_ST_SZ_DW(pvlc_reg)]; + int err; + + err = mlx5_query_port_pvlc(dev, out, sizeof(out), local_port); + if (err) + return err; + + *vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index dc7dbf7e9d98..8b494b562263 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -187,10 +187,17 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_destroy_qp_mbox_in din; struct mlx5_destroy_qp_mbox_out dout; int err; + void *qpc; memset(&out, 0, sizeof(out)); in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); + if (dev->issi) { + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); + /* 0xffffff means we ask to work with cqe version 0 */ + MLX5_SET(qpc, qpc, user_index, 0xffffff); + } + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); if (err) { mlx5_core_warn(dev, "ret %d\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index f9d25dcd03c1..c48f504ccbeb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -37,6 +37,7 @@ #include #include #include "mlx5_core.h" +#include "transobj.h" void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) { @@ -62,6 +63,74 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) complete(&srq->free); } +static int get_pas_size(void *srqc) +{ + u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12; + u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size); + u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride); + u32 page_offset = MLX5_GET(srqc, srqc, page_offset); + u32 po_quanta = 1 << (log_page_size - 6); + u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride); + u32 page_size = 1 << log_page_size; + u32 rq_sz_po = rq_sz + (page_offset * po_quanta); + u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size; + + return rq_num_pas * sizeof(u64); +} + +static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc) +{ + void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq); + + if (srqc_to_rmpc) { + switch (MLX5_GET(srqc, srqc, state)) { + case MLX5_SRQC_STATE_GOOD: + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); + break; + case MLX5_SRQC_STATE_ERROR: + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR); + break; + default: + pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__, + __LINE__, MLX5_GET(srqc, srqc, state)); + MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state)); + } + + MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature)); + MLX5_SET(wq, wq, 
log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size)); + MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4); + MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size)); + MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset)); + MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm)); + MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd)); + MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr)); + } else { + switch (MLX5_GET(rmpc, rmpc, state)) { + case MLX5_RMPC_STATE_RDY: + MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD); + break; + case MLX5_RMPC_STATE_ERR: + MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR); + break; + default: + pr_warn("%s: %d: Unknown rmp state = 0x%x\n", + __func__, __LINE__, + MLX5_GET(rmpc, rmpc, state)); + MLX5_SET(srqc, srqc, state, + MLX5_GET(rmpc, rmpc, state)); + } + + MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature)); + MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz)); + MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4); + MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz)); + MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset)); + MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm)); + MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd)); + MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr)); + } +} + struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) { struct mlx5_srq_table *table = &dev->priv.srq_table; @@ -79,26 +148,311 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) } EXPORT_SYMBOL(mlx5_core_get_srq); -int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, int inlen) +static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int inlen) { struct mlx5_create_srq_mbox_out out; - struct mlx5_srq_table *table = &dev->priv.srq_table; - struct mlx5_destroy_srq_mbox_in din; - struct mlx5_destroy_srq_mbox_out dout; int err; memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); - if (err) - return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); + err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out), + sizeof(out)); srq->srqn = be32_to_cpu(out.srqn) & 0xffffff; + return err; +} + +static int destroy_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq) +{ + struct mlx5_destroy_srq_mbox_in in; + struct mlx5_destroy_srq_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); + in.srqn = cpu_to_be32(srq->srqn); + + return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), + (u32 *)(&out), sizeof(out)); +} + +static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + u16 lwm, int is_srq) +{ + struct mlx5_arm_srq_mbox_in in; + struct mlx5_arm_srq_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ); + in.hdr.opmod = cpu_to_be16(!!is_srq); + in.srqn = cpu_to_be32(srq->srqn); + in.lwm = cpu_to_be16(lwm); + + return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), + sizeof(in), (u32 *)(&out), + sizeof(out)); +} + +static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out) +{ + struct mlx5_query_srq_mbox_in 
in; + + memset(&in, 0, sizeof(in)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ); + in.srqn = cpu_to_be32(srq->srqn); + + return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), + (u32 *)out, sizeof(*out)); +} + +static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, + int srq_inlen) +{ + u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)]; + void *create_in; + void *srqc; + void *xrc_srqc; + void *pas; + int pas_size; + int inlen; + int err; + + srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry); + pas_size = get_pas_size(srqc); + inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size; + create_in = mlx5_vzalloc(inlen); + if (!create_in) + return -ENOMEM; + + xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, + xrc_srq_context_entry); + pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas); + + memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc)); + memcpy(pas, in->pas, pas_size); + /* 0xffffff means we ask to work with cqe version 0 */ + MLX5_SET(xrc_srqc, xrc_srqc, user_index, 0xffffff); + MLX5_SET(create_xrc_srq_in, create_in, opcode, + MLX5_CMD_OP_CREATE_XRC_SRQ); + + memset(create_out, 0, sizeof(create_out)); + err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out, + sizeof(create_out)); + if (err) + goto out; + + srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn); +out: + kvfree(create_in); + return err; +} + +static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq) +{ + u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]; + u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)]; + + memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); + memset(xrcsrq_out, 0, sizeof(xrcsrq_out)); + + MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode, + MLX5_CMD_OP_DESTROY_XRC_SRQ); + MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); + + return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), + xrcsrq_out, sizeof(xrcsrq_out)); +} + +static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, u16 lwm) +{ + u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]; + u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)]; + + memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); + memset(xrcsrq_out, 0, sizeof(xrcsrq_out)); + + MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); + MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ); + MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); + MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm); + + return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), + xrcsrq_out, sizeof(xrcsrq_out)); +} + +static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out) +{ + u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)]; + u32 *xrcsrq_out; + void *srqc; + void *xrc_srqc; + int err; + + xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out)); + if (!xrcsrq_out) + return -ENOMEM; + memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); + + MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode, + MLX5_CMD_OP_QUERY_XRC_SRQ); + MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); + err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), + xrcsrq_out, + MLX5_ST_SZ_BYTES(query_xrc_srq_out)); + if (err) + goto out; + + xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out, + xrc_srq_context_entry); + srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); + memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc)); + +out: + 
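/* success and error paths both land on this label: the vzalloc'ed query mailbox is always released before returning */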
kvfree(xrcsrq_out); + return err; +} + +static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int srq_inlen) +{ + void *create_in; + void *rmpc; + void *srqc; + int pas_size; + int inlen; + int err; + + srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry); + pas_size = get_pas_size(srqc); + inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size; + create_in = mlx5_vzalloc(inlen); + if (!create_in) + return -ENOMEM; + + rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx); + + memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size); + rmpc_srqc_reformat(srqc, rmpc, true); + + err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn); + + kvfree(create_in); + return err; +} + +static int destroy_rmp_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq) +{ + return mlx5_core_destroy_rmp(dev, srq->srqn); +} + +static int arm_rmp_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, + u16 lwm) +{ + void *in; + void *rmpc; + void *wq; + void *bitmask; + int err; + + in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in)); + if (!in) + return -ENOMEM; + + rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx); + bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask); + wq = MLX5_ADDR_OF(rmpc, rmpc, wq); + + MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY); + MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn); + MLX5_SET(wq, wq, lwm, lwm); + MLX5_SET(rmp_bitmask, bitmask, lwm, 1); + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); + + err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in)); + + kvfree(in); + return err; +} + +static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out) +{ + u32 *rmp_out; + void *rmpc; + void *srqc; + int err; + + rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out)); + if (!rmp_out) + return -ENOMEM; + + err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out); + if (err) + goto out; + + srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); + rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context); + rmpc_srqc_reformat(srqc, rmpc, false); + +out: + kvfree(rmp_out); + return err; +} + +static int create_srq_split(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, + int inlen, int is_xrc) +{ + if (!dev->issi) + return create_srq_cmd(dev, srq, in, inlen); + else if (srq->common.res == MLX5_RES_XSRQ) + return create_xrc_srq_cmd(dev, srq, in, inlen); + else + return create_rmp_cmd(dev, srq, in, inlen); +} + +static int destroy_srq_split(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq) +{ + if (!dev->issi) + return destroy_srq_cmd(dev, srq); + else if (srq->common.res == MLX5_RES_XSRQ) + return destroy_xrc_srq_cmd(dev, srq); + else + return destroy_rmp_cmd(dev, srq); +} + +int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int inlen, + int is_xrc) +{ + int err; + struct mlx5_srq_table *table = &dev->priv.srq_table; + + srq->common.res = is_xrc ? 
MLX5_RES_XSRQ : MLX5_RES_SRQ; + + err = create_srq_split(dev, srq, in, inlen, is_xrc); + if (err) + return err; + atomic_set(&srq->refcount, 1); init_completion(&srq->free); @@ -107,25 +461,20 @@ int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, spin_unlock_irq(&table->lock); if (err) { mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn); - goto err_cmd; + goto err_destroy_srq_split; } return 0; -err_cmd: - memset(&din, 0, sizeof(din)); - memset(&dout, 0, sizeof(dout)); - din.srqn = cpu_to_be32(srq->srqn); - din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); - mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); +err_destroy_srq_split: + destroy_srq_split(dev, srq); + return err; } EXPORT_SYMBOL(mlx5_core_create_srq); int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) { - struct mlx5_destroy_srq_mbox_in in; - struct mlx5_destroy_srq_mbox_out out; struct mlx5_srq_table *table = &dev->priv.srq_table; struct mlx5_core_srq *tmp; int err; @@ -142,17 +491,10 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) return -EINVAL; } - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); - in.srqn = cpu_to_be32(srq->srqn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + err = destroy_srq_split(dev, srq); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - if (atomic_dec_and_test(&srq->refcount)) complete(&srq->free); wait_for_completion(&srq->free); @@ -164,48 +506,24 @@ EXPORT_SYMBOL(mlx5_core_destroy_srq); int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, struct mlx5_query_srq_mbox_out *out) { - struct mlx5_query_srq_mbox_in in; - int err; - - memset(&in, 0, sizeof(in)); - memset(out, 0, sizeof(*out)); - - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ); - in.srqn = cpu_to_be32(srq->srqn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); - if (err) - return err; - - if (out->hdr.status) - return mlx5_cmd_status_to_err(&out->hdr); - - return err; + if (!dev->issi) + return query_srq_cmd(dev, srq, out); + else if (srq->common.res == MLX5_RES_XSRQ) + return query_xrc_srq_cmd(dev, srq, out); + else + return query_rmp_cmd(dev, srq, out); } EXPORT_SYMBOL(mlx5_core_query_srq); int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq) { - struct mlx5_arm_srq_mbox_in in; - struct mlx5_arm_srq_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ); - in.hdr.opmod = cpu_to_be16(!!is_srq); - in.srqn = cpu_to_be32(srq->srqn); - in.lwm = cpu_to_be16(lwm); - - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - return err; + if (!dev->issi) + return arm_srq_cmd(dev, srq, lwm, is_srq); + else if (srq->common.res == MLX5_RES_XSRQ) + return arm_xrc_srq_cmd(dev, srq, lwm); + else + return arm_rmp_cmd(dev, srq, lwm); } EXPORT_SYMBOL(mlx5_core_arm_srq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c new file mode 100644 index 000000000000..7a120283de2c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include "mlx5_core.h" +#include "transobj.h" + +int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn) +{ + u32 out[MLX5_ST_SZ_DW(create_rq_out)]; + int err; + + MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *rqn = MLX5_GET(create_rq_out, out, rqn); + + return err; +} + +int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_rq_out)]; + + MLX5_SET(modify_rq_in, in, rqn, rqn); + MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); +} + +void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_rq_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_rq_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ); + MLX5_SET(destroy_rq_in, in, rqn, rqn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn) +{ + u32 out[MLX5_ST_SZ_DW(create_sq_out)]; + int err; + + MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *sqn = MLX5_GET(create_sq_out, out, sqn); + + return err; +} + +int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_sq_out)]; + + MLX5_SET(modify_sq_in, in, sqn, sqn); + MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); +} + +void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_sq_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_sq_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ); + MLX5_SET(destroy_sq_in, in, sqn, sqn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} + 
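+/*
+ * Every helper below follows the same mailbox pattern: the caller builds
+ * an MLX5_ST_SZ_DW()-sized "in" box, the helper stamps the opcode with
+ * MLX5_SET(), fires it through mlx5_cmd_exec_check_status() and, for
+ * create commands, reads the new object number back with MLX5_GET().
+ * A minimal caller sketch (the TIR context setup itself is elided):
+ *
+ *	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
+ *	u32 tirn;
+ *	int err;
+ *
+ *	err = mlx5_core_create_tir(dev, in, sizeof(in), &tirn);
+ *	if (!err)
+ *		mlx5_core_destroy_tir(dev, tirn);
+ */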
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tirn) +{ + u32 out[MLX5_ST_SZ_DW(create_tir_out)]; + int err; + + MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *tirn = MLX5_GET(create_tir_out, out, tirn); + + return err; +} + +void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_tir_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_tir_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR); + MLX5_SET(destroy_tir_in, in, tirn, tirn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tisn) +{ + u32 out[MLX5_ST_SZ_DW(create_tis_out)]; + int err; + + MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *tisn = MLX5_GET(create_tis_out, out, tisn); + + return err; +} + +void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_tis_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_tis_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS); + MLX5_SET(destroy_tis_in, in, tisn, tisn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rmpn) +{ + u32 out[MLX5_ST_SZ_DW(create_rmp_out)]; + int err; + + MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *rmpn = MLX5_GET(create_rmp_out, out, rmpn); + + return err; +} + +int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_rmp_out)]; + + MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); +} + +int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP); + MLX5_SET(destroy_rmp_in, in, rmpn, rmpn); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out) +{ + u32 in[MLX5_ST_SZ_DW(query_rmp_in)]; + int outlen = MLX5_ST_SZ_BYTES(query_rmp_out); + + memset(in, 0, sizeof(in)); + MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP); + MLX5_SET(query_rmp_in, in, rmpn, rmpn); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); +} + +int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm) +{ + void *in; + void *rmpc; + void *wq; + void *bitmask; + int err; + + in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in)); + if (!in) + return -ENOMEM; + + rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx); + bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask); + wq = MLX5_ADDR_OF(rmpc, rmpc, wq); + + MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY); + MLX5_SET(modify_rmp_in, in, rmpn, rmpn); + MLX5_SET(wq, wq, lwm, lwm); + MLX5_SET(rmp_bitmask, bitmask, lwm, 1); + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); + + 
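/* arming is just a MODIFY_RMP restricted to the limit water mark: rmp_bitmask selects lwm as the only context field firmware may update */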
err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in)); + + kvfree(in); + + return err; +} + +int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *xsrqn) +{ + u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)]; + int err; + + MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn); + + return err; +} + +int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ); + MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out) +{ + u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)]; + void *srqc; + void *xrc_srqc; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ); + MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn); + + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), + out, + MLX5_ST_SZ_BYTES(query_xrc_srq_out)); + if (!err) { + xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out, + xrc_srq_context_entry); + srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); + memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc)); + } + + return err; +} + +int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm) +{ + u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]; + u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); + MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn); + MLX5_SET(arm_xrc_srq_in, in, lwm, lwm); + MLX5_SET(arm_xrc_srq_in, in, op_mod, + MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h new file mode 100644 index 000000000000..90322c11361f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __TRANSOBJ_H__ +#define __TRANSOBJ_H__ + +int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rqn); +int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen); +void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn); +int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *sqn); +int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); +void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); +int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tirn); +void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); +int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tisn); +void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); +int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rmpn); +int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen); +int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn); +int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); +int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); +int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *xsrqn); +int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn); +int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out); +int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm); + +#endif /* __TRANSOBJ_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 5a89bb1d678a..9ef85873ceea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -175,12 +175,13 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) for (i = 0; i < tot_uuars; i++) { bf = &uuari->bfs[i]; - bf->buf_size = dev->caps.gen.bf_reg_size / 2; + bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2; bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; bf->reg = NULL; /* Add WC support */ - bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size + - MLX5_BF_OFFSET; + bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * + (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) + + MLX5_BF_OFFSET; bf->need_lock = need_uuar_lock(i); spin_lock_init(&bf->lock); spin_lock_init(&bf->lock32); @@ -223,3 +224,40 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) return 0; } + +int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) +{ + phys_addr_t pfn; + phys_addr_t uar_bar_start; + int err; + + err = mlx5_cmd_alloc_uar(mdev, &uar->index); + if (err) { + mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err); + return err; + } + + uar_bar_start = pci_resource_start(mdev->pdev, 0); + pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index; + uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!uar->map) { + err = -ENOMEM; + mlx5_core_warn(mdev, "ioremap() failed, %d\n", err); + goto err_free_uar; + } + + return 0; + +err_free_uar: + mlx5_cmd_free_uar(mdev, uar->index); + + return err; +} +EXPORT_SYMBOL(mlx5_alloc_map_uar); + +void 
mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) +{ + iounmap(uar->map); + mlx5_cmd_free_uar(mdev, uar->index); +} +EXPORT_SYMBOL(mlx5_unmap_free_uar); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c new file mode 100644 index 000000000000..b94177ebcf3a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -0,0 +1,345 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include "mlx5_core.h" + +u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod) +{ + u32 in[MLX5_ST_SZ_DW(query_vport_state_in)]; + u32 out[MLX5_ST_SZ_DW(query_vport_state_out)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_vport_state_in, in, opcode, + MLX5_CMD_OP_QUERY_VPORT_STATE); + MLX5_SET(query_vport_state_in, in, op_mod, opmod); + + err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, + sizeof(out)); + if (err) + mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n"); + + return MLX5_GET(query_vport_state_out, out, state); +} +EXPORT_SYMBOL(mlx5_query_vport_state); + +void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr) +{ + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + u8 *out_addr; + + out = mlx5_vzalloc(outlen); + if (!out) + return; + + out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out, + nic_vport_context.permanent_address); + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); + + memset(out, 0, outlen); + mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); + + ether_addr_copy(addr, &out_addr[2]); + + kvfree(out); +} +EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address); + +int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 gid_index, + union ib_gid *gid) +{ + int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in); + int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out); + int is_group_manager; + void *out = NULL; + void *in = NULL; + union ib_gid *tmp; + int tbsz; + int nout; + int err; + + is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); + tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size)); + mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n", + vf_num, gid_index, tbsz); + + if (gid_index > tbsz && gid_index != 0xffff) + return -EINVAL; + + if (gid_index == 0xffff) + nout = tbsz; + else + nout = 1; + + out_sz += nout * sizeof(*gid); + + in = kzalloc(in_sz, GFP_KERNEL); + out = kzalloc(out_sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID); + if (other_vport) { + if (is_group_manager) { + MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num); + MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1); + } else { + err = -EPERM; + goto out; + } + } + MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index); + + if (MLX5_CAP_GEN(dev, num_ports) == 2) + MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num); + + err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz); + if (err) + goto out; + + err = mlx5_cmd_status_to_err_v2(out); + if (err) + goto out; + + tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out); + gid->global.subnet_prefix = tmp->global.subnet_prefix; + gid->global.interface_id = tmp->global.interface_id; + +out: + kfree(in); + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid); + +int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 pkey_index, + u16 *pkey) +{ + int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in); + int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out); + int is_group_manager; + void *out = NULL; + void *in = NULL; + void *pkarr; + int nout; + int tbsz; + int err; + int i; + + is_group_manager = MLX5_CAP_GEN(dev, 
vport_group_manager); + + tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)); + if (pkey_index > tbsz && pkey_index != 0xffff) + return -EINVAL; + + if (pkey_index == 0xffff) + nout = tbsz; + else + nout = 1; + + out_sz += nout * MLX5_ST_SZ_BYTES(pkey); + + in = kzalloc(in_sz, GFP_KERNEL); + out = kzalloc(out_sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY); + if (other_vport) { + if (is_group_manager) { + MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num); + MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1); + } else { + err = -EPERM; + goto out; + } + } + MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index); + + if (MLX5_CAP_GEN(dev, num_ports) == 2) + MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num); + + err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz); + if (err) + goto out; + + err = mlx5_cmd_status_to_err_v2(out); + if (err) + goto out; + + pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey); + for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey)) + *pkey = MLX5_GET_PR(pkey, pkarr, pkey); + +out: + kfree(in); + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey); + +int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, + u8 other_vport, u8 port_num, + u16 vf_num, + struct mlx5_hca_vport_context *rep) +{ + int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out); + int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)]; + int is_group_manager; + void *out; + void *ctx; + int err; + + is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); + + memset(in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT); + + if (other_vport) { + if (is_group_manager) { + MLX5_SET(query_hca_vport_context_in, in, other_vport, 1); + MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num); + } else { + err = -EPERM; + goto ex; + } + } + + if (MLX5_CAP_GEN(dev, num_ports) == 2) + MLX5_SET(query_hca_vport_context_in, in, port_num, port_num); + + err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); + if (err) + goto ex; + err = mlx5_cmd_status_to_err_v2(out); + if (err) + goto ex; + + ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context); + rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select); + rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware); + rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi); + rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw); + rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy); + rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx, + port_physical_state); + rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state); + rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx, + port_physical_state); + rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid); + rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid); + rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1); + rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx, + cap_mask1_field_select); + rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2); + rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx, + cap_mask2_field_select); + rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid); + rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx, + 
init_type_reply); + rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc); + rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx, + subnet_timeout); + rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid); + rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl); + rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx, + qkey_violation_counter); + rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx, + pkey_violation_counter); + rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required); + rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx, + system_image_guid); + +ex: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context); + +int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, + u64 *sys_image_guid) +{ + struct mlx5_hca_vport_context *rep; + int err; + + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (!rep) + return -ENOMEM; + + err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep); + if (!err) + *sys_image_guid = rep->sys_image_guid; + + kfree(rep); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid); + +int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, + u64 *node_guid) +{ + struct mlx5_hca_vport_context *rep; + int err; + + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (!rep) + return -ENOMEM; + + err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep); + if (!err) + *node_guid = rep->node_guid; + + kfree(rep); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c new file mode 100644 index 000000000000..8388411582cf --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include "wq.h" +#include "mlx5_core.h" + +u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) +{ + return (u32)wq->sz_m1 + 1; +} + +u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) +{ + return wq->sz_m1 + 1; +} + +u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) +{ + return (u32)wq->sz_m1 + 1; +} + +static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq) +{ + return mlx5_wq_cyc_get_size(wq) << wq->log_stride; +} + +static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq) +{ + return mlx5_cqwq_get_size(wq) << wq->log_stride; +} + +static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq) +{ + return mlx5_wq_ll_get_size(wq) << wq->log_stride; +} + +int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_cyc *wq, + struct mlx5_wq_ctrl *wq_ctrl) +{ + int err; + + wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); + wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; + + err = mlx5_db_alloc(mdev, &wq_ctrl->db); + if (err) { + mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); + return err; + } + + err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf); + if (err) { + mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); + goto err_db_free; + } + + wq->buf = wq_ctrl->buf.direct.buf; + wq->db = wq_ctrl->db.db; + + wq_ctrl->mdev = mdev; + + return 0; + +err_db_free: + mlx5_db_free(mdev, &wq_ctrl->db); + + return err; +} + +int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *cqc, struct mlx5_cqwq *wq, + struct mlx5_wq_ctrl *wq_ctrl) +{ + int err; + + wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz); + wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size); + wq->sz_m1 = (1 << wq->log_sz) - 1; + + err = mlx5_db_alloc(mdev, &wq_ctrl->db); + if (err) { + mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); + return err; + } + + err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf); + if (err) { + mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); + goto err_db_free; + } + + wq->buf = wq_ctrl->buf.direct.buf; + wq->db = wq_ctrl->db.db; + + wq_ctrl->mdev = mdev; + + return 0; + +err_db_free: + mlx5_db_free(mdev, &wq_ctrl->db); + + return err; +} + +int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_ll *wq, + struct mlx5_wq_ctrl *wq_ctrl) +{ + struct mlx5_wqe_srq_next_seg *next_seg; + int err; + int i; + + wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); + wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; + + err = mlx5_db_alloc(mdev, &wq_ctrl->db); + if (err) { + mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); + return err; + } + + err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf); + if (err) { + mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); + goto err_db_free; + } + + wq->buf = wq_ctrl->buf.direct.buf; + wq->db = wq_ctrl->db.db; + + for (i = 0; i < wq->sz_m1; i++) { + next_seg = mlx5_wq_ll_get_wqe(wq, i); + next_seg->next_wqe_index = cpu_to_be16(i + 1); + } + next_seg = mlx5_wq_ll_get_wqe(wq, i); + wq->tail_next = &next_seg->next_wqe_index; + + wq_ctrl->mdev = mdev; + + return 0; + +err_db_free: + mlx5_db_free(mdev, &wq_ctrl->db); + + return err; +} + +void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl) +{ + mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf); + mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h new file mode 100644 index 
000000000000..e0ddd69fb429 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5_WQ_H__ +#define __MLX5_WQ_H__ + +#include + +struct mlx5_wq_param { + int linear; + int numa; +}; + +struct mlx5_wq_ctrl { + struct mlx5_core_dev *mdev; + struct mlx5_buf buf; + struct mlx5_db db; +}; + +struct mlx5_wq_cyc { + void *buf; + __be32 *db; + u16 sz_m1; + u8 log_stride; +}; + +struct mlx5_cqwq { + void *buf; + __be32 *db; + u32 sz_m1; + u32 cc; /* consumer counter */ + u8 log_sz; + u8 log_stride; +}; + +struct mlx5_wq_ll { + void *buf; + __be32 *db; + __be16 *tail_next; + u16 sz_m1; + u16 head; + u16 wqe_ctr; + u16 cur_sz; + u8 log_stride; +}; + +int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_cyc *wq, + struct mlx5_wq_ctrl *wq_ctrl); +u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); + +int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *cqc, struct mlx5_cqwq *wq, + struct mlx5_wq_ctrl *wq_ctrl); +u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq); + +int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_ll *wq, + struct mlx5_wq_ctrl *wq_ctrl); +u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq); + +void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl); + +static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) +{ + return ctr & wq->sz_m1; +} + +static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix) +{ + return wq->buf + (ix << wq->log_stride); +} + +static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) +{ + int equal = (cc1 == cc2); + int smaller = 0x8000 & (cc1 - cc2); + + return !equal && !smaller; +} + +static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq) +{ + return wq->cc & wq->sz_m1; +} + +static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) +{ + return wq->buf + (ix << wq->log_stride); +} + +static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq) +{ + return wq->cc >> wq->log_sz; +} + +static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq) +{ + wq->cc++; +} + 
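+/*
+ * The CQ ring size is a power of two, so the free-running consumer
+ * counter (cc) maps to a slot with "cc & sz_m1" and to a wrap count
+ * with "cc >> log_sz"; e.g. with log_sz = 3 (8 CQEs), cc = 13 means
+ * slot 5 on wrap 1. Only the low 24 bits of cc are published to the
+ * doorbell record below.
+ */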
+static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq) +{ + *wq->db = cpu_to_be32(wq->cc & 0xffffff); +} + +static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq) +{ + return wq->cur_sz == wq->sz_m1; +} + +static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) +{ + return !wq->cur_sz; +} + +static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) +{ + return wq->buf + (ix << wq->log_stride); +} + +static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next) +{ + wq->head = head_next; + wq->wqe_ctr++; + wq->cur_sz++; +} + +static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix, + __be16 *next_tail_next) +{ + *wq->tail_next = ix; + wq->tail_next = next_tail_next; + wq->cur_sz--; +} + +static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq) +{ + *wq->db = cpu_to_be32(wq->wqe_ctr); +} + +#endif /* __MLX5_WQ_H__ */ diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 6f332ebdf3b5..75dc46c5fca2 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -6664,7 +6664,7 @@ static void mib_read_work(struct work_struct *work) wake_up_interruptible( &hw_priv->counter[i].counter); } - } else if (jiffies >= hw_priv->counter[i].time) { + } else if (time_after_eq(jiffies, hw_priv->counter[i].time)) { /* Only read MIB counters when the port is connected. */ if (media_connected == mib->state) hw_priv->counter[i].read = 1; @@ -6689,7 +6689,7 @@ static void mib_monitor(unsigned long ptr) /* This is used to verify Wake-on-LAN is working. */ if (hw_priv->pme_wait) { - if (hw_priv->pme_wait <= jiffies) { + if (time_is_before_eq_jiffies(hw_priv->pme_wait)) { hw_clr_wol_pme_status(&hw_priv->hw); hw_priv->pme_wait = 0; } diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index e0c31e3947d1..6409a06bbdf6 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -3025,9 +3025,9 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj, u8 dw, rows, cols, banks, ranks; u32 val; - if (size != sizeof(struct netxen_dimm_cfg)) { + if (size < attr->size) { netdev_err(netdev, "Invalid size\n"); - return -1; + return -EINVAL; } memset(&dimm, 0, sizeof(struct netxen_dimm_cfg)); @@ -3137,7 +3137,7 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj, static struct bin_attribute bin_attr_dimm = { .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) }, - .size = 0, + .size = sizeof(struct netxen_dimm_cfg), .read = netxen_sysfs_read_dimm, }; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index f221126a5c4e..055f3763e577 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1326,9 +1326,6 @@ struct qlcnic_eswitch { }; -/* Return codes for Error handling */ -#define QL_STATUS_INVALID_PARAM -1 - #define MAX_BW 100 /* % of link speed */ #define MIN_BW 1 /* % of link speed */ #define MAX_VLAN_ID 4095 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 367f3976df56..2f6cc423ab1d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1031,7 +1031,7 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter) pfn = pci_info[i].id; if (pfn >= ahw->max_vnic_func) { - ret = 
QL_STATUS_INVALID_PARAM; + ret = -EINVAL; dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n", __func__, pfn, ahw->max_vnic_func); goto err_eswitch; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 59a721fba018..05c28f2c6df7 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -24,8 +24,6 @@ #include #endif -#define QLC_STATUS_UNSUPPORTED_CMD -2 - int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) { return -EOPNOTSUPP; @@ -166,7 +164,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, u8 b_state, b_rate; if (len != sizeof(u16)) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; memcpy(&beacon, buf, sizeof(u16)); err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate); @@ -383,17 +381,17 @@ static int validate_pm_config(struct qlcnic_adapter *adapter, dest_pci_func = pm_cfg[i].dest_npar; src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func); if (src_index < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func); if (dest_index < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; s_esw_id = adapter->npars[src_index].phy_port; d_esw_id = adapter->npars[dest_index].phy_port; if (s_esw_id != d_esw_id) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; } return 0; @@ -414,7 +412,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, count = size / sizeof(struct qlcnic_pm_func_cfg); rem = size % sizeof(struct qlcnic_pm_func_cfg); if (rem) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); pm_cfg = (struct qlcnic_pm_func_cfg *)buf; @@ -427,7 +425,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, action = !!pm_cfg[i].action; index = qlcnic_is_valid_nic_func(adapter, pci_func); if (index < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; id = adapter->npars[index].phy_port; ret = qlcnic_config_port_mirroring(adapter, id, @@ -440,7 +438,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, pci_func = pm_cfg[i].pci_func; index = qlcnic_is_valid_nic_func(adapter, pci_func); if (index < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; id = adapter->npars[index].phy_port; adapter->npars[index].enable_pm = !!pm_cfg[i].action; adapter->npars[index].dest_npar = id; @@ -499,11 +497,11 @@ static int validate_esw_config(struct qlcnic_adapter *adapter, for (i = 0; i < count; i++) { pci_func = esw_cfg[i].pci_func; if (pci_func >= ahw->max_vnic_func) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; switch (esw_cfg[i].op_mode) { case QLCNIC_PORT_DEFAULTS: @@ -517,25 +515,25 @@ static int validate_esw_config(struct qlcnic_adapter *adapter, if (ret != QLCNIC_NON_PRIV_FUNC) { if (esw_cfg[i].mac_anti_spoof != 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (esw_cfg[i].mac_override != 1) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (esw_cfg[i].promisc_mode != 1) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; } break; case QLCNIC_ADD_VLAN: if (!IS_VALID_VLAN(esw_cfg[i].vlan_id)) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (!esw_cfg[i].op_type) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; break; case QLCNIC_DEL_VLAN: if 
(!esw_cfg[i].op_type) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; break; default: - return QL_STATUS_INVALID_PARAM; + return -EINVAL; } } @@ -559,7 +557,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, count = size / sizeof(struct qlcnic_esw_func_cfg); rem = size % sizeof(struct qlcnic_esw_func_cfg); if (rem) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); esw_cfg = (struct qlcnic_esw_func_cfg *)buf; @@ -570,7 +568,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, for (i = 0; i < count; i++) { if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) if (qlcnic_config_switch_port(adapter, &esw_cfg[i])) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (adapter->ahw->pci_func != esw_cfg[i].pci_func) continue; @@ -604,7 +602,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, pci_func = esw_cfg[i].pci_func; index = qlcnic_is_valid_nic_func(adapter, pci_func); if (index < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; npar = &adapter->npars[index]; switch (esw_cfg[i].op_mode) { case QLCNIC_PORT_DEFAULTS: @@ -654,7 +652,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file, esw_cfg[pci_func].pci_func = pci_func; if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func])) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; } qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); return size; @@ -669,11 +667,11 @@ static int validate_npar_config(struct qlcnic_adapter *adapter, for (i = 0; i < count; i++) { pci_func = np_cfg[i].pci_func; if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (!IS_VALID_BW(np_cfg[i].min_bw) || !IS_VALID_BW(np_cfg[i].max_bw)) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; } return 0; } @@ -694,7 +692,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, count = size / sizeof(struct qlcnic_npar_func_cfg); rem = size % sizeof(struct qlcnic_npar_func_cfg); if (rem) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); np_cfg = (struct qlcnic_npar_func_cfg *)buf; @@ -717,7 +715,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, return ret; index = qlcnic_is_valid_nic_func(adapter, pci_func); if (index < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; adapter->npars[index].min_bw = nic_info.min_tx_bw; adapter->npars[index].max_bw = nic_info.max_tx_bw; } @@ -784,13 +782,13 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file, int ret; if (qlcnic_83xx_check(adapter)) - return QLC_STATUS_UNSUPPORTED_CMD; + return -EOPNOTSUPP; if (size != sizeof(struct qlcnic_esw_statistics)) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (offset >= adapter->ahw->max_vnic_func) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; memset(&port_stats, 0, size); ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, @@ -819,13 +817,13 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file, int ret; if (qlcnic_83xx_check(adapter)) - return QLC_STATUS_UNSUPPORTED_CMD; + return -EOPNOTSUPP; if (size != sizeof(struct qlcnic_esw_statistics)) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (offset >= QLCNIC_NIU_MAX_XG_PORTS) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; memset(&esw_stats, 0, size); ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, @@ -853,10 +851,10 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file, int 
ret; if (qlcnic_83xx_check(adapter)) - return QLC_STATUS_UNSUPPORTED_CMD; + return -EOPNOTSUPP; if (offset >= QLCNIC_NIU_MAX_XG_PORTS) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset, QLCNIC_QUERY_RX_COUNTER); @@ -883,10 +881,10 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file, int ret; if (qlcnic_83xx_check(adapter)) - return QLC_STATUS_UNSUPPORTED_CMD; + return -EOPNOTSUPP; if (offset >= adapter->ahw->max_vnic_func) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, QLCNIC_QUERY_RX_COUNTER); @@ -953,9 +951,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp, struct qlcnic_adapter *adapter = dev_get_drvdata(dev); if (!size) - return QL_STATUS_INVALID_PARAM; - if (!buf) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; count = size / sizeof(u32); @@ -1132,9 +1128,6 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp, struct device *dev = container_of(kobj, struct device, kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - if (!buf) - return QL_STATUS_INVALID_PARAM; - ret = kstrtoul(buf, 16, &data); switch (data) { diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 36f7edfc3c7a..819289e5be4f 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -1172,11 +1172,11 @@ static void rocker_dma_rings_fini(struct rocker *rocker) rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); } -static int rocker_dma_rx_ring_skb_map(const struct rocker *rocker, - const struct rocker_port *rocker_port, +static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, struct sk_buff *skb, size_t buf_len) { + const struct rocker *rocker = rocker_port->rocker; struct pci_dev *pdev = rocker->pdev; dma_addr_t dma_handle; @@ -1201,8 +1201,7 @@ static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port) return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; } -static int rocker_dma_rx_ring_skb_alloc(const struct rocker *rocker, - const struct rocker_port *rocker_port, +static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info) { struct net_device *dev = rocker_port->dev; @@ -1219,8 +1218,7 @@ static int rocker_dma_rx_ring_skb_alloc(const struct rocker *rocker, skb = netdev_alloc_skb_ip_align(dev, buf_len); if (!skb) return -ENOMEM; - err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info, - skb, buf_len); + err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len); if (err) { dev_kfree_skb_any(skb); return err; @@ -1257,15 +1255,15 @@ static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker, dev_kfree_skb_any(skb); } -static int rocker_dma_rx_ring_skbs_alloc(const struct rocker *rocker, - const struct rocker_port *rocker_port) +static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port) { const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + const struct rocker *rocker = rocker_port->rocker; int i; int err; for (i = 0; i < rx_ring->size; i++) { - err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, + err = rocker_dma_rx_ring_skb_alloc(rocker_port, &rx_ring->desc_info[i]); if (err) goto rollback; @@ -1278,10 +1276,10 @@ static int rocker_dma_rx_ring_skbs_alloc(const struct rocker *rocker, return err; } -static void 
rocker_dma_rx_ring_skbs_free(const struct rocker *rocker, - const struct rocker_port *rocker_port) +static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port) { const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + const struct rocker *rocker = rocker_port->rocker; int i; for (i = 0; i < rx_ring->size; i++) @@ -1327,7 +1325,7 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port) goto err_dma_rx_ring_bufs_alloc; } - err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port); + err = rocker_dma_rx_ring_skbs_alloc(rocker_port); if (err) { netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n"); goto err_dma_rx_ring_skbs_alloc; @@ -1353,7 +1351,7 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port) { struct rocker *rocker = rocker_port->rocker; - rocker_dma_rx_ring_skbs_free(rocker, rocker_port); + rocker_dma_rx_ring_skbs_free(rocker_port); rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, PCI_DMA_BIDIRECTIONAL); rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); @@ -1588,22 +1586,20 @@ static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id) * Command interface ********************/ -typedef int (*rocker_cmd_prep_cb_t)(const struct rocker *rocker, - const struct rocker_port *rocker_port, +typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv); -typedef int (*rocker_cmd_proc_cb_t)(const struct rocker *rocker, - const struct rocker_port *rocker_port, +typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port, const struct rocker_desc_info *desc_info, void *priv); -static int rocker_cmd_exec(struct rocker *rocker, - struct rocker_port *rocker_port, +static int rocker_cmd_exec(struct rocker_port *rocker_port, enum switchdev_trans trans, rocker_cmd_prep_cb_t prepare, void *prepare_priv, rocker_cmd_proc_cb_t process, void *process_priv) { + struct rocker *rocker = rocker_port->rocker; struct rocker_desc_info *desc_info; struct rocker_wait *wait; unsigned long flags; @@ -1622,7 +1618,7 @@ static int rocker_cmd_exec(struct rocker *rocker, goto out; } - err = prepare(rocker, rocker_port, desc_info, prepare_priv); + err = prepare(rocker_port, desc_info, prepare_priv); if (err) { spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags); goto out; @@ -1644,7 +1640,7 @@ static int rocker_cmd_exec(struct rocker *rocker, return err; if (process) - err = process(rocker, rocker_port, desc_info, process_priv); + err = process(rocker_port, desc_info, process_priv); rocker_desc_gen_clear(desc_info); out: @@ -1653,8 +1649,7 @@ static int rocker_cmd_exec(struct rocker *rocker, } static int -rocker_cmd_get_port_settings_prep(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -1674,8 +1669,7 @@ rocker_cmd_get_port_settings_prep(const struct rocker *rocker, } static int -rocker_cmd_get_port_settings_ethtool_proc(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port, const struct rocker_desc_info *desc_info, void *priv) { @@ -1713,8 +1707,7 @@ rocker_cmd_get_port_settings_ethtool_proc(const struct rocker *rocker, } static int -rocker_cmd_get_port_settings_macaddr_proc(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_get_port_settings_macaddr_proc(const struct 
rocker_port *rocker_port, const struct rocker_desc_info *desc_info, void *priv) { @@ -1746,8 +1739,7 @@ struct port_name { }; static int -rocker_cmd_get_port_settings_phys_name_proc(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port, const struct rocker_desc_info *desc_info, void *priv) { @@ -1788,8 +1780,7 @@ rocker_cmd_get_port_settings_phys_name_proc(const struct rocker *rocker, } static int -rocker_cmd_set_port_settings_ethtool_prep(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -1819,8 +1810,7 @@ rocker_cmd_set_port_settings_ethtool_prep(const struct rocker *rocker, } static int -rocker_cmd_set_port_settings_macaddr_prep(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -1844,8 +1834,7 @@ rocker_cmd_set_port_settings_macaddr_prep(const struct rocker *rocker, } static int -rocker_cmd_set_port_learning_prep(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -1870,8 +1859,7 @@ rocker_cmd_set_port_learning_prep(const struct rocker *rocker, static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, struct ethtool_cmd *ecmd) { - return rocker_cmd_exec(rocker_port->rocker, rocker_port, - SWITCHDEV_TRANS_NONE, + return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, rocker_cmd_get_port_settings_prep, NULL, rocker_cmd_get_port_settings_ethtool_proc, ecmd); @@ -1880,8 +1868,7 @@ static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, unsigned char *macaddr) { - return rocker_cmd_exec(rocker_port->rocker, rocker_port, - SWITCHDEV_TRANS_NONE, + return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, rocker_cmd_get_port_settings_prep, NULL, rocker_cmd_get_port_settings_macaddr_proc, macaddr); @@ -1890,8 +1877,7 @@ static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, struct ethtool_cmd *ecmd) { - return rocker_cmd_exec(rocker_port->rocker, rocker_port, - SWITCHDEV_TRANS_NONE, + return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, rocker_cmd_set_port_settings_ethtool_prep, ecmd, NULL, NULL); } @@ -1899,8 +1885,7 @@ static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, unsigned char *macaddr) { - return rocker_cmd_exec(rocker_port->rocker, rocker_port, - SWITCHDEV_TRANS_NONE, + return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, rocker_cmd_set_port_settings_macaddr_prep, macaddr, NULL, NULL); } @@ -1908,7 +1893,7 @@ static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, static int rocker_port_set_learning(struct rocker_port *rocker_port, enum switchdev_trans trans) { - return rocker_cmd_exec(rocker_port->rocker, rocker_port, trans, + return rocker_cmd_exec(rocker_port, trans, rocker_cmd_set_port_learning_prep, NULL, NULL, NULL); } @@ -2114,8 +2099,7 @@ 
rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, return 0; } -static int rocker_cmd_flow_tbl_add(const struct rocker *rocker, - const struct rocker_port *rocker_port, +static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -2172,8 +2156,7 @@ static int rocker_cmd_flow_tbl_add(const struct rocker *rocker, return 0; } -static int rocker_cmd_flow_tbl_del(const struct rocker *rocker, - const struct rocker_port *rocker_port, +static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -2282,8 +2265,7 @@ rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, return 0; } -static int rocker_cmd_group_tbl_add(const struct rocker *rocker, - const struct rocker_port *rocker_port, +static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -2328,8 +2310,7 @@ static int rocker_cmd_group_tbl_add(const struct rocker *rocker, return 0; } -static int rocker_cmd_group_tbl_del(const struct rocker *rocker, - const struct rocker_port *rocker_port, +static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -2460,8 +2441,7 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port, spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); - return rocker_cmd_exec(rocker, rocker_port, trans, - rocker_cmd_flow_tbl_add, + return rocker_cmd_exec(rocker_port, trans, rocker_cmd_flow_tbl_add, found, NULL, NULL); } @@ -2492,7 +2472,7 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port, rocker_port_kfree(trans, match); if (found) { - err = rocker_cmd_exec(rocker, rocker_port, trans, + err = rocker_cmd_exec(rocker_port, trans, rocker_cmd_flow_tbl_del, found, NULL, NULL); rocker_port_kfree(trans, found); @@ -2782,8 +2762,7 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port, spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); - return rocker_cmd_exec(rocker, rocker_port, trans, - rocker_cmd_group_tbl_add, + return rocker_cmd_exec(rocker_port, trans, rocker_cmd_group_tbl_add, found, NULL, NULL); } @@ -2811,7 +2790,7 @@ static int rocker_group_tbl_del(struct rocker_port *rocker_port, rocker_group_tbl_entry_free(trans, match); if (found) { - err = rocker_cmd_exec(rocker, rocker_port, trans, + err = rocker_cmd_exec(rocker_port, trans, rocker_cmd_group_tbl_del, found, NULL, NULL); rocker_group_tbl_entry_free(trans, found); @@ -3157,6 +3136,8 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, for (i = 0; i < rocker->port_count; i++) { p = rocker->ports[i]; + if (!p) + continue; if (!rocker_port_is_bridged(p)) continue; if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { @@ -3216,7 +3197,7 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, for (i = 0; i < rocker->port_count; i++) { p = rocker->ports[i]; - if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) + if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap)) ref++; } @@ -4183,35 +4164,6 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p) return 0; } -static int rocker_port_vlan_rx_add_vid(struct net_device *dev, - __be16 proto, u16 vid) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int err; - - err = rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, 0, vid); - if (err) - return err; - - return rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE, - 0, 
htons(vid)); -} - -static int rocker_port_vlan_rx_kill_vid(struct net_device *dev, - __be16 proto, u16 vid) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int err; - - err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE, - ROCKER_OP_FLAG_REMOVE, htons(vid)); - if (err) - return err; - - return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, - ROCKER_OP_FLAG_REMOVE, vid); -} - static int rocker_port_get_phys_port_name(struct net_device *dev, char *buf, size_t len) { @@ -4219,8 +4171,7 @@ static int rocker_port_get_phys_port_name(struct net_device *dev, struct port_name name = { .buf = buf, .len = len }; int err; - err = rocker_cmd_exec(rocker_port->rocker, rocker_port, - SWITCHDEV_TRANS_NONE, + err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, rocker_cmd_get_port_settings_prep, NULL, rocker_cmd_get_port_settings_phys_name_proc, &name); @@ -4233,8 +4184,6 @@ static const struct net_device_ops rocker_port_netdev_ops = { .ndo_stop = rocker_port_stop, .ndo_start_xmit = rocker_port_xmit, .ndo_set_mac_address = rocker_port_set_mac_address, - .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid, .ndo_bridge_getlink = switchdev_port_bridge_getlink, .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_dellink = switchdev_port_bridge_dellink, @@ -4342,7 +4291,12 @@ static int rocker_port_vlan_add(struct rocker_port *rocker_port, if (err) return err; - return rocker_port_router_mac(rocker_port, trans, 0, htons(vid)); + err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid)); + if (err) + rocker_port_vlan(rocker_port, trans, + ROCKER_OP_FLAG_REMOVE, vid); + + return err; } static int rocker_port_vlans_add(struct rocker_port *rocker_port, @@ -4600,8 +4554,7 @@ static void rocker_port_get_strings(struct net_device *netdev, u32 stringset, } static int -rocker_cmd_get_port_stats_prep(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -4625,8 +4578,7 @@ rocker_cmd_get_port_stats_prep(const struct rocker *rocker, } static int -rocker_cmd_get_port_stats_ethtool_proc(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port, const struct rocker_desc_info *desc_info, void *priv) { @@ -4666,8 +4618,7 @@ rocker_cmd_get_port_stats_ethtool_proc(const struct rocker *rocker, static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port, void *priv) { - return rocker_cmd_exec(rocker_port->rocker, rocker_port, - SWITCHDEV_TRANS_NONE, + return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, rocker_cmd_get_port_stats_prep, NULL, rocker_cmd_get_port_stats_ethtool_proc, priv); @@ -4780,7 +4731,7 @@ static int rocker_port_rx_proc(const struct rocker *rocker, netif_receive_skb(skb); - return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info); + return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info); } static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi) @@ -4858,9 +4809,9 @@ static void rocker_remove_ports(const struct rocker *rocker) kfree(rocker->ports); } -static void rocker_port_dev_addr_init(const struct rocker *rocker, - struct rocker_port *rocker_port) +static void rocker_port_dev_addr_init(struct rocker_port *rocker_port) { + const struct rocker *rocker = rocker_port->rocker; const struct pci_dev *pdev = rocker->pdev; int err; 
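
Note (illustrative, not part of the diff): the rocker_probe_port() hunk that follows extends the function's goto-based unwind chain: each setup step gets a matching label, and the labels run in reverse setup order so a failure at step N undoes exactly the steps that already succeeded. A generic sketch of the idiom, with hypothetical step names rather than real rocker functions:

	err = setup_a(dev);
	if (err)
		goto err_a;
	err = setup_b(dev);
	if (err)
		goto err_b;
	return 0;

err_b:
	teardown_a(dev);	/* undo only what completed */
err_a:
	return err;

This is the shape of the new err_untagged_vlan label below, which removes the ingress port table entry and then falls through to unregister_netdev().
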
@@ -4877,6 +4828,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) const struct pci_dev *pdev = rocker->pdev; struct rocker_port *rocker_port; struct net_device *dev; + u16 untagged_vid = 0; int err; dev = alloc_etherdev(sizeof(struct rocker_port)); @@ -4890,7 +4842,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; INIT_LIST_HEAD(&rocker_port->trans_mem); - rocker_port_dev_addr_init(rocker, rocker_port); + rocker_port_dev_addr_init(rocker_port); dev->netdev_ops = &rocker_port_netdev_ops; dev->ethtool_ops = &rocker_port_ethtool_ops; dev->switchdev_ops = &rocker_port_switchdev_ops; @@ -4900,8 +4852,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) NAPI_POLL_WEIGHT); rocker_carrier_init(rocker_port); - dev->features |= NETIF_F_NETNS_LOCAL | - NETIF_F_HW_VLAN_CTAG_FILTER; + dev->features |= NETIF_F_NETNS_LOCAL; err = register_netdev(dev); if (err) { @@ -4912,16 +4863,27 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE); - rocker_port->internal_vlan_id = - rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex); err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0); if (err) { dev_err(&pdev->dev, "install ig port table failed\n"); goto err_port_ig_tbl; } + rocker_port->internal_vlan_id = + rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex); + + err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE, + untagged_vid, 0); + if (err) { + netdev_err(rocker_port->dev, "install untagged VLAN failed\n"); + goto err_untagged_vlan; + } + return 0; +err_untagged_vlan: + rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, + ROCKER_OP_FLAG_REMOVE); err_port_ig_tbl: unregister_netdev(dev); err_register_netdev: @@ -4936,7 +4898,7 @@ static int rocker_probe_ports(struct rocker *rocker) int err; alloc_size = sizeof(struct rocker_port *) * rocker->port_count; - rocker->ports = kmalloc(alloc_size, GFP_KERNEL); + rocker->ports = kzalloc(alloc_size, GFP_KERNEL); if (!rocker->ports) return -ENOMEM; for (i = 0; i < rocker->port_count; i++) { @@ -5159,41 +5121,49 @@ static bool rocker_port_dev_check(const struct net_device *dev) static int rocker_port_bridge_join(struct rocker_port *rocker_port, struct net_device *bridge) { + u16 untagged_vid = 0; int err; - rocker_port_internal_vlan_id_put(rocker_port, - rocker_port->dev->ifindex); - - rocker_port->bridge_dev = bridge; + /* Port is joining bridge, so the internal VLAN for the + * port is going to change to the bridge internal VLAN. + * Let's remove untagged VLAN (vid=0) from port and + * re-add once internal VLAN has changed. 
+ */ - /* Use bridge internal VLAN ID for untagged pkts */ - err = rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, - ROCKER_OP_FLAG_REMOVE, 0); + err = rocker_port_vlan_del(rocker_port, untagged_vid, 0); if (err) return err; + + rocker_port_internal_vlan_id_put(rocker_port, + rocker_port->dev->ifindex); rocker_port->internal_vlan_id = rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex); - return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, 0, 0); + + rocker_port->bridge_dev = bridge; + + return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE, + untagged_vid, 0); } static int rocker_port_bridge_leave(struct rocker_port *rocker_port) { + u16 untagged_vid = 0; int err; - rocker_port_internal_vlan_id_put(rocker_port, - rocker_port->bridge_dev->ifindex); - - rocker_port->bridge_dev = NULL; - - /* Use port internal VLAN ID for untagged pkts */ - err = rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, - ROCKER_OP_FLAG_REMOVE, 0); + err = rocker_port_vlan_del(rocker_port, untagged_vid, 0); if (err) return err; + + rocker_port_internal_vlan_id_put(rocker_port, + rocker_port->bridge_dev->ifindex); rocker_port->internal_vlan_id = rocker_port_internal_vlan_id_get(rocker_port, rocker_port->dev->ifindex); - err = rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, 0, 0); + + rocker_port->bridge_dev = NULL; + + err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE, + untagged_vid, 0); if (err) return err; diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index 088921294448..4dd92b7b80f4 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -36,3 +36,12 @@ config SFC_SRIOV This enables support for the SFC9000 I/O Virtualization features, allowing accelerated network performance in virtualized environments. +config SFC_MCDI_LOGGING + bool "Solarflare SFC9000/SFC9100-family MCDI logging support" + depends on SFC + default y + ---help--- + This enables support for tracing of MCDI (Management-Controller-to- + Driver-Interface) commands and responses, allowing debugging of + driver/firmware interaction. The tracing is actually enabled by + a sysfs file 'mcdi_logging' under the PCI device. diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index a547cebff4e2..847643455468 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -246,9 +246,38 @@ static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address) return 0; } +static ssize_t efx_ef10_show_link_control_flag(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + + return sprintf(buf, "%d\n", + ((efx->mcdi->fn_flags) & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) + ? 1 : 0); +} + +static ssize_t efx_ef10_show_primary_flag(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + + return sprintf(buf, "%d\n", + ((efx->mcdi->fn_flags) & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) + ? 1 : 0); +} + +static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag, + NULL); +static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL); + static int efx_ef10_probe(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data; + struct net_device *net_dev = efx->net_dev; int i, rc; /* We can have one VI for each 8K region. 
However, until we @@ -267,6 +296,9 @@ static int efx_ef10_probe(struct efx_nic *efx) return -ENOMEM; efx->nic_data = nic_data; + /* we assume later that we can copy from this buffer in dwords */ + BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4); + rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); if (rc) @@ -311,29 +343,39 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc) goto fail3; - rc = efx_ef10_get_pf_index(efx); + rc = device_create_file(&efx->pci_dev->dev, + &dev_attr_link_control_flag); if (rc) goto fail3; + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag); + if (rc) + goto fail4; + + rc = efx_ef10_get_pf_index(efx); + if (rc) + goto fail5; + rc = efx_ef10_init_datapath_caps(efx); if (rc < 0) - goto fail3; + goto fail5; efx->rx_packet_len_offset = ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; rc = efx_mcdi_port_get_number(efx); if (rc < 0) - goto fail3; + goto fail5; efx->port_num = rc; + net_dev->dev_port = rc; rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr); if (rc) - goto fail3; + goto fail5; rc = efx_ef10_get_sysclk_freq(efx); if (rc < 0) - goto fail3; + goto fail5; efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */ /* Check whether firmware supports bug 35388 workaround. @@ -341,9 +383,9 @@ static int efx_ef10_probe(struct efx_nic *efx) * ask if it's already enabled */ rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true); - if (rc == 0) + if (rc == 0) { nic_data->workaround_35388 = true; - else if (rc == -EPERM) { + } else if (rc == -EPERM) { unsigned int enabled; rc = efx_mcdi_get_workarounds(efx, NULL, &enabled); @@ -351,21 +393,35 @@ static int efx_ef10_probe(struct efx_nic *efx) goto fail3; nic_data->workaround_35388 = enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388; + } else if (rc != -ENOSYS && rc != -ENOENT) { + goto fail5; } - else if (rc != -ENOSYS && rc != -ENOENT) - goto fail3; netif_dbg(efx, probe, efx->net_dev, "workaround for bug 35388 is %sabled\n", nic_data->workaround_35388 ? "en" : "dis"); rc = efx_mcdi_mon_probe(efx); if (rc && rc != -EPERM) - goto fail3; + goto fail5; efx_ptp_probe(efx, NULL); +#ifdef CONFIG_SFC_SRIOV + if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) { + struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; + struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); + + efx_pf->type->get_mac_address(efx_pf, nic_data->port_id); + } else +#endif + ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr); + return 0; +fail5: + device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); +fail4: + device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); fail3: efx_mcdi_fini(efx); fail2: @@ -608,6 +664,9 @@ static void efx_ef10_remove(struct efx_nic *efx) if (!nic_data->must_restore_piobufs) efx_ef10_free_piobufs(efx); + device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); + device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); + efx_mcdi_fini(efx); efx_nic_free_buffer(efx, &nic_data->mcdi_buf); kfree(nic_data); @@ -622,6 +681,24 @@ static int efx_ef10_probe_pf(struct efx_nic *efx) static int efx_ef10_probe_vf(struct efx_nic *efx) { int rc; + struct pci_dev *pci_dev_pf; + + /* If the parent PF has no VF data structure, it doesn't know about this + * VF so fail probe. The VF needs to be re-created. This can happen + * if the PF driver is unloaded while the VF is assigned to a guest. 
+ */ + pci_dev_pf = efx->pci_dev->physfn; + if (pci_dev_pf) { + struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); + struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data; + + if (!nic_data_pf->vf) { + netif_info(efx, drv, efx->net_dev, + "The VF cannot link to its parent PF; " + "please destroy and re-create the VF\n"); + return -EBUSY; + } + } rc = efx_ef10_probe(efx); if (rc) @@ -639,6 +716,8 @@ static int efx_ef10_probe_vf(struct efx_nic *efx) struct efx_ef10_nic_data *nic_data = efx->nic_data; nic_data_p->vf[nic_data->vf_index].efx = efx; + nic_data_p->vf[nic_data->vf_index].pci_dev = + efx->pci_dev; } else netif_info(efx, drv, efx->net_dev, "Could not get the PF id from VF\n"); @@ -932,93 +1011,112 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { - EF10_DMA_STAT(tx_bytes, TX_BYTES), - EF10_DMA_STAT(tx_packets, TX_PKTS), - EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS), - EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS), - EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS), - EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS), - EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS), - EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS), - EF10_DMA_STAT(tx_64, TX_64_PKTS), - EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS), - EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS), - EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS), - EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS), - EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), - EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), - EF10_DMA_STAT(rx_bytes, RX_BYTES), - EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES), - EF10_OTHER_STAT(rx_good_bytes), - EF10_OTHER_STAT(rx_bad_bytes), - EF10_DMA_STAT(rx_packets, RX_PKTS), - EF10_DMA_STAT(rx_good, RX_GOOD_PKTS), - EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS), - EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS), - EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS), - EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS), - EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS), - EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS), - EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS), - EF10_DMA_STAT(rx_64, RX_64_PKTS), - EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS), - EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS), - EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS), - EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS), - EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), - EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), - EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS), - EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS), - EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS), - EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), - EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), - EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), + EF10_DMA_STAT(port_tx_bytes, TX_BYTES), + EF10_DMA_STAT(port_tx_packets, TX_PKTS), + EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), + EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS), + EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), + EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), + EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), + EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), + EF10_DMA_STAT(port_tx_64, TX_64_PKTS), + EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), + EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), + EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), + EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), + 
EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), + EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), + EF10_DMA_STAT(port_rx_bytes, RX_BYTES), + EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES), + EF10_OTHER_STAT(port_rx_good_bytes), + EF10_OTHER_STAT(port_rx_bad_bytes), + EF10_DMA_STAT(port_rx_packets, RX_PKTS), + EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS), + EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), + EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), + EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS), + EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), + EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), + EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), + EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), + EF10_DMA_STAT(port_rx_64, RX_64_PKTS), + EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), + EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), + EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), + EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), + EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), + EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), + EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), + EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), + EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), + EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), + EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), + EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), GENERIC_SW_STAT(rx_nodesc_trunc), GENERIC_SW_STAT(rx_noskb_drops), - EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), - EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), - EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), - EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), - EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB), - EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB), - EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING), - EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), - EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), - EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS), - EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS), - EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS), + EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), + EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), + EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), + EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), + EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB), + EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB), + EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING), + EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), + EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), + EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS), + EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS), + EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS), + EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS), + EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES), + EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS), + EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES), + EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS), + EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES), + EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS), + EF10_DMA_STAT(rx_bad_bytes, 
VADAPTER_RX_BAD_BYTES), + EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW), + EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS), + EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES), + EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS), + EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES), + EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS), + EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES), + EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), + EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), + EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), }; -#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ - (1ULL << EF10_STAT_tx_packets) | \ - (1ULL << EF10_STAT_tx_pause) | \ - (1ULL << EF10_STAT_tx_unicast) | \ - (1ULL << EF10_STAT_tx_multicast) | \ - (1ULL << EF10_STAT_tx_broadcast) | \ - (1ULL << EF10_STAT_rx_bytes) | \ - (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \ - (1ULL << EF10_STAT_rx_good_bytes) | \ - (1ULL << EF10_STAT_rx_bad_bytes) | \ - (1ULL << EF10_STAT_rx_packets) | \ - (1ULL << EF10_STAT_rx_good) | \ - (1ULL << EF10_STAT_rx_bad) | \ - (1ULL << EF10_STAT_rx_pause) | \ - (1ULL << EF10_STAT_rx_control) | \ - (1ULL << EF10_STAT_rx_unicast) | \ - (1ULL << EF10_STAT_rx_multicast) | \ - (1ULL << EF10_STAT_rx_broadcast) | \ - (1ULL << EF10_STAT_rx_lt64) | \ - (1ULL << EF10_STAT_rx_64) | \ - (1ULL << EF10_STAT_rx_65_to_127) | \ - (1ULL << EF10_STAT_rx_128_to_255) | \ - (1ULL << EF10_STAT_rx_256_to_511) | \ - (1ULL << EF10_STAT_rx_512_to_1023) | \ - (1ULL << EF10_STAT_rx_1024_to_15xx) | \ - (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \ - (1ULL << EF10_STAT_rx_gtjumbo) | \ - (1ULL << EF10_STAT_rx_bad_gtjumbo) | \ - (1ULL << EF10_STAT_rx_overflow) | \ - (1ULL << EF10_STAT_rx_nodesc_drops) | \ +#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ + (1ULL << EF10_STAT_port_tx_packets) | \ + (1ULL << EF10_STAT_port_tx_pause) | \ + (1ULL << EF10_STAT_port_tx_unicast) | \ + (1ULL << EF10_STAT_port_tx_multicast) | \ + (1ULL << EF10_STAT_port_tx_broadcast) | \ + (1ULL << EF10_STAT_port_rx_bytes) | \ + (1ULL << \ + EF10_STAT_port_rx_bytes_minus_good_bytes) | \ + (1ULL << EF10_STAT_port_rx_good_bytes) | \ + (1ULL << EF10_STAT_port_rx_bad_bytes) | \ + (1ULL << EF10_STAT_port_rx_packets) | \ + (1ULL << EF10_STAT_port_rx_good) | \ + (1ULL << EF10_STAT_port_rx_bad) | \ + (1ULL << EF10_STAT_port_rx_pause) | \ + (1ULL << EF10_STAT_port_rx_control) | \ + (1ULL << EF10_STAT_port_rx_unicast) | \ + (1ULL << EF10_STAT_port_rx_multicast) | \ + (1ULL << EF10_STAT_port_rx_broadcast) | \ + (1ULL << EF10_STAT_port_rx_lt64) | \ + (1ULL << EF10_STAT_port_rx_64) | \ + (1ULL << EF10_STAT_port_rx_65_to_127) | \ + (1ULL << EF10_STAT_port_rx_128_to_255) | \ + (1ULL << EF10_STAT_port_rx_256_to_511) | \ + (1ULL << EF10_STAT_port_rx_512_to_1023) |\ + (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\ + (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\ + (1ULL << EF10_STAT_port_rx_gtjumbo) | \ + (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\ + (1ULL << EF10_STAT_port_rx_overflow) | \ + (1ULL << EF10_STAT_port_rx_nodesc_drops) |\ (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ (1ULL << GENERIC_STAT_rx_noskb_drops)) @@ -1026,39 +1124,39 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { * switchable port we do not expose these because they might not * include all the packets they should. 
*/ -#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \ - (1ULL << EF10_STAT_tx_lt64) | \ - (1ULL << EF10_STAT_tx_64) | \ - (1ULL << EF10_STAT_tx_65_to_127) | \ - (1ULL << EF10_STAT_tx_128_to_255) | \ - (1ULL << EF10_STAT_tx_256_to_511) | \ - (1ULL << EF10_STAT_tx_512_to_1023) | \ - (1ULL << EF10_STAT_tx_1024_to_15xx) | \ - (1ULL << EF10_STAT_tx_15xx_to_jumbo)) +#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ + (1ULL << EF10_STAT_port_tx_lt64) | \ + (1ULL << EF10_STAT_port_tx_64) | \ + (1ULL << EF10_STAT_port_tx_65_to_127) |\ + (1ULL << EF10_STAT_port_tx_128_to_255) |\ + (1ULL << EF10_STAT_port_tx_256_to_511) |\ + (1ULL << EF10_STAT_port_tx_512_to_1023) |\ + (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\ + (1ULL << EF10_STAT_port_tx_15xx_to_jumbo)) /* These statistics are only provided by the 40G MAC. For a 10G/40G * switchable port we do expose these because the errors will otherwise * be silent. */ -#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ - (1ULL << EF10_STAT_rx_length_error)) +#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\ + (1ULL << EF10_STAT_port_rx_length_error)) /* These statistics are only provided if the firmware supports the * capability PM_AND_RXDP_COUNTERS. */ #define HUNT_PM_AND_RXDP_STAT_MASK ( \ - (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \ - (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \ - (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \ - (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \ - (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \ - (1ULL << EF10_STAT_rx_pm_discard_qbb) | \ - (1ULL << EF10_STAT_rx_pm_discard_mapping) | \ - (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \ - (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \ - (1ULL << EF10_STAT_rx_dp_streaming_packets) | \ - (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \ - (1ULL << EF10_STAT_rx_dp_hlb_wait)) + (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \ + (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \ + (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \ + (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \ + (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \ + (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \ + (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \ + (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \ + (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \ + (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \ + (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \ + (1ULL << EF10_STAT_port_rx_dp_hlb_wait)) static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) { @@ -1066,6 +1164,10 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) u32 port_caps = efx_mcdi_phy_get_caps(efx); struct efx_ef10_nic_data *nic_data = efx->nic_data; + if (!(efx->mcdi->fn_flags & + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) + return 0; + if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) raw_mask |= HUNT_40G_EXTRA_STAT_MASK; else @@ -1080,13 +1182,28 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) { - u64 raw_mask = efx_ef10_raw_stat_mask(efx); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u64 raw_mask[2]; + + raw_mask[0] = efx_ef10_raw_stat_mask(efx); + + /* Only show vadaptor stats when EVB capability is present */ + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) { + raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); + raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1; + } else { + raw_mask[1] = 
0; + } #if BITS_PER_LONG == 64 - mask[0] = raw_mask; + mask[0] = raw_mask[0]; + mask[1] = raw_mask[1]; #else - mask[0] = raw_mask & 0xffffffff; - mask[1] = raw_mask >> 32; + mask[0] = raw_mask[0] & 0xffffffff; + mask[1] = raw_mask[0] >> 32; + mask[2] = raw_mask[1] & 0xffffffff; + mask[3] = raw_mask[1] >> 32; #endif } @@ -1099,7 +1216,51 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) mask, names); } -static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) +static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + DECLARE_BITMAP(mask, EF10_STAT_COUNT); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u64 *stats = nic_data->stats; + size_t stats_count = 0, index; + + efx_ef10_get_stat_mask(efx, mask); + + if (full_stats) { + for_each_set_bit(index, mask, EF10_STAT_COUNT) { + if (efx_ef10_stat_desc[index].name) { + *full_stats++ = stats[index]; + ++stats_count; + } + } + } + + if (core_stats) { + core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + + stats[EF10_STAT_rx_multicast] + + stats[EF10_STAT_rx_broadcast]; + core_stats->tx_packets = stats[EF10_STAT_tx_unicast] + + stats[EF10_STAT_tx_multicast] + + stats[EF10_STAT_tx_broadcast]; + core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] + + stats[EF10_STAT_rx_multicast_bytes] + + stats[EF10_STAT_rx_broadcast_bytes]; + core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] + + stats[EF10_STAT_tx_multicast_bytes] + + stats[EF10_STAT_tx_broadcast_bytes]; + core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] + + stats[GENERIC_STAT_rx_noskb_drops]; + core_stats->multicast = stats[EF10_STAT_rx_multicast]; + core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; + core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; + core_stats->rx_errors = core_stats->rx_crc_errors; + core_stats->tx_errors = stats[EF10_STAT_tx_bad]; + } + + return stats_count; +} + +static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; DECLARE_BITMAP(mask, EF10_STAT_COUNT); @@ -1124,67 +1285,114 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) return -EAGAIN; /* Update derived statistics */ - efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]); - stats[EF10_STAT_rx_good_bytes] = - stats[EF10_STAT_rx_bytes] - - stats[EF10_STAT_rx_bytes_minus_good_bytes]; - efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes], - stats[EF10_STAT_rx_bytes_minus_good_bytes]); + efx_nic_fix_nodesc_drop_stat(efx, + &stats[EF10_STAT_port_rx_nodesc_drops]); + stats[EF10_STAT_port_rx_good_bytes] = + stats[EF10_STAT_port_rx_bytes] - + stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; + efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], + stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); efx_update_sw_stats(efx, stats); return 0; } -static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, - struct rtnl_link_stats64 *core_stats) +static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) { - DECLARE_BITMAP(mask, EF10_STAT_COUNT); - struct efx_ef10_nic_data *nic_data = efx->nic_data; - u64 *stats = nic_data->stats; - size_t stats_count = 0, index; int retry; - efx_ef10_get_stat_mask(efx, mask); - /* If we're unlucky enough to read statistics during the DMA, wait * up to 10ms for it to finish (typically takes <500us) */ for (retry = 0; retry < 100; ++retry) { - if (efx_ef10_try_update_nic_stats(efx) == 0) + if 
(efx_ef10_try_update_nic_stats_pf(efx) == 0) break; udelay(100); } - if (full_stats) { - for_each_set_bit(index, mask, EF10_STAT_COUNT) { - if (efx_ef10_stat_desc[index].name) { - *full_stats++ = stats[index]; - ++stats_count; - } - } + return efx_ef10_update_stats_common(efx, full_stats, core_stats); +} + +static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + DECLARE_BITMAP(mask, EF10_STAT_COUNT); + __le64 generation_start, generation_end; + u64 *stats = nic_data->stats; + u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64); + struct efx_buffer stats_buf; + __le64 *dma_stats; + int rc; + + spin_unlock_bh(&efx->stats_lock); + + if (in_interrupt()) { + /* If in atomic context, cannot update stats. Just update the + * software stats and return so the caller can continue. + */ + spin_lock_bh(&efx->stats_lock); + efx_update_sw_stats(efx, stats); + return 0; } - if (core_stats) { - core_stats->rx_packets = stats[EF10_STAT_rx_packets]; - core_stats->tx_packets = stats[EF10_STAT_tx_packets]; - core_stats->rx_bytes = stats[EF10_STAT_rx_bytes]; - core_stats->tx_bytes = stats[EF10_STAT_tx_bytes]; - core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] + - stats[GENERIC_STAT_rx_nodesc_trunc] + - stats[GENERIC_STAT_rx_noskb_drops]; - core_stats->multicast = stats[EF10_STAT_rx_multicast]; - core_stats->rx_length_errors = - stats[EF10_STAT_rx_gtjumbo] + - stats[EF10_STAT_rx_length_error]; - core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; - core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error]; - core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; - core_stats->rx_errors = (core_stats->rx_length_errors + - core_stats->rx_crc_errors + - core_stats->rx_frame_errors); + efx_ef10_get_stat_mask(efx, mask); + + rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC); + if (rc) { + spin_lock_bh(&efx->stats_lock); + return rc; } - return stats_count; + dma_stats = stats_buf.addr; + dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; + + MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); + MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, + MAC_STATS_IN_DMA, 1); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), + NULL, 0, NULL); + spin_lock_bh(&efx->stats_lock); + if (rc) { + /* Expect ENOENT if DMA queues have not been set up */ + if (rc != -ENOENT || atomic_read(&efx->active_queues)) + efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, + sizeof(inbuf), NULL, 0, rc); + goto out; + } + + generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; + if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { + WARN_ON_ONCE(1); + goto out; + } + rmb(); + efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, + stats, stats_buf.addr, false); + rmb(); + generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; + if (generation_end != generation_start) { + rc = -EAGAIN; + goto out; + } + + efx_update_sw_stats(efx, stats); +out: + efx_nic_free_buffer(efx, &stats_buf); + return rc; +} + +static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + if (efx_ef10_try_update_nic_stats_vf(efx)) + return 0; + + return efx_ef10_update_stats_common(efx, full_stats, core_stats); } static void efx_ef10_push_irq_moderation(struct efx_channel *channel) 
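
Note (illustrative, not part of the diff): efx_ef10_try_update_nic_stats_vf() above reads a statistics block that firmware DMAs into host memory, and uses a generation count to detect a torn read: sample the end marker, copy the block, re-sample the start marker, and return -EAGAIN on mismatch so the caller can retry or fall back. The reader's shape, sketched with made-up names:

	do {
		gen_end = le64_to_cpu(dma_stats[GEN_END]);
		rmb();				/* read the marker before copying the data */
		memcpy(snapshot, dma_stats, len);
		rmb();				/* finish the copy before re-reading the marker */
		gen_start = le64_to_cpu(dma_stats[GEN_START]);
	} while (gen_start != gen_end);

The two rmb() barriers mirror those in the patch; without them the CPU could reorder the marker loads around the bulk copy and miss a concurrent update.
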
@@ -1313,7 +1521,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) /* MAC statistics have been cleared on the NIC; clear the local * statistic that we update with efx_update_diff_stat(). */ - nic_data->stats[EF10_STAT_rx_bad_bytes] = 0; + nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0; return -EIO; } @@ -4029,7 +4237,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .prepare_flr = efx_ef10_prepare_flr, .finish_flr = efx_port_dummy_op_void, .describe_stats = efx_ef10_describe_stats, - .update_stats = efx_ef10_update_stats, + .update_stats = efx_ef10_update_stats_vf, .start_stats = efx_port_dummy_op_void, .pull_stats = efx_port_dummy_op_void, .stop_stats = efx_port_dummy_op_void, @@ -4091,6 +4299,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .vswitching_probe = efx_ef10_vswitching_probe_vf, .vswitching_restore = efx_ef10_vswitching_restore_vf, .vswitching_remove = efx_ef10_vswitching_remove_vf, + .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id, #endif .get_mac_address = efx_ef10_get_mac_address_vf, .set_mac_address = efx_ef10_set_mac_address, @@ -4130,7 +4339,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .prepare_flr = efx_ef10_prepare_flr, .finish_flr = efx_port_dummy_op_void, .describe_stats = efx_ef10_describe_stats, - .update_stats = efx_ef10_update_stats, + .update_stats = efx_ef10_update_stats_pf, .start_stats = efx_mcdi_mac_start_stats, .pull_stats = efx_mcdi_mac_pull_stats, .stop_stats = efx_mcdi_mac_stop_stats, diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 3969b1bf7ef3..6c9b6e45509a 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -165,6 +165,11 @@ static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx) for (i = 0; i < efx->vf_count; i++) { struct ef10_vf *vf = nic_data->vf + i; + /* If VF is assigned, do not free the vport */ + if (vf->pci_dev && + vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) + continue; + if (vf->vport_assigned) { efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, i); vf->vport_assigned = 0; @@ -380,7 +385,9 @@ void efx_ef10_vswitching_remove_pf(struct efx_nic *efx) efx_ef10_vport_free(efx, nic_data->vport_id); nic_data->vport_id = EVB_PORT_ID_ASSIGNED; - efx_ef10_vswitch_free(efx, nic_data->vport_id); + /* Only free the vswitch if no VFs are assigned */ + if (!pci_vfs_assigned(efx->pci_dev)) + efx_ef10_vswitch_free(efx, nic_data->vport_id); } void efx_ef10_vswitching_remove_vf(struct efx_nic *efx) @@ -413,11 +420,22 @@ static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs) return rc; } -static int efx_ef10_pci_sriov_disable(struct efx_nic *efx) +static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) { struct pci_dev *dev = efx->pci_dev; + unsigned int vfs_assigned = 0; + + vfs_assigned = pci_vfs_assigned(dev); + + if (vfs_assigned && !force) { + netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " + "please detach them before disabling SR-IOV\n"); + return -EBUSY; + } + + if (!vfs_assigned) + pci_disable_sriov(dev); - pci_disable_sriov(dev); efx_ef10_sriov_free_vf_vswitching(efx); efx->vf_count = 0; return 0; @@ -426,7 +444,7 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx) int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs) { if (num_vfs == 0) - return efx_ef10_pci_sriov_disable(efx); + return efx_ef10_pci_sriov_disable(efx, false); else return efx_ef10_pci_sriov_enable(efx, num_vfs); } @@ -439,12 +457,25 @@ int 
efx_ef10_sriov_init(struct efx_nic *efx) void efx_ef10_sriov_fini(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int i; int rc; - if (!nic_data->vf) + if (!nic_data->vf) { + /* Remove any un-assigned orphaned VFs */ + if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev)) + pci_disable_sriov(efx->pci_dev); return; + } + + /* Remove any VFs in the host */ + for (i = 0; i < efx->vf_count; ++i) { + struct efx_nic *vf_efx = nic_data->vf[i].efx; + + if (vf_efx) + vf_efx->pci_dev->driver->remove(vf_efx->pci_dev); + } - rc = efx_ef10_pci_sriov_disable(efx); + rc = efx_ef10_pci_sriov_disable(efx, true); if (rc) netif_dbg(efx, drv, efx->net_dev, "Disabling SRIOV was not successful rc=%d\n", rc); @@ -736,3 +767,17 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i, return 0; } + +int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx, + struct netdev_phys_item_id *ppid) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (!is_valid_ether_addr(nic_data->port_id)) + return -EOPNOTSUPP; + + ppid->id_len = ETH_ALEN; + memcpy(ppid->id, nic_data->port_id, ppid->id_len); + + return 0; +} diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h index b98557670f73..db4ef537c610 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.h +++ b/drivers/net/ethernet/sfc/ef10_sriov.h @@ -15,6 +15,7 @@ /** * struct ef10_vf - PF's store of VF data * @efx: efx_nic struct for the current VF + * @pci_dev: the pci_dev struct for the VF, retained while the VF is assigned * @vport_id: vport ID for the VF * @vport_assigned: record whether the vport is currently assigned to the VF * @mac: MAC address for the VF, zero when address is removed from the vport @@ -22,6 +23,7 @@ */ struct ef10_vf { struct efx_nic *efx; + struct pci_dev *pci_dev; unsigned int vport_id; unsigned int vport_assigned; u8 mac[ETH_ALEN]; @@ -54,6 +56,9 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i, int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i, int link_state); +int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx, + struct netdev_phys_item_id *ppid); + int efx_ef10_vswitching_probe_pf(struct efx_nic *efx); int efx_ef10_vswitching_probe_vf(struct efx_nic *efx); int efx_ef10_vswitching_restore_pf(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 9eafa39d0e7f..0c42ed9c9e4c 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1298,7 +1298,9 @@ static void efx_fini_io(struct efx_nic *efx) efx->membase_phys = 0; } - pci_disable_device(efx->pci_dev); + /* Don't disable bus-mastering if VFs are assigned */ + if (!pci_vfs_assigned(efx->pci_dev)) + pci_disable_device(efx->pci_dev); } void efx_set_default_rx_indir_table(struct efx_nic *efx) @@ -2282,6 +2284,7 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, .ndo_get_vf_config = efx_sriov_get_vf_config, .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, + .ndo_get_phys_port_id = efx_sriov_get_phys_port_id, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = efx_netpoll, @@ -2326,6 +2329,28 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) } static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); +#ifdef CONFIG_SFC_MCDI_LOGGING +static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + 
struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled); +} +static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + bool enable = count > 0 && *buf != '0'; + + mcdi->logging_enabled = enable; + return count; +} +static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log); +#endif + static int efx_register_netdev(struct efx_nic *efx) { struct net_device *net_dev = efx->net_dev; @@ -2383,9 +2408,21 @@ static int efx_register_netdev(struct efx_nic *efx) "failed to init net dev attributes\n"); goto fail_registered; } +#ifdef CONFIG_SFC_MCDI_LOGGING + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to init net dev attributes\n"); + goto fail_attr_mcdi_logging; + } +#endif return 0; +#ifdef CONFIG_SFC_MCDI_LOGGING +fail_attr_mcdi_logging: + device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); +#endif fail_registered: rtnl_lock(); efx_dissociate(efx); @@ -2404,13 +2441,14 @@ static void efx_unregister_netdev(struct efx_nic *efx) BUG_ON(netdev_priv(efx->net_dev) != efx); - strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); - device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); - - rtnl_lock(); - unregister_netdevice(efx->net_dev); - efx->state = STATE_UNINIT; - rtnl_unlock(); + if (efx_dev_registered(efx)) { + strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); +#ifdef CONFIG_SFC_MCDI_LOGGING + device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); +#endif + device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); + unregister_netdev(efx->net_dev); + } } /************************************************************************** @@ -2866,7 +2904,8 @@ static void efx_pci_remove_main(struct efx_nic *efx) } /* Final NIC shutdown - * This is called only at module unload (or hotplug removal). + * This is called only at module unload (or hotplug removal). A PF can call + * this on its VFs to ensure they are unbound first. 
*/ static void efx_pci_remove(struct pci_dev *pci_dev) { diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 8267a1c75771..81640f8bb811 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -8,6 +8,7 @@ */ #include +#include #include #include "net_driver.h" #include "nic.h" @@ -54,18 +55,32 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, static bool efx_mcdi_poll_once(struct efx_nic *efx); static void efx_mcdi_abandon(struct efx_nic *efx); +#ifdef CONFIG_SFC_MCDI_LOGGING +static bool mcdi_logging_default; +module_param(mcdi_logging_default, bool, 0644); +MODULE_PARM_DESC(mcdi_logging_default, + "Enable MCDI logging on newly-probed functions"); +#endif + int efx_mcdi_init(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; bool already_attached; - int rc; + int rc = -ENOMEM; efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL); if (!efx->mcdi) - return -ENOMEM; + goto fail; mcdi = efx_mcdi(efx); mcdi->efx = efx; +#ifdef CONFIG_SFC_MCDI_LOGGING + /* consuming code assumes buffer is page-sized */ + mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL); + if (!mcdi->logging_buffer) + goto fail1; + mcdi->logging_enabled = mcdi_logging_default; +#endif init_waitqueue_head(&mcdi->wq); spin_lock_init(&mcdi->iface_lock); mcdi->state = MCDI_STATE_QUIESCENT; @@ -81,7 +96,7 @@ int efx_mcdi_init(struct efx_nic *efx) /* Recover from a failed assertion before probing */ rc = efx_mcdi_handle_assertion(efx); if (rc) - return rc; + goto fail2; /* Let the MC (and BMC, if this is a LOM) know that the driver * is loaded. We should do this before we reset the NIC. @@ -90,7 +105,7 @@ int efx_mcdi_init(struct efx_nic *efx) if (rc) { netif_err(efx, probe, efx->net_dev, "Unable to register driver with MCPU\n"); - return rc; + goto fail2; } if (already_attached) /* Not a fatal error */ @@ -102,6 +117,15 @@ int efx_mcdi_init(struct efx_nic *efx) efx->primary = efx; return 0; +fail2: +#ifdef CONFIG_SFC_MCDI_LOGGING + free_page((unsigned long)mcdi->logging_buffer); +fail1: +#endif + kfree(efx->mcdi); + efx->mcdi = NULL; +fail: + return rc; } void efx_mcdi_fini(struct efx_nic *efx) @@ -114,6 +138,10 @@ void efx_mcdi_fini(struct efx_nic *efx) /* Relinquish the device (back to the BMC, if this is a LOM) */ efx_mcdi_drv_attach(efx, false, NULL); +#ifdef CONFIG_SFC_MCDI_LOGGING + free_page((unsigned long)efx->mcdi->iface.logging_buffer); +#endif + kfree(efx->mcdi); } @@ -121,6 +149,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); +#ifdef CONFIG_SFC_MCDI_LOGGING + char *buf = mcdi->logging_buffer; /* page-sized */ +#endif efx_dword_t hdr[2]; size_t hdr_len; u32 xflags, seqno; @@ -165,6 +196,31 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, hdr_len = 8; } +#ifdef CONFIG_SFC_MCDI_LOGGING + if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) { + int bytes = 0; + int i; + /* Lengths should always be a whole number of dwords, so scream + * if they're not. + */ + WARN_ON_ONCE(hdr_len % 4); + WARN_ON_ONCE(inlen % 4); + + /* We own the logging buffer, as only one MCDI can be in + * progress on a NIC at any one time. So no need for locking. 
+ */ + for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++) + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr[i].u32[0])); + + for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++) + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(inbuf[i].u32[0])); + + netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf); + } +#endif + efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen); mcdi->new_epoch = false; @@ -206,6 +262,9 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned int respseq, respcmd, error; +#ifdef CONFIG_SFC_MCDI_LOGGING + char *buf = mcdi->logging_buffer; /* page-sized */ +#endif efx_dword_t hdr; efx->type->mcdi_read_response(efx, &hdr, 0, 4); @@ -223,6 +282,39 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN); } +#ifdef CONFIG_SFC_MCDI_LOGGING + if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) { + size_t hdr_len, data_len; + int bytes = 0; + int i; + + WARN_ON_ONCE(mcdi->resp_hdr_len % 4); + hdr_len = mcdi->resp_hdr_len / 4; + /* MCDI_DECLARE_BUF ensures that underlying buffer is padded + * to dword size, and the MCDI buffer is always dword size + */ + data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4); + + /* We own the logging buffer, as only one MCDI can be in + * progress on a NIC at any one time. So no need for locking. + */ + for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) { + efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4); + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr.u32[0])); + } + + for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) { + efx->type->mcdi_read_response(efx, &hdr, + mcdi->resp_hdr_len + (i * 4), 4); + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr.u32[0])); + } + + netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf); + } +#endif + if (error && mcdi->resp_data_len == 0) { netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); mcdi->resprc = -EIO; diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 7afab2fff4fe..1838afe2da92 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -58,6 +58,8 @@ enum efx_mcdi_mode { * enabled * @async_list: Queue of asynchronous requests * @async_timer: Timer for asynchronous request timeout + * @logging_buffer: buffer that may be used to build MCDI tracing messages + * @logging_enabled: whether to trace MCDI */ struct efx_mcdi_iface { struct efx_nic *efx; @@ -74,6 +76,10 @@ struct efx_mcdi_iface { spinlock_t async_lock; struct list_head async_list; struct timer_list async_timer; +#ifdef CONFIG_SFC_MCDI_LOGGING + char *logging_buffer; + bool logging_enabled; +#endif }; struct efx_mcdi_mon { diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 9efdf0a5df64..45fca9fc66b7 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -2755,7 +2755,7 @@ #define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_MAC_STATS_IN msgrequest */ -#define MC_CMD_MAC_STATS_IN_LEN 16 +#define MC_CMD_MAC_STATS_IN_LEN 20 /* ??? 
*/ #define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0 #define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8 @@ -2777,6 +2777,8 @@ #define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16 #define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16 #define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12 +/* port id so vadapter stats can be provided */ +#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16 /* MC_CMD_MAC_STATS_OUT_DMA msgresponse */ #define MC_CMD_MAC_STATS_OUT_DMA_LEN 0 @@ -2891,11 +2893,31 @@ /* enum: RXDP counter: Number of times an emergency descriptor fetch was * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47 +#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47 /* enum: RXDP counter: Number of times the DPCPU waited for an existing * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48 +#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48 +#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */ /* enum: Start of GMAC stats buffer space, for Siena only. */ #define MC_CMD_GMAC_DMABUF_START 0x40 /* enum: End of GMAC stats buffer space, for Siena only. */ @@ -5578,6 +5600,8 @@ #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27 #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1 /* RxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 9bf04cbce20a..7f295c4d7b80 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -924,6 +924,7 @@ enum efx_stats_action { static int efx_mcdi_mac_stats(struct efx_nic *efx, enum efx_stats_action action, int clear) { + struct efx_ef10_nic_data *nic_data = efx->nic_data; MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); int rc; int change = action == EFX_STATS_PULL ? 
0 : 1; @@ -945,9 +946,14 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, MAC_STATS_IN_PERIODIC_NOEVENT, 1, MAC_STATS_IN_PERIOD_MS, period); MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); - - rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), - NULL, 0, NULL); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), + NULL, 0, NULL); + /* Expect ENOENT if DMA queues have not been set up */ + if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues))) + efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf), + NULL, 0, rc); return rc; } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index a468a22e7a88..d72f522bf9c3 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1350,6 +1350,8 @@ struct efx_nic_type { struct ifla_vf_info *ivi); int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i, int link_state); + int (*sriov_get_phys_port_id)(struct efx_nic *efx, + struct netdev_phys_item_id *ppid); int (*vswitching_probe)(struct efx_nic *efx); int (*vswitching_restore)(struct efx_nic *efx); void (*vswitching_remove)(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index db8562ec586d..31ff9084d9a4 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -407,59 +407,77 @@ struct siena_nic_data { }; enum { - EF10_STAT_tx_bytes = GENERIC_STAT_COUNT, - EF10_STAT_tx_packets, - EF10_STAT_tx_pause, - EF10_STAT_tx_control, - EF10_STAT_tx_unicast, - EF10_STAT_tx_multicast, - EF10_STAT_tx_broadcast, - EF10_STAT_tx_lt64, - EF10_STAT_tx_64, - EF10_STAT_tx_65_to_127, - EF10_STAT_tx_128_to_255, - EF10_STAT_tx_256_to_511, - EF10_STAT_tx_512_to_1023, - EF10_STAT_tx_1024_to_15xx, - EF10_STAT_tx_15xx_to_jumbo, - EF10_STAT_rx_bytes, - EF10_STAT_rx_bytes_minus_good_bytes, - EF10_STAT_rx_good_bytes, - EF10_STAT_rx_bad_bytes, - EF10_STAT_rx_packets, - EF10_STAT_rx_good, - EF10_STAT_rx_bad, - EF10_STAT_rx_pause, - EF10_STAT_rx_control, + EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT, + EF10_STAT_port_tx_packets, + EF10_STAT_port_tx_pause, + EF10_STAT_port_tx_control, + EF10_STAT_port_tx_unicast, + EF10_STAT_port_tx_multicast, + EF10_STAT_port_tx_broadcast, + EF10_STAT_port_tx_lt64, + EF10_STAT_port_tx_64, + EF10_STAT_port_tx_65_to_127, + EF10_STAT_port_tx_128_to_255, + EF10_STAT_port_tx_256_to_511, + EF10_STAT_port_tx_512_to_1023, + EF10_STAT_port_tx_1024_to_15xx, + EF10_STAT_port_tx_15xx_to_jumbo, + EF10_STAT_port_rx_bytes, + EF10_STAT_port_rx_bytes_minus_good_bytes, + EF10_STAT_port_rx_good_bytes, + EF10_STAT_port_rx_bad_bytes, + EF10_STAT_port_rx_packets, + EF10_STAT_port_rx_good, + EF10_STAT_port_rx_bad, + EF10_STAT_port_rx_pause, + EF10_STAT_port_rx_control, + EF10_STAT_port_rx_unicast, + EF10_STAT_port_rx_multicast, + EF10_STAT_port_rx_broadcast, + EF10_STAT_port_rx_lt64, + EF10_STAT_port_rx_64, + EF10_STAT_port_rx_65_to_127, + EF10_STAT_port_rx_128_to_255, + EF10_STAT_port_rx_256_to_511, + EF10_STAT_port_rx_512_to_1023, + EF10_STAT_port_rx_1024_to_15xx, + EF10_STAT_port_rx_15xx_to_jumbo, + EF10_STAT_port_rx_gtjumbo, + EF10_STAT_port_rx_bad_gtjumbo, + EF10_STAT_port_rx_overflow, + EF10_STAT_port_rx_align_error, + EF10_STAT_port_rx_length_error, + EF10_STAT_port_rx_nodesc_drops, + EF10_STAT_port_rx_pm_trunc_bb_overflow, + EF10_STAT_port_rx_pm_discard_bb_overflow, + EF10_STAT_port_rx_pm_trunc_vfifo_full, + 
EF10_STAT_port_rx_pm_discard_vfifo_full, + EF10_STAT_port_rx_pm_trunc_qbb, + EF10_STAT_port_rx_pm_discard_qbb, + EF10_STAT_port_rx_pm_discard_mapping, + EF10_STAT_port_rx_dp_q_disabled_packets, + EF10_STAT_port_rx_dp_di_dropped_packets, + EF10_STAT_port_rx_dp_streaming_packets, + EF10_STAT_port_rx_dp_hlb_fetch, + EF10_STAT_port_rx_dp_hlb_wait, EF10_STAT_rx_unicast, + EF10_STAT_rx_unicast_bytes, EF10_STAT_rx_multicast, + EF10_STAT_rx_multicast_bytes, EF10_STAT_rx_broadcast, - EF10_STAT_rx_lt64, - EF10_STAT_rx_64, - EF10_STAT_rx_65_to_127, - EF10_STAT_rx_128_to_255, - EF10_STAT_rx_256_to_511, - EF10_STAT_rx_512_to_1023, - EF10_STAT_rx_1024_to_15xx, - EF10_STAT_rx_15xx_to_jumbo, - EF10_STAT_rx_gtjumbo, - EF10_STAT_rx_bad_gtjumbo, + EF10_STAT_rx_broadcast_bytes, + EF10_STAT_rx_bad, + EF10_STAT_rx_bad_bytes, EF10_STAT_rx_overflow, - EF10_STAT_rx_align_error, - EF10_STAT_rx_length_error, - EF10_STAT_rx_nodesc_drops, - EF10_STAT_rx_pm_trunc_bb_overflow, - EF10_STAT_rx_pm_discard_bb_overflow, - EF10_STAT_rx_pm_trunc_vfifo_full, - EF10_STAT_rx_pm_discard_vfifo_full, - EF10_STAT_rx_pm_trunc_qbb, - EF10_STAT_rx_pm_discard_qbb, - EF10_STAT_rx_pm_discard_mapping, - EF10_STAT_rx_dp_q_disabled_packets, - EF10_STAT_rx_dp_di_dropped_packets, - EF10_STAT_rx_dp_streaming_packets, - EF10_STAT_rx_dp_hlb_fetch, - EF10_STAT_rx_dp_hlb_wait, + EF10_STAT_tx_unicast, + EF10_STAT_tx_unicast_bytes, + EF10_STAT_tx_multicast, + EF10_STAT_tx_multicast_bytes, + EF10_STAT_tx_broadcast, + EF10_STAT_tx_broadcast_bytes, + EF10_STAT_tx_bad, + EF10_STAT_tx_bad_bytes, + EF10_STAT_tx_overflow, EF10_STAT_COUNT }; @@ -524,6 +542,7 @@ struct efx_ef10_nic_data { unsigned int vport_id; bool must_probe_vswitching; unsigned int pf_index; + u8 port_id[ETH_ALEN]; #ifdef CONFIG_SFC_SRIOV unsigned int vf_index; struct ef10_vf *vf; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index c0ad95d2f63d..809ea4610a77 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -224,12 +224,17 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx, } } -static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf) +static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, + unsigned int num_bufs) { - if (rx_buf->page) { - put_page(rx_buf->page); - rx_buf->page = NULL; - } + do { + if (rx_buf->page) { + put_page(rx_buf->page); + rx_buf->page = NULL; + } + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } while (--num_bufs); } /* Attempt to recycle the page if there is an RX recycle ring; the page can @@ -278,7 +283,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, /* If this is the last buffer in a page, unmap and free it. 
*/ if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { efx_unmap_rx_buffer(rx_queue->efx, rx_buf); - efx_free_rx_buffer(rx_buf); + efx_free_rx_buffers(rx_queue, rx_buf, 1); } rx_buf->page = NULL; } @@ -304,10 +309,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel, efx_recycle_rx_pages(channel, rx_buf, n_frags); - do { - efx_free_rx_buffer(rx_buf); - rx_buf = efx_rx_buf_next(rx_queue, rx_buf); - } while (--n_frags); + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); } /** @@ -431,11 +433,10 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, skb = napi_get_frags(napi); if (unlikely(!skb)) { - while (n_frags--) { - put_page(rx_buf->page); - rx_buf->page = NULL; - rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); - } + struct efx_rx_queue *rx_queue; + + rx_queue = efx_channel_get_rx_queue(channel); + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); return; } @@ -622,7 +623,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); if (unlikely(skb == NULL)) { - efx_free_rx_buffer(rx_buf); + struct efx_rx_queue *rx_queue; + + rx_queue = efx_channel_get_rx_queue(channel); + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); return; } skb_record_rx_queue(skb, channel->rx_queue.core_index); @@ -661,8 +665,12 @@ void __efx_rx_packet(struct efx_channel *channel) * loopback layer, and free the rx_buf here */ if (unlikely(efx->loopback_selftest)) { + struct efx_rx_queue *rx_queue; + efx_loopback_rx_packet(efx, eh, rx_buf->len); - efx_free_rx_buffer(rx_buf); + rx_queue = efx_channel_get_rx_queue(channel); + efx_free_rx_buffers(rx_queue, rx_buf, + channel->rx_pkt_n_frags); goto out; } diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c index 6c5edbdbfa27..816c44689e67 100644 --- a/drivers/net/ethernet/sfc/sriov.c +++ b/drivers/net/ethernet/sfc/sriov.c @@ -70,3 +70,14 @@ int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, else return -EOPNOTSUPP; } + +int efx_sriov_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_get_phys_port_id) + return efx->type->sriov_get_phys_port_id(efx, ppid); + else + return -EOPNOTSUPP; +} diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h index 3be15a54c562..400df526586d 100644 --- a/drivers/net/ethernet/sfc/sriov.h +++ b/drivers/net/ethernet/sfc/sriov.h @@ -23,6 +23,8 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, struct ifla_vf_info *ivi); int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, int link_state); +int efx_sriov_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid); #endif /* CONFIG_SFC_SRIOV */ diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 731e0453a7d4..cec147d1d34f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -16,6 +16,7 @@ if STMMAC_ETH config STMMAC_PLATFORM tristate "STMMAC Platform bus support" depends on STMMAC_ETH + select MFD_SYSCON default y ---help--- This selects the platform specific bus support for the stmmac driver. @@ -36,6 +37,19 @@ config DWMAC_GENERIC platform specific code to function or is using platform data for setup. 
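The MCDI tracing added above renders each request and response as a single line of " %08x" tokens accumulated in the preallocated page, relying on snprintf()'s would-be-length return value and a bytes < PAGE_SIZE guard to stop cleanly on long payloads. The same pattern in compilable form (PAGE_SIZE and the sample words here are illustrative):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096

	/* Append one " %08x" token per dword until the page would overflow;
	 * snprintf() keeps returning the would-be length, so 'bytes' may end
	 * up past PAGE_SIZE but the buffer itself is never overrun. */
	static int dump_dwords(char *buf, int bytes, const uint32_t *w, int n)
	{
		int i;

		for (i = 0; i < n && bytes < PAGE_SIZE; i++)
			bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
					  " %08" PRIx32, w[i]);
		return bytes;
	}

	int main(void)
	{
		static char page[PAGE_SIZE];
		const uint32_t hdr[2]  = { 0xc0510005, 0x00000020 };
		const uint32_t body[2] = { 0xdeadbeef, 0x00c0ffee };
		int bytes = dump_dwords(page, 0, hdr, 2);

		dump_dwords(page, bytes, body, 2);
		printf("MCDI RPC REQ:%s\n", page);
		return 0;
	}

No locking is needed around the page, for the reason the driver comment gives: only one MCDI call can be in flight per NIC, so the logging buffer has a single writer at any time.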
+config DWMAC_IPQ806X + tristate "QCA IPQ806x DWMAC support" + default ARCH_QCOM + depends on OF + select MFD_SYSCON + help + Support for QCA IPQ806X DWMAC Ethernet. + + This selects the IPQ806x SoC glue layer support for the stmmac + device driver. This driver does not use any of the hardware + acceleration features available on this SoC. Network devices + will behave like standard non-accelerated ethernet interfaces. + config DWMAC_LPC18XX tristate "NXP LPC18xx/43xx DWMAC support" default ARCH_LPC18XX diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 92e714a48367..b3901616f4f6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -6,6 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ # Ordering matters. Generic driver must be last. obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o +obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c new file mode 100644 index 000000000000..7e3129e7f143 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -0,0 +1,365 @@ +/* + * Qualcomm Atheros IPQ806x GMAC glue layer + * + * Copyright (C) 2015 The Linux Foundation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stmmac_platform.h" + +#define NSS_COMMON_CLK_GATE 0x8 +#define NSS_COMMON_CLK_GATE_PTP_EN(x) BIT(0x10 + x) +#define NSS_COMMON_CLK_GATE_RGMII_RX_EN(x) BIT(0x9 + (x * 2)) +#define NSS_COMMON_CLK_GATE_RGMII_TX_EN(x) BIT(0x8 + (x * 2)) +#define NSS_COMMON_CLK_GATE_GMII_RX_EN(x) BIT(0x4 + x) +#define NSS_COMMON_CLK_GATE_GMII_TX_EN(x) BIT(0x0 + x) + +#define NSS_COMMON_CLK_DIV0 0xC +#define NSS_COMMON_CLK_DIV_OFFSET(x) (x * 8) +#define NSS_COMMON_CLK_DIV_MASK 0x7f + +#define NSS_COMMON_CLK_SRC_CTRL 0x14 +#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x) +/* Mode is coded on 1 bit but is different depending on the MAC ID: + * MAC0: QSGMII=0 RGMII=1 + * MAC1: QSGMII=0 SGMII=0 RGMII=1 + * MAC2 & MAC3: QSGMII=0 SGMII=1 + */ +#define NSS_COMMON_CLK_SRC_CTRL_RGMII(x) 1 +#define NSS_COMMON_CLK_SRC_CTRL_SGMII(x) ((x >= 2) ? 
1 : 0) + +#define NSS_COMMON_MACSEC_CTL 0x28 +#define NSS_COMMON_MACSEC_CTL_EXT_BYPASS_EN(x) (1 << x) + +#define NSS_COMMON_GMAC_CTL(x) (0x30 + (x * 4)) +#define NSS_COMMON_GMAC_CTL_CSYS_REQ BIT(19) +#define NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL BIT(16) +#define NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET 8 +#define NSS_COMMON_GMAC_CTL_IFG_OFFSET 0 +#define NSS_COMMON_GMAC_CTL_IFG_MASK 0x3f + +#define NSS_COMMON_CLK_DIV_RGMII_1000 1 +#define NSS_COMMON_CLK_DIV_RGMII_100 9 +#define NSS_COMMON_CLK_DIV_RGMII_10 99 +#define NSS_COMMON_CLK_DIV_SGMII_1000 0 +#define NSS_COMMON_CLK_DIV_SGMII_100 4 +#define NSS_COMMON_CLK_DIV_SGMII_10 49 + +#define QSGMII_PCS_MODE_CTL 0x68 +#define QSGMII_PCS_MODE_CTL_AUTONEG_EN(x) BIT((x * 8) + 7) + +#define QSGMII_PCS_CAL_LCKDT_CTL 0x120 +#define QSGMII_PCS_CAL_LCKDT_CTL_RST BIT(19) + +/* Only GMAC1/2/3 support SGMII and their CTL register are not contiguous */ +#define QSGMII_PHY_SGMII_CTL(x) ((x == 1) ? 0x134 : \ + (0x13c + (4 * (x - 2)))) +#define QSGMII_PHY_CDR_EN BIT(0) +#define QSGMII_PHY_RX_FRONT_EN BIT(1) +#define QSGMII_PHY_RX_SIGNAL_DETECT_EN BIT(2) +#define QSGMII_PHY_TX_DRIVER_EN BIT(3) +#define QSGMII_PHY_QSGMII_EN BIT(7) +#define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET 12 +#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK 0x7 +#define QSGMII_PHY_RX_DC_BIAS_OFFSET 18 +#define QSGMII_PHY_RX_DC_BIAS_MASK 0x3 +#define QSGMII_PHY_RX_INPUT_EQU_OFFSET 20 +#define QSGMII_PHY_RX_INPUT_EQU_MASK 0x3 +#define QSGMII_PHY_CDR_PI_SLEW_OFFSET 22 +#define QSGMII_PHY_CDR_PI_SLEW_MASK 0x3 +#define QSGMII_PHY_TX_DRV_AMP_OFFSET 28 +#define QSGMII_PHY_TX_DRV_AMP_MASK 0xf + +struct ipq806x_gmac { + struct platform_device *pdev; + struct regmap *nss_common; + struct regmap *qsgmii_csr; + uint32_t id; + struct clk *core_clk; + phy_interface_t phy_mode; +}; + +static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed) +{ + struct device *dev = &gmac->pdev->dev; + int div; + + switch (speed) { + case SPEED_1000: + div = NSS_COMMON_CLK_DIV_SGMII_1000; + break; + + case SPEED_100: + div = NSS_COMMON_CLK_DIV_SGMII_100; + break; + + case SPEED_10: + div = NSS_COMMON_CLK_DIV_SGMII_10; + break; + + default: + dev_err(dev, "Speed %dMbps not supported in SGMII\n", speed); + return -EINVAL; + } + + return div; +} + +static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed) +{ + struct device *dev = &gmac->pdev->dev; + int div; + + switch (speed) { + case SPEED_1000: + div = NSS_COMMON_CLK_DIV_RGMII_1000; + break; + + case SPEED_100: + div = NSS_COMMON_CLK_DIV_RGMII_100; + break; + + case SPEED_10: + div = NSS_COMMON_CLK_DIV_RGMII_10; + break; + + default: + dev_err(dev, "Speed %dMbps not supported in RGMII\n", speed); + return -EINVAL; + } + + return div; +} + +static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed) +{ + uint32_t clk_bits, val; + int div; + + switch (gmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + div = get_clk_div_rgmii(gmac, speed); + clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) | + NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id); + break; + + case PHY_INTERFACE_MODE_SGMII: + div = get_clk_div_sgmii(gmac, speed); + clk_bits = NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) | + NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id); + break; + + default: + dev_err(&gmac->pdev->dev, "Unsupported PHY mode: \"%s\"\n", + phy_modes(gmac->phy_mode)); + return -EINVAL; + } + + /* Disable the clocks */ + regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val); + val &= ~clk_bits; + regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); + + /* Set the 
divider */ + regmap_read(gmac->nss_common, NSS_COMMON_CLK_DIV0, &val); + val &= ~(NSS_COMMON_CLK_DIV_MASK + << NSS_COMMON_CLK_DIV_OFFSET(gmac->id)); + val |= div << NSS_COMMON_CLK_DIV_OFFSET(gmac->id); + regmap_write(gmac->nss_common, NSS_COMMON_CLK_DIV0, val); + + /* Enable the clock back */ + regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val); + val |= clk_bits; + regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); + + return 0; +} + +static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) +{ + struct device *dev = &gmac->pdev->dev; + + gmac->phy_mode = of_get_phy_mode(dev->of_node); + if (gmac->phy_mode < 0) { + dev_err(dev, "missing phy mode property\n"); + return ERR_PTR(-EINVAL); + } + + if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) { + dev_err(dev, "missing qcom id property\n"); + return ERR_PTR(-EINVAL); + } + + /* The GMACs are called 1 to 4 in the documentation, but to simplify the + * code and keep it consistent with the Linux convention, we'll number + * them from 0 to 3 here. + */ + if (gmac->id < 0 || gmac->id > 3) { + dev_err(dev, "invalid gmac id\n"); + return ERR_PTR(-EINVAL); + } + + gmac->core_clk = devm_clk_get(dev, "stmmaceth"); + if (IS_ERR(gmac->core_clk)) { + dev_err(dev, "missing stmmaceth clk property\n"); + return gmac->core_clk; + } + clk_set_rate(gmac->core_clk, 266000000); + + /* Setup the register map for the nss common registers */ + gmac->nss_common = syscon_regmap_lookup_by_phandle(dev->of_node, + "qcom,nss-common"); + if (IS_ERR(gmac->nss_common)) { + dev_err(dev, "missing nss-common node\n"); + return gmac->nss_common; + } + + /* Setup the register map for the qsgmii csr registers */ + gmac->qsgmii_csr = syscon_regmap_lookup_by_phandle(dev->of_node, + "qcom,qsgmii-csr"); + if (IS_ERR(gmac->qsgmii_csr)) { + dev_err(dev, "missing qsgmii-csr node\n"); + return gmac->qsgmii_csr; + } + + return NULL; +} + +static void *ipq806x_gmac_setup(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ipq806x_gmac *gmac; + int val; + void *err; + + gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); + if (!gmac) + return ERR_PTR(-ENOMEM); + + gmac->pdev = pdev; + + err = ipq806x_gmac_of_parse(gmac); + if (err) { + dev_err(dev, "device tree parsing error\n"); + return err; + } + + regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL, + QSGMII_PCS_CAL_LCKDT_CTL_RST); + + /* Inter frame gap is set to 12 */ + val = 12 << NSS_COMMON_GMAC_CTL_IFG_OFFSET | + 12 << NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET; + /* We also initiate an AXI low power exit request */ + val |= NSS_COMMON_GMAC_CTL_CSYS_REQ; + switch (gmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL; + break; + case PHY_INTERFACE_MODE_SGMII: + val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL; + break; + default: + dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", + phy_modes(gmac->phy_mode)); + return NULL; + } + regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val); + + /* Configure the clock src according to the mode */ + regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val); + val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); + switch (gmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) << + NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); + break; + case PHY_INTERFACE_MODE_SGMII: + val |= NSS_COMMON_CLK_SRC_CTRL_SGMII(gmac->id) << + NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); + break; + default: + dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", + 
phy_modes(gmac->phy_mode)); + return NULL; + } + regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val); + + /* Enable PTP clock */ + regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val); + val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id); + regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); + + if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) { + regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id), + QSGMII_PHY_CDR_EN | + QSGMII_PHY_RX_FRONT_EN | + QSGMII_PHY_RX_SIGNAL_DETECT_EN | + QSGMII_PHY_TX_DRIVER_EN | + QSGMII_PHY_QSGMII_EN | + 0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET | + 0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET | + 0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET | + 0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET | + 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET); + } + + return gmac; +} + +static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed) +{ + struct ipq806x_gmac *gmac = priv; + + ipq806x_gmac_set_speed(gmac, speed); +} + +static const struct stmmac_of_data ipq806x_gmac_data = { + .has_gmac = 1, + .setup = ipq806x_gmac_setup, + .fix_mac_speed = ipq806x_gmac_fix_mac_speed, +}; + +static const struct of_device_id ipq806x_gmac_dwmac_match[] = { + { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data }, + { } +}; +MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match); + +static struct platform_driver ipq806x_gmac_dwmac_driver = { + .probe = stmmac_pltfr_probe, + .remove = stmmac_pltfr_remove, + .driver = { + .name = "ipq806x-gmac-dwmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = ipq806x_gmac_dwmac_match, + }, +}; +module_platform_driver(ipq806x_gmac_dwmac_driver); + +MODULE_AUTHOR("Mathieu Olivari "); +MODULE_DESCRIPTION("Qualcomm Atheros IPQ806x DWMAC specific glue layer"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 9cbcae203597..1f3b33a6c6a8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -125,6 +125,12 @@ struct stmmac_priv { int use_riwt; int irq_wake; spinlock_t ptp_lock; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dbgfs_dir; + struct dentry *dbgfs_rings_status; + struct dentry *dbgfs_dma_cap; +#endif }; int stmmac_mdio_unregister(struct net_device *ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e4f273976071..a5156739e1e1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -52,6 +52,7 @@ #include "stmmac_ptp.h" #include "stmmac.h" #include +#include #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) @@ -118,7 +119,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id); #ifdef CONFIG_DEBUG_FS static int stmmac_init_fs(struct net_device *dev); -static void stmmac_exit_fs(void); +static void stmmac_exit_fs(struct net_device *dev); #endif #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) @@ -816,18 +817,25 @@ static int stmmac_init_phy(struct net_device *dev) priv->speed = 0; priv->oldduplex = -1; - if (priv->plat->phy_bus_name) - snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", - priv->plat->phy_bus_name, priv->plat->bus_id); - else - snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", - priv->plat->bus_id); + if (priv->plat->phy_node) { + phydev = of_phy_connect(dev, priv->plat->phy_node, + &stmmac_adjust_link, 0, interface); + } else { + if (priv->plat->phy_bus_name) + snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", + priv->plat->phy_bus_name, 
priv->plat->bus_id); + else + snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", + priv->plat->bus_id); - snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, - priv->plat->phy_addr); - pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt); + snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, + priv->plat->phy_addr); + pr_debug("stmmac_init_phy: trying to attach to %s\n", + phy_id_fmt); - phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface); + phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, + interface); + } if (IS_ERR(phydev)) { pr_err("%s: Could not attach to PHY\n", dev->name); @@ -848,7 +856,7 @@ static int stmmac_init_phy(struct net_device *dev) * device as well. * Note: phydev->phy_id is the result of reading the UID PHY registers. */ - if (phydev->phy_id == 0) { + if (!priv->plat->phy_node && phydev->phy_id == 0) { phy_disconnect(phydev); return -ENODEV; } @@ -1914,7 +1922,7 @@ static int stmmac_release(struct net_device *dev) netif_carrier_off(dev); #ifdef CONFIG_DEBUG_FS - stmmac_exit_fs(); + stmmac_exit_fs(dev); #endif stmmac_release_ptp(priv); @@ -2506,8 +2514,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) #ifdef CONFIG_DEBUG_FS static struct dentry *stmmac_fs_dir; -static struct dentry *stmmac_rings_status; -static struct dentry *stmmac_dma_cap; static void sysfs_display_ring(void *head, int size, int extend_desc, struct seq_file *seq) @@ -2646,36 +2652,39 @@ static const struct file_operations stmmac_dma_cap_fops = { static int stmmac_init_fs(struct net_device *dev) { - /* Create debugfs entries */ - stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); + struct stmmac_priv *priv = netdev_priv(dev); + + /* Create per netdev entries */ + priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); - if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { - pr_err("ERROR %s, debugfs create directory failed\n", - STMMAC_RESOURCE_NAME); + if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { + pr_err("ERROR %s/%s, debugfs create directory failed\n", + STMMAC_RESOURCE_NAME, dev->name); return -ENOMEM; } /* Entry to report DMA RX/TX rings */ - stmmac_rings_status = debugfs_create_file("descriptors_status", - S_IRUGO, stmmac_fs_dir, dev, - &stmmac_rings_status_fops); + priv->dbgfs_rings_status = + debugfs_create_file("descriptors_status", S_IRUGO, + priv->dbgfs_dir, dev, + &stmmac_rings_status_fops); - if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) { + if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { pr_info("ERROR creating stmmac ring debugfs file\n"); - debugfs_remove(stmmac_fs_dir); + debugfs_remove_recursive(priv->dbgfs_dir); return -ENOMEM; } /* Entry to report the DMA HW features */ - stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir, - dev, &stmmac_dma_cap_fops); + priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, + priv->dbgfs_dir, + dev, &stmmac_dma_cap_fops); - if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) { + if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { pr_info("ERROR creating stmmac MMC debugfs file\n"); - debugfs_remove(stmmac_rings_status); - debugfs_remove(stmmac_fs_dir); + debugfs_remove_recursive(priv->dbgfs_dir); return -ENOMEM; } @@ -2683,11 +2692,11 @@ static int stmmac_init_fs(struct net_device *dev) return 0; } -static void stmmac_exit_fs(void) +static void stmmac_exit_fs(struct net_device *dev) { - debugfs_remove(stmmac_rings_status); - debugfs_remove(stmmac_dma_cap); - 
debugfs_remove(stmmac_fs_dir); + struct stmmac_priv *priv = netdev_priv(dev); + + debugfs_remove_recursive(priv->dbgfs_dir); } #endif /* CONFIG_DEBUG_FS */ @@ -3155,6 +3164,35 @@ static int __init stmmac_cmdline_opt(char *str) __setup("stmmaceth=", stmmac_cmdline_opt); #endif /* MODULE */ +static int __init stmmac_init(void) +{ +#ifdef CONFIG_DEBUG_FS + /* Create debugfs main directory if it doesn't exist yet */ + if (!stmmac_fs_dir) { + stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); + + if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { + pr_err("ERROR %s, debugfs create directory failed\n", + STMMAC_RESOURCE_NAME); + + return -ENOMEM; + } + } +#endif + + return 0; +} + +static void __exit stmmac_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(stmmac_fs_dir); +#endif +} + +module_init(stmmac_init) +module_exit(stmmac_exit) + MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); MODULE_AUTHOR("Giuseppe Cavallaro "); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 1664c0186f5b..f3918c7e7eeb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "stmmac.h" #include "stmmac_platform.h" @@ -144,13 +145,24 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, /* Default to phy auto-detection */ plat->phy_addr = -1; + /* If we find a phy-handle property, use it as the PHY */ + plat->phy_node = of_parse_phandle(np, "phy-handle", 0); + + /* If phy-handle is not specified, check if we have a fixed-phy */ + if (!plat->phy_node && of_phy_is_fixed_link(np)) { + if ((of_phy_register_fixed_link(np) < 0)) + return -ENODEV; + + plat->phy_node = of_node_get(np); + } + /* "snps,phy-addr" is not a standard property. Mark it as deprecated * and warn of its use. Remove this when phy node support is added. 
*/ if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); - if (plat->phy_bus_name) + if (plat->phy_node || plat->phy_bus_name) plat->mdio_bus_data = NULL; else plat->mdio_bus_data = @@ -208,8 +220,10 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, if (of_find_property(np, "snps,pbl", NULL)) { dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); - if (!dma_cfg) + if (!dma_cfg) { + of_node_put(np); return -ENOMEM; + } plat->dma_cfg = dma_cfg; of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); dma_cfg->fixed_burst = diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 3b99a4df71f8..5a1068df7038 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -62,12 +62,12 @@ u32 temac_ior(struct temac_local *lp, int offset) { - return in_be32((u32 *)(lp->regs + offset)); + return in_be32(lp->regs + offset); } void temac_iow(struct temac_local *lp, int offset, u32 value) { - out_be32((u32 *) (lp->regs + offset), value); + out_be32(lp->regs + offset, value); } int temac_indirect_busywait(struct temac_local *lp) @@ -124,7 +124,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value) */ static u32 temac_dma_in32(struct temac_local *lp, int reg) { - return in_be32((u32 *)(lp->sdma_regs + (reg << 2))); + return in_be32(lp->sdma_regs + (reg << 2)); } /** @@ -134,7 +134,7 @@ static u32 temac_dma_in32(struct temac_local *lp, int reg) */ static void temac_dma_out32(struct temac_local *lp, int reg, u32 value) { - out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value); + out_be32(lp->sdma_regs + (reg << 2), value); } /* DMA register access functions can be DCR based or memory mapped. 
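The stmmac_probe_config_dt() change above establishes a clear lookup order for the PHY: an explicit phy-handle wins, a fixed-link subnode comes second (registering a fixed PHY and reusing the MAC node itself as the PHY node), and the deprecated snps,phy-addr remains only as a fallback. A condensed kernel-style sketch of that order, under the assumption of the same OF helpers the patch uses (the wrapper name is made up and error handling is elided; this is not a buildable driver):

	#include <linux/of.h>
	#include <linux/of_mdio.h>

	/* Sketch only: resolve the PHY node the way the patched
	 * stmmac_probe_config_dt() does. */
	static struct device_node *stmmac_find_phy_node(struct device_node *np)
	{
		struct device_node *phy = of_parse_phandle(np, "phy-handle", 0);

		if (phy)
			return phy;             /* explicit phy-handle wins */

		if (of_phy_is_fixed_link(np)) {
			if (of_phy_register_fixed_link(np) < 0)
				return NULL;    /* -ENODEV in the real code */
			return of_node_get(np); /* MAC node doubles as PHY node */
		}

		return NULL;    /* fall back to bus scan / snps,phy-addr */
	}

Note the knock-on effect visible in the hunk above: once either a phy-handle or phy_bus_name is present, mdio_bus_data is left NULL so no MDIO bus is registered for the device.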
@@ -400,7 +400,7 @@ static void temac_set_multicast_list(struct net_device *ndev) mutex_unlock(&lp->indirect_mutex); } -struct temac_option { +static struct temac_option { int flg; u32 opt; u32 reg; @@ -587,7 +587,7 @@ static void temac_device_reset(struct net_device *ndev) ndev->trans_start = jiffies; /* prevent tx timeout */ } -void temac_adjust_link(struct net_device *ndev) +static void temac_adjust_link(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct phy_device *phy = lp->phy_dev; diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c index cc27dea3414e..9956680402de 100644 --- a/drivers/net/fddi/skfp/srf.c +++ b/drivers/net/fddi/skfp/srf.c @@ -414,7 +414,7 @@ static void smt_send_srf(struct s_smc *smc) smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ; mb->sm_len = smt->smt_len + sizeof(struct smt_header) ; - DB_SMT("SRF: sending SRF at %x, len %d\n",smt,mb->sm_len) ; + DB_SMT("SRF: sending SRF at %p, len %d\n",smt,mb->sm_len) ; DB_SMT("SRF: state SR%d Threshold %d\n", smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ; #ifdef DEBUG diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index b7eafa4c1a67..78d49d186e05 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -44,7 +44,9 @@ struct geneve_dev { struct net *net; /* netns for packet i/o */ struct net_device *dev; /* netdev for geneve tunnel */ struct geneve_sock *sock; /* socket used for geneve tunnel */ - u8 vni[3]; /* virtual network ID for tunnel */ + u8 vni[3]; /* virtual network ID for tunnel */ + u8 ttl; /* TTL override */ + u8 tos; /* TOS override */ struct sockaddr_in remote; /* IPv4 address for link partner */ struct list_head next; /* geneve's per namespace list */ }; @@ -184,7 +186,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) struct flowi4 fl4; int err; __be16 sport; - __u8 tos, ttl = 0; + __u8 tos, ttl; iip = ip_hdr(skb); @@ -193,7 +195,12 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) /* TODO: port min/max limits should be configurable */ sport = udp_flow_src_port(dev_net(dev), skb, 0, 0, true); + tos = geneve->tos; + if (tos == 1) + tos = ip_tunnel_get_dsfield(iip, skb); + memset(&fl4, 0, sizeof(fl4)); + fl4.flowi4_tos = RT_TOS(tos); fl4.daddr = geneve->remote.sin_addr.s_addr; rt = ip_route_output_key(geneve->net, &fl4); if (IS_ERR(rt)) { @@ -207,11 +214,10 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) goto rt_tx_error; } - /* TODO: tos and ttl should be configurable */ - - tos = ip_tunnel_ecn_encap(0, iip, skb); + tos = ip_tunnel_ecn_encap(tos, iip, skb); - if (IN_MULTICAST(ntohl(fl4.daddr))) + ttl = geneve->ttl; + if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) ttl = 1; ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); @@ -297,6 +303,8 @@ static void geneve_setup(struct net_device *dev) static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_ID] = { .type = NLA_U32 }, [IFLA_GENEVE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, + [IFLA_GENEVE_TTL] = { .type = NLA_U8 }, + [IFLA_GENEVE_TOS] = { .type = NLA_U8 }, }; static int geneve_validate(struct nlattr *tb[], struct nlattr *data[]) @@ -364,6 +372,12 @@ static int geneve_newlink(struct net *net, struct net_device *dev, if (err) return err; + if (data[IFLA_GENEVE_TTL]) + geneve->ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); + + if (data[IFLA_GENEVE_TOS]) + geneve->tos = nla_get_u8(data[IFLA_GENEVE_TOS]); + list_add(&geneve->next, &gn->geneve_list); hlist_add_head_rcu(&geneve->hlist, &gn->vni_list[hash]); @@ -386,6 +400,8 @@ static size_t geneve_get_size(const struct net_device *dev) { return nla_total_size(sizeof(__u32)) + /* IFLA_GENEVE_ID */ nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */ + nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ + nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ 0; } @@ -402,6 +418,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) geneve->remote.sin_addr.s_addr)) goto nla_put_failure; + if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) || + nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos)) + goto nla_put_failure; + return 0; nla_put_failure: diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index ddcc7f8d22b4..dd4544085db3 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -161,6 +161,7 @@ struct netvsc_device_info { unsigned char mac_adr[ETH_ALEN]; bool link_state; /* 0 - link up, 1 - link down */ int ring_size; + u32 max_num_vrss_chns; }; enum rndis_device_state { diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index b0249685139c..06de98a05622 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -227,13 +227,18 @@ static int netvsc_init_buf(struct hv_device *device) struct netvsc_device *net_device; struct nvsp_message *init_packet; struct net_device *ndev; + int node; net_device = get_outbound_net_device(device); if (!net_device) return -ENODEV; ndev = net_device->ndev; - net_device->recv_buf = vzalloc(net_device->recv_buf_size); + node = cpu_to_node(device->channel->target_cpu); + net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node); + if (!net_device->recv_buf) + net_device->recv_buf = vzalloc(net_device->recv_buf_size); + if (!net_device->recv_buf) { netdev_err(ndev, "unable to allocate receive " "buffer of size %d\n", net_device->recv_buf_size); @@ -321,7 +326,9 @@ static int netvsc_init_buf(struct hv_device *device) /* Now setup the send buffer. 
*/ - net_device->send_buf = vzalloc(net_device->send_buf_size); + net_device->send_buf = vzalloc_node(net_device->send_buf_size, node); + if (!net_device->send_buf) + net_device->send_buf = vzalloc(net_device->send_buf_size); if (!net_device->send_buf) { netdev_err(ndev, "unable to allocate send " "buffer of size %d\n", net_device->send_buf_size); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index d9c88bc09f45..358475ed9b59 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -46,6 +46,8 @@ static int ring_size = 128; module_param(ring_size, int, S_IRUGO); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); +static int max_num_vrss_chns = 8; + static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | @@ -755,6 +757,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) ndevctx->device_ctx = hdev; hv_set_drvdata(hdev, ndev); device_info.ring_size = ring_size; + device_info.max_num_vrss_chns = max_num_vrss_chns; rndis_filter_device_add(hdev, &device_info); netif_tx_wake_all_queues(ndev); @@ -975,6 +978,7 @@ static int netvsc_probe(struct hv_device *dev, /* Notify the netvsc driver of the new device */ device_info.ring_size = ring_size; + device_info.max_num_vrss_chns = max_num_vrss_chns; ret = rndis_filter_device_add(dev, &device_info); if (ret != 0) { netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 9118cea91882..006c1b8c2385 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1013,6 +1013,9 @@ int rndis_filter_device_add(struct hv_device *dev, struct ndis_recv_scale_cap rsscap; u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); u32 mtu, size; + u32 num_rss_qs; + const struct cpumask *node_cpu_mask; + u32 num_possible_rss_qs; rndis_device = get_rndis_device(); if (!rndis_device) @@ -1100,9 +1103,18 @@ int rndis_filter_device_add(struct hv_device *dev, if (ret || rsscap.num_recv_que < 2) goto out; + num_rss_qs = min(device_info->max_num_vrss_chns, rsscap.num_recv_que); + net_device->max_chn = rsscap.num_recv_que; - net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ? - num_online_cpus() : rsscap.num_recv_que; + + /* + * We will limit the VRSS channels to the number CPUs in the NUMA node + * the primary channel is currently bound to. + */ + node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu)); + num_possible_rss_qs = cpumask_weight(node_cpu_mask); + net_device->num_chn = min(num_possible_rss_qs, num_rss_qs); + if (net_device->num_chn == 1) goto out; diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig index 1a3c3e57aa0b..1dd5ab8e5054 100644 --- a/drivers/net/ieee802154/Kconfig +++ b/drivers/net/ieee802154/Kconfig @@ -53,3 +53,13 @@ config IEEE802154_CC2520 This driver can also be built as a module. To do so, say M here. the module will be called 'cc2520'. + +config IEEE802154_ATUSB + tristate "ATUSB transceiver driver" + depends on IEEE802154_DRIVERS && MAC802154 && USB + ---help--- + Say Y here to enable the ATUSB IEEE 802.15.4 wireless + controller. + + This driver can also be built as a module. To do so say M here. + The module will be called 'atusb'. 
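The rndis_filter_device_add() change above reduces the subchannel count to the smallest of three limits: the new max_num_vrss_chns module parameter, the number of receive queues the host offers, and the number of CPUs in the NUMA node the primary channel is bound to. The arithmetic in isolation, with example numbers:

	#include <stdio.h>

	static unsigned int min_u(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	/* Clamp applied by the patched rndis_filter_device_add(): module
	 * cap, then hardware receive queues, then CPUs on the primary
	 * channel's NUMA node. */
	static unsigned int vrss_channels(unsigned int max_vrss_chns,
					  unsigned int hw_recv_queues,
					  unsigned int cpus_on_node)
	{
		return min_u(min_u(max_vrss_chns, hw_recv_queues),
			     cpus_on_node);
	}

	int main(void)
	{
		/* default cap 8, host offers 16 queues, 6 CPUs on node -> 6 */
		printf("channels = %u\n", vrss_channels(8, 16, 6));
		return 0;
	}

Keeping the channels on one node matches the vzalloc_node() change in netvsc_init_buf() above: both aim to keep the VRSS work and its ring buffers NUMA-local to the CPU servicing the primary channel.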
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile index d77fa4d77e27..cf1d2a6db023 100644 --- a/drivers/net/ieee802154/Makefile +++ b/drivers/net/ieee802154/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o +obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 67d00fbc2e0e..2f25a5ed8247 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -35,6 +35,8 @@ #include #include +#include "at86rf230.h" + struct at86rf230_local; /* at86rf2xx chip depend data. * All timings are in us. @@ -50,7 +52,7 @@ struct at86rf2xx_chip_data { int rssi_base_val; int (*set_channel)(struct at86rf230_local *, u8, u8); - int (*get_desense_steps)(struct at86rf230_local *, s32); + int (*set_txpower)(struct at86rf230_local *, s32); }; #define AT86RF2XX_MAX_BUF (127 + 3) @@ -102,200 +104,6 @@ struct at86rf230_local { struct at86rf230_state_change tx; }; -#define RG_TRX_STATUS (0x01) -#define SR_TRX_STATUS 0x01, 0x1f, 0 -#define SR_RESERVED_01_3 0x01, 0x20, 5 -#define SR_CCA_STATUS 0x01, 0x40, 6 -#define SR_CCA_DONE 0x01, 0x80, 7 -#define RG_TRX_STATE (0x02) -#define SR_TRX_CMD 0x02, 0x1f, 0 -#define SR_TRAC_STATUS 0x02, 0xe0, 5 -#define RG_TRX_CTRL_0 (0x03) -#define SR_CLKM_CTRL 0x03, 0x07, 0 -#define SR_CLKM_SHA_SEL 0x03, 0x08, 3 -#define SR_PAD_IO_CLKM 0x03, 0x30, 4 -#define SR_PAD_IO 0x03, 0xc0, 6 -#define RG_TRX_CTRL_1 (0x04) -#define SR_IRQ_POLARITY 0x04, 0x01, 0 -#define SR_IRQ_MASK_MODE 0x04, 0x02, 1 -#define SR_SPI_CMD_MODE 0x04, 0x0c, 2 -#define SR_RX_BL_CTRL 0x04, 0x10, 4 -#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5 -#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6 -#define SR_PA_EXT_EN 0x04, 0x80, 7 -#define RG_PHY_TX_PWR (0x05) -#define SR_TX_PWR 0x05, 0x0f, 0 -#define SR_PA_LT 0x05, 0x30, 4 -#define SR_PA_BUF_LT 0x05, 0xc0, 6 -#define RG_PHY_RSSI (0x06) -#define SR_RSSI 0x06, 0x1f, 0 -#define SR_RND_VALUE 0x06, 0x60, 5 -#define SR_RX_CRC_VALID 0x06, 0x80, 7 -#define RG_PHY_ED_LEVEL (0x07) -#define SR_ED_LEVEL 0x07, 0xff, 0 -#define RG_PHY_CC_CCA (0x08) -#define SR_CHANNEL 0x08, 0x1f, 0 -#define SR_CCA_MODE 0x08, 0x60, 5 -#define SR_CCA_REQUEST 0x08, 0x80, 7 -#define RG_CCA_THRES (0x09) -#define SR_CCA_ED_THRES 0x09, 0x0f, 0 -#define SR_RESERVED_09_1 0x09, 0xf0, 4 -#define RG_RX_CTRL (0x0a) -#define SR_PDT_THRES 0x0a, 0x0f, 0 -#define SR_RESERVED_0a_1 0x0a, 0xf0, 4 -#define RG_SFD_VALUE (0x0b) -#define SR_SFD_VALUE 0x0b, 0xff, 0 -#define RG_TRX_CTRL_2 (0x0c) -#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0 -#define SR_SUB_MODE 0x0c, 0x04, 2 -#define SR_BPSK_QPSK 0x0c, 0x08, 3 -#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4 -#define SR_RESERVED_0c_5 0x0c, 0x60, 5 -#define SR_RX_SAFE_MODE 0x0c, 0x80, 7 -#define RG_ANT_DIV (0x0d) -#define SR_ANT_CTRL 0x0d, 0x03, 0 -#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2 -#define SR_ANT_DIV_EN 0x0d, 0x08, 3 -#define SR_RESERVED_0d_2 0x0d, 0x70, 4 -#define SR_ANT_SEL 0x0d, 0x80, 7 -#define RG_IRQ_MASK (0x0e) -#define SR_IRQ_MASK 0x0e, 0xff, 0 -#define RG_IRQ_STATUS (0x0f) -#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0 -#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1 -#define SR_IRQ_2_RX_START 0x0f, 0x04, 2 -#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3 -#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4 -#define SR_IRQ_5_AMI 0x0f, 0x20, 5 -#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6 -#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7 -#define 
RG_VREG_CTRL (0x10) -#define SR_RESERVED_10_6 0x10, 0x03, 0 -#define SR_DVDD_OK 0x10, 0x04, 2 -#define SR_DVREG_EXT 0x10, 0x08, 3 -#define SR_RESERVED_10_3 0x10, 0x30, 4 -#define SR_AVDD_OK 0x10, 0x40, 6 -#define SR_AVREG_EXT 0x10, 0x80, 7 -#define RG_BATMON (0x11) -#define SR_BATMON_VTH 0x11, 0x0f, 0 -#define SR_BATMON_HR 0x11, 0x10, 4 -#define SR_BATMON_OK 0x11, 0x20, 5 -#define SR_RESERVED_11_1 0x11, 0xc0, 6 -#define RG_XOSC_CTRL (0x12) -#define SR_XTAL_TRIM 0x12, 0x0f, 0 -#define SR_XTAL_MODE 0x12, 0xf0, 4 -#define RG_RX_SYN (0x15) -#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0 -#define SR_RESERVED_15_2 0x15, 0x70, 4 -#define SR_RX_PDT_DIS 0x15, 0x80, 7 -#define RG_XAH_CTRL_1 (0x17) -#define SR_RESERVED_17_8 0x17, 0x01, 0 -#define SR_AACK_PROM_MODE 0x17, 0x02, 1 -#define SR_AACK_ACK_TIME 0x17, 0x04, 2 -#define SR_RESERVED_17_5 0x17, 0x08, 3 -#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4 -#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5 -#define SR_CSMA_LBT_MODE 0x17, 0x40, 6 -#define SR_RESERVED_17_1 0x17, 0x80, 7 -#define RG_FTN_CTRL (0x18) -#define SR_RESERVED_18_2 0x18, 0x7f, 0 -#define SR_FTN_START 0x18, 0x80, 7 -#define RG_PLL_CF (0x1a) -#define SR_RESERVED_1a_2 0x1a, 0x7f, 0 -#define SR_PLL_CF_START 0x1a, 0x80, 7 -#define RG_PLL_DCU (0x1b) -#define SR_RESERVED_1b_3 0x1b, 0x3f, 0 -#define SR_RESERVED_1b_2 0x1b, 0x40, 6 -#define SR_PLL_DCU_START 0x1b, 0x80, 7 -#define RG_PART_NUM (0x1c) -#define SR_PART_NUM 0x1c, 0xff, 0 -#define RG_VERSION_NUM (0x1d) -#define SR_VERSION_NUM 0x1d, 0xff, 0 -#define RG_MAN_ID_0 (0x1e) -#define SR_MAN_ID_0 0x1e, 0xff, 0 -#define RG_MAN_ID_1 (0x1f) -#define SR_MAN_ID_1 0x1f, 0xff, 0 -#define RG_SHORT_ADDR_0 (0x20) -#define SR_SHORT_ADDR_0 0x20, 0xff, 0 -#define RG_SHORT_ADDR_1 (0x21) -#define SR_SHORT_ADDR_1 0x21, 0xff, 0 -#define RG_PAN_ID_0 (0x22) -#define SR_PAN_ID_0 0x22, 0xff, 0 -#define RG_PAN_ID_1 (0x23) -#define SR_PAN_ID_1 0x23, 0xff, 0 -#define RG_IEEE_ADDR_0 (0x24) -#define SR_IEEE_ADDR_0 0x24, 0xff, 0 -#define RG_IEEE_ADDR_1 (0x25) -#define SR_IEEE_ADDR_1 0x25, 0xff, 0 -#define RG_IEEE_ADDR_2 (0x26) -#define SR_IEEE_ADDR_2 0x26, 0xff, 0 -#define RG_IEEE_ADDR_3 (0x27) -#define SR_IEEE_ADDR_3 0x27, 0xff, 0 -#define RG_IEEE_ADDR_4 (0x28) -#define SR_IEEE_ADDR_4 0x28, 0xff, 0 -#define RG_IEEE_ADDR_5 (0x29) -#define SR_IEEE_ADDR_5 0x29, 0xff, 0 -#define RG_IEEE_ADDR_6 (0x2a) -#define SR_IEEE_ADDR_6 0x2a, 0xff, 0 -#define RG_IEEE_ADDR_7 (0x2b) -#define SR_IEEE_ADDR_7 0x2b, 0xff, 0 -#define RG_XAH_CTRL_0 (0x2c) -#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0 -#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1 -#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4 -#define RG_CSMA_SEED_0 (0x2d) -#define SR_CSMA_SEED_0 0x2d, 0xff, 0 -#define RG_CSMA_SEED_1 (0x2e) -#define SR_CSMA_SEED_1 0x2e, 0x07, 0 -#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3 -#define SR_AACK_DIS_ACK 0x2e, 0x10, 4 -#define SR_AACK_SET_PD 0x2e, 0x20, 5 -#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6 -#define RG_CSMA_BE (0x2f) -#define SR_MIN_BE 0x2f, 0x0f, 0 -#define SR_MAX_BE 0x2f, 0xf0, 4 - -#define CMD_REG 0x80 -#define CMD_REG_MASK 0x3f -#define CMD_WRITE 0x40 -#define CMD_FB 0x20 - -#define IRQ_BAT_LOW (1 << 7) -#define IRQ_TRX_UR (1 << 6) -#define IRQ_AMI (1 << 5) -#define IRQ_CCA_ED (1 << 4) -#define IRQ_TRX_END (1 << 3) -#define IRQ_RX_START (1 << 2) -#define IRQ_PLL_UNL (1 << 1) -#define IRQ_PLL_LOCK (1 << 0) - -#define IRQ_ACTIVE_HIGH 0 -#define IRQ_ACTIVE_LOW 1 - -#define STATE_P_ON 0x00 /* BUSY */ -#define STATE_BUSY_RX 0x01 -#define STATE_BUSY_TX 0x02 -#define STATE_FORCE_TRX_OFF 0x03 -#define STATE_FORCE_TX_ON 0x04 /* 
IDLE */ -/* 0x05 */ /* INVALID_PARAMETER */ -#define STATE_RX_ON 0x06 -/* 0x07 */ /* SUCCESS */ -#define STATE_TRX_OFF 0x08 -#define STATE_TX_ON 0x09 -/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */ -#define STATE_SLEEP 0x0F -#define STATE_PREP_DEEP_SLEEP 0x10 -#define STATE_BUSY_RX_AACK 0x11 -#define STATE_BUSY_TX_ARET 0x12 -#define STATE_RX_AACK_ON 0x16 -#define STATE_TX_ARET_ON 0x19 -#define STATE_RX_ON_NOCLK 0x1C -#define STATE_RX_AACK_ON_NOCLK 0x1D -#define STATE_BUSY_RX_AACK_NOCLK 0x1E -#define STATE_TRANSITION_IN_PROGRESS 0x1F - -#define TRX_STATE_MASK (0x1F) - #define AT86RF2XX_NUMREGS 0x3F static void @@ -1010,7 +818,7 @@ at86rf230_xmit_start(void *context) if (lp->is_tx_from_off) { lp->is_tx_from_off = false; at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, - at86rf230_xmit_tx_on, + at86rf230_write_frame, false); } else { at86rf230_async_state_change(lp, ctx, STATE_TX_ON, @@ -1076,6 +884,50 @@ at86rf23x_set_channel(struct at86rf230_local *lp, u8 page, u8 channel) return at86rf230_write_subreg(lp, SR_CHANNEL, channel); } +#define AT86RF2XX_MAX_ED_LEVELS 0xF +static const s32 at86rf23x_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = { + -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300, + -7100, -6900, -6700, -6500, -6300, -6100, +}; + +static const s32 at86rf212_ed_levels_100[AT86RF2XX_MAX_ED_LEVELS + 1] = { + -10000, -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, + -8000, -7800, -7600, -7400, -7200, -7000, +}; + +static const s32 at86rf212_ed_levels_98[AT86RF2XX_MAX_ED_LEVELS + 1] = { + -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000, + -7800, -7600, -7400, -7200, -7000, -6800, +}; + +static inline int +at86rf212_update_cca_ed_level(struct at86rf230_local *lp, int rssi_base_val) +{ + unsigned int cca_ed_thres; + int rc; + + rc = at86rf230_read_subreg(lp, SR_CCA_ED_THRES, &cca_ed_thres); + if (rc < 0) + return rc; + + switch (rssi_base_val) { + case -98: + lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_98; + lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_98); + lp->hw->phy->cca_ed_level = at86rf212_ed_levels_98[cca_ed_thres]; + break; + case -100: + lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100; + lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100); + lp->hw->phy->cca_ed_level = at86rf212_ed_levels_100[cca_ed_thres]; + break; + default: + WARN_ON(1); + } + + return 0; +} + static int at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel) { @@ -1098,6 +950,10 @@ at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel) if (rc < 0) return rc; + rc = at86rf212_update_cca_ed_level(lp, lp->data->rssi_base_val); + if (rc < 0) + return rc; + /* This sets the symbol_duration according frequency on the 212. * TODO move this handling while set channel and page in cfg802154. * We can do that, this timings are according 802.15.4 standard. 
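A note on the tables introduced above: CCA ED levels (and, in the following hunks, TX powers) are expressed in mbm (dBm * 100), and what gets written to the subregister is the index of the matching table entry, not the value itself. A minimal sketch of that lookup (mbm_to_index is an assumed name, not part of the patch):

	/* Sketch only: map an mbm value to its table index. Only exact
	 * matches are accepted, mirroring at86rf230_set_cca_ed_level() and
	 * the set_txpower callbacks later in this patch.
	 */
	static int mbm_to_index(const s32 *table, size_t size, s32 mbm)
	{
		size_t i;

		for (i = 0; i < size; i++)
			if (table[i] == mbm)
				return i;	/* index programmed into the subregister */
		return -EINVAL;
	}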
@@ -1193,23 +1049,56 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, return 0; } +#define AT86RF23X_MAX_TX_POWERS 0xF +static const s32 at86rf233_powers[AT86RF23X_MAX_TX_POWERS + 1] = { + 400, 370, 340, 300, 250, 200, 100, 0, -100, -200, -300, -400, -600, + -800, -1200, -1700, +}; + +static const s32 at86rf231_powers[AT86RF23X_MAX_TX_POWERS + 1] = { + 300, 280, 230, 180, 130, 70, 0, -100, -200, -300, -400, -500, -700, + -900, -1200, -1700, +}; + +#define AT86RF212_MAX_TX_POWERS 0x1F +static const s32 at86rf212_powers[AT86RF212_MAX_TX_POWERS + 1] = { + 500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700, + -800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700, + -1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600, +}; + +static int +at86rf23x_set_txpower(struct at86rf230_local *lp, s32 mbm) +{ + u32 i; + + for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) { + if (lp->hw->phy->supported.tx_powers[i] == mbm) + return at86rf230_write_subreg(lp, SR_TX_PWR_23X, i); + } + + return -EINVAL; +} + static int -at86rf230_set_txpower(struct ieee802154_hw *hw, s8 db) +at86rf212_set_txpower(struct at86rf230_local *lp, s32 mbm) { - struct at86rf230_local *lp = hw->priv; + u32 i; - /* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five - * bits decrease power in 1dB steps. 0x60 represents extra PA gain of - * 0dB. - * thus, supported values for db range from -26 to 5, for 31dB of - * reduction to 0dB of reduction. - */ - if (db > 5 || db < -26) - return -EINVAL; + for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) { + if (lp->hw->phy->supported.tx_powers[i] == mbm) + return at86rf230_write_subreg(lp, SR_TX_PWR_212, i); + } - db = -(db - 5); + return -EINVAL; +} + +static int +at86rf230_set_txpower(struct ieee802154_hw *hw, s32 mbm) +{ + struct at86rf230_local *lp = hw->priv; - return __at86rf230_write(lp, RG_PHY_TX_PWR, 0x60 | db); + return lp->data->set_txpower(lp, mbm); } static int @@ -1254,28 +1143,19 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw, return at86rf230_write_subreg(lp, SR_CCA_MODE, val); } -static int -at86rf212_get_desens_steps(struct at86rf230_local *lp, s32 level) -{ - return (level - lp->data->rssi_base_val) * 100 / 207; -} - -static int -at86rf23x_get_desens_steps(struct at86rf230_local *lp, s32 level) -{ - return (level - lp->data->rssi_base_val) / 2; -} static int -at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 level) +at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) { struct at86rf230_local *lp = hw->priv; + u32 i; - if (level < lp->data->rssi_base_val || level > 30) - return -EINVAL; + for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) { + if (hw->phy->supported.cca_ed_levels[i] == mbm) + return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, i); + } - return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, - lp->data->get_desense_steps(lp, level)); + return -EINVAL; } static int @@ -1365,7 +1245,7 @@ static struct at86rf2xx_chip_data at86rf233_data = { .t_p_ack = 545, .rssi_base_val = -91, .set_channel = at86rf23x_set_channel, - .get_desense_steps = at86rf23x_get_desens_steps + .set_txpower = at86rf23x_set_txpower, }; static struct at86rf2xx_chip_data at86rf231_data = { @@ -1378,7 +1258,7 @@ static struct at86rf2xx_chip_data at86rf231_data = { .t_p_ack = 545, .rssi_base_val = -91, .set_channel = at86rf23x_set_channel, - .get_desense_steps = at86rf23x_get_desens_steps + .set_txpower = at86rf23x_set_txpower, }; static struct at86rf2xx_chip_data at86rf212_data = { @@ -1391,7 
+1271,7 @@ static struct at86rf2xx_chip_data at86rf212_data = { .t_p_ack = 545, .rssi_base_val = -100, .set_channel = at86rf212_set_channel, - .get_desense_steps = at86rf212_get_desens_steps + .set_txpower = at86rf212_set_txpower, }; static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim) @@ -1564,8 +1444,21 @@ at86rf230_detect_device(struct at86rf230_local *lp) } lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK | - IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET | - IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS; + IEEE802154_HW_CSMA_PARAMS | + IEEE802154_HW_FRAME_RETRIES | IEEE802154_HW_AFILT | + IEEE802154_HW_PROMISCUOUS; + + lp->hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | + WPAN_PHY_FLAG_CCA_ED_LEVEL | + WPAN_PHY_FLAG_CCA_MODE; + + lp->hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) | + BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER); + lp->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) | + BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR); + + lp->hw->phy->supported.cca_ed_levels = at86rf23x_ed_levels; + lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf23x_ed_levels); lp->hw->phy->cca.mode = NL802154_CCA_ENERGY; @@ -1573,36 +1466,49 @@ at86rf230_detect_device(struct at86rf230_local *lp) case 2: chip = "at86rf230"; rc = -ENOTSUPP; - break; + goto not_supp; case 3: chip = "at86rf231"; lp->data = &at86rf231_data; - lp->hw->phy->channels_supported[0] = 0x7FFF800; + lp->hw->phy->supported.channels[0] = 0x7FFF800; lp->hw->phy->current_channel = 11; lp->hw->phy->symbol_duration = 16; + lp->hw->phy->supported.tx_powers = at86rf231_powers; + lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers); break; case 7: chip = "at86rf212"; lp->data = &at86rf212_data; lp->hw->flags |= IEEE802154_HW_LBT; - lp->hw->phy->channels_supported[0] = 0x00007FF; - lp->hw->phy->channels_supported[2] = 0x00007FF; + lp->hw->phy->supported.channels[0] = 0x00007FF; + lp->hw->phy->supported.channels[2] = 0x00007FF; lp->hw->phy->current_channel = 5; lp->hw->phy->symbol_duration = 25; + lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH; + lp->hw->phy->supported.tx_powers = at86rf212_powers; + lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers); + lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100; + lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100); break; case 11: chip = "at86rf233"; lp->data = &at86rf233_data; - lp->hw->phy->channels_supported[0] = 0x7FFF800; + lp->hw->phy->supported.channels[0] = 0x7FFF800; lp->hw->phy->current_channel = 13; lp->hw->phy->symbol_duration = 16; + lp->hw->phy->supported.tx_powers = at86rf233_powers; + lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers); break; default: chip = "unknown"; rc = -ENOTSUPP; - break; + goto not_supp; } + lp->hw->phy->cca_ed_level = lp->hw->phy->supported.cca_ed_levels[7]; + lp->hw->phy->transmit_power = lp->hw->phy->supported.tx_powers[0]; + +not_supp: dev_info(&lp->spi->dev, "Detected %s chip version %d\n", chip, version); return rc; diff --git a/drivers/net/ieee802154/at86rf230.h b/drivers/net/ieee802154/at86rf230.h new file mode 100644 index 000000000000..1e6d1cc677f6 --- /dev/null +++ b/drivers/net/ieee802154/at86rf230.h @@ -0,0 +1,220 @@ +/* + * AT86RF230/RF231 driver + * + * Copyright (C) 2009-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software 
Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Written by: + * Dmitry Eremin-Solenikov + * Alexander Smirnov + */ + +#ifndef _AT86RF230_H +#define _AT86RF230_H + +#define RG_TRX_STATUS (0x01) +#define SR_TRX_STATUS 0x01, 0x1f, 0 +#define SR_RESERVED_01_3 0x01, 0x20, 5 +#define SR_CCA_STATUS 0x01, 0x40, 6 +#define SR_CCA_DONE 0x01, 0x80, 7 +#define RG_TRX_STATE (0x02) +#define SR_TRX_CMD 0x02, 0x1f, 0 +#define SR_TRAC_STATUS 0x02, 0xe0, 5 +#define RG_TRX_CTRL_0 (0x03) +#define SR_CLKM_CTRL 0x03, 0x07, 0 +#define SR_CLKM_SHA_SEL 0x03, 0x08, 3 +#define SR_PAD_IO_CLKM 0x03, 0x30, 4 +#define SR_PAD_IO 0x03, 0xc0, 6 +#define RG_TRX_CTRL_1 (0x04) +#define SR_IRQ_POLARITY 0x04, 0x01, 0 +#define SR_IRQ_MASK_MODE 0x04, 0x02, 1 +#define SR_SPI_CMD_MODE 0x04, 0x0c, 2 +#define SR_RX_BL_CTRL 0x04, 0x10, 4 +#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5 +#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6 +#define SR_PA_EXT_EN 0x04, 0x80, 7 +#define RG_PHY_TX_PWR (0x05) +#define SR_TX_PWR_23X 0x05, 0x0f, 0 +#define SR_PA_LT_230 0x05, 0x30, 4 +#define SR_PA_BUF_LT_230 0x05, 0xc0, 6 +#define SR_TX_PWR_212 0x05, 0x1f, 0 +#define SR_GC_PA_212 0x05, 0x60, 5 +#define SR_PA_BOOST_LT_212 0x05, 0x80, 7 +#define RG_PHY_RSSI (0x06) +#define SR_RSSI 0x06, 0x1f, 0 +#define SR_RND_VALUE 0x06, 0x60, 5 +#define SR_RX_CRC_VALID 0x06, 0x80, 7 +#define RG_PHY_ED_LEVEL (0x07) +#define SR_ED_LEVEL 0x07, 0xff, 0 +#define RG_PHY_CC_CCA (0x08) +#define SR_CHANNEL 0x08, 0x1f, 0 +#define SR_CCA_MODE 0x08, 0x60, 5 +#define SR_CCA_REQUEST 0x08, 0x80, 7 +#define RG_CCA_THRES (0x09) +#define SR_CCA_ED_THRES 0x09, 0x0f, 0 +#define SR_RESERVED_09_1 0x09, 0xf0, 4 +#define RG_RX_CTRL (0x0a) +#define SR_PDT_THRES 0x0a, 0x0f, 0 +#define SR_RESERVED_0a_1 0x0a, 0xf0, 4 +#define RG_SFD_VALUE (0x0b) +#define SR_SFD_VALUE 0x0b, 0xff, 0 +#define RG_TRX_CTRL_2 (0x0c) +#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0 +#define SR_SUB_MODE 0x0c, 0x04, 2 +#define SR_BPSK_QPSK 0x0c, 0x08, 3 +#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4 +#define SR_RESERVED_0c_5 0x0c, 0x60, 5 +#define SR_RX_SAFE_MODE 0x0c, 0x80, 7 +#define RG_ANT_DIV (0x0d) +#define SR_ANT_CTRL 0x0d, 0x03, 0 +#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2 +#define SR_ANT_DIV_EN 0x0d, 0x08, 3 +#define SR_RESERVED_0d_2 0x0d, 0x70, 4 +#define SR_ANT_SEL 0x0d, 0x80, 7 +#define RG_IRQ_MASK (0x0e) +#define SR_IRQ_MASK 0x0e, 0xff, 0 +#define RG_IRQ_STATUS (0x0f) +#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0 +#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1 +#define SR_IRQ_2_RX_START 0x0f, 0x04, 2 +#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3 +#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4 +#define SR_IRQ_5_AMI 0x0f, 0x20, 5 +#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6 +#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7 +#define RG_VREG_CTRL (0x10) +#define SR_RESERVED_10_6 0x10, 0x03, 0 +#define SR_DVDD_OK 0x10, 0x04, 2 +#define SR_DVREG_EXT 0x10, 0x08, 3 +#define SR_RESERVED_10_3 0x10, 0x30, 4 +#define SR_AVDD_OK 0x10, 0x40, 6 +#define SR_AVREG_EXT 0x10, 0x80, 7 +#define RG_BATMON (0x11) +#define SR_BATMON_VTH 0x11, 0x0f, 0 +#define SR_BATMON_HR 0x11, 0x10, 4 +#define SR_BATMON_OK 0x11, 0x20, 5 +#define SR_RESERVED_11_1 0x11, 0xc0, 6 +#define RG_XOSC_CTRL (0x12) +#define SR_XTAL_TRIM 0x12, 0x0f, 0 +#define SR_XTAL_MODE 0x12, 0xf0, 4 +#define RG_RX_SYN (0x15) +#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0 +#define SR_RESERVED_15_2 0x15, 0x70, 4 +#define 
SR_RX_PDT_DIS 0x15, 0x80, 7 +#define RG_XAH_CTRL_1 (0x17) +#define SR_RESERVED_17_8 0x17, 0x01, 0 +#define SR_AACK_PROM_MODE 0x17, 0x02, 1 +#define SR_AACK_ACK_TIME 0x17, 0x04, 2 +#define SR_RESERVED_17_5 0x17, 0x08, 3 +#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4 +#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5 +#define SR_CSMA_LBT_MODE 0x17, 0x40, 6 +#define SR_RESERVED_17_1 0x17, 0x80, 7 +#define RG_FTN_CTRL (0x18) +#define SR_RESERVED_18_2 0x18, 0x7f, 0 +#define SR_FTN_START 0x18, 0x80, 7 +#define RG_PLL_CF (0x1a) +#define SR_RESERVED_1a_2 0x1a, 0x7f, 0 +#define SR_PLL_CF_START 0x1a, 0x80, 7 +#define RG_PLL_DCU (0x1b) +#define SR_RESERVED_1b_3 0x1b, 0x3f, 0 +#define SR_RESERVED_1b_2 0x1b, 0x40, 6 +#define SR_PLL_DCU_START 0x1b, 0x80, 7 +#define RG_PART_NUM (0x1c) +#define SR_PART_NUM 0x1c, 0xff, 0 +#define RG_VERSION_NUM (0x1d) +#define SR_VERSION_NUM 0x1d, 0xff, 0 +#define RG_MAN_ID_0 (0x1e) +#define SR_MAN_ID_0 0x1e, 0xff, 0 +#define RG_MAN_ID_1 (0x1f) +#define SR_MAN_ID_1 0x1f, 0xff, 0 +#define RG_SHORT_ADDR_0 (0x20) +#define SR_SHORT_ADDR_0 0x20, 0xff, 0 +#define RG_SHORT_ADDR_1 (0x21) +#define SR_SHORT_ADDR_1 0x21, 0xff, 0 +#define RG_PAN_ID_0 (0x22) +#define SR_PAN_ID_0 0x22, 0xff, 0 +#define RG_PAN_ID_1 (0x23) +#define SR_PAN_ID_1 0x23, 0xff, 0 +#define RG_IEEE_ADDR_0 (0x24) +#define SR_IEEE_ADDR_0 0x24, 0xff, 0 +#define RG_IEEE_ADDR_1 (0x25) +#define SR_IEEE_ADDR_1 0x25, 0xff, 0 +#define RG_IEEE_ADDR_2 (0x26) +#define SR_IEEE_ADDR_2 0x26, 0xff, 0 +#define RG_IEEE_ADDR_3 (0x27) +#define SR_IEEE_ADDR_3 0x27, 0xff, 0 +#define RG_IEEE_ADDR_4 (0x28) +#define SR_IEEE_ADDR_4 0x28, 0xff, 0 +#define RG_IEEE_ADDR_5 (0x29) +#define SR_IEEE_ADDR_5 0x29, 0xff, 0 +#define RG_IEEE_ADDR_6 (0x2a) +#define SR_IEEE_ADDR_6 0x2a, 0xff, 0 +#define RG_IEEE_ADDR_7 (0x2b) +#define SR_IEEE_ADDR_7 0x2b, 0xff, 0 +#define RG_XAH_CTRL_0 (0x2c) +#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0 +#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1 +#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4 +#define RG_CSMA_SEED_0 (0x2d) +#define SR_CSMA_SEED_0 0x2d, 0xff, 0 +#define RG_CSMA_SEED_1 (0x2e) +#define SR_CSMA_SEED_1 0x2e, 0x07, 0 +#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3 +#define SR_AACK_DIS_ACK 0x2e, 0x10, 4 +#define SR_AACK_SET_PD 0x2e, 0x20, 5 +#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6 +#define RG_CSMA_BE (0x2f) +#define SR_MIN_BE 0x2f, 0x0f, 0 +#define SR_MAX_BE 0x2f, 0xf0, 4 + +#define CMD_REG 0x80 +#define CMD_REG_MASK 0x3f +#define CMD_WRITE 0x40 +#define CMD_FB 0x20 + +#define IRQ_BAT_LOW BIT(7) +#define IRQ_TRX_UR BIT(6) +#define IRQ_AMI BIT(5) +#define IRQ_CCA_ED BIT(4) +#define IRQ_TRX_END BIT(3) +#define IRQ_RX_START BIT(2) +#define IRQ_PLL_UNL BIT(1) +#define IRQ_PLL_LOCK BIT(0) + +#define IRQ_ACTIVE_HIGH 0 +#define IRQ_ACTIVE_LOW 1 + +#define STATE_P_ON 0x00 /* BUSY */ +#define STATE_BUSY_RX 0x01 +#define STATE_BUSY_TX 0x02 +#define STATE_FORCE_TRX_OFF 0x03 +#define STATE_FORCE_TX_ON 0x04 /* IDLE */ +/* 0x05 */ /* INVALID_PARAMETER */ +#define STATE_RX_ON 0x06 +/* 0x07 */ /* SUCCESS */ +#define STATE_TRX_OFF 0x08 +#define STATE_TX_ON 0x09 +/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */ +#define STATE_SLEEP 0x0F +#define STATE_PREP_DEEP_SLEEP 0x10 +#define STATE_BUSY_RX_AACK 0x11 +#define STATE_BUSY_TX_ARET 0x12 +#define STATE_RX_AACK_ON 0x16 +#define STATE_TX_ARET_ON 0x19 +#define STATE_RX_ON_NOCLK 0x1C +#define STATE_RX_AACK_ON_NOCLK 0x1D +#define STATE_BUSY_RX_AACK_NOCLK 0x1E +#define STATE_TRANSITION_IN_PROGRESS 0x1F + +#define TRX_STATE_MASK (0x1F) + +#endif /* !_AT86RF230_H */ diff --git 
a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c new file mode 100644 index 000000000000..5b6bb9adf9ae --- /dev/null +++ b/drivers/net/ieee802154/atusb.c @@ -0,0 +1,699 @@ +/* + * atusb.c - Driver for the ATUSB IEEE 802.15.4 dongle + * + * Written 2013 by Werner Almesberger + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 + * + * Based on at86rf230.c and spi_atusb.c. + * at86rf230.c is + * Copyright (C) 2009 Siemens AG + * Written by: Dmitry Eremin-Solenikov + * + * spi_atusb.c is + * Copyright (c) 2011 Richard Sharpe + * Copyright (c) 2011 Stefan Schmidt + * Copyright (c) 2011 Werner Almesberger + * + * USB initialization is + * Copyright (c) 2013 Alexander Aring + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "at86rf230.h" +#include "atusb.h" + +#define ATUSB_JEDEC_ATMEL 0x1f /* JEDEC manufacturer ID */ + +#define ATUSB_NUM_RX_URBS 4 /* allow for a bit of local latency */ +#define ATUSB_ALLOC_DELAY_MS 100 /* delay after failed allocation */ +#define ATUSB_TX_TIMEOUT_MS 200 /* on the air timeout */ + +struct atusb { + struct ieee802154_hw *hw; + struct usb_device *usb_dev; + int shutdown; /* non-zero if shutting down */ + int err; /* set by first error */ + + /* RX variables */ + struct delayed_work work; /* memory allocations */ + struct usb_anchor idle_urbs; /* URBs waiting to be submitted */ + struct usb_anchor rx_urbs; /* URBs waiting for reception */ + + /* TX variables */ + struct usb_ctrlrequest tx_dr; + struct urb *tx_urb; + struct sk_buff *tx_skb; + uint8_t tx_ack_seq; /* current TX ACK sequence number */ +}; + +/* at86rf230.h defines values as tuples. We use the more + * traditional style of having registers and or-able values. SR_REG extracts + * the register number. SR_VALUE uses the shift to prepare a value accordingly. + */ + +#define __SR_REG(reg, mask, shift) (reg) +#define SR_REG(sr) __SR_REG(sr) + +#define __SR_VALUE(reg, mask, shift, val) ((val) << (shift)) +#define SR_VALUE(sr, val) __SR_VALUE(sr, (val)) + +/* ----- USB commands without data ----------------------------------------- */ + +/* To reduce the number of error checks in the code, we record the first error + * in atusb->err and reject all subsequent requests until the error is cleared. 
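+ * atusb_get_and_clear_error() returns the latched error and resets it,
+ * so a sequence of register accesses needs only a single check at the
+ * end.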
+ */ + +static int atusb_control_msg(struct atusb *atusb, unsigned int pipe, + __u8 request, __u8 requesttype, + __u16 value, __u16 index, + void *data, __u16 size, int timeout) +{ + struct usb_device *usb_dev = atusb->usb_dev; + int ret; + + if (atusb->err) + return atusb->err; + + ret = usb_control_msg(usb_dev, pipe, request, requesttype, + value, index, data, size, timeout); + if (ret < 0) { + atusb->err = ret; + dev_err(&usb_dev->dev, + "atusb_control_msg: req 0x%02x val 0x%x idx 0x%x, error %d\n", + request, value, index, ret); + } + return ret; +} + +static int atusb_command(struct atusb *atusb, uint8_t cmd, uint8_t arg) +{ + struct usb_device *usb_dev = atusb->usb_dev; + + dev_dbg(&usb_dev->dev, "atusb_command: cmd = 0x%x\n", cmd); + return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0), + cmd, ATUSB_REQ_TO_DEV, arg, 0, NULL, 0, 1000); +} + +static int atusb_write_reg(struct atusb *atusb, uint8_t reg, uint8_t value) +{ + struct usb_device *usb_dev = atusb->usb_dev; + + dev_dbg(&usb_dev->dev, "atusb_write_reg: 0x%02x <- 0x%02x\n", + reg, value); + return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0), + ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV, + value, reg, NULL, 0, 1000); +} + +static int atusb_read_reg(struct atusb *atusb, uint8_t reg) +{ + struct usb_device *usb_dev = atusb->usb_dev; + int ret; + uint8_t value; + + dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg); + ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), + ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, + 0, reg, &value, 1, 1000); + return ret >= 0 ? value : ret; +} + +static int atusb_get_and_clear_error(struct atusb *atusb) +{ + int err = atusb->err; + + atusb->err = 0; + return err; +} + +/* ----- skb allocation ---------------------------------------------------- */ + +#define MAX_PSDU 127 +#define MAX_RX_XFER (1 + MAX_PSDU + 2 + 1) /* PHR+PSDU+CRC+LQI */ + +#define SKB_ATUSB(skb) (*(struct atusb **)(skb)->cb) + +static void atusb_in(struct urb *urb); + +static int atusb_submit_rx_urb(struct atusb *atusb, struct urb *urb) +{ + struct usb_device *usb_dev = atusb->usb_dev; + struct sk_buff *skb = urb->context; + int ret; + + if (!skb) { + skb = alloc_skb(MAX_RX_XFER, GFP_KERNEL); + if (!skb) { + dev_warn_ratelimited(&usb_dev->dev, + "atusb_in: can't allocate skb\n"); + return -ENOMEM; + } + skb_put(skb, MAX_RX_XFER); + SKB_ATUSB(skb) = atusb; + } + + usb_fill_bulk_urb(urb, usb_dev, usb_rcvbulkpipe(usb_dev, 1), + skb->data, MAX_RX_XFER, atusb_in, skb); + usb_anchor_urb(urb, &atusb->rx_urbs); + + ret = usb_submit_urb(urb, GFP_KERNEL); + if (ret) { + usb_unanchor_urb(urb); + kfree_skb(skb); + urb->context = NULL; + } + return ret; +} + +static void atusb_work_urbs(struct work_struct *work) +{ + struct atusb *atusb = + container_of(to_delayed_work(work), struct atusb, work); + struct usb_device *usb_dev = atusb->usb_dev; + struct urb *urb; + int ret; + + if (atusb->shutdown) + return; + + do { + urb = usb_get_from_anchor(&atusb->idle_urbs); + if (!urb) + return; + ret = atusb_submit_rx_urb(atusb, urb); + } while (!ret); + + usb_anchor_urb(urb, &atusb->idle_urbs); + dev_warn_ratelimited(&usb_dev->dev, + "atusb_in: can't allocate/submit URB (%d)\n", ret); + schedule_delayed_work(&atusb->work, + msecs_to_jiffies(ATUSB_ALLOC_DELAY_MS) + 1); +} + +/* ----- Asynchronous USB -------------------------------------------------- */ + +static void atusb_tx_done(struct atusb *atusb, uint8_t seq) +{ + struct usb_device *usb_dev = atusb->usb_dev; + uint8_t expect = atusb->tx_ack_seq; + + dev_dbg(&usb_dev->dev, "atusb_tx_done 
(0x%02x/0x%02x)\n", seq, expect); + if (seq == expect) { + /* TODO check for ifs handling in firmware */ + ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false); + } else { + /* TODO I experience this case when atusb has a tx complete + * irq before probing, we should fix the firmware it's an + * unlikely case now that seq == expect is then true, but can + * happen and fail with a tx_skb = NULL; + */ + ieee802154_wake_queue(atusb->hw); + if (atusb->tx_skb) + dev_kfree_skb_irq(atusb->tx_skb); + } +} + +static void atusb_in_good(struct urb *urb) +{ + struct usb_device *usb_dev = urb->dev; + struct sk_buff *skb = urb->context; + struct atusb *atusb = SKB_ATUSB(skb); + uint8_t len, lqi; + + if (!urb->actual_length) { + dev_dbg(&usb_dev->dev, "atusb_in: zero-sized URB ?\n"); + return; + } + + len = *skb->data; + + if (urb->actual_length == 1) { + atusb_tx_done(atusb, len); + return; + } + + if (len + 1 > urb->actual_length - 1) { + dev_dbg(&usb_dev->dev, "atusb_in: frame len %d+1 > URB %u-1\n", + len, urb->actual_length); + return; + } + + if (!ieee802154_is_valid_psdu_len(len)) { + dev_dbg(&usb_dev->dev, "atusb_in: frame corrupted\n"); + return; + } + + lqi = skb->data[len + 1]; + dev_dbg(&usb_dev->dev, "atusb_in: rx len %d lqi 0x%02x\n", len, lqi); + skb_pull(skb, 1); /* remove PHR */ + skb_trim(skb, len); /* get payload only */ + ieee802154_rx_irqsafe(atusb->hw, skb, lqi); + urb->context = NULL; /* skb is gone */ +} + +static void atusb_in(struct urb *urb) +{ + struct usb_device *usb_dev = urb->dev; + struct sk_buff *skb = urb->context; + struct atusb *atusb = SKB_ATUSB(skb); + + dev_dbg(&usb_dev->dev, "atusb_in: status %d len %d\n", + urb->status, urb->actual_length); + if (urb->status) { + if (urb->status == -ENOENT) { /* being killed */ + kfree_skb(skb); + urb->context = NULL; + return; + } + dev_dbg(&usb_dev->dev, "atusb_in: URB error %d\n", urb->status); + } else { + atusb_in_good(urb); + } + + usb_anchor_urb(urb, &atusb->idle_urbs); + if (!atusb->shutdown) + schedule_delayed_work(&atusb->work, 0); +} + +/* ----- URB allocation/deallocation --------------------------------------- */ + +static void atusb_free_urbs(struct atusb *atusb) +{ + struct urb *urb; + + while (1) { + urb = usb_get_from_anchor(&atusb->idle_urbs); + if (!urb) + break; + if (urb->context) + kfree_skb(urb->context); + usb_free_urb(urb); + } +} + +static int atusb_alloc_urbs(struct atusb *atusb, int n) +{ + struct urb *urb; + + while (n) { + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + atusb_free_urbs(atusb); + return -ENOMEM; + } + usb_anchor_urb(urb, &atusb->idle_urbs); + n--; + } + return 0; +} + +/* ----- IEEE 802.15.4 interface operations -------------------------------- */ + +static void atusb_xmit_complete(struct urb *urb) +{ + dev_dbg(&urb->dev->dev, "atusb_xmit urb completed"); +} + +static int atusb_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) +{ + struct atusb *atusb = hw->priv; + struct usb_device *usb_dev = atusb->usb_dev; + int ret; + + dev_dbg(&usb_dev->dev, "atusb_xmit (%d)\n", skb->len); + atusb->tx_skb = skb; + atusb->tx_ack_seq++; + atusb->tx_dr.wIndex = cpu_to_le16(atusb->tx_ack_seq); + atusb->tx_dr.wLength = cpu_to_le16(skb->len); + + usb_fill_control_urb(atusb->tx_urb, usb_dev, + usb_sndctrlpipe(usb_dev, 0), + (unsigned char *)&atusb->tx_dr, skb->data, + skb->len, atusb_xmit_complete, NULL); + ret = usb_submit_urb(atusb->tx_urb, GFP_ATOMIC); + dev_dbg(&usb_dev->dev, "atusb_xmit done (%d)\n", ret); + return ret; +} + +static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 
channel) +{ + struct atusb *atusb = hw->priv; + int ret; + + /* This implicitly sets the CCA (Clear Channel Assessment) mode to 0, + * "Mode 3a, Carrier sense OR energy above threshold". + * We should probably make this configurable. @@@ + */ + ret = atusb_write_reg(atusb, RG_PHY_CC_CCA, channel); + if (ret < 0) + return ret; + msleep(1); /* @@@ ugly synchronization */ + return 0; +} + +static int atusb_ed(struct ieee802154_hw *hw, u8 *level) +{ + BUG_ON(!level); + *level = 0xbe; + return 0; +} + +static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw, + struct ieee802154_hw_addr_filt *filt, + unsigned long changed) +{ + struct atusb *atusb = hw->priv; + struct device *dev = &atusb->usb_dev->dev; + uint8_t reg; + + if (changed & IEEE802154_AFILT_SADDR_CHANGED) { + u16 addr = le16_to_cpu(filt->short_addr); + + dev_vdbg(dev, "atusb_set_hw_addr_filt called for saddr\n"); + atusb_write_reg(atusb, RG_SHORT_ADDR_0, addr); + atusb_write_reg(atusb, RG_SHORT_ADDR_1, addr >> 8); + } + + if (changed & IEEE802154_AFILT_PANID_CHANGED) { + u16 pan = le16_to_cpu(filt->pan_id); + + dev_vdbg(dev, "atusb_set_hw_addr_filt called for pan id\n"); + atusb_write_reg(atusb, RG_PAN_ID_0, pan); + atusb_write_reg(atusb, RG_PAN_ID_1, pan >> 8); + } + + if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { + u8 i, addr[IEEE802154_EXTENDED_ADDR_LEN]; + + memcpy(addr, &filt->ieee_addr, IEEE802154_EXTENDED_ADDR_LEN); + dev_vdbg(dev, "atusb_set_hw_addr_filt called for IEEE addr\n"); + for (i = 0; i < 8; i++) + atusb_write_reg(atusb, RG_IEEE_ADDR_0 + i, addr[i]); + } + + if (changed & IEEE802154_AFILT_PANC_CHANGED) { + dev_vdbg(dev, + "atusb_set_hw_addr_filt called for panc change\n"); + reg = atusb_read_reg(atusb, SR_REG(SR_AACK_I_AM_COORD)); + if (filt->pan_coord) + reg |= SR_VALUE(SR_AACK_I_AM_COORD, 1); + else + reg &= ~SR_VALUE(SR_AACK_I_AM_COORD, 1); + atusb_write_reg(atusb, SR_REG(SR_AACK_I_AM_COORD), reg); + } + + return atusb_get_and_clear_error(atusb); +} + +static int atusb_start(struct ieee802154_hw *hw) +{ + struct atusb *atusb = hw->priv; + struct usb_device *usb_dev = atusb->usb_dev; + int ret; + + dev_dbg(&usb_dev->dev, "atusb_start\n"); + schedule_delayed_work(&atusb->work, 0); + atusb_command(atusb, ATUSB_RX_MODE, 1); + ret = atusb_get_and_clear_error(atusb); + if (ret < 0) + usb_kill_anchored_urbs(&atusb->idle_urbs); + return ret; +} + +static void atusb_stop(struct ieee802154_hw *hw) +{ + struct atusb *atusb = hw->priv; + struct usb_device *usb_dev = atusb->usb_dev; + + dev_dbg(&usb_dev->dev, "atusb_stop\n"); + usb_kill_anchored_urbs(&atusb->idle_urbs); + atusb_command(atusb, ATUSB_RX_MODE, 0); + atusb_get_and_clear_error(atusb); +} + +static struct ieee802154_ops atusb_ops = { + .owner = THIS_MODULE, + .xmit_async = atusb_xmit, + .ed = atusb_ed, + .set_channel = atusb_channel, + .start = atusb_start, + .stop = atusb_stop, + .set_hw_addr_filt = atusb_set_hw_addr_filt, +}; + +/* ----- Firmware and chip version information ----------------------------- */ + +static int atusb_get_and_show_revision(struct atusb *atusb) +{ + struct usb_device *usb_dev = atusb->usb_dev; + unsigned char buffer[3]; + int ret; + + /* Get a couple of the ATMega Firmware values */ + ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), + ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0, + buffer, 3, 1000); + if (ret >= 0) + dev_info(&usb_dev->dev, + "Firmware: major: %u, minor: %u, hardware type: %u\n", + buffer[0], buffer[1], buffer[2]); + if (buffer[0] == 0 && buffer[1] < 2) { + dev_info(&usb_dev->dev, + "Firmware version (%u.%u) is 
older than our first public release.", + buffer[0], buffer[1]); + dev_info(&usb_dev->dev, "Please update to version 0.2 or newer"); + } + + return ret; +} + +static int atusb_get_and_show_build(struct atusb *atusb) +{ + struct usb_device *usb_dev = atusb->usb_dev; + char build[ATUSB_BUILD_SIZE + 1]; + int ret; + + ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), + ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0, + build, ATUSB_BUILD_SIZE, 1000); + if (ret >= 0) { + build[ret] = 0; + dev_info(&usb_dev->dev, "Firmware: build %s\n", build); + } + + return ret; +} + +static int atusb_get_and_show_chip(struct atusb *atusb) +{ + struct usb_device *usb_dev = atusb->usb_dev; + uint8_t man_id_0, man_id_1, part_num, version_num; + + man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0); + man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1); + part_num = atusb_read_reg(atusb, RG_PART_NUM); + version_num = atusb_read_reg(atusb, RG_VERSION_NUM); + + if (atusb->err) + return atusb->err; + + if ((man_id_1 << 8 | man_id_0) != ATUSB_JEDEC_ATMEL) { + dev_err(&usb_dev->dev, + "non-Atmel transceiver xxxx%02x%02x\n", + man_id_1, man_id_0); + goto fail; + } + if (part_num != 3 && part_num != 2) { + dev_err(&usb_dev->dev, + "unexpected transceiver, part 0x%02x version 0x%02x\n", + part_num, version_num); + goto fail; + } + + dev_info(&usb_dev->dev, "ATUSB: AT86RF231 version %d\n", version_num); + + return 0; + +fail: + atusb->err = -ENODEV; + return -ENODEV; +} + +/* ----- Setup ------------------------------------------------------------- */ + +static int atusb_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ + struct usb_device *usb_dev = interface_to_usbdev(interface); + struct ieee802154_hw *hw; + struct atusb *atusb = NULL; + int ret = -ENOMEM; + + hw = ieee802154_alloc_hw(sizeof(struct atusb), &atusb_ops); + if (!hw) + return -ENOMEM; + + atusb = hw->priv; + atusb->hw = hw; + atusb->usb_dev = usb_get_dev(usb_dev); + usb_set_intfdata(interface, atusb); + + atusb->shutdown = 0; + atusb->err = 0; + INIT_DELAYED_WORK(&atusb->work, atusb_work_urbs); + init_usb_anchor(&atusb->idle_urbs); + init_usb_anchor(&atusb->rx_urbs); + + if (atusb_alloc_urbs(atusb, ATUSB_NUM_RX_URBS)) + goto fail; + + atusb->tx_dr.bRequestType = ATUSB_REQ_TO_DEV; + atusb->tx_dr.bRequest = ATUSB_TX; + atusb->tx_dr.wValue = cpu_to_le16(0); + + atusb->tx_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!atusb->tx_urb) + goto fail; + + hw->parent = &usb_dev->dev; + hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | + IEEE802154_HW_AACK; + + hw->phy->current_page = 0; + hw->phy->current_channel = 11; /* reset default */ + hw->phy->supported.channels[0] = 0x7FFF800; + ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); + + atusb_command(atusb, ATUSB_RF_RESET, 0); + atusb_get_and_show_chip(atusb); + atusb_get_and_show_revision(atusb); + atusb_get_and_show_build(atusb); + ret = atusb_get_and_clear_error(atusb); + if (ret) { + dev_err(&atusb->usb_dev->dev, + "%s: initialization failed, error = %d\n", + __func__, ret); + goto fail; + } + + ret = ieee802154_register_hw(hw); + if (ret) + goto fail; + + /* If we just powered on, we're now in P_ON and need to enter TRX_OFF + * explicitly. Any resets after that will send us straight to TRX_OFF, + * making the command below redundant.
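+ * The forced transition is harmless in the case where it is redundant,
+ * which is why it is issued unconditionally here.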
+ */ + atusb_write_reg(atusb, RG_TRX_STATE, STATE_FORCE_TRX_OFF); + msleep(1); /* reset => TRX_OFF, tTR13 = 37 us */ + +#if 0 + /* Calculating the maximum time available to empty the frame buffer + * on reception: + * + * According to [1], the inter-frame gap is + * R * 20 * 16 us + 128 us + * where R is a random number from 0 to 7. Furthermore, we have 20 bit + * times (80 us at 250 kbps) of SHR of the next frame before the + * transceiver begins storing data in the frame buffer. + * + * This yields a minimum time of 208 us between the last data of a + * frame and the first data of the next frame. This time is further + * reduced by interrupt latency in the atusb firmware. + * + * atusb currently needs about 500 us to retrieve a maximum-sized + * frame. We therefore have to allow reception of a new frame to begin + * while we retrieve the previous frame. + * + * [1] "JN-AN-1035 Calculating data rates in an IEEE 802.15.4-based + * network", Jennic 2006. + * http://www.jennic.com/download_file.php?supportFile=JN-AN-1035%20Calculating%20802-15-4%20Data%20Rates-1v0.pdf + */ + + atusb_write_reg(atusb, + SR_REG(SR_RX_SAFE_MODE), SR_VALUE(SR_RX_SAFE_MODE, 1)); +#endif + atusb_write_reg(atusb, RG_IRQ_MASK, 0xff); + + ret = atusb_get_and_clear_error(atusb); + if (!ret) + return 0; + + dev_err(&atusb->usb_dev->dev, + "%s: setup failed, error = %d\n", + __func__, ret); + + ieee802154_unregister_hw(hw); +fail: + atusb_free_urbs(atusb); + usb_kill_urb(atusb->tx_urb); + usb_free_urb(atusb->tx_urb); + usb_put_dev(usb_dev); + ieee802154_free_hw(hw); + return ret; +} + +static void atusb_disconnect(struct usb_interface *interface) +{ + struct atusb *atusb = usb_get_intfdata(interface); + + dev_dbg(&atusb->usb_dev->dev, "atusb_disconnect\n"); + + atusb->shutdown = 1; + cancel_delayed_work_sync(&atusb->work); + + usb_kill_anchored_urbs(&atusb->rx_urbs); + atusb_free_urbs(atusb); + usb_kill_urb(atusb->tx_urb); + usb_free_urb(atusb->tx_urb); + + ieee802154_unregister_hw(atusb->hw); + + ieee802154_free_hw(atusb->hw); + + usb_set_intfdata(interface, NULL); + usb_put_dev(atusb->usb_dev); + + pr_debug("atusb_disconnect done\n"); +} + +/* The devices we work with */ +static const struct usb_device_id atusb_device_table[] = { + { + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | + USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = ATUSB_VENDOR_ID, + .idProduct = ATUSB_PRODUCT_ID, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC + }, + /* end with null element */ + {} +}; +MODULE_DEVICE_TABLE(usb, atusb_device_table); + +static struct usb_driver atusb_driver = { + .name = "atusb", + .probe = atusb_probe, + .disconnect = atusb_disconnect, + .id_table = atusb_device_table, +}; +module_usb_driver(atusb_driver); + +MODULE_AUTHOR("Alexander Aring "); +MODULE_AUTHOR("Richard Sharpe "); +MODULE_AUTHOR("Stefan Schmidt "); +MODULE_AUTHOR("Werner Almesberger "); +MODULE_DESCRIPTION("ATUSB IEEE 802.15.4 Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ieee802154/atusb.h b/drivers/net/ieee802154/atusb.h new file mode 100644 index 000000000000..0690edcad57b --- /dev/null +++ b/drivers/net/ieee802154/atusb.h @@ -0,0 +1,84 @@ +/* + * atusb.h - Definitions shared between kernel and ATUSB firmware + * + * Written 2013 by Werner Almesberger + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2, or + * (at your option) any later version. + * + * This file should be identical for kernel and firmware. 
+ * Kernel: drivers/net/ieee802154/atusb.h + * Firmware: ben-wpan/atusb/fw/include/atusb/atusb.h + */ + +#ifndef _ATUSB_H +#define _ATUSB_H + +#define ATUSB_VENDOR_ID 0x20b7 /* Qi Hardware*/ +#define ATUSB_PRODUCT_ID 0x1540 /* 802.15.4, device 0 */ + /* -- - - */ + +#define ATUSB_BUILD_SIZE 256 /* maximum build version/date message length */ + +/* Commands to our device. Make sure this is synced with the firmware */ +enum atusb_requests { + ATUSB_ID = 0x00, /* system status/control grp */ + ATUSB_BUILD, + ATUSB_RESET, + ATUSB_RF_RESET = 0x10, /* debug/test group */ + ATUSB_POLL_INT, + ATUSB_TEST, /* atusb-sil only */ + ATUSB_TIMER, + ATUSB_GPIO, + ATUSB_SLP_TR, + ATUSB_GPIO_CLEANUP, + ATUSB_REG_WRITE = 0x20, /* transceiver group */ + ATUSB_REG_READ, + ATUSB_BUF_WRITE, + ATUSB_BUF_READ, + ATUSB_SRAM_WRITE, + ATUSB_SRAM_READ, + ATUSB_SPI_WRITE = 0x30, /* SPI group */ + ATUSB_SPI_READ1, + ATUSB_SPI_READ2, + ATUSB_SPI_WRITE2_SYNC, + ATUSB_RX_MODE = 0x40, /* HardMAC group */ + ATUSB_TX, +}; + +/* Direction bRequest wValue wIndex wLength + * + * ->host ATUSB_ID - - 3 + * ->host ATUSB_BUILD - - #bytes + * host-> ATUSB_RESET - - 0 + * + * host-> ATUSB_RF_RESET - - 0 + * ->host ATUSB_POLL_INT - - 1 + * host-> ATUSB_TEST - - 0 + * ->host ATUSB_TIMER - - #bytes (6) + * ->host ATUSB_GPIO dir+data mask+p# 3 + * host-> ATUSB_SLP_TR - - 0 + * host-> ATUSB_GPIO_CLEANUP - - 0 + * + * host-> ATUSB_REG_WRITE value addr 0 + * ->host ATUSB_REG_READ - addr 1 + * host-> ATUSB_BUF_WRITE - - #bytes + * ->host ATUSB_BUF_READ - - #bytes + * host-> ATUSB_SRAM_WRITE - addr #bytes + * ->host ATUSB_SRAM_READ - addr #bytes + * + * host-> ATUSB_SPI_WRITE byte0 byte1 #bytes + * ->host ATUSB_SPI_READ1 byte0 - #bytes + * ->host ATUSB_SPI_READ2 byte0 byte1 #bytes + * ->host ATUSB_SPI_WRITE2_SYNC byte0 byte1 0/1 + * + * host-> ATUSB_RX_MODE on - 0 + * host-> ATUSB_TX flags ack_seq #bytes + */ + +#define ATUSB_REQ_FROM_DEV (USB_TYPE_VENDOR | USB_DIR_IN) +#define ATUSB_REQ_TO_DEV (USB_TYPE_VENDOR | USB_DIR_OUT) + +#endif /* !_ATUSB_H */ diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index f833b8bb6663..84b28a05c5a1 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ -653,7 +653,7 @@ static int cc2520_register(struct cc2520_private *priv) ieee802154_random_extended_addr(&priv->hw->phy->perm_extended_addr); /* We do support only 2.4 Ghz */ - priv->hw->phy->channels_supported[0] = 0x7FFF800; + priv->hw->phy->supported.channels[0] = 0x7FFF800; priv->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK | IEEE802154_HW_AFILT; diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index dc2bfb600b4b..9d0da4ec3e8c 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -27,25 +27,25 @@ #include #include -static int numlbs = 1; +static int numlbs = 2; -struct fakelb_dev_priv { - struct ieee802154_hw *hw; +static LIST_HEAD(fakelb_phys); +static DEFINE_SPINLOCK(fakelb_phys_lock); - struct list_head list; - struct fakelb_priv *fake; +static LIST_HEAD(fakelb_ifup_phys); +static DEFINE_RWLOCK(fakelb_ifup_phys_lock); - spinlock_t lock; - bool working; -}; +struct fakelb_phy { + struct ieee802154_hw *hw; + + u8 page; + u8 channel; -struct fakelb_priv { struct list_head list; - rwlock_t lock; + struct list_head list_ifup; }; -static int -fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) +static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) { BUG_ON(!level); *level = 0xbe; @@ -53,78 +53,63 @@ fakelb_hw_ed(struct 
ieee802154_hw *hw, u8 *level) return 0; } -static int -fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel) +static int fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { - pr_debug("set channel to %d\n", channel); + struct fakelb_phy *phy = hw->priv; + write_lock_bh(&fakelb_ifup_phys_lock); + phy->page = page; + phy->channel = channel; + write_unlock_bh(&fakelb_ifup_phys_lock); return 0; } -static void -fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb) +static int fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { - struct sk_buff *newskb; + struct fakelb_phy *current_phy = hw->priv, *phy; - spin_lock(&priv->lock); - if (priv->working) { - newskb = pskb_copy(skb, GFP_ATOMIC); - ieee802154_rx_irqsafe(priv->hw, newskb, 0xcc); - } - spin_unlock(&priv->lock); -} + read_lock_bh(&fakelb_ifup_phys_lock); + list_for_each_entry(phy, &fakelb_ifup_phys, list_ifup) { + if (current_phy == phy) + continue; -static int -fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) -{ - struct fakelb_dev_priv *priv = hw->priv; - struct fakelb_priv *fake = priv->fake; - - read_lock_bh(&fake->lock); - if (priv->list.next == priv->list.prev) { - /* we are the only one device */ - fakelb_hw_deliver(priv, skb); - } else { - struct fakelb_dev_priv *dp; - list_for_each_entry(dp, &priv->fake->list, list) { - if (dp != priv && - (dp->hw->phy->current_channel == - priv->hw->phy->current_channel)) - fakelb_hw_deliver(dp, skb); + if (current_phy->page == phy->page && + current_phy->channel == phy->channel) { + struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC); + + if (newskb) + ieee802154_rx_irqsafe(phy->hw, newskb, 0xcc); } } - read_unlock_bh(&fake->lock); + read_unlock_bh(&fakelb_ifup_phys_lock); + ieee802154_xmit_complete(hw, skb, false); return 0; } -static int -fakelb_hw_start(struct ieee802154_hw *hw) { - struct fakelb_dev_priv *priv = hw->priv; - int ret = 0; +static int fakelb_hw_start(struct ieee802154_hw *hw) +{ + struct fakelb_phy *phy = hw->priv; - spin_lock(&priv->lock); - if (priv->working) - ret = -EBUSY; - else - priv->working = 1; - spin_unlock(&priv->lock); + write_lock_bh(&fakelb_ifup_phys_lock); + list_add(&phy->list_ifup, &fakelb_ifup_phys); + write_unlock_bh(&fakelb_ifup_phys_lock); - return ret; + return 0; } -static void -fakelb_hw_stop(struct ieee802154_hw *hw) { - struct fakelb_dev_priv *priv = hw->priv; +static void fakelb_hw_stop(struct ieee802154_hw *hw) +{ + struct fakelb_phy *phy = hw->priv; - spin_lock(&priv->lock); - priv->working = 0; - spin_unlock(&priv->lock); + write_lock_bh(&fakelb_ifup_phys_lock); + list_del(&phy->list_ifup); + write_unlock_bh(&fakelb_ifup_phys_lock); } static const struct ieee802154_ops fakelb_ops = { .owner = THIS_MODULE, - .xmit_sync = fakelb_hw_xmit, + .xmit_async = fakelb_hw_xmit, .ed = fakelb_hw_ed, .set_channel = fakelb_hw_channel, .start = fakelb_hw_start, @@ -135,54 +120,54 @@ static const struct ieee802154_ops fakelb_ops = { module_param(numlbs, int, 0); MODULE_PARM_DESC(numlbs, " number of pseudo devices"); -static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) +static int fakelb_add_one(struct device *dev) { - struct fakelb_dev_priv *priv; - int err; struct ieee802154_hw *hw; + struct fakelb_phy *phy; + int err; - hw = ieee802154_alloc_hw(sizeof(*priv), &fakelb_ops); + hw = ieee802154_alloc_hw(sizeof(*phy), &fakelb_ops); if (!hw) return -ENOMEM; - priv = hw->priv; - priv->hw = hw; + phy = hw->priv; + phy->hw = hw; /* 868 MHz BPSK 802.15.4-2003 */ - 
hw->phy->channels_supported[0] |= 1; + hw->phy->supported.channels[0] |= 1; /* 915 MHz BPSK 802.15.4-2003 */ - hw->phy->channels_supported[0] |= 0x7fe; + hw->phy->supported.channels[0] |= 0x7fe; /* 2.4 GHz O-QPSK 802.15.4-2003 */ - hw->phy->channels_supported[0] |= 0x7FFF800; + hw->phy->supported.channels[0] |= 0x7FFF800; /* 868 MHz ASK 802.15.4-2006 */ - hw->phy->channels_supported[1] |= 1; + hw->phy->supported.channels[1] |= 1; /* 915 MHz ASK 802.15.4-2006 */ - hw->phy->channels_supported[1] |= 0x7fe; + hw->phy->supported.channels[1] |= 0x7fe; /* 868 MHz O-QPSK 802.15.4-2006 */ - hw->phy->channels_supported[2] |= 1; + hw->phy->supported.channels[2] |= 1; /* 915 MHz O-QPSK 802.15.4-2006 */ - hw->phy->channels_supported[2] |= 0x7fe; + hw->phy->supported.channels[2] |= 0x7fe; /* 2.4 GHz CSS 802.15.4a-2007 */ - hw->phy->channels_supported[3] |= 0x3fff; + hw->phy->supported.channels[3] |= 0x3fff; /* UWB Sub-gigahertz 802.15.4a-2007 */ - hw->phy->channels_supported[4] |= 1; + hw->phy->supported.channels[4] |= 1; /* UWB Low band 802.15.4a-2007 */ - hw->phy->channels_supported[4] |= 0x1e; + hw->phy->supported.channels[4] |= 0x1e; /* UWB High band 802.15.4a-2007 */ - hw->phy->channels_supported[4] |= 0xffe0; + hw->phy->supported.channels[4] |= 0xffe0; /* 750 MHz O-QPSK 802.15.4c-2009 */ - hw->phy->channels_supported[5] |= 0xf; + hw->phy->supported.channels[5] |= 0xf; /* 750 MHz MPSK 802.15.4c-2009 */ - hw->phy->channels_supported[5] |= 0xf0; + hw->phy->supported.channels[5] |= 0xf0; /* 950 MHz BPSK 802.15.4d-2009 */ - hw->phy->channels_supported[6] |= 0x3ff; + hw->phy->supported.channels[6] |= 0x3ff; /* 950 MHz GFSK 802.15.4d-2009 */ - hw->phy->channels_supported[6] |= 0x3ffc00; + hw->phy->supported.channels[6] |= 0x3ffc00; - INIT_LIST_HEAD(&priv->list); - priv->fake = fake; - - spin_lock_init(&priv->lock); + ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); + /* fake phy channel 13 as default */ + hw->phy->current_channel = 13; + phy->channel = hw->phy->current_channel; hw->parent = dev; @@ -190,67 +175,55 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) if (err) goto err_reg; - write_lock_bh(&fake->lock); - list_add_tail(&priv->list, &fake->list); - write_unlock_bh(&fake->lock); + spin_lock(&fakelb_phys_lock); + list_add_tail(&phy->list, &fakelb_phys); + spin_unlock(&fakelb_phys_lock); return 0; err_reg: - ieee802154_free_hw(priv->hw); + ieee802154_free_hw(phy->hw); return err; } -static void fakelb_del(struct fakelb_dev_priv *priv) +static void fakelb_del(struct fakelb_phy *phy) { - write_lock_bh(&priv->fake->lock); - list_del(&priv->list); - write_unlock_bh(&priv->fake->lock); + list_del(&phy->list); - ieee802154_unregister_hw(priv->hw); - ieee802154_free_hw(priv->hw); + ieee802154_unregister_hw(phy->hw); + ieee802154_free_hw(phy->hw); } static int fakelb_probe(struct platform_device *pdev) { - struct fakelb_priv *priv; - struct fakelb_dev_priv *dp; - int err = -ENOMEM; - int i; - - priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv), - GFP_KERNEL); - if (!priv) - goto err_alloc; - - INIT_LIST_HEAD(&priv->list); - rwlock_init(&priv->lock); + struct fakelb_phy *phy, *tmp; + int err, i; for (i = 0; i < numlbs; i++) { - err = fakelb_add_one(&pdev->dev, priv); + err = fakelb_add_one(&pdev->dev); if (err < 0) goto err_slave; } - platform_set_drvdata(pdev, priv); dev_info(&pdev->dev, "added ieee802154 hardware\n"); return 0; err_slave: - list_for_each_entry(dp, &priv->list, list) - fakelb_del(dp); -err_alloc: + spin_lock(&fakelb_phys_lock); + 
list_for_each_entry_safe(phy, tmp, &fakelb_phys, list) + fakelb_del(phy); + spin_unlock(&fakelb_phys_lock); return err; } static int fakelb_remove(struct platform_device *pdev) { - struct fakelb_priv *priv = platform_get_drvdata(pdev); - struct fakelb_dev_priv *dp, *temp; - - list_for_each_entry_safe(dp, temp, &priv->list, list) - fakelb_del(dp); + struct fakelb_phy *phy, *tmp; + spin_lock(&fakelb_phys_lock); + list_for_each_entry_safe(phy, tmp, &fakelb_phys, list) + fakelb_del(phy); + spin_unlock(&fakelb_phys_lock); return 0; } diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index fba2dfd910f7..f2a1bd122a74 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -750,7 +750,7 @@ static int mrf24j40_probe(struct spi_device *spi) devrec->hw->priv = devrec; devrec->hw->parent = &devrec->spi->dev; - devrec->hw->phy->channels_supported[0] = CHANNEL_MASK; + devrec->hw->phy->supported.channels[0] = CHANNEL_MASK; devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK | IEEE802154_HW_AFILT; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 7c0cb87d1f2f..cf18940f4e84 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -112,6 +112,11 @@ config MICREL_PHY ---help--- Supports the KSZ9021, VSC8201, KS8001 PHYs. +config DP83867_PHY + tristate "Drivers for Texas Instruments DP83867 Gigabit PHY" + ---help--- + Currently supports the DP83867 PHY. + config FIXED_PHY tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" depends on PHYLIB @@ -205,7 +210,6 @@ config MDIO_BCM_UNIMAC This hardware can be found in the Broadcom GENET Ethernet MAC controllers as well as some Broadcom Ethernet switches such as the Starfighter 2 switches. - endif # PHYLIB config MICREL_KS8995MA diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index e97e7f921862..fcc25a0c45cd 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o obj-$(CONFIG_NATIONAL_PHY) += national.o obj-$(CONFIG_DP83640_PHY) += dp83640.o +obj-$(CONFIG_DP83867_PHY) += dp83867.o obj-$(CONFIG_STE10XP) += ste10Xp.o obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 64c74c6a4828..4dea85bfc545 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -136,8 +136,8 @@ static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev) /* AFE_RX_LP_COUNTER, set RX bandwidth to maximum */ phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0); - /* AFE_TX_CONFIG, set 1000BT Cfeed=110 for all ports */ - phy_write_misc(phydev, AFE_TX_CONFIG, 0x0061); + /* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */ + phy_write_misc(phydev, AFE_TX_CONFIG, 0x431); /* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */ phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da); @@ -167,6 +167,9 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev) /* AFE_RXCONFIG_1, provide more margin for INL/DNL measurement */ phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f); + /* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */ + phy_write_misc(phydev, AFE_TX_CONFIG, 0x431); + /* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */ phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da); @@ -404,7 +407,7 @@ static struct phy_driver bcm7xxx_driver[] = { .name = "Broadcom 
BCM7425", .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = 0, + .flags = PHY_IS_INTERNAL, .config_init = bcm7xxx_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 496e02f961d3..00cb41e71312 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -47,7 +47,7 @@ #define PSF_TX 0x1000 #define EXT_EVENT 1 #define CAL_EVENT 7 -#define CAL_TRIGGER 7 +#define CAL_TRIGGER 1 #define DP83640_N_PINS 12 #define MII_DP83640_MICR 0x11 @@ -496,7 +496,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp, else evnt |= EVNT_RISE; } + mutex_lock(&clock->extreg_lock); ext_write(0, phydev, PAGE5, PTP_EVNT, evnt); + mutex_unlock(&clock->extreg_lock); return 0; case PTP_CLK_REQ_PEROUT: @@ -532,6 +534,8 @@ static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F }; static void enable_status_frames(struct phy_device *phydev, bool on) { + struct dp83640_private *dp83640 = phydev->priv; + struct dp83640_clock *clock = dp83640->clock; u16 cfg0 = 0, ver; if (on) @@ -539,9 +543,13 @@ static void enable_status_frames(struct phy_device *phydev, bool on) ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT; + mutex_lock(&clock->extreg_lock); + ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0); ext_write(0, phydev, PAGE6, PSF_CFG1, ver); + mutex_unlock(&clock->extreg_lock); + if (!phydev->attached_dev) { pr_warn("expected to find an attached netdevice\n"); return; @@ -838,7 +846,7 @@ static void decode_rxts(struct dp83640_private *dp83640, list_del_init(&rxts->list); phy2rxts(phy_rxts, rxts); - spin_lock_irqsave(&dp83640->rx_queue.lock, flags); + spin_lock(&dp83640->rx_queue.lock); skb_queue_walk(&dp83640->rx_queue, skb) { struct dp83640_skb_info *skb_info; @@ -853,7 +861,7 @@ static void decode_rxts(struct dp83640_private *dp83640, break; } } - spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags); + spin_unlock(&dp83640->rx_queue.lock); if (!shhwtstamps) list_add_tail(&rxts->list, &dp83640->rxts); @@ -1173,11 +1181,18 @@ static int dp83640_config_init(struct phy_device *phydev) if (clock->chosen && !list_empty(&clock->phylist)) recalibrate(clock); - else + else { + mutex_lock(&clock->extreg_lock); enable_broadcast(phydev, clock->page, 1); + mutex_unlock(&clock->extreg_lock); + } enable_status_frames(phydev, true); + + mutex_lock(&clock->extreg_lock); ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE); + mutex_unlock(&clock->extreg_lock); + return 0; } diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c new file mode 100644 index 000000000000..c7a12e2e07b7 --- /dev/null +++ b/drivers/net/phy/dp83867.c @@ -0,0 +1,239 @@ +/* + * Driver for the Texas Instruments DP83867 PHY + * + * Copyright (C) 2015 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+
+#include <dt-bindings/net/ti-dp83867.h>
+
+#define DP83867_PHY_ID 0x2000a231
+#define DP83867_DEVADDR 0x1f
+
+#define MII_DP83867_PHYCTRL 0x10
+#define MII_DP83867_MICR 0x12
+#define MII_DP83867_ISR 0x13
+#define DP83867_CTRL 0x1f
+
+/* Extended Registers */
+#define DP83867_RGMIICTL 0x0032
+#define DP83867_RGMIIDCTL 0x0086
+
+#define DP83867_SW_RESET BIT(15)
+#define DP83867_SW_RESTART BIT(14)
+
+/* MICR Interrupt bits */
+#define MII_DP83867_MICR_AN_ERR_INT_EN BIT(15)
+#define MII_DP83867_MICR_SPEED_CHNG_INT_EN BIT(14)
+#define MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN BIT(13)
+#define MII_DP83867_MICR_PAGE_RXD_INT_EN BIT(12)
+#define MII_DP83867_MICR_AUTONEG_COMP_INT_EN BIT(11)
+#define MII_DP83867_MICR_LINK_STS_CHNG_INT_EN BIT(10)
+#define MII_DP83867_MICR_FALSE_CARRIER_INT_EN BIT(8)
+#define MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN BIT(4)
+#define MII_DP83867_MICR_WOL_INT_EN BIT(3)
+#define MII_DP83867_MICR_XGMII_ERR_INT_EN BIT(2)
+#define MII_DP83867_MICR_POL_CHNG_INT_EN BIT(1)
+#define MII_DP83867_MICR_JABBER_INT_EN BIT(0)
+
+/* RGMIICTL bits */
+#define DP83867_RGMII_TX_CLK_DELAY_EN BIT(1)
+#define DP83867_RGMII_RX_CLK_DELAY_EN BIT(0)
+
+/* PHY CTRL bits */
+#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
+
+/* RGMIIDCTL bits */
+#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
+
+struct dp83867_private {
+	int rx_id_delay;
+	int tx_id_delay;
+	int fifo_depth;
+};
+
+static int dp83867_ack_interrupt(struct phy_device *phydev)
+{
+	int err = phy_read(phydev, MII_DP83867_ISR);
+
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int dp83867_config_intr(struct phy_device *phydev)
+{
+	int micr_status;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+		micr_status = phy_read(phydev, MII_DP83867_MICR);
+		if (micr_status < 0)
+			return micr_status;
+
+		micr_status |=
+			(MII_DP83867_MICR_AN_ERR_INT_EN |
+			MII_DP83867_MICR_SPEED_CHNG_INT_EN |
+			MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
+			MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
+
+		return phy_write(phydev, MII_DP83867_MICR, micr_status);
+	}
+
+	micr_status = 0x0;
+	return phy_write(phydev, MII_DP83867_MICR, micr_status);
+}
+
+#ifdef CONFIG_OF_MDIO
+static int dp83867_of_init(struct phy_device *phydev)
+{
+	struct dp83867_private *dp83867 = phydev->priv;
+	struct device *dev = &phydev->dev;
+	struct device_node *of_node = dev->of_node;
+	int ret;
+
+	if (!of_node && dev->parent->of_node)
+		of_node = dev->parent->of_node;
+
+	if (!phydev->dev.of_node)
+		return -ENODEV;
+
+	ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
+				   &dp83867->rx_id_delay);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
+				   &dp83867->tx_id_delay);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(of_node, "ti,fifo-depth",
+				   &dp83867->fifo_depth);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+#else
+static int dp83867_of_init(struct phy_device *phydev)
+{
+	return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int dp83867_config_init(struct phy_device *phydev)
+{
+	struct dp83867_private *dp83867;
+	int ret;
+	u16 val, delay;
+
+	if (!phydev->priv) {
+		dp83867 = devm_kzalloc(&phydev->dev, sizeof(*dp83867),
+				       GFP_KERNEL);
+		if (!dp83867)
+			return -ENOMEM;
+
+		phydev->priv = dp83867;
+		ret = dp83867_of_init(phydev);
+		if (ret)
+			return ret;
+	} else {
+		dp83867 = (struct dp83867_private *)phydev->priv;
+	}
+
+	if (phy_interface_is_rgmii(phydev)) {
+		ret = phy_write(phydev, MII_DP83867_PHYCTRL,
+			(dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+		if (ret)
+			return ret;
+	}
+
+	if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
+	    (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
+		val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
+					    DP83867_DEVADDR, phydev->addr);
+
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+			val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
+
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+			val |= DP83867_RGMII_TX_CLK_DELAY_EN;
+
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+			val |= DP83867_RGMII_RX_CLK_DELAY_EN;
+
+		phy_write_mmd_indirect(phydev, DP83867_RGMIICTL,
+				       DP83867_DEVADDR, phydev->addr, val);
+
+		delay = (dp83867->rx_id_delay |
+			(dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
+
+		phy_write_mmd_indirect(phydev, DP83867_RGMIIDCTL,
+				       DP83867_DEVADDR, phydev->addr, delay);
+	}
+
+	return 0;
+}
+
+static int dp83867_phy_reset(struct phy_device *phydev)
+{
+	int err;
+
+	err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
+	if (err < 0)
+		return err;
+
+	return dp83867_config_init(phydev);
+}
+
+static struct phy_driver dp83867_driver[] = {
+	{
+		.phy_id = DP83867_PHY_ID,
+		.phy_id_mask = 0xfffffff0,
+		.name = "TI DP83867",
+		.features = PHY_GBIT_FEATURES,
+		.flags = PHY_HAS_INTERRUPT,
+
+		.config_init = dp83867_config_init,
+		.soft_reset = dp83867_phy_reset,
+
+		/* IRQ related */
+		.ack_interrupt = dp83867_ack_interrupt,
+		.config_intr = dp83867_config_intr,
+
+		.config_aneg = genphy_config_aneg,
+		.read_status = genphy_read_status,
+		.suspend = genphy_suspend,
+		.resume = genphy_resume,
+
+		.driver = {.owner = THIS_MODULE,}
+	},
+};
+module_phy_driver(dp83867_driver);
+
+static struct mdio_device_id __maybe_unused dp83867_tbl[] = {
+	{ DP83867_PHY_ID, 0xfffffff0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(mdio, dp83867_tbl);
+
+MODULE_DESCRIPTION("Texas Instruments DP83867 PHY driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
-	if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+	if (phy_interface_is_rgmii(phydev)) {
 		c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
 		if (c < 0)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 1b1698f98818..f721444c2b0a 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -317,10 +317,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
 	if (err < 0)
 		return err;
 
-	if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+	if (phy_interface_is_rgmii(phydev)) {
 		mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
 			MII_88E1121_PHY_MSCR_DELAY_MASK;
@@ -469,10 +466,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
 	int err;
 	int temp;
 
-	if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+	if (phy_interface_is_rgmii(phydev)) {
 		temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
 		if (temp < 0)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ebdc357c5131..499185eaf413 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -288,9 +288,10 @@ static int kszphy_config_init(struct phy_device *phydev)
 }
 
 static int ksz9021_load_values_from_of(struct phy_device
*phydev, - struct device_node *of_node, u16 reg, - char *field1, char *field2, - char *field3, char *field4) + const struct device_node *of_node, + u16 reg, + const char *field1, const char *field2, + const char *field3, const char *field4) { int val1 = -1; int val2 = -2; @@ -336,8 +337,8 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev, static int ksz9021_config_init(struct phy_device *phydev) { - struct device *dev = &phydev->dev; - struct device_node *of_node = dev->of_node; + const struct device *dev = &phydev->dev; + const struct device_node *of_node = dev->of_node; if (!of_node && dev->parent->of_node) of_node = dev->parent->of_node; @@ -365,6 +366,11 @@ static int ksz9021_config_init(struct phy_device *phydev) #define KSZ9031_PS_TO_REG 60 /* Extended registers */ +/* MMD Address 0x0 */ +#define MII_KSZ9031RN_FLP_BURST_TX_LO 3 +#define MII_KSZ9031RN_FLP_BURST_TX_HI 4 + +/* MMD Address 0x2 */ #define MII_KSZ9031RN_CONTROL_PAD_SKEW 4 #define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5 #define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6 @@ -389,9 +395,9 @@ static int ksz9031_extended_read(struct phy_device *phydev, } static int ksz9031_of_load_skew_values(struct phy_device *phydev, - struct device_node *of_node, + const struct device_node *of_node, u16 reg, size_t field_sz, - char *field[], u8 numfields) + const char *field[], u8 numfields) { int val[4] = {-1, -2, -3, -4}; int matches = 0; @@ -425,20 +431,36 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev, return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval); } +static int ksz9031_center_flp_timing(struct phy_device *phydev) +{ + int result; + + /* Center KSZ9031RNX FLP timing at 16ms. */ + result = ksz9031_extended_write(phydev, OP_DATA, 0, + MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006); + result = ksz9031_extended_write(phydev, OP_DATA, 0, + MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80); + + if (result) + return result; + + return genphy_restart_aneg(phydev); +} + static int ksz9031_config_init(struct phy_device *phydev) { - struct device *dev = &phydev->dev; - struct device_node *of_node = dev->of_node; - char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"}; - char *rx_data_skews[4] = { + const struct device *dev = &phydev->dev; + const struct device_node *of_node = dev->of_node; + static const char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"}; + static const char *rx_data_skews[4] = { "rxd0-skew-ps", "rxd1-skew-ps", "rxd2-skew-ps", "rxd3-skew-ps" }; - char *tx_data_skews[4] = { + static const char *tx_data_skews[4] = { "txd0-skew-ps", "txd1-skew-ps", "txd2-skew-ps", "txd3-skew-ps" }; - char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"}; + static const char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"}; if (!of_node && dev->parent->of_node) of_node = dev->parent->of_node; @@ -460,7 +482,8 @@ static int ksz9031_config_init(struct phy_device *phydev) MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4, tx_data_skews, 4); } - return 0; + + return ksz9031_center_flp_timing(phydev); } #define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 @@ -519,7 +542,7 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum, static int kszphy_probe(struct phy_device *phydev) { const struct kszphy_type *type = phydev->drv->driver_data; - struct device_node *np = phydev->dev.of_node; + const struct device_node *np = phydev->dev.of_node; struct kszphy_priv *priv; struct clk *clk; int ret; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 377d2db04d33..b2197b506acb 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c 
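The icplus.c and marvell.c hunks above and the phy.c hunk below all fold the same four open-coded RGMII comparisons into a single call. As a sketch (assuming the helper added earlier in this series keeps the RGMII variants contiguous in the phy_interface_t enum), phy_interface_is_rgmii() in include/linux/phy.h reduces to a range check:

static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
{
	/* RGMII, RGMII_ID, RGMII_RXID and RGMII_TXID are adjacent enum values */
	return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
	       phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
}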
@@ -1093,8 +1093,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) if ((phydev->duplex == DUPLEX_FULL) && ((phydev->interface == PHY_INTERFACE_MODE_MII) || (phydev->interface == PHY_INTERFACE_MODE_GMII) || - (phydev->interface >= PHY_INTERFACE_MODE_RGMII && - phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) || + phy_interface_is_rgmii(phydev) || phy_is_internal(phydev))) { int eee_lp, eee_cap, eee_adv; u32 lp, cap, adv; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 5eddbc02c6c2..34c519eb1db5 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2131,9 +2131,10 @@ static void vxlan_cleanup(unsigned long arg) if (!netif_running(vxlan->dev)) return; - spin_lock_bh(&vxlan->hash_lock); for (h = 0; h < FDB_HASH_SIZE; ++h) { struct hlist_node *p, *n; + + spin_lock_bh(&vxlan->hash_lock); hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { struct vxlan_fdb *f = container_of(p, struct vxlan_fdb, hlist); @@ -2152,8 +2153,8 @@ static void vxlan_cleanup(unsigned long arg) } else if (time_before(timeout, next_timer)) next_timer = timeout; } + spin_unlock_bh(&vxlan->hash_lock); } - spin_unlock_bh(&vxlan->hash_lock); mod_timer(&vxlan->age_timer, next_timer); } diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index bcfa01add7cc..7193b7304fdd 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -517,7 +517,7 @@ static int cosa_probe(int base, int irq, int dma) */ set_current_state(TASK_INTERRUPTIBLE); cosa_putstatus(cosa, SR_TX_INT_ENA); - schedule_timeout(30); + schedule_timeout(msecs_to_jiffies(300)); irq = probe_irq_off(irqs); /* Disable all IRQs from the card */ cosa_putstatus(cosa, 0); diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 08223569cebd..7a72407208b1 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -551,7 +551,7 @@ static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv, msg, i); goto done; } - schedule_timeout_uninterruptible(10); + schedule_timeout_uninterruptible(msecs_to_jiffies(100)); rmb(); } while (++i > 0); netdev_err(dev, "%s timeout\n", msg); @@ -596,7 +596,7 @@ static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv) (dpriv->iqtx[cur] & cpu_to_le32(Xpr))) break; smp_rmb(); - schedule_timeout_uninterruptible(10); + schedule_timeout_uninterruptible(msecs_to_jiffies(100)); } while (++i > 0); return (i >= 0 ) ? 
i : -EAGAIN; @@ -1033,7 +1033,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr) /* Flush posted writes */ readl(ioaddr + GSTAR); - schedule_timeout_uninterruptible(10); + schedule_timeout_uninterruptible(msecs_to_jiffies(100)); for (i = 0; i < 16; i++) pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]); @@ -1046,7 +1046,6 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr) static int dscc4_open(struct net_device *dev) { struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - struct dscc4_pci_priv *ppriv; int ret = -EAGAIN; if ((dscc4_loopback_check(dpriv) < 0)) @@ -1055,8 +1054,6 @@ static int dscc4_open(struct net_device *dev) if ((ret = hdlc_open(dev))) goto err; - ppriv = dpriv->pci_priv; - /* * Due to various bugs, there is no way to reliably reset a * specific port (manufacturer's dependent special PCI #RST wiring diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 16604bdf5197..a63ab2e83105 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -277,6 +277,7 @@ source "drivers/net/wireless/libertas/Kconfig" source "drivers/net/wireless/orinoco/Kconfig" source "drivers/net/wireless/p54/Kconfig" source "drivers/net/wireless/rt2x00/Kconfig" +source "drivers/net/wireless/mediatek/Kconfig" source "drivers/net/wireless/rtlwifi/Kconfig" source "drivers/net/wireless/ti/Kconfig" source "drivers/net/wireless/zd1211rw/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 0c8891686718..6b9e729dd8ac 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -45,6 +45,8 @@ obj-$(CONFIG_IWLWIFI) += iwlwifi/ obj-$(CONFIG_IWLEGACY) += iwlegacy/ obj-$(CONFIG_RT2X00) += rt2x00/ +obj-$(CONFIG_WL_MEDIATEK) += mediatek/ + obj-$(CONFIG_P54_COMMON) += p54/ obj-$(CONFIG_ATH_CARDS) += ath/ diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c index 413528295d72..8c283fcd843d 100644 --- a/drivers/net/wireless/adm8211.c +++ b/drivers/net/wireless/adm8211.c @@ -1098,14 +1098,18 @@ static void adm8211_hw_init(struct ieee80211_hw *dev) pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline); switch (cline) { - case 0x8: reg |= (0x1 << 14); - break; - case 0x16: reg |= (0x2 << 14); - break; - case 0x32: reg |= (0x3 << 14); - break; - default: reg |= (0x0 << 14); - break; + case 0x8: + reg |= (0x1 << 14); + break; + case 0x10: + reg |= (0x2 << 14); + break; + case 0x20: + reg |= (0x3 << 14); + break; + default: + reg |= (0x0 << 14); + break; } } diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 987b266278a8..bcccae19325d 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -387,7 +387,9 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); - if (!skip_otp && result != 0) { + if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT, + ar->fw_features)) + && result != 0) { ath10k_err(ar, "otp calibration failed: %d", result); return -EINVAL; } diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 8444adf42195..70fcdc9c2758 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -460,6 +460,14 @@ enum ath10k_fw_features { */ ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6, + /* Don't trust error code from otp.bin */ + ATH10K_FW_FEATURE_IGNORE_OTP_RESULT, + + /* Some 
firmware revisions pad 4th hw address to 4 byte boundary making + * it 8 bytes long in Native Wifi Rx decap. + */ + ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index a12b8323f9f1..53bd6a19eab6 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -36,6 +36,7 @@ enum ath10k_debug_mask { ATH10K_DBG_REGULATORY = 0x00000800, ATH10K_DBG_TESTMODE = 0x00001000, ATH10K_DBG_WMI_PRINT = 0x00002000, + ATH10K_DBG_PCI_PS = 0x00004000, ATH10K_DBG_ANY = 0xffffffff, }; diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index b26e32f42656..89eb16b30fc4 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -965,10 +965,16 @@ static void ath10k_process_rx(struct ath10k *ar, ieee80211_rx(ar->hw, skb); } -static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr) +static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar, + struct ieee80211_hdr *hdr) { - /* nwifi header is padded to 4 bytes. this fixes 4addr rx */ - return round_up(ieee80211_hdrlen(hdr->frame_control), 4); + int len = ieee80211_hdrlen(hdr->frame_control); + + if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, + ar->fw_features)) + len = round_up(len, 4); + + return len; } static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, @@ -1067,7 +1073,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, /* pull decapped header and copy SA & DA */ hdr = (struct ieee80211_hdr *)msdu->data; - hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr); + hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr); ether_addr_copy(da, ieee80211_get_DA(hdr)); ether_addr_copy(sa, ieee80211_get_SA(hdr)); skb_pull(msdu, hdr_len); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 539b2b62b58a..0ed422ae46a4 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1708,7 +1708,14 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) enable_ps = false; } - if (enable_ps) { + if (!arvif->is_started) { + /* mac80211 can update vif powersave state while disconnected. + * Firmware doesn't behave nicely and consumes more power than + * necessary if PS is disabled on a non-started vdev. Hence + * force-enable PS for non-running vdevs. 
+ */ + psmode = WMI_STA_PS_MODE_ENABLED; + } else if (enable_ps) { psmode = WMI_STA_PS_MODE_ENABLED; param = WMI_STA_PS_PARAM_INACTIVITY_TIME; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 969a1231800e..17a060e8efa2 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -330,6 +330,205 @@ static const struct service_to_pipe target_service_to_ce_map_wlan[] = { }, }; +static bool ath10k_pci_is_awake(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + + RTC_STATE_ADDRESS); + + return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; +} + +static void __ath10k_pci_wake(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + lockdep_assert_held(&ar_pci->ps_lock); + + ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n", + ar_pci->ps_wake_refcount, ar_pci->ps_awake); + + iowrite32(PCIE_SOC_WAKE_V_MASK, + ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS); +} + +static void __ath10k_pci_sleep(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + lockdep_assert_held(&ar_pci->ps_lock); + + ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n", + ar_pci->ps_wake_refcount, ar_pci->ps_awake); + + iowrite32(PCIE_SOC_WAKE_RESET, + ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS); + ar_pci->ps_awake = false; +} + +static int ath10k_pci_wake_wait(struct ath10k *ar) +{ + int tot_delay = 0; + int curr_delay = 5; + + while (tot_delay < PCIE_WAKE_TIMEOUT) { + if (ath10k_pci_is_awake(ar)) + return 0; + + udelay(curr_delay); + tot_delay += curr_delay; + + if (curr_delay < 50) + curr_delay += 5; + } + + return -ETIMEDOUT; +} + +static int ath10k_pci_wake(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + + ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n", + ar_pci->ps_wake_refcount, ar_pci->ps_awake); + + /* This function can be called very frequently. To avoid excessive + * CPU stalls for MMIO reads use a cache var to hold the device state. 
+ */ + if (!ar_pci->ps_awake) { + __ath10k_pci_wake(ar); + + ret = ath10k_pci_wake_wait(ar); + if (ret == 0) + ar_pci->ps_awake = true; + } + + if (ret == 0) { + ar_pci->ps_wake_refcount++; + WARN_ON(ar_pci->ps_wake_refcount == 0); + } + + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); + + return ret; +} + +static void ath10k_pci_sleep(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + + ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n", + ar_pci->ps_wake_refcount, ar_pci->ps_awake); + + if (WARN_ON(ar_pci->ps_wake_refcount == 0)) + goto skip; + + ar_pci->ps_wake_refcount--; + + mod_timer(&ar_pci->ps_timer, jiffies + + msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC)); + +skip: + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); +} + +static void ath10k_pci_ps_timer(unsigned long ptr) +{ + struct ath10k *ar = (void *)ptr; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + + ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n", + ar_pci->ps_wake_refcount, ar_pci->ps_awake); + + if (ar_pci->ps_wake_refcount > 0) + goto skip; + + __ath10k_pci_sleep(ar); + +skip: + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); +} + +static void ath10k_pci_sleep_sync(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; + + del_timer_sync(&ar_pci->ps_timer); + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + WARN_ON(ar_pci->ps_wake_refcount > 0); + __ath10k_pci_sleep(ar); + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); +} + +void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret; + + ret = ath10k_pci_wake(ar); + if (ret) { + ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n", + value, offset, ret); + return; + } + + iowrite32(value, ar_pci->mem + offset); + ath10k_pci_sleep(ar); +} + +u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + u32 val; + int ret; + + ret = ath10k_pci_wake(ar); + if (ret) { + ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n", + offset, ret); + return 0xffffffff; + } + + val = ioread32(ar_pci->mem + offset); + ath10k_pci_sleep(ar); + + return val; +} + +u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) +{ + return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); +} + +void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) +{ + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); +} + +u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) +{ + return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr); +} + +void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) +{ + ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val); +} + static bool ath10k_pci_irq_pending(struct ath10k *ar) { u32 cause; @@ -793,60 +992,6 @@ static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val)); } -static bool ath10k_pci_is_awake(struct ath10k *ar) -{ - u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS); - - return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; -} - -static int ath10k_pci_wake_wait(struct ath10k *ar) -{ - int tot_delay = 0; - int curr_delay = 5; - - while (tot_delay < PCIE_WAKE_TIMEOUT) { - if (ath10k_pci_is_awake(ar)) - return 0; - - 
udelay(curr_delay); - tot_delay += curr_delay; - - if (curr_delay < 50) - curr_delay += 5; - } - - return -ETIMEDOUT; -} - -/* The rule is host is forbidden from accessing device registers while it's - * asleep. Currently ath10k_pci_wake() and ath10k_pci_sleep() calls aren't - * balanced and the device is kept awake all the time. This is intended for a - * simpler solution for the following problems: - * - * * device can enter sleep during s2ram without the host knowing, - * - * * irq handlers access registers which is a problem if other device asserts - * a shared irq line when ath10k is between hif_power_down() and - * hif_power_up(). - * - * FIXME: If power consumption is a concern (and there are *real* gains) then a - * refcounted wake/sleep needs to be implemented. - */ - -static int ath10k_pci_wake(struct ath10k *ar) -{ - ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, - PCIE_SOC_WAKE_V_MASK); - return ath10k_pci_wake_wait(ar); -} - -static void ath10k_pci_sleep(struct ath10k *ar) -{ - ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, - PCIE_SOC_WAKE_RESET); -} - /* Called by lower (CE) layer when a send to Target completes. */ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state) { @@ -1227,11 +1372,15 @@ static void ath10k_pci_irq_enable(struct ath10k *ar) static int ath10k_pci_hif_start(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); ath10k_pci_irq_enable(ar); ath10k_pci_rx_post(ar); + pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, + ar_pci->link_ctl); + return 0; } @@ -1344,6 +1493,9 @@ static void ath10k_pci_flush(struct ath10k *ar) static void ath10k_pci_hif_stop(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); /* Most likely the device has HTT Rx ring configured. The only way to @@ -1362,6 +1514,10 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) ath10k_pci_irq_disable(ar); ath10k_pci_irq_sync(ar); ath10k_pci_flush(ar); + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + WARN_ON(ar_pci->ps_wake_refcount > 0); + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); } static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, @@ -1981,15 +2137,15 @@ static int ath10k_pci_chip_reset(struct ath10k *ar) static int ath10k_pci_hif_power_up(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_err(ar, "failed to wake up target: %d\n", ret); - return ret; - } + pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL, + &ar_pci->link_ctl); + pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, + ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); /* * Bring the target up cleanly. @@ -2037,7 +2193,6 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) ath10k_pci_ce_deinit(ar); err_sleep: - ath10k_pci_sleep(ar); return ret; } @@ -2054,7 +2209,12 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar) static int ath10k_pci_hif_suspend(struct ath10k *ar) { - ath10k_pci_sleep(ar); + /* The grace timer can still be counting down and ar->ps_awake be true. + * It is known that the device may be asleep after resuming regardless + * of the SoC powersave state before suspending. Hence make sure the + * device is asleep before proceeding. 
+ */ + ath10k_pci_sleep_sync(ar); return 0; } @@ -2064,13 +2224,6 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct pci_dev *pdev = ar_pci->pdev; u32 val; - int ret; - - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_err(ar, "failed to wake device up on resume: %d\n", ret); - return ret; - } /* Suspend/Resume resets the PCI configuration space, so we have to * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries @@ -2081,7 +2234,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); - return ret; + return 0; } #endif @@ -2175,13 +2328,6 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) { struct ath10k *ar = arg; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ret; - - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret); - return IRQ_NONE; - } if (ar_pci->num_msi_intrs == 0) { if (!ath10k_pci_irq_pending(ar)) @@ -2502,7 +2648,6 @@ static int ath10k_pci_claim(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct pci_dev *pdev = ar_pci->pdev; - u32 lcr_val; int ret; pci_set_drvdata(pdev, ar); @@ -2536,10 +2681,6 @@ static int ath10k_pci_claim(struct ath10k *ar) pci_set_master(pdev); - /* Workaround: Disable ASPM */ - pci_read_config_dword(pdev, 0x80, &lcr_val); - pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00)); - /* Arrange for access to Target SoC registers. */ ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0); if (!ar_pci->mem) { @@ -2633,8 +2774,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev, pdev->subsystem_vendor, pdev->subsystem_device); spin_lock_init(&ar_pci->ce_lock); + spin_lock_init(&ar_pci->ps_lock); + setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, (unsigned long)ar); + setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer, + (unsigned long)ar); ret = ath10k_pci_claim(ar); if (ret) { @@ -2642,12 +2787,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_core_destroy; } - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_err(ar, "failed to wake up: %d\n", ret); - goto err_release; - } - ret = ath10k_pci_alloc_pipes(ar); if (ret) { ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", @@ -2711,9 +2850,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ath10k_pci_free_pipes(ar); err_sleep: - ath10k_pci_sleep(ar); - -err_release: ath10k_pci_release(ar); err_core_destroy: @@ -2743,6 +2879,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev) ath10k_pci_deinit_irq(ar); ath10k_pci_ce_deinit(ar); ath10k_pci_free_pipes(ar); + ath10k_pci_sleep_sync(ar); ath10k_pci_release(ar); ath10k_core_destroy(ar); } diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index bddf54320160..d7696ddc03c4 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -185,6 +185,41 @@ struct ath10k_pci { /* Map CE id to ce_state */ struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; struct timer_list rx_post_retry; + + /* Due to HW quirks it is recommended to disable ASPM during device + * bootup. To do that the original PCI-E Link Control is stored before + * device bootup is executed and re-programmed later. + */ + u16 link_ctl; + + /* Protects ps_awake and ps_wake_refcount */ + spinlock_t ps_lock; + + /* The device has a special powersave-oriented register. 
When device is + * considered asleep it drains less power and driver is forbidden from + * accessing most MMIO registers. If host were to access them without + * waking up the device might scribble over host memory or return + * 0xdeadbeef readouts. + */ + unsigned long ps_wake_refcount; + + /* Waking up takes some time (up to 2ms in some cases) so it can be bad + * for latency. To mitigate this the device isn't immediately allowed + * to sleep after all references are undone - instead there's a grace + * period after which the powersave register is updated unless some + * activity to/from device happened in the meantime. + * + * Also see comments on ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC. + */ + struct timer_list ps_timer; + + /* MMIO registers are used to communicate with the device. With + * intensive traffic accessing powersave register would be a bit + * wasteful overhead and would needlessly stall CPU. It is far more + * efficient to rely on a variable in RAM and update it only upon + * powersave register state changes. + */ + bool ps_awake; }; static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) @@ -209,61 +244,25 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) * for this device; but that's not guaranteed. */ #define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \ - (((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \ + (((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS | \ CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \ 0x100000 | ((addr) & 0xfffff)) /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */ #define DIAG_ACCESS_CE_TIMEOUT_MS 10 -/* Target exposes its registers for direct access. However before host can - * access them it needs to make sure the target is awake (ath10k_pci_wake, - * ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go - * to sleep unless host tells it to (ath10k_pci_sleep). - * - * If host tries to access target registers without waking it up it can - * scribble over host memory. - * - * If target is asleep waking it up may take up to even 2ms. 
- */
-
-static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
-				      u32 value)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-	iowrite32(value, ar_pci->mem + offset);
-}
-
-static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-	return ioread32(ar_pci->mem + offset);
-}
-
-static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
-{
-	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
-}
-
-static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
-{
-	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
-}
-
-static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-	return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
-}
+void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val);
 
-static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+u32 ath10k_pci_read32(struct ath10k *ar, u32 offset);
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr);
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr);
 
-	iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
-}
+/* QCA6174 is known to have Tx/Rx issues when the SOC_WAKE register is poked
+ * too frequently. To avoid this, put the SoC to sleep only after a very
+ * conservative grace period. Adjust with great care.
+ */
+#define ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC 60
 
 #endif /* _PCI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index d22addf6118b..8dcd424aa502 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -519,9 +519,12 @@ int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
 
 int ath10k_spectral_create(struct ath10k *ar)
 {
+	/* The buffer size covers whole-channel scans in both bands with up to
+	 * 128 bins. Scans with more than 128 bins must be run on a single
+	 * band at a time.
+ */ ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan", ar->debug.debugfs_phy, - 1024, 256, + 1140, 2500, &rfs_spec_scan_cb, NULL); debugfs_create_file("spectral_scan_ctl", S_IRUSR | S_IWUSR, diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index ebaa096cf200..0fabe689179c 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1645,10 +1645,10 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) survey = &ar->survey[idx]; survey->time = WMI_CHAN_INFO_MSEC(cycle_count); - survey->time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count); + survey->time_busy = WMI_CHAN_INFO_MSEC(rx_clear_count); survey->noise = noise_floor; survey->filled = SURVEY_INFO_TIME | - SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_BUSY | SURVEY_INFO_NOISE_DBM; } diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index cce4625a53ad..a511ef3614b9 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -889,7 +889,7 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason, GFP_KERNEL); } else if (vif->sme_state == SME_CONNECTED) { cfg80211_disconnected(vif->ndev, proto_reason, - NULL, 0, GFP_KERNEL); + NULL, 0, false, GFP_KERNEL); } vif->sme_state = SME_DISCONNECTED; @@ -3467,7 +3467,7 @@ void ath6kl_cfg80211_stop(struct ath6kl_vif *vif) GFP_KERNEL); break; case SME_CONNECTED: - cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL); + cfg80211_disconnected(vif->ndev, 0, NULL, 0, true, GFP_KERNEL); break; } diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 6c23d279525f..8f8793004b9f 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -254,86 +254,25 @@ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) return 0; } -/** - * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios - * @ah: atheros hardware structure - * @chan: - * - * For non single-chip solutions. Converts to baseband spur frequency given the - * input channel frequency and compute register settings below. 
- */ -static void ar5008_hw_spur_mitigate(struct ath_hw *ah, - struct ath9k_channel *chan) +void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah, + struct ath9k_channel *chan, int bin) { - int bb_spur = AR_NO_SPUR; - int bin, cur_bin; - int spur_freq_sd; - int spur_delta_phase; - int denominator; + int cur_bin; int upper, lower, cur_vit_mask; - int tmp, new; int i; - static int pilot_mask_reg[4] = { + int8_t mask_m[123]; + int8_t mask_p[123]; + int8_t mask_amt; + int tmp_mask; + static const int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8, AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 }; - static int chan_mask_reg[4] = { + static const int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10, AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 }; - static int inc[4] = { 0, 100, 0, 0 }; - - int8_t mask_m[123]; - int8_t mask_p[123]; - int8_t mask_amt; - int tmp_mask; - int cur_bb_spur; - bool is2GHz = IS_CHAN_2GHZ(chan); - - memset(&mask_m, 0, sizeof(int8_t) * 123); - memset(&mask_p, 0, sizeof(int8_t) * 123); - - for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { - cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); - if (AR_NO_SPUR == cur_bb_spur) - break; - cur_bb_spur = cur_bb_spur - (chan->channel * 10); - if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) { - bb_spur = cur_bb_spur; - break; - } - } - - if (AR_NO_SPUR == bb_spur) - return; - - bin = bb_spur * 32; - - tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0)); - new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI | - AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER | - AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK | - AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK); - - REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new); - - new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL | - AR_PHY_SPUR_REG_ENABLE_MASK_PPM | - AR_PHY_SPUR_REG_MASK_RATE_SELECT | - AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI | - SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH)); - REG_WRITE(ah, AR_PHY_SPUR_REG, new); - - spur_delta_phase = ((bb_spur * 524288) / 100) & - AR_PHY_TIMING11_SPUR_DELTA_PHASE; - - denominator = IS_CHAN_2GHZ(chan) ? 440 : 400; - spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff; - - new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC | - SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) | - SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE)); - REG_WRITE(ah, AR_PHY_TIMING11, new); + static const int inc[4] = { 0, 100, 0, 0 }; cur_bin = -6000; upper = bin + 100; @@ -343,6 +282,7 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah, int pilot_mask = 0; int chan_mask = 0; int bp = 0; + for (bp = 0; bp < 30; bp++) { if ((cur_bin > lower) && (cur_bin < upper)) { pilot_mask = pilot_mask | 0x1 << bp; @@ -361,7 +301,6 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah, for (i = 0; i < 123; i++) { if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { - /* workaround for gcc bug #37014 */ volatile int tmp_v = abs(cur_vit_mask - bin); @@ -466,6 +405,78 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah, REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); } +/** + * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios + * @ah: atheros hardware structure + * @chan: + * + * For non single-chip solutions. Converts to baseband spur frequency given the + * input channel frequency and compute register settings below. 
+ */ +static void ar5008_hw_spur_mitigate(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + int bb_spur = AR_NO_SPUR; + int bin; + int spur_freq_sd; + int spur_delta_phase; + int denominator; + int tmp, new; + int i; + + int8_t mask_m[123]; + int8_t mask_p[123]; + int cur_bb_spur; + bool is2GHz = IS_CHAN_2GHZ(chan); + + memset(&mask_m, 0, sizeof(int8_t) * 123); + memset(&mask_p, 0, sizeof(int8_t) * 123); + + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { + cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); + if (AR_NO_SPUR == cur_bb_spur) + break; + cur_bb_spur = cur_bb_spur - (chan->channel * 10); + if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) { + bb_spur = cur_bb_spur; + break; + } + } + + if (AR_NO_SPUR == bb_spur) + return; + + bin = bb_spur * 32; + + tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0)); + new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI | + AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER | + AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK | + AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK); + + REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new); + + new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL | + AR_PHY_SPUR_REG_ENABLE_MASK_PPM | + AR_PHY_SPUR_REG_MASK_RATE_SELECT | + AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI | + SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH)); + REG_WRITE(ah, AR_PHY_SPUR_REG, new); + + spur_delta_phase = ((bb_spur * 524288) / 100) & + AR_PHY_TIMING11_SPUR_DELTA_PHASE; + + denominator = IS_CHAN_2GHZ(chan) ? 440 : 400; + spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff; + + new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC | + SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) | + SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE)); + REG_WRITE(ah, AR_PHY_TIMING11, new); + + ar5008_hw_cmn_spur_mitigate(ah, chan, bin); +} + /** * ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming * @ah: atheros hardware structure diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index fc08162b5820..db6624527d99 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c @@ -169,29 +169,17 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah, { int bb_spur = AR_NO_SPUR; int freq; - int bin, cur_bin; + int bin; int bb_spur_off, spur_subchannel_sd; int spur_freq_sd; int spur_delta_phase; int denominator; - int upper, lower, cur_vit_mask; int tmp, newVal; int i; - static const int pilot_mask_reg[4] = { - AR_PHY_TIMING7, AR_PHY_TIMING8, - AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 - }; - static const int chan_mask_reg[4] = { - AR_PHY_TIMING9, AR_PHY_TIMING10, - AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 - }; - static const int inc[4] = { 0, 100, 0, 0 }; struct chan_centers centers; int8_t mask_m[123]; int8_t mask_p[123]; - int8_t mask_amt; - int tmp_mask; int cur_bb_spur; bool is2GHz = IS_CHAN_2GHZ(chan); @@ -288,135 +276,7 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah, newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S; REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal); - cur_bin = -6000; - upper = bin + 100; - lower = bin - 100; - - for (i = 0; i < 4; i++) { - int pilot_mask = 0; - int chan_mask = 0; - int bp = 0; - for (bp = 0; bp < 30; bp++) { - if ((cur_bin > lower) && (cur_bin < upper)) { - pilot_mask = pilot_mask | 0x1 << bp; - chan_mask = chan_mask | 0x1 << bp; - } - cur_bin += 100; - } - cur_bin += inc[i]; - REG_WRITE(ah, pilot_mask_reg[i], pilot_mask); - REG_WRITE(ah, chan_mask_reg[i], chan_mask); - } - - cur_vit_mask = 6100; - upper = bin + 
120; - lower = bin - 120; - - for (i = 0; i < 123; i++) { - if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { - - /* workaround for gcc bug #37014 */ - volatile int tmp_v = abs(cur_vit_mask - bin); - - if (tmp_v < 75) - mask_amt = 1; - else - mask_amt = 0; - if (cur_vit_mask < 0) - mask_m[abs(cur_vit_mask / 100)] = mask_amt; - else - mask_p[cur_vit_mask / 100] = mask_amt; - } - cur_vit_mask -= 100; - } - - tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28) - | (mask_m[48] << 26) | (mask_m[49] << 24) - | (mask_m[50] << 22) | (mask_m[51] << 20) - | (mask_m[52] << 18) | (mask_m[53] << 16) - | (mask_m[54] << 14) | (mask_m[55] << 12) - | (mask_m[56] << 10) | (mask_m[57] << 8) - | (mask_m[58] << 6) | (mask_m[59] << 4) - | (mask_m[60] << 2) | (mask_m[61] << 0); - REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask); - REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask); - - tmp_mask = (mask_m[31] << 28) - | (mask_m[32] << 26) | (mask_m[33] << 24) - | (mask_m[34] << 22) | (mask_m[35] << 20) - | (mask_m[36] << 18) | (mask_m[37] << 16) - | (mask_m[48] << 14) | (mask_m[39] << 12) - | (mask_m[40] << 10) | (mask_m[41] << 8) - | (mask_m[42] << 6) | (mask_m[43] << 4) - | (mask_m[44] << 2) | (mask_m[45] << 0); - REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask); - REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask); - - tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28) - | (mask_m[18] << 26) | (mask_m[18] << 24) - | (mask_m[20] << 22) | (mask_m[20] << 20) - | (mask_m[22] << 18) | (mask_m[22] << 16) - | (mask_m[24] << 14) | (mask_m[24] << 12) - | (mask_m[25] << 10) | (mask_m[26] << 8) - | (mask_m[27] << 6) | (mask_m[28] << 4) - | (mask_m[29] << 2) | (mask_m[30] << 0); - REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask); - REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask); - - tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28) - | (mask_m[2] << 26) | (mask_m[3] << 24) - | (mask_m[4] << 22) | (mask_m[5] << 20) - | (mask_m[6] << 18) | (mask_m[7] << 16) - | (mask_m[8] << 14) | (mask_m[9] << 12) - | (mask_m[10] << 10) | (mask_m[11] << 8) - | (mask_m[12] << 6) | (mask_m[13] << 4) - | (mask_m[14] << 2) | (mask_m[15] << 0); - REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask); - REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask); - - tmp_mask = (mask_p[15] << 28) - | (mask_p[14] << 26) | (mask_p[13] << 24) - | (mask_p[12] << 22) | (mask_p[11] << 20) - | (mask_p[10] << 18) | (mask_p[9] << 16) - | (mask_p[8] << 14) | (mask_p[7] << 12) - | (mask_p[6] << 10) | (mask_p[5] << 8) - | (mask_p[4] << 6) | (mask_p[3] << 4) - | (mask_p[2] << 2) | (mask_p[1] << 0); - REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask); - REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask); - - tmp_mask = (mask_p[30] << 28) - | (mask_p[29] << 26) | (mask_p[28] << 24) - | (mask_p[27] << 22) | (mask_p[26] << 20) - | (mask_p[25] << 18) | (mask_p[24] << 16) - | (mask_p[23] << 14) | (mask_p[22] << 12) - | (mask_p[21] << 10) | (mask_p[20] << 8) - | (mask_p[19] << 6) | (mask_p[18] << 4) - | (mask_p[17] << 2) | (mask_p[16] << 0); - REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask); - REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask); - - tmp_mask = (mask_p[45] << 28) - | (mask_p[44] << 26) | (mask_p[43] << 24) - | (mask_p[42] << 22) | (mask_p[41] << 20) - | (mask_p[40] << 18) | (mask_p[39] << 16) - | (mask_p[38] << 14) | (mask_p[37] << 12) - | (mask_p[36] << 10) | (mask_p[35] << 8) - | (mask_p[34] << 6) | (mask_p[33] << 4) - | (mask_p[32] << 2) | (mask_p[31] << 0); - REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask); - REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask); - - tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28) - | 
(mask_p[59] << 26) | (mask_p[58] << 24) - | (mask_p[57] << 22) | (mask_p[56] << 20) - | (mask_p[55] << 18) | (mask_p[54] << 16) - | (mask_p[53] << 14) | (mask_p[52] << 12) - | (mask_p[51] << 10) | (mask_p[50] << 8) - | (mask_p[49] << 6) | (mask_p[48] << 4) - | (mask_p[47] << 2) | (mask_p[46] << 0); - REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask); - REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); + ar5008_hw_cmn_spur_mitigate(ah, chan, bin); REGWRITE_BUFFER_FLUSH(ah); } diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index e82a0d4ce23f..5dbc617ecf8a 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -440,9 +440,9 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv) } #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ -#define OP_BT_PRIORITY_DETECTED BIT(3) -#define OP_BT_SCAN BIT(4) -#define OP_TSF_RESET BIT(6) +#define OP_BT_PRIORITY_DETECTED 3 +#define OP_BT_SCAN 4 +#define OP_TSF_RESET 6 enum htc_op_flags { HTC_FWFLAG_NO_RMW, diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index c1d2d0340feb..e8454db17634 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -1119,6 +1119,8 @@ bool ar9003_is_paprd_enabled(struct ath_hw *ah); void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx); void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array, struct ath9k_channel *chan); +void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah, + struct ath9k_channel *chan, int bin); void ar5008_hw_init_rate_txpower(struct ath_hw *ah, int16_t *rate_array, struct ath9k_channel *chan, int ht40_delta); diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index c9f93310c0d6..76842e6ca38e 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -651,6 +651,7 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd, unsigned int plen, void *payload, unsigned int outlen, void *out) { int err = -ENOMEM; + unsigned long time_left; if (!IS_ACCEPTING_CMD(ar)) return -EIO; @@ -672,8 +673,8 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd, err = __carl9170_exec_cmd(ar, &ar->cmd, false); if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) { - err = wait_for_completion_timeout(&ar->cmd_wait, HZ); - if (err == 0) { + time_left = wait_for_completion_timeout(&ar->cmd_wait, HZ); + if (time_left == 0) { err = -ETIMEDOUT; goto err_unbuf; } diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index caa717bf52f3..050506f842e9 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -12,6 +12,7 @@ wil6210-y += debug.o wil6210-y += rx_reorder.o wil6210-y += ioctl.o wil6210-y += fw.o +wil6210-y += pmc.o wil6210-$(CONFIG_WIL6210_TRACING) += trace.o wil6210-y += wil_platform.o wil6210-y += ethtool.o diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index b97172667bc7..dbfcdd16628a 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -402,11 +402,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, rsn_eid = sme->ie ? 
cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) : NULL; - - if (sme->privacy && !rsn_eid) { - wil_err(wil, "Missing RSN IE for secure connection\n"); - return -EINVAL; - } + if (sme->privacy && !rsn_eid) + wil_info(wil, "WSC connection\n"); bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, sme->ssid, sme->ssid_len, @@ -425,10 +422,17 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, wil->privacy = sme->privacy; if (wil->privacy) { - /* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */ - rc = wmi_del_cipher_key(wil, 0, bss->bssid); + /* For secure assoc, remove old keys */ + rc = wmi_del_cipher_key(wil, 0, bss->bssid, + WMI_KEY_USE_PAIRWISE); if (rc) { - wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n"); + wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n"); + goto out; + } + rc = wmi_del_cipher_key(wil, 0, bss->bssid, + WMI_KEY_USE_RX_GROUP); + if (rc) { + wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n"); goto out; } } @@ -458,11 +462,18 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, goto out; } if (wil->privacy) { - conn.dot11_auth_mode = WMI_AUTH11_SHARED; - conn.auth_mode = WMI_AUTH_WPA2_PSK; - conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP; - conn.pairwise_crypto_len = 16; - } else { + if (rsn_eid) { /* regular secure connection */ + conn.dot11_auth_mode = WMI_AUTH11_SHARED; + conn.auth_mode = WMI_AUTH_WPA2_PSK; + conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP; + conn.pairwise_crypto_len = 16; + conn.group_crypto_type = WMI_CRYPT_AES_GCMP; + conn.group_crypto_len = 16; + } else { /* WSC */ + conn.dot11_auth_mode = WMI_AUTH11_WSC; + conn.auth_mode = WMI_AUTH_NONE; + } + } else { /* insecure connection */ conn.dot11_auth_mode = WMI_AUTH11_OPEN; conn.auth_mode = WMI_AUTH_NONE; } @@ -507,6 +518,8 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy, int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); + wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code); + rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0); return rc; @@ -561,6 +574,39 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy, return 0; } +static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil, + bool pairwise) +{ + struct wireless_dev *wdev = wil->wdev; + enum wmi_key_usage rc; + static const char * const key_usage_str[] = { + [WMI_KEY_USE_PAIRWISE] = "WMI_KEY_USE_PAIRWISE", + [WMI_KEY_USE_RX_GROUP] = "WMI_KEY_USE_RX_GROUP", + [WMI_KEY_USE_TX_GROUP] = "WMI_KEY_USE_TX_GROUP", + }; + + if (pairwise) { + rc = WMI_KEY_USE_PAIRWISE; + } else { + switch (wdev->iftype) { + case NL80211_IFTYPE_STATION: + rc = WMI_KEY_USE_RX_GROUP; + break; + case NL80211_IFTYPE_AP: + rc = WMI_KEY_USE_TX_GROUP; + break; + default: + /* TODO: Rx GTK or Tx GTK? */ + wil_err(wil, "Can't determine GTK type\n"); + rc = WMI_KEY_USE_RX_GROUP; + break; + } + } + wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]); + + return rc; +} + static int wil_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, @@ -568,13 +614,13 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, struct key_params *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise); - /* group key is not used */ - if (!pairwise) - return 0; + wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index, + pairwise ? 
"PTK" : "GTK"); - return wmi_add_cipher_key(wil, key_index, mac_addr, - params->key_len, params->key); + return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len, + params->key, key_usage); } static int wil_cfg80211_del_key(struct wiphy *wiphy, @@ -583,12 +629,12 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy, const u8 *mac_addr) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise); - /* group key is not used */ - if (!pairwise) - return 0; + wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index, + pairwise ? "PTK" : "GTK"); - return wmi_del_cipher_key(wil, key_index, mac_addr); + return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage); } /* Need to be present or wiphy_new() will WARN */ @@ -661,11 +707,6 @@ static int wil_fix_bcon(struct wil6210_priv *wil, if (bcon->probe_resp_len <= hlen) return 0; - if (!bcon->proberesp_ies) { - bcon->proberesp_ies = f->u.probe_resp.variable; - bcon->proberesp_ies_len = bcon->probe_resp_len - hlen; - rc = 1; - } if (!bcon->assocresp_ies) { bcon->assocresp_ies = f->u.probe_resp.variable; bcon->assocresp_ies_len = bcon->probe_resp_len - hlen; @@ -680,9 +721,19 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, struct cfg80211_beacon_data *bcon) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; + size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + const u8 *pr_ies = NULL; + size_t pr_ies_len = 0; int rc; wil_dbg_misc(wil, "%s()\n", __func__); + wil_print_bcon_data(bcon); + + if (bcon->probe_resp_len > hlen) { + pr_ies = f->u.probe_resp.variable; + pr_ies_len = bcon->probe_resp_len - hlen; + } if (wil_fix_bcon(wil, bcon)) { wil_dbg_misc(wil, "Fixed bcon\n"); @@ -695,9 +746,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, * bcon->beacon_ies); */ - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, - bcon->proberesp_ies_len, - bcon->proberesp_ies); + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies); if (rc) { wil_err(wil, "set_ie(PROBE_RESP) failed\n"); return rc; @@ -725,6 +774,10 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, struct cfg80211_beacon_data *bcon = &info->beacon; struct cfg80211_crypto_settings *crypto = &info->crypto; u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); + struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; + size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + const u8 *pr_ies = NULL; + size_t pr_ies_len = 0; wil_dbg_misc(wil, "%s()\n", __func__); @@ -744,6 +797,11 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, wil_print_bcon_data(bcon); wil_print_crypto(wil, crypto); + if (bcon->probe_resp_len > hlen) { + pr_ies = f->u.probe_resp.variable; + pr_ies_len = bcon->probe_resp_len - hlen; + } + if (wil_fix_bcon(wil, bcon)) { wil_dbg_misc(wil, "Fixed bcon\n"); wil_print_bcon_data(bcon); @@ -771,8 +829,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, * bcon->beacon_ies); */ - wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len, - bcon->proberesp_ies); + wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies); wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, bcon->assocresp_ies); @@ -814,13 +871,9 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, wmi_pcp_stop(wil); __wil_down(wil); - __wil_up(wil); 
mutex_unlock(&wil->mutex); - /* some functions above might fail (e.g. __wil_up). Nevertheless, we - * return success because AP has stopped - */ return 0; } @@ -830,6 +883,9 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac, + params->reason_code); + mutex_lock(&wil->mutex); wil6210_disconnect(wil, params->mac, params->reason_code, false); mutex_unlock(&wil->mutex);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index bbc22d88f78f..8f9c0722a801 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -24,6 +24,7 @@ #include "wil6210.h" #include "wmi.h" #include "txrx.h" +#include "pmc.h" /* Nasty hack. Better have per device instances */ static u32 mem_addr; @@ -123,15 +124,17 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data) if (cid < WIL6210_MAX_CID) seq_printf(s, - "\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n", + "\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n", wil->sta[cid].addr, cid, tid, + txdata->dot1x_open ? "+" : "-", txdata->agg_wsize, txdata->agg_timeout, txdata->agg_amsdu ? "+" : "-", used, avail, sidle); else seq_printf(s, - "\nBroadcast [%3d|%3d] idle %s\n", + "\nBroadcast 1x%s [%3d|%3d] idle %s\n", + txdata->dot1x_open ? "+" : "-", used, avail, sidle); wil_print_vring(s, wil, name, vring, '_', 'H'); @@ -702,6 +705,89 @@ static const struct file_operations fops_back = { .open = simple_open, }; +/* pmc control, write: + * - "alloc <num descriptors> <descriptor size>" to allocate PMC + * - "free" to release memory allocated for PMC + */ +static ssize_t wil_write_pmccfg(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + struct wil6210_priv *wil = file->private_data; + int rc; + char *kbuf = kmalloc(len + 1, GFP_KERNEL); + char cmd[9]; + int num_descs, desc_size; + + if (!kbuf) + return -ENOMEM; + + rc = simple_write_to_buffer(kbuf, len, ppos, buf, len); + if (rc != len) { + kfree(kbuf); + return rc >= 0 ? -EIO : rc; + } + + kbuf[len] = '\0'; + rc = sscanf(kbuf, "%8s %d %d", cmd, &num_descs, &desc_size); + kfree(kbuf); + + if (rc < 0) + return rc; + + if (rc < 1) { + wil_err(wil, "pmccfg: no params given\n"); + return -EINVAL; + } + + if (0 == strcmp(cmd, "alloc")) { + if (rc != 3) { + wil_err(wil, "pmccfg: alloc requires 2 params\n"); + return -EINVAL; + } + wil_pmc_alloc(wil, num_descs, desc_size); + } else if (0 == strcmp(cmd, "free")) { + if (rc != 1) { + wil_err(wil, "pmccfg: free does not have any params\n"); + return -EINVAL; + } + wil_pmc_free(wil, true); + } else { + wil_err(wil, "pmccfg: Unrecognized command \"%s\"\n", cmd); + return -EINVAL; + } + + return len; +}
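+
+/* Example session (illustrative only; the debugfs mount point and wiphy
+ * name vary between systems):
+ *   echo "alloc 256 4096" > /sys/kernel/debug/ieee80211/phy0/wil6210/pmccfg
+ *   cat /sys/kernel/debug/ieee80211/phy0/wil6210/pmcdata > pmc.bin
+ *   echo "free" > /sys/kernel/debug/ieee80211/phy0/wil6210/pmccfg
+ */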
+ +static ssize_t wil_read_pmccfg(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wil6210_priv *wil = file->private_data; + char text[256]; + char help[] = "pmc control, write:\n" + " - \"alloc <num descriptors> <descriptor size>\" to allocate pmc\n" + " - \"free\" to free memory allocated for pmc\n"; + + sprintf(text, "Last command status: %d\n\n%s", + wil_pmc_last_cmd_status(wil), + help); + + return simple_read_from_buffer(user_buf, count, ppos, text, + strlen(text) + 1); +} + +static const struct file_operations fops_pmccfg = { + .read = wil_read_pmccfg, + .write = wil_write_pmccfg, + .open = simple_open, +}; + +static const struct file_operations fops_pmcdata = { + .open = simple_open, + .read = wil_pmc_read, + .llseek = wil_pmc_llseek, +}; + /*---tx_mgmt---*/ /* Write mgmt frame to this file to send it */ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, @@ -1111,8 +1197,7 @@ static int wil_link_debugfs_show(struct seq_file *s, void *data) status = "connected"; break; } - seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status, - (p->data_port_open ?
" data_port_open" : "")); + seq_printf(s, "[%d] %pM %s\n", i, p->addr, status); if (p->status == wil_sta_connected) { spin_lock_bh(&p->tid_rx_lock); @@ -1363,6 +1447,8 @@ static const struct { {"tx_mgmt", S_IWUSR, &fops_txmgmt}, {"wmi_send", S_IWUSR, &fops_wmi}, {"back", S_IRUGO | S_IWUSR, &fops_back}, + {"pmccfg", S_IRUGO | S_IWUSR, &fops_pmccfg}, + {"pmcdata", S_IRUGO, &fops_pmcdata}, {"temp", S_IRUGO, &fops_temp}, {"freq", S_IRUGO, &fops_freq}, {"link", S_IRUGO, &fops_link}, @@ -1440,6 +1526,8 @@ int wil6210_debugfs_init(struct wil6210_priv *wil) if (IS_ERR_OR_NULL(dbg)) return -ENODEV; + wil_pmc_init(wil); + wil6210_debugfs_init_files(wil, dbg); wil6210_debugfs_init_isr(wil, dbg); wil6210_debugfs_init_blobs(wil, dbg); @@ -1459,4 +1547,9 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil) { debugfs_remove_recursive(wil->debug); wil->debug = NULL; + + /* free pmc memory without sending command to fw, as it will + * be reset on the way down anyway + */ + wil_pmc_free(wil, false); } diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index c2a238426425..6d704aee3afd 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -25,6 +25,10 @@ #define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000 #define WAIT_FOR_DISCONNECT_INTERVAL_MS 10 +bool debug_fw; /* = false; */ +module_param(debug_fw, bool, S_IRUGO); +MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug"); + bool no_fw_recovery; module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery"); @@ -146,7 +150,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid, sta->status); - sta->data_port_open = false; if (sta->status != wil_sta_unused) { if (!from_event) wmi_disconnect_sta(wil, sta->addr, reason_code); @@ -224,7 +227,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, if (test_bit(wil_status_fwconnected, wil->status)) { clear_bit(wil_status_fwconnected, wil->status); cfg80211_disconnected(ndev, reason_code, - NULL, 0, GFP_KERNEL); + NULL, 0, false, GFP_KERNEL); } else if (test_bit(wil_status_fwconnecting, wil->status)) { cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, @@ -373,9 +376,10 @@ int wil_bcast_init(struct wil6210_priv *wil) if (ri < 0) return ri; + wil->bcast_vring = ri; rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order); - if (rc == 0) - wil->bcast_vring = ri; + if (rc) + wil->bcast_vring = -1; return rc; } @@ -547,7 +551,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil) static int wil_target_reset(struct wil6210_priv *wil) { int delay = 0; - u32 x; + u32 x, x1 = 0; wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name); @@ -602,12 +606,16 @@ static int wil_target_reset(struct wil6210_priv *wil) do { msleep(RST_DELAY); x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready)); + if (x1 != x) { + wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x); + x1 = x; + } if (delay++ > RST_COUNT) { wil_err(wil, "Reset not completed, bl.ready 0x%08x\n", x); return -ETIME; } - } while (!(x & BIT_BL_READY)); + } while (x != BIT_BL_READY); C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); @@ -686,6 +694,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) WARN_ON(!mutex_is_locked(&wil->mutex)); WARN_ON(test_bit(wil_status_napi_en, wil->status)); + if (debug_fw) { + static const u8 mac[ETH_ALEN] = { + 0x00, 0xde, 
0xad, 0x12, 0x34, 0x56, + }; + struct net_device *ndev = wil_to_ndev(wil); + + ether_addr_copy(ndev->perm_addr, mac); + ether_addr_copy(ndev->dev_addr, ndev->perm_addr); + return 0; + } + cancel_work_sync(&wil->disconnect_worker); wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); wil_bcast_fini(wil); diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index f2f7ea29558e..6042f61b016c 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -24,6 +24,11 @@ static int wil_open(struct net_device *ndev) wil_dbg_misc(wil, "%s()\n", __func__); + if (debug_fw) { + wil_err(wil, "%s() while in debug_fw mode\n", __func__); + return -EINVAL; + } + return wil_up(wil); } diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 109986114abf..58c79166a6d1 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -27,10 +27,6 @@ MODULE_PARM_DESC(use_msi, " Use MSI interrupt: " "0 - don't, 1 - (default) - single, or 3"); -static bool debug_fw; /* = false; */ -module_param(debug_fw, bool, S_IRUGO); -MODULE_PARM_DESC(debug_fw, " load driver if FW not ready. For FW debug"); - static void wil_set_capabilities(struct wil6210_priv *wil) { @@ -133,8 +129,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) mutex_lock(&wil->mutex); rc = wil_reset(wil, false); mutex_unlock(&wil->mutex); - if (debug_fw) - rc = 0; if (rc) goto release_irq; diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c new file mode 100644 index 000000000000..8a8cdc61b25b --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/pmc.c @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include "wmi.h" +#include "wil6210.h" +#include "txrx.h" +#include "pmc.h" + +struct desc_alloc_info { + dma_addr_t pa; + void *va; +}; + +static int wil_is_pmc_allocated(struct pmc_ctx *pmc) +{ + return !!pmc->pring_va; +} + +void wil_pmc_init(struct wil6210_priv *wil) +{ + memset(&wil->pmc, 0, sizeof(struct pmc_ctx)); + mutex_init(&wil->pmc.lock); +} + +/** + * Allocate the physical ring (p-ring) and the required + * number of descriptors of required size. + * Initialize the descriptors as required by pmc dma. + * The descriptors' buffers dwords are initialized to hold + * dword's serial number in the lsw and reserved value + * PCM_DATA_INVALID_DW_VAL in the msw. 
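+ * For example, with PCM_DATA_INVALID_DW_VAL of 0xB0BA0000, dword j of each
+ * descriptor buffer starts out as (0xB0BA0000 | j), which makes data the
+ * device has not yet written easy to spot when reading the buffers back.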
+ */ +void wil_pmc_alloc(struct wil6210_priv *wil, + int num_descriptors, + int descriptor_size) +{ + u32 i; + struct pmc_ctx *pmc = &wil->pmc; + struct device *dev = wil_to_dev(wil); + struct wmi_pmc_cmd pmc_cmd = {0}; + + mutex_lock(&pmc->lock); + + if (wil_is_pmc_allocated(pmc)) { + /* sanity check */ + wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__); + goto no_release_err; + } + + pmc->num_descriptors = num_descriptors; + pmc->descriptor_size = descriptor_size; + + wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n", + __func__, num_descriptors, descriptor_size); + + /* allocate descriptors info list in pmc context */ + pmc->descriptors = kcalloc(num_descriptors, + sizeof(struct desc_alloc_info), + GFP_KERNEL); + if (!pmc->descriptors) { + wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__); + goto no_release_err; + } + + wil_dbg_misc(wil, + "%s: allocated descriptors info list %p\n", + __func__, pmc->descriptors); + + /* Allocate pring buffer and descriptors. + * vring->va should be aligned on its size rounded up to power of 2 + * This is granted by the dma_alloc_coherent + */ + pmc->pring_va = dma_alloc_coherent(dev, + sizeof(struct vring_tx_desc) * num_descriptors, + &pmc->pring_pa, + GFP_KERNEL); + + wil_dbg_misc(wil, + "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n", + __func__, + pmc->pring_va, &pmc->pring_pa, + sizeof(struct vring_tx_desc), + num_descriptors, + sizeof(struct vring_tx_desc) * num_descriptors); + + if (!pmc->pring_va) { + wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__); + goto release_pmc_skb_list; + } + + /* initially, all descriptors are SW owned + * For Tx, Rx, and PMC, ownership bit is at the same location, thus + * we can use any + */ + for (i = 0; i < num_descriptors; i++) { + struct vring_tx_desc *_d = &pmc->pring_va[i]; + struct vring_tx_desc dd, *d = &dd; + int j = 0; + + pmc->descriptors[i].va = dma_alloc_coherent(dev, + descriptor_size, + &pmc->descriptors[i].pa, + GFP_KERNEL); + + if (unlikely(!pmc->descriptors[i].va)) { + wil_err(wil, + "%s: ERROR allocating pmc descriptor %d", + __func__, i); + goto release_pmc_skbs; + } + + for (j = 0; j < descriptor_size / sizeof(u32); j++) { + u32 *p = (u32 *)pmc->descriptors[i].va + j; + *p = PCM_DATA_INVALID_DW_VAL | j; + } + + /* configure dma descriptor */ + d->dma.addr.addr_low = + cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa)); + d->dma.addr.addr_high = + cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa)); + d->dma.status = 0; /* 0 = HW_OWNED */ + d->dma.length = cpu_to_le16(descriptor_size); + d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT; + *_d = *d; + } + + wil_dbg_misc(wil, "%s: allocated successfully\n", __func__); + + pmc_cmd.op = WMI_PMC_ALLOCATE; + pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors); + pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa); + + wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__); + pmc->last_cmd_status = wmi_send(wil, + WMI_PMC_CMDID, + &pmc_cmd, + sizeof(pmc_cmd)); + if (pmc->last_cmd_status) { + wil_err(wil, + "%s: WMI_PMC_CMD with ALLOCATE op failed with status %d", + __func__, pmc->last_cmd_status); + goto release_pmc_skbs; + } + + mutex_unlock(&pmc->lock); + + return; + +release_pmc_skbs: + wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__); + for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) { + dma_free_coherent(dev, + descriptor_size, + pmc->descriptors[i].va, + pmc->descriptors[i].pa); + + pmc->descriptors[i].va = NULL; + } + wil_err(wil, "%s: exit on error:
Releasing pring...\n", __func__); + + dma_free_coherent(dev, + sizeof(struct vring_tx_desc) * num_descriptors, + pmc->pring_va, + pmc->pring_pa); + + pmc->pring_va = NULL; + +release_pmc_skb_list: + wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n", + __func__); + kfree(pmc->descriptors); + pmc->descriptors = NULL; + +no_release_err: + pmc->last_cmd_status = -ENOMEM; + mutex_unlock(&pmc->lock); +} + +/** + * Traverse the p-ring and release all buffers. + * At the end release the p-ring memory + */ +void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) +{ + struct pmc_ctx *pmc = &wil->pmc; + struct device *dev = wil_to_dev(wil); + struct wmi_pmc_cmd pmc_cmd = {0}; + + mutex_lock(&pmc->lock); + + pmc->last_cmd_status = 0; + + if (!wil_is_pmc_allocated(pmc)) { + wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n", + __func__); + pmc->last_cmd_status = -EPERM; + mutex_unlock(&pmc->lock); + return; + } + + if (send_pmc_cmd) { + wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n", + __func__); + pmc_cmd.op = WMI_PMC_RELEASE; + pmc->last_cmd_status = + wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd, + sizeof(pmc_cmd)); + if (pmc->last_cmd_status) { + wil_err(wil, + "%s WMI_PMC_CMD with RELEASE op failed, status %d", + __func__, pmc->last_cmd_status); + /* There's nothing we can do with this error. + * Normally, it should never occur. + * Continue freeing all memory allocated for pmc. + */ + } + } + + if (pmc->pring_va) { + size_t buf_size = sizeof(struct vring_tx_desc) * + pmc->num_descriptors; + + wil_dbg_misc(wil, "%s: free pring va %p\n", + __func__, pmc->pring_va); + dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa); + + pmc->pring_va = NULL; + } else { + pmc->last_cmd_status = -ENOENT; + } + + if (pmc->descriptors) { + int i; + + for (i = 0; + i < pmc->num_descriptors && pmc->descriptors[i].va; i++) { + dma_free_coherent(dev, + pmc->descriptor_size, + pmc->descriptors[i].va, + pmc->descriptors[i].pa); + pmc->descriptors[i].va = NULL; + } + wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n", + __func__, i, pmc->num_descriptors); + wil_dbg_misc(wil, + "%s: free pmc descriptors info list %p\n", + __func__, pmc->descriptors); + kfree(pmc->descriptors); + pmc->descriptors = NULL; + } else { + pmc->last_cmd_status = -ENOENT; + } + + mutex_unlock(&pmc->lock); +} + +/** + * Status of the last operation requested via debugfs: alloc/free/read. + * Returns 0 on success or a negative errno. + */ +int wil_pmc_last_cmd_status(struct wil6210_priv *wil) +{ + wil_dbg_misc(wil, "%s: status %d\n", __func__, + wil->pmc.last_cmd_status); + + return wil->pmc.last_cmd_status; +} + +/** + * Read from the requested position up to the end of the current descriptor; + * the descriptor size is the one configured in the alloc request.
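+ * For example, with a descriptor size of 4096, a read at position 5000
+ * maps to descriptor 1 (5000 / 4096) at offset 904 (5000 mod 4096) and
+ * returns at most 3192 bytes (4096 - 904).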
+ */ +ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos) +{ + struct wil6210_priv *wil = filp->private_data; + struct pmc_ctx *pmc = &wil->pmc; + size_t retval = 0; + unsigned long long idx; + loff_t offset; + size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors; + + mutex_lock(&pmc->lock); + + if (!wil_is_pmc_allocated(pmc)) { + wil_err(wil, "%s: error, pmc is not allocated!\n", __func__); + pmc->last_cmd_status = -EPERM; + mutex_unlock(&pmc->lock); + return -EPERM; + } + + wil_dbg_misc(wil, + "%s: size %u, pos %lld\n", + __func__, (unsigned)count, *f_pos); + + pmc->last_cmd_status = 0; + + idx = *f_pos; + do_div(idx, pmc->descriptor_size); + offset = *f_pos - (idx * pmc->descriptor_size); + + if (*f_pos >= pmc_size) { + wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n", + __func__, *f_pos, (unsigned)pmc_size); + pmc->last_cmd_status = -ERANGE; + goto out; + } + + wil_dbg_misc(wil, + "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n", + __func__, *f_pos, idx, offset, count); + + /* if no errors, return the copied byte count */ + retval = simple_read_from_buffer(buf, + count, + &offset, + pmc->descriptors[idx].va, + pmc->descriptor_size); + *f_pos += retval; +out: + mutex_unlock(&pmc->lock); + + return retval; +} + +loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence) +{ + loff_t newpos; + struct wil6210_priv *wil = filp->private_data; + struct pmc_ctx *pmc = &wil->pmc; + size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors; + + switch (whence) { + case 0: /* SEEK_SET */ + newpos = off; + break; + + case 1: /* SEEK_CUR */ + newpos = filp->f_pos + off; + break; + + case 2: /* SEEK_END */ + newpos = pmc_size; + break; + + default: /* can't happen */ + return -EINVAL; + } + + if (newpos < 0) + return -EINVAL; + if (newpos > pmc_size) + newpos = pmc_size; + + filp->f_pos = newpos; + + return newpos; +} diff --git a/drivers/net/wireless/ath/wil6210/pmc.h b/drivers/net/wireless/ath/wil6210/pmc.h new file mode 100644 index 000000000000..bebc8d52e1e6 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/pmc.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include + +#define PCM_DATA_INVALID_DW_VAL (0xB0BA0000) + +void wil_pmc_init(struct wil6210_priv *wil); +void wil_pmc_alloc(struct wil6210_priv *wil, + int num_descriptors, int descriptor_size); +void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd); +int wil_pmc_last_cmd_status(struct wil6210_priv *wil); +ssize_t wil_pmc_read(struct file *, char __user *, size_t, loff_t *); +loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index e8bd512d81a9..0113dac3a9a9 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -236,7 +236,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring, return -ENOMEM; } - d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT; + d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT; wil_desc_addr_set(&d->dma.addr, pa); /* ip_length don't care */ /* b11 don't care */ @@ -724,6 +724,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); + if (!wil->privacy) + txdata->dot1x_open = true; rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd), WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); if (rc) @@ -738,11 +740,13 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); txdata->enabled = 1; - if (wil->sta[cid].data_port_open && (agg_wsize >= 0)) + if (txdata->dot1x_open && (agg_wsize >= 0)) wil_addba_tx_request(wil, id, agg_wsize); return 0; out_free: + txdata->dot1x_open = false; + txdata->enabled = 0; wil_vring_free(wil, vring, 1); out: @@ -792,6 +796,8 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); + if (!wil->privacy) + txdata->dot1x_open = true; rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd), WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); if (rc) @@ -809,6 +815,8 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) return 0; out_free: + txdata->enabled = 0; + txdata->dot1x_open = false; wil_vring_free(wil, vring, 1); out: @@ -828,6 +836,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id) wil_dbg_misc(wil, "%s() id=%d\n", __func__, id); spin_lock_bh(&txdata->lock); + txdata->dot1x_open = false; txdata->enabled = 0; /* no Tx can be in progress or start anew */ spin_unlock_bh(&txdata->lock); /* make sure NAPI won't touch this vring */ @@ -848,12 +857,11 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, if (cid < 0) return NULL; - if (!wil->sta[cid].data_port_open && - (skb->protocol != cpu_to_be16(ETH_P_PAE))) - return NULL; - /* TODO: fix for multiple TID */ for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) { + if (!wil->vring_tx_data[i].dot1x_open && + (skb->protocol != cpu_to_be16(ETH_P_PAE))) + continue; if (wil->vring2cid_tid[i][0] == cid) { struct vring *v = &wil->vring_tx[i]; @@ -883,7 +891,7 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, /* In the STA mode, it is expected to have only 1 VRING * for the AP we connected to. 
- * find 1-st vring and see whether it is eligible for data + * find 1-st vring eligible for this skb and use it. */ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { v = &wil->vring_tx[i]; @@ -894,9 +902,9 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, if (cid >= WIL6210_MAX_CID) /* skip BCAST */ continue; - if (!wil->sta[cid].data_port_open && + if (!wil->vring_tx_data[i].dot1x_open && (skb->protocol != cpu_to_be16(ETH_P_PAE))) - break; + continue; wil_dbg_txrx(wil, "Tx -> ring %d\n", i); @@ -918,7 +926,6 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, * in all cases override dest address to unicast peer's address * Use old strategy when new is not supported yet: * - for PBSS - * - for secure link */ static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil, struct sk_buff *skb) @@ -931,6 +938,9 @@ static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil, v = &wil->vring_tx[i]; if (!v->va) return NULL; + if (!wil->vring_tx_data[i].dot1x_open && + (skb->protocol != cpu_to_be16(ETH_P_PAE))) + return NULL; return v; } @@ -963,7 +973,8 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil, cid = wil->vring2cid_tid[i][0]; if (cid >= WIL6210_MAX_CID) /* skip BCAST */ continue; - if (!wil->sta[cid].data_port_open) + if (!wil->vring_tx_data[i].dot1x_open && + (skb->protocol != cpu_to_be16(ETH_P_PAE))) continue; /* don't Tx back to source when re-routing Rx->Tx at the AP */ @@ -989,7 +1000,8 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil, cid = wil->vring2cid_tid[i][0]; if (cid >= WIL6210_MAX_CID) /* skip BCAST */ continue; - if (!wil->sta[cid].data_port_open) + if (!wil->vring_tx_data[i].dot1x_open && + (skb->protocol != cpu_to_be16(ETH_P_PAE))) continue; if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN)) @@ -1016,9 +1028,6 @@ static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil, if (wdev->iftype != NL80211_IFTYPE_AP) return wil_find_tx_bcast_2(wil, skb); - if (wil->privacy) - return wil_find_tx_bcast_2(wil, skb); - return wil_find_tx_bcast_1(wil, skb); } @@ -1144,13 +1153,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, wil_tx_desc_map(d, pa, len, vring_index); if (unlikely(mcast)) { d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */ - if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) { - /* set MCS 1 */ + if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */ d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS); - /* packet mode 2 */ - d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) | - (2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS); - } } /* Process TCP/UDP checksum offloading */ if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) { diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index d90c8aa20c15..0c4638487c74 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -384,19 +384,27 @@ struct vring_rx_mac { * [word 7] length */ -#define RX_DMA_D0_CMD_DMA_IT BIT(10) - -/* Error field, offload bits */ -#define RX_DMA_ERROR_L3_ERR BIT(4) -#define RX_DMA_ERROR_L4_ERR BIT(5) +#define RX_DMA_D0_CMD_DMA_EOP BIT(8) +#define RX_DMA_D0_CMD_DMA_RT BIT(9) /* always 1 */ +#define RX_DMA_D0_CMD_DMA_IT BIT(10) /* interrupt */ + +/* Error field */ +#define RX_DMA_ERROR_FCS BIT(0) +#define RX_DMA_ERROR_MIC BIT(1) +#define RX_DMA_ERROR_KEY BIT(2) /* Key missing */ +#define RX_DMA_ERROR_REPLAY BIT(3) +#define RX_DMA_ERROR_L3_ERR BIT(4) +#define RX_DMA_ERROR_L4_ERR BIT(5) /* 
Status field */ -#define RX_DMA_STATUS_DU BIT(0) -#define RX_DMA_STATUS_ERROR BIT(2) - +#define RX_DMA_STATUS_DU BIT(0) +#define RX_DMA_STATUS_EOP BIT(1) +#define RX_DMA_STATUS_ERROR BIT(2) +#define RX_DMA_STATUS_MI BIT(3) /* MAC Interrupt is asserted */ #define RX_DMA_STATUS_L3I BIT(4) #define RX_DMA_STATUS_L4I BIT(5) #define RX_DMA_STATUS_PHY_INFO BIT(6) +#define RX_DMA_STATUS_FFM BIT(7) /* EtherType Flex Filter Match */ struct vring_rx_dma { u32 d0; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 4310972c9e16..f3513a1fa424 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -21,6 +21,7 @@ #include #include #include +#include #include "wil_platform.h" extern bool no_fw_recovery; @@ -29,10 +30,11 @@ extern unsigned short rx_ring_overflow_thrsh; extern int agg_wsize; extern u32 vring_idle_trsh; extern bool rx_align_2; +extern bool debug_fw; #define WIL_NAME "wil6210" #define WIL_FW_NAME "wil6210.fw" /* code */ -#define WIL_FW2_NAME "wil6210.board" /* board & radio parameters */ +#define WIL_FW2_NAME "wil6210.brd" /* board & radio parameters */ #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */ @@ -396,6 +398,7 @@ struct vring { * Additional data for Tx Vring */ struct vring_tx_data { + bool dot1x_open; int enabled; cycles_t idle, last_idle, begin; u8 agg_wsize; /* agreed aggregation window, 0 - no agg */ @@ -484,7 +487,6 @@ struct wil_sta_info { u8 addr[ETH_ALEN]; enum wil_sta_status status; struct wil_net_stats stats; - bool data_port_open; /* can send any data, not only EAPOL */ /* Rx BACK */ struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM]; spinlock_t tid_rx_lock; /* guarding tid_rx array */ @@ -526,6 +528,17 @@ struct wil_probe_client_req { u8 cid; }; +struct pmc_ctx { + /* alloc, free, and read operations must own the lock */ + struct mutex lock; + struct vring_tx_desc *pring_va; + dma_addr_t pring_pa; + struct desc_alloc_info *descriptors; + int last_cmd_status; + int num_descriptors; + int descriptor_size; +}; + struct wil6210_priv { struct pci_dev *pdev; int n_msi; @@ -610,6 +623,8 @@ struct wil6210_priv { void *platform_handle; struct wil_platform_ops platform_ops; + + struct pmc_ctx pmc; }; #define wil_to_wiphy(i) (i->wdev->wiphy) @@ -701,9 +716,10 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid); int wmi_set_channel(struct wil6210_priv *wil, int channel); int wmi_get_channel(struct wil6210_priv *wil, int *channel); int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, - const void *mac_addr); + const void *mac_addr, int key_usage); int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, - const void *mac_addr, int key_len, const void *key); + const void *mac_addr, int key_len, const void *key, + int key_usage); int wmi_echo(struct wil6210_priv *wil); int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie); int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 9fe2085be2c5..3dc8daf69bd2 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -543,55 +543,22 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id, } } -static void wil_addba_tx_cid(struct wil6210_priv *wil, u8 cid, u16 wsize) +static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len) { - struct vring_tx_data *t; - int i; + struct wmi_vring_en_event *evt = d; + u8 vri = evt->vring_index; - for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { - if (cid != wil->vring2cid_tid[i][0]) - continue; - t = &wil->vring_tx_data[i]; - if (!t->enabled) - continue; + wil_dbg_wmi(wil, "Enable vring %d\n", vri); - wil_addba_tx_request(wil, i, wsize); - } -} - -static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len) -{ - struct wmi_data_port_open_event *evt = d; - u8 cid = evt->cid; - - wil_dbg_wmi(wil, "Link UP for CID %d\n", cid); - - if (cid >= ARRAY_SIZE(wil->sta)) { - wil_err(wil, "Link UP for invalid CID %d\n", cid); + if (vri >= ARRAY_SIZE(wil->vring_tx)) { + wil_err(wil, "Enable for invalid vring %d\n", vri); return; } - - wil->sta[cid].data_port_open = true; - if (agg_wsize >= 0) - wil_addba_tx_cid(wil, cid, agg_wsize); -} - -static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len) -{ - struct net_device *ndev = wil_to_ndev(wil); - struct wmi_wbe_link_down_event *evt = d; - u8 cid = evt->cid; - - wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n", - cid, le32_to_cpu(evt->reason)); - - if (cid >= ARRAY_SIZE(wil->sta)) { - wil_err(wil, "Link DOWN for invalid CID %d\n", cid); + wil->vring_tx_data[vri].dot1x_open = true; + if (vri == wil->bcast_vring) /* no BA for bcast */ return; - } - - wil->sta[cid].data_port_open = false; - netif_carrier_off(ndev); + if (agg_wsize >= 0) + wil_addba_tx_request(wil, vri, agg_wsize); } static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d, @@ -695,11 +662,10 @@ static const struct { {WMI_CONNECT_EVENTID, wmi_evt_connect}, {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect}, {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx}, - {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup}, - {WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown}, {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status}, {WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req}, {WMI_DELBA_EVENTID, wmi_evt_delba}, + {WMI_VRING_EN_EVENTID, wmi_evt_vring_en}, }; /* @@ -844,7 +810,7 @@ int wmi_echo(struct wil6210_priv *wil) }; return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd), - WMI_ECHO_RSP_EVENTID, NULL, 0, 20); + WMI_ECHO_RSP_EVENTID, NULL, 0, 50); } int wmi_set_mac_address(struct wil6210_priv *wil, void *addr) @@ -985,7 +951,7 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel) } int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, - const void *mac_addr) + const void *mac_addr, int key_usage) { struct wmi_delete_cipher_key_cmd cmd = { .key_index = key_index, @@ -998,11 +964,12 @@ int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, } int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, - const void *mac_addr, int key_len, const void *key) + const void *mac_addr, int key_len, const void *key, + int key_usage) { struct wmi_add_cipher_key_cmd cmd = { .key_index = key_index, - .key_usage = WMI_KEY_USE_PAIRWISE, + .key_usage = key_usage, .key_len = key_len, }; @@ -1238,7 +1205,8 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token, cid, tid, agg_wsize, timeout, status, amsdu ? 
"+" : "-"); rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, &cmd, sizeof(cmd), - WMI_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), 100); + WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), + 100); if (rc) return rc; diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index b29055315350..cc04ab73b398 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. * Copyright (c) 2006-2012 Wilocity . * * Permission to use, copy, modify, and/or distribute this software for any @@ -253,8 +253,8 @@ struct wmi_set_passphrase_cmd { */ enum wmi_key_usage { WMI_KEY_USE_PAIRWISE = 0, - WMI_KEY_USE_GROUP = 1, - WMI_KEY_USE_TX = 2, /* default Tx Key - Static WEP only */ + WMI_KEY_USE_RX_GROUP = 1, + WMI_KEY_USE_TX_GROUP = 2, }; struct wmi_add_cipher_key_cmd { @@ -835,6 +835,21 @@ struct wmi_temp_sense_cmd { __le32 measure_mode; } __packed; +/* + * WMI_PMC_CMDID + */ +enum wmi_pmc_op_e { + WMI_PMC_ALLOCATE = 0, + WMI_PMC_RELEASE = 1, +}; + +struct wmi_pmc_cmd { + u8 op; /* enum wmi_pmc_cmd_op_type */ + u8 reserved; + __le16 ring_size; + __le64 mem_base; +} __packed; + /* * WMI Events */ @@ -870,7 +885,7 @@ enum wmi_event_id { WMI_VRING_CFG_DONE_EVENTID = 0x1821, WMI_BA_STATUS_EVENTID = 0x1823, WMI_RCP_ADDBA_REQ_EVENTID = 0x1824, - WMI_ADDBA_RESP_SENT_EVENTID = 0x1825, + WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825, WMI_DELBA_EVENTID = 0x1826, WMI_GET_SSID_EVENTID = 0x1828, WMI_GET_PCP_CHANNEL_EVENTID = 0x182a, @@ -882,7 +897,7 @@ enum wmi_event_id { WMI_WRITE_MAC_TXQ_EVENTID = 0x1833, WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834, - WMI_BEAFORMING_MGMT_DONE_EVENTID = 0x1836, + WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836, WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837, WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839, WMI_RS_MGMT_DONE_EVENTID = 0x1852, @@ -894,11 +909,12 @@ enum wmi_event_id { /* Performance monitoring events */ WMI_DATA_PORT_OPEN_EVENTID = 0x1860, - WMI_WBE_LINKDOWN_EVENTID = 0x1861, + WMI_WBE_LINK_DOWN_EVENTID = 0x1861, WMI_BF_CTRL_DONE_EVENTID = 0x1862, WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863, WMI_GET_STATUS_DONE_EVENTID = 0x1864, + WMI_VRING_EN_EVENTID = 0x1865, WMI_UNIT_TEST_EVENTID = 0x1900, WMI_FLASH_READ_DONE_EVENTID = 0x1902, @@ -1147,7 +1163,7 @@ struct wmi_vring_cfg_done_event { } __packed; /* - * WMI_ADDBA_RESP_SENT_EVENTID + * WMI_RCP_ADDBA_RESP_SENT_EVENTID */ struct wmi_rcp_addba_resp_sent_event { u8 cidxtid; @@ -1179,7 +1195,7 @@ struct wmi_cfg_rx_chain_done_event { } __packed; /* - * WMI_WBE_LINKDOWN_EVENTID + * WMI_WBE_LINK_DOWN_EVENTID */ enum wmi_wbe_link_down_event_reason { WMI_WBE_REASON_USER_REQUEST = 0, @@ -1201,6 +1217,14 @@ struct wmi_data_port_open_event { u8 reserved[3]; } __packed; +/* + * WMI_VRING_EN_EVENTID + */ +struct wmi_vring_en_event { + u8 vring_index; + u8 reserved[3]; +} __packed; + /* * WMI_GET_PCP_CHANNEL_EVENTID */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index 8a69544485a9..71779b9e4bbe 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -1116,12 +1117,25 @@ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata; +static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev, + int val) +{ +#if 
IS_ENABLED(CONFIG_ACPI) + struct acpi_device *adev; + + adev = ACPI_COMPANION(dev); + if (adev) + adev->flags.power_manageable = val; +#endif +} + static int brcmf_ops_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { int err; struct brcmf_sdio_dev *sdiodev; struct brcmf_bus *bus_if; + struct device *dev; brcmf_dbg(SDIO, "Enter\n"); brcmf_dbg(SDIO, "Class=%x\n", func->class); @@ -1129,6 +1143,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device); brcmf_dbg(SDIO, "Function#: %d\n", func->num); + dev = &func->dev; + /* prohibit ACPI power management for this device */ + brcmf_sdiod_acpi_set_power_manageable(dev, 0); + /* Consume func num 1 but don't do anything with it. */ if (func->num == 1) return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c index 6fe2b7564cbf..e10fa67010c0 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c @@ -1296,7 +1296,7 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason) } clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state); cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0, - GFP_KERNEL); + true, GFP_KERNEL); } clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state); @@ -1962,7 +1962,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev, clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state); clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state); - cfg80211_disconnected(ndev, reason_code, NULL, 0, GFP_KERNEL); + cfg80211_disconnected(ndev, reason_code, NULL, 0, true, GFP_KERNEL); memcpy(&scbval.ea, &profile->bssid, ETH_ALEN); scbval.val = cpu_to_le32(reason_code);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c index 77656c711bed..26c65872dae3 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c @@ -22,17 +22,6 @@ #include "core.h" #include "commonring.h" - -/* dma flushing needs implementation for mips and arm platforms. Should - * be put in util. Note, this is not real flushing. It is virtual non - * cached memory. Only write buffers should have to be drained. Though - * this may be different depending on platform......
- * SEE ALSO msgbuf.c - */ -#define brcmf_dma_flush(addr, len) -#define brcmf_dma_invalidate_cache(addr, len) - - void brcmf_commonring_register_cb(struct brcmf_commonring *commonring, int (*cr_ring_bell)(void *ctx), int (*cr_update_rptr)(void *ctx), @@ -206,14 +195,9 @@ int brcmf_commonring_write_complete(struct brcmf_commonring *commonring) address = commonring->buf_addr; address += (commonring->f_ptr * commonring->item_len); if (commonring->f_ptr > commonring->w_ptr) { - brcmf_dma_flush(address, - (commonring->depth - commonring->f_ptr) * - commonring->item_len); address = commonring->buf_addr; commonring->f_ptr = 0; } - brcmf_dma_flush(address, (commonring->w_ptr - commonring->f_ptr) * - commonring->item_len); commonring->f_ptr = commonring->w_ptr; @@ -258,8 +242,6 @@ void *brcmf_commonring_get_read_ptr(struct brcmf_commonring *commonring, if (commonring->r_ptr == commonring->depth) commonring->r_ptr = 0; - brcmf_dma_invalidate_cache(ret_addr, *n_items * commonring->item_len); - return ret_addr; }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c index 8ff31ffa4a41..7ae6461df932 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c @@ -25,7 +25,7 @@ #define BRCMF_FW_MAX_NVRAM_SIZE 64000 #define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */ -#define BRCMF_FW_NVRAM_PCIEDEV_LEN 9 /* pcie/1/4/ */ +#define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */ char brcmf_firmware_path[BRCMF_FW_PATH_LEN]; module_param_string(firmware_path, brcmf_firmware_path, @@ -66,6 +66,12 @@ struct nvram_parser { bool multi_dev_v2; }; +/** + * is_nvram_char() - check if char is a valid one for NVRAM entry + * + * It accepts all printable ASCII chars except for '#' which opens a comment. + * Please note that ' ' (space) while accepted is not a valid key name char.
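+ * e.g. in "key=some value # comment", the key and the value are parsed
+ * normally while everything from '#' on is skipped as a comment.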
+ */ static bool is_nvram_char(char c) { /* comment marker excluded */ @@ -73,7 +79,7 @@ static bool is_nvram_char(char c) return false; /* key and value may have any other readable character */ - return (c > 0x20 && c < 0x7f); + return (c >= 0x20 && c < 0x7f); } static bool is_whitespace(char c) @@ -120,7 +126,7 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp) nvp->multi_dev_v1 = true; if (strncmp(&nvp->fwnv->data[nvp->entry], "pcie/", 5) == 0) nvp->multi_dev_v2 = true; - } else if (!is_nvram_char(c)) { + } else if (!is_nvram_char(c) || c == ' ') { brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n", nvp->line, nvp->column); return COMMENT; @@ -162,17 +168,20 @@ brcmf_nvram_handle_value(struct nvram_parser *nvp) static enum nvram_parser_state brcmf_nvram_handle_comment(struct nvram_parser *nvp) { - char *eol, *sol; + char *eoc, *sol; sol = (char *)&nvp->fwnv->data[nvp->pos]; - eol = strchr(sol, '\n'); - if (eol == NULL) - return END; + eoc = strchr(sol, '\n'); + if (!eoc) { + eoc = strchr(sol, '\0'); + if (!eoc) + return END; + } /* eat all moving to next line */ nvp->line++; nvp->column = 1; - nvp->pos += (eol - sol) + 1; + nvp->pos += (eoc - sol) + 1; return IDLE; } @@ -222,6 +231,10 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp, static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr, u16 bus_nr) { + /* Device path with a leading '=' key-value separator */ + char pcie_path[] = "=pcie/?/?"; + size_t pcie_len; + u32 i, j; bool found; u8 *nvram; @@ -238,6 +251,9 @@ static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr, /* First search for the devpathX and see if it is the configuration * for domain_nr/bus_nr. Search complete nvp */ + snprintf(pcie_path, sizeof(pcie_path), "=pcie/%d/%d", domain_nr, + bus_nr); + pcie_len = strlen(pcie_path); found = false; i = 0; while (i < nvp->nvram_len - BRCMF_FW_NVRAM_DEVPATH_LEN) { @@ -245,13 +261,10 @@ static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr, * Y = domain_nr, Z = bus_nr, X = virtual ID */ if ((strncmp(&nvp->nvram[i], "devpath", 7) == 0) && - (strncmp(&nvp->nvram[i + 8], "=pcie/", 6) == 0)) { - if (((nvp->nvram[i + 14] - '0') == domain_nr) && - ((nvp->nvram[i + 16] - '0') == bus_nr)) { - id = nvp->nvram[i + 7] - '0'; - found = true; - break; - } + (strncmp(&nvp->nvram[i + 8], pcie_path, pcie_len) == 0)) { + id = nvp->nvram[i + 7] - '0'; + found = true; + break; } while (nvp->nvram[i] != 0) i++; @@ -297,6 +310,8 @@ static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr, static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr, u16 bus_nr) { + char prefix[BRCMF_FW_NVRAM_PCIEDEV_LEN]; + size_t len; u32 i, j; u8 *nvram; @@ -308,14 +323,13 @@ static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr, * Valid entries are of type pcie/X/Y/ where X = domain_nr and * Y = bus_nr. 
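+	 * e.g. for domain_nr 0 and bus_nr 1, only entries starting with
+	 * "pcie/0/1/" are kept, and that prefix is stripped from them.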
*/ + snprintf(prefix, sizeof(prefix), "pcie/%d/%d/", domain_nr, bus_nr); + len = strlen(prefix); i = 0; j = 0; - while (i < nvp->nvram_len - BRCMF_FW_NVRAM_PCIEDEV_LEN) { - if ((strncmp(&nvp->nvram[i], "pcie/", 5) == 0) && - (nvp->nvram[i + 6] == '/') && (nvp->nvram[i + 8] == '/') && - ((nvp->nvram[i + 5] - '0') == domain_nr) && - ((nvp->nvram[i + 7] - '0') == bus_nr)) { - i += BRCMF_FW_NVRAM_PCIEDEV_LEN; + while (i < nvp->nvram_len - len) { + if (strncmp(&nvp->nvram[i], prefix, len) == 0) { + i += len; while (nvp->nvram[i] != 0) { nvram[j] = nvp->nvram[i]; i++; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c index eb1325371d3a..59440631fec5 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c @@ -249,8 +249,8 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid) } -void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, - struct sk_buff *skb) +u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, + struct sk_buff *skb) { struct brcmf_flowring_ring *ring; @@ -271,6 +271,7 @@ void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW) brcmf_flowring_block(flow, flowid, false); } + return skb_queue_len(&ring->skblist); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h index a34cd394c616..5551861a44bc 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h @@ -64,8 +64,8 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid); void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid); u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid); -void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, - struct sk_buff *skb); +u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, + struct sk_buff *skb); struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid); void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid, struct sk_buff *skb); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c index f0dda0ecd23b..5017eaa4af45 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c @@ -635,7 +635,7 @@ static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h, return 0; } -static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, +static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, u32 slot_id, struct sk_buff **pktout, bool remove_item) { diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c index 4ec9811f49c8..1b47de067d25 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c @@ -73,7 +73,7 @@ #define BRCMF_MSGBUF_TX_FLUSH_CNT1 32 #define BRCMF_MSGBUF_TX_FLUSH_CNT2 96 -#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 64 +#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 96 #define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32 struct msgbuf_common_hdr { @@ -278,16 +278,6 @@ struct brcmf_msgbuf_pktids { struct brcmf_msgbuf_pktid *array; }; - -/* dma flushing needs implementation for mips and arm platforms. Should - * be put in util. 
Note, this is not real flushing. It is virtual non - * cached memory. Only write buffers should have to be drained. Though - * this may be different depending on platform...... - */ -#define brcmf_dma_flush(addr, len) -#define brcmf_dma_invalidate_cache(addr, len) - - static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf); @@ -462,7 +452,6 @@ static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx, memcpy(msgbuf->ioctbuf, buf, buf_len); else memset(msgbuf->ioctbuf, 0, buf_len); - brcmf_dma_flush(ioctl_buf, buf_len); err = brcmf_commonring_write_complete(commonring); brcmf_commonring_unlock(commonring); @@ -511,11 +500,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx, msgbuf->rx_pktids, msgbuf->ioctl_resp_pktid); if (msgbuf->ioctl_resp_ret_len != 0) { - if (!skb) { - brcmf_err("Invalid packet id idx recv'd %d\n", - msgbuf->ioctl_resp_pktid); + if (!skb) return -EBADF; - } + memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? len : msgbuf->ioctl_resp_ret_len); } @@ -797,6 +784,8 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx, struct brcmf_flowring *flow = msgbuf->flow; struct ethhdr *eh = (struct ethhdr *)(skb->data); u32 flowid; + u32 queue_count; + bool force; flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx); if (flowid == BRCMF_FLOWRING_INVALID_ID) { @@ -804,8 +793,9 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx, if (flowid == BRCMF_FLOWRING_INVALID_ID) return -ENOMEM; } - brcmf_flowring_enqueue(flow, flowid, skb); - brcmf_msgbuf_schedule_txdata(msgbuf, flowid, false); + queue_count = brcmf_flowring_enqueue(flow, flowid, skb); + force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0); + brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force); return 0; } @@ -874,10 +864,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf) flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, msgbuf->tx_pktids, idx); - if (!skb) { - brcmf_err("Invalid packet id idx recv'd %d\n", idx); + if (!skb) return; - } set_bit(flowid, msgbuf->txstatus_done_map); commonring = msgbuf->flowrings[flowid]; @@ -1156,6 +1144,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, msgbuf->rx_pktids, idx); + if (!skb) + return; if (data_offset) skb_pull(skb, data_offset); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c index c824570ddea3..03f35e0c52ca 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c @@ -39,10 +39,16 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev) if (!sdiodev->pdata) return; + if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0) + sdiodev->pdata->drive_strength = val; + + /* make sure there are interrupts defined in the node */ + if (!of_find_property(np, "interrupts", NULL)) + return; + irq = irq_of_parse_and_map(np, 0); if (!irq) { brcmf_err("interrupt could not be mapped\n"); - devm_kfree(dev, sdiodev->pdata); return; } irqf = irqd_get_trigger_type(irq_get_irq_data(irq)); @@ -50,7 +56,4 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev) sdiodev->pdata->oob_irq_supported = true; sdiodev->pdata->oob_irq_nr = irq; sdiodev->pdata->oob_irq_flags = irqf; - - if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0) - sdiodev->pdata->drive_strength = val; } diff --git 
a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c index 79ca24e6d2c5..37a2624d7bba 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c @@ -112,10 +112,11 @@ enum brcmf_pcie_state { BRCMF_PCIE_MB_INT_D2H3_DB0 | \ BRCMF_PCIE_MB_INT_D2H3_DB1) -#define BRCMF_PCIE_MIN_SHARED_VERSION 4 +#define BRCMF_PCIE_MIN_SHARED_VERSION 5 #define BRCMF_PCIE_MAX_SHARED_VERSION 5 #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF -#define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT 0x4000 +#define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000 +#define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000 @@ -147,6 +148,10 @@ enum brcmf_pcie_state { #define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8 #define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12 #define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16 +#define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET 20 +#define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET 28 +#define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET 36 +#define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET 44 #define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0 #define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52 @@ -248,6 +253,13 @@ struct brcmf_pciedev_info { bool mbdata_completed; bool irq_allocated; bool wowl_enabled; + u8 dma_idx_sz; + void *idxbuf; + u32 idxbuf_sz; + dma_addr_t idxbuf_dmahandle; + u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset); + void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset, + u16 value); }; struct brcmf_pcie_ringbuf { @@ -277,15 +289,6 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = { }; -/* dma flushing needs implementation for mips and arm platforms. Should - * be put in util. Note, this is not real flushing. It is virtual non - * cached memory. Only write buffers should have to be drained. Though - * this may be different depending on platform...... 
- */ -#define brcmf_dma_flush(addr, len) -#define brcmf_dma_invalidate_cache(addr, len) - - static u32 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset) { @@ -333,6 +336,25 @@ brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset, } +static u16 +brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset) +{ + u16 *address = devinfo->idxbuf + mem_offset; + + return (*(address)); +} + + +static void +brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset, + u16 value) +{ + u16 *address = devinfo->idxbuf + mem_offset; + + *(address) = value; +} + + static u32 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset) { @@ -878,7 +900,7 @@ static int brcmf_pcie_ring_mb_write_rptr(void *ctx) brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr, commonring->w_ptr, ring->id); - brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr); + devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr); return 0; } @@ -896,7 +918,7 @@ static int brcmf_pcie_ring_mb_write_wptr(void *ctx) brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr, commonring->r_ptr, ring->id); - brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr); + devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr); return 0; } @@ -925,7 +947,7 @@ static int brcmf_pcie_ring_mb_update_rptr(void *ctx) if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) return -EIO; - commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr); + commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr); brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr, commonring->w_ptr, ring->id); @@ -943,7 +965,7 @@ static int brcmf_pcie_ring_mb_update_wptr(void *ctx) if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) return -EIO; - commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr); + commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr); brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr, commonring->r_ptr, ring->id); @@ -1048,6 +1070,13 @@ static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo) } kfree(devinfo->shared.flowrings); devinfo->shared.flowrings = NULL; + if (devinfo->idxbuf) { + dma_free_coherent(&devinfo->pdev->dev, + devinfo->idxbuf_sz, + devinfo->idxbuf, + devinfo->idxbuf_dmahandle); + devinfo->idxbuf = NULL; + } } @@ -1063,19 +1092,72 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) u32 addr; u32 ring_mem_ptr; u32 i; + u64 address; + u32 bufsz; u16 max_sub_queues; + u8 idx_offset; ring_addr = devinfo->shared.ring_info_addr; brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr); + addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES; + max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr); + + if (devinfo->dma_idx_sz != 0) { + bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) * + devinfo->dma_idx_sz * 2; + devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz, + &devinfo->idxbuf_dmahandle, + GFP_KERNEL); + if (!devinfo->idxbuf) + devinfo->dma_idx_sz = 0; + } - addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET; - d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); - addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET; - d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); - addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET; - h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); - addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET; - 
h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); + if (devinfo->dma_idx_sz == 0) { + addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET; + d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); + addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET; + d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); + addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET; + h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); + addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET; + h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); + idx_offset = sizeof(u32); + devinfo->write_ptr = brcmf_pcie_write_tcm16; + devinfo->read_ptr = brcmf_pcie_read_tcm16; + brcmf_dbg(PCIE, "Using TCM indices\n"); + } else { + memset(devinfo->idxbuf, 0, bufsz); + devinfo->idxbuf_sz = bufsz; + idx_offset = devinfo->dma_idx_sz; + devinfo->write_ptr = brcmf_pcie_write_idx; + devinfo->read_ptr = brcmf_pcie_read_idx; + + h2d_w_idx_ptr = 0; + addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET; + address = (u64)devinfo->idxbuf_dmahandle; + brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); + brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); + + h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset; + addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET; + address += max_sub_queues * idx_offset; + brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); + brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); + + d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset; + addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET; + address += max_sub_queues * idx_offset; + brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); + brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); + + d2h_r_idx_ptr = d2h_w_idx_ptr + + BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset; + addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET; + address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset; + brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); + brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); + brcmf_dbg(PCIE, "Using host memory indices\n"); + } addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET; ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr); @@ -1089,8 +1171,8 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) ring->id = i; devinfo->shared.commonrings[i] = ring; - h2d_w_idx_ptr += sizeof(u32); - h2d_r_idx_ptr += sizeof(u32); + h2d_w_idx_ptr += idx_offset; + h2d_r_idx_ptr += idx_offset; ring_mem_ptr += BRCMF_RING_MEM_SZ; } @@ -1104,13 +1186,11 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) ring->id = i; devinfo->shared.commonrings[i] = ring; - d2h_w_idx_ptr += sizeof(u32); - d2h_r_idx_ptr += sizeof(u32); + d2h_w_idx_ptr += idx_offset; + d2h_r_idx_ptr += idx_offset; ring_mem_ptr += BRCMF_RING_MEM_SZ; } - addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES; - max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr); devinfo->shared.nrof_flowrings = max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS; rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring), @@ -1134,15 +1214,15 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) ring); ring->w_idx_addr = h2d_w_idx_ptr; ring->r_idx_addr = h2d_r_idx_ptr; - h2d_w_idx_ptr += sizeof(u32); - h2d_r_idx_ptr += sizeof(u32); + h2d_w_idx_ptr += idx_offset; + h2d_r_idx_ptr += idx_offset; } devinfo->shared.flowrings = rings; return 0; fail: - brcmf_err("Allocating commonring buffers failed\n"); + brcmf_err("Allocating 
ring buffers failed\n"); brcmf_pcie_release_ringbuffers(devinfo); return -ENOMEM; } @@ -1175,7 +1255,6 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) goto fail; memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); - brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); addr = devinfo->shared.tcm_base_address + BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET; @@ -1193,7 +1272,6 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) goto fail; memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN); - brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN); addr = devinfo->shared.tcm_base_address + BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET; @@ -1280,10 +1358,13 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo, brcmf_err("Unsupported PCIE version %d\n", version); return -EINVAL; } - if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) { - brcmf_err("Unsupported legacy TX mode 0x%x\n", - shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT); - return -EINVAL; + + /* check if firmware supports dma indices */ + if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) { + if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX) + devinfo->dma_idx_sz = sizeof(u16); + else + devinfo->dma_idx_sz = sizeof(u32); } addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET; diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index 99f9760fc11e..aba095761ac6 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -22,6 +22,7 @@ config IWLWIFI Intel 3160 Wi-Fi Adapter Intel 7265 Wi-Fi Adapter Intel 8260 Wi-Fi Adapter + Intel 3165 Wi-Fi Adapter This driver uses the kernel's mac80211 subsystem.
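The brcmfmac PCIe hunks above replace direct TCM reads/writes of the ring read/write pointers with devinfo->read_ptr/write_ptr hooks, so the indices can live either in device TCM or in one host DMA-coherent buffer when the firmware advertises BRCMF_PCIE_SHARED_DMA_INDEX. That buffer packs four regions back to back: H2D write pointers, H2D read pointers, D2H write pointers, D2H read pointers. A minimal standalone sketch of the offset arithmetic (plain C, not driver code; the max_sub_queues value of 40 is illustrative, the real one is read from shared RAM at runtime):

#include <stdint.h>
#include <stdio.h>

#define NROF_D2H_COMMON_MSGRINGS 3	/* control-, TX- and RX-complete rings */

static void layout(uint32_t max_sub_queues, uint32_t idx_sz)
{
	/* Region start offsets inside one dma_alloc_coherent() buffer,
	 * in the order the *_HADDR fields are programmed above.
	 */
	uint32_t h2d_w = 0;
	uint32_t h2d_r = h2d_w + max_sub_queues * idx_sz;
	uint32_t d2h_w = h2d_r + max_sub_queues * idx_sz;
	uint32_t d2h_r = d2h_w + NROF_D2H_COMMON_MSGRINGS * idx_sz;
	/* Same total as bufsz in brcmf_pcie_init_ringbuffers() */
	uint32_t bufsz = (NROF_D2H_COMMON_MSGRINGS + max_sub_queues) *
			 idx_sz * 2;

	printf("idx_sz=%u: h2d_w=%u h2d_r=%u d2h_w=%u d2h_r=%u bufsz=%u\n",
	       idx_sz, h2d_w, h2d_r, d2h_w, d2h_r, bufsz);
}

int main(void)
{
	layout(40, 2);	/* BRCMF_PCIE_SHARED_DMA_2B_IDX set: u16 indices */
	layout(40, 4);	/* otherwise: u32 indices */
	return 0;
}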
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile index 3d32f4120174..dbfc5b18bcb7 100644 --- a/drivers/net/wireless/iwlwifi/Makefile +++ b/drivers/net/wireless/iwlwifi/Makefile @@ -9,6 +9,7 @@ iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o +iwlwifi-objs += iwl-trans.o iwlwifi-objs += $(iwlwifi-m) diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index ba7fc42edf97..852461ffe98f 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -112,6 +112,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, IEEE80211_HW_QUEUE_CONTROL | IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS | + IEEE80211_HW_SUPPORT_FAST_XMIT | IEEE80211_HW_WANT_MONITOR_VIF; hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE; diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 69b2c0b733ad..cc35f796d406 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -69,16 +69,15 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 13 -#define IWL3160_UCODE_API_MAX 13 +#define IWL7260_UCODE_API_MAX 15 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 12 -#define IWL3160_UCODE_API_OK 12 +#define IWL3165_UCODE_API_OK 13 /* Lowest firmware API version supported */ #define IWL7260_UCODE_API_MIN 10 -#define IWL3160_UCODE_API_MIN 10 +#define IWL3165_UCODE_API_MIN 13 /* NVM versions */ #define IWL7260_NVM_VERSION 0x0a1d @@ -104,9 +103,6 @@ #define IWL3160_FW_PRE "iwlwifi-3160-" #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" -#define IWL3165_FW_PRE "iwlwifi-3165-" -#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode" - #define IWL7265_FW_PRE "iwlwifi-7265-" #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" @@ -271,8 +267,13 @@ static const struct iwl_ht_params iwl7265_ht_params = { const struct iwl_cfg iwl3165_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 3165", - .fw_name_pre = IWL3165_FW_PRE, + .fw_name_pre = IWL7265D_FW_PRE, IWL_DEVICE_7000, + /* sparse doesn't like the re-assignment but it is safe */ +#ifndef __CHECKER__ + .ucode_api_ok = IWL3165_UCODE_API_OK, + .ucode_api_min = IWL3165_UCODE_API_MIN, +#endif .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3165_NVM_VERSION, .nvm_calib_ver = IWL3165_TX_POWER_VERSION, @@ -348,6 +349,5 @@ const struct iwl_cfg iwl7265d_n_cfg = { MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); -MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index ce6321b7d241..72040cd0b979 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH + * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2014 Intel Mobile Communications GmbH + * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -69,7 +69,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 13 +#define IWL8000_UCODE_API_MAX 15 /* Oldest version we won't warn about */ #define IWL8000_UCODE_API_OK 12 @@ -122,24 +122,49 @@ static const struct iwl_ht_params iwl8000_ht_params = { .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), }; -#define IWL_DEVICE_8000 \ - .ucode_api_max = IWL8000_UCODE_API_MAX, \ - .ucode_api_ok = IWL8000_UCODE_API_OK, \ - .ucode_api_min = IWL8000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_8000, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .base_params = &iwl8000_base_params, \ - .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ - .d0i3 = true, \ - .non_shared_ant = ANT_A, \ - .dccm_offset = IWL8260_DCCM_OFFSET, \ - .dccm_len = IWL8260_DCCM_LEN, \ - .dccm2_offset = IWL8260_DCCM2_OFFSET, \ - .dccm2_len = IWL8260_DCCM2_LEN, \ - .smem_offset = IWL8260_SMEM_OFFSET, \ - .smem_len = IWL8260_SMEM_LEN +static const struct iwl_tt_params iwl8000_tt_params = { + .ct_kill_entry = 115, + .ct_kill_exit = 93, + .ct_kill_duration = 5, + .dynamic_smps_entry = 111, + .dynamic_smps_exit = 107, + .tx_protection_entry = 112, + .tx_protection_exit = 105, + .tx_backoff = { + {.temperature = 110, .backoff = 200}, + {.temperature = 111, .backoff = 600}, + {.temperature = 112, .backoff = 1200}, + {.temperature = 113, .backoff = 2000}, + {.temperature = 114, .backoff = 4000}, + }, + .support_ct_kill = true, + .support_dynamic_smps = true, + .support_tx_protection = true, + .support_tx_backoff = true, +}; + +#define IWL_DEVICE_8000 \ + .ucode_api_max = IWL8000_UCODE_API_MAX, \ + .ucode_api_ok = IWL8000_UCODE_API_OK, \ + .ucode_api_min = IWL8000_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_8000, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .base_params = &iwl8000_base_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ + .d0i3 = true, \ + .non_shared_ant = ANT_A, \ + .dccm_offset = IWL8260_DCCM_OFFSET, \ + .dccm_len = IWL8260_DCCM_LEN, \ + .dccm2_offset = IWL8260_DCCM2_OFFSET, \ + .dccm2_len = IWL8260_DCCM2_LEN, \ + .smem_offset = IWL8260_SMEM_OFFSET, \ + .smem_len = IWL8260_SMEM_LEN, \ + .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, \ + .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \ + .thermal_params = &iwl8000_tt_params, \ + .apmg_not_supported = true const struct iwl_cfg iwl8260_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 8260", @@ -177,8 +202,6 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = { .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, .nvm_calib_ver = IWL8000_TX_POWER_VERSION, - .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, - .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, 
.disable_dummy_notification = true, .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO, @@ -192,8 +215,6 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = { .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, .nvm_calib_ver = IWL8000_TX_POWER_VERSION, - .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, - .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, .bt_shared_single_ant = true, .disable_dummy_notification = true, diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 225b6d6b8573..08c14afeb148 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -360,6 +360,7 @@ struct iwl_cfg { const u32 smem_offset; const u32 smem_len; const struct iwl_tt_params *thermal_params; + bool apmg_not_supported; }; /* diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h index 223b8752f924..948ce0802fa7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -64,19 +65,21 @@ TRACE_EVENT(iwlwifi_dev_hcmd, TRACE_EVENT(iwlwifi_dev_rx, TP_PROTO(const struct device *dev, const struct iwl_trans *trans, - void *rxbuf, size_t len), - TP_ARGS(dev, trans, rxbuf, len), + struct iwl_rx_packet *pkt, size_t len), + TP_ARGS(dev, trans, pkt, len), TP_STRUCT__entry( DEV_ENTRY - __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len)) + __field(u8, cmd) + __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len)) ), TP_fast_assign( DEV_ASSIGN; - memcpy(__get_dynamic_array(rxbuf), rxbuf, - iwl_rx_trace_len(trans, rxbuf, len)); + __entry->cmd = pkt->hdr.cmd; + memcpy(__get_dynamic_array(rxbuf), pkt, + iwl_rx_trace_len(trans, pkt, len)); ), TP_printk("[%s] RX cmd %#.2x", - __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4]) + __get_str(dev), __entry->cmd) ); TRACE_EVENT(iwlwifi_dev_tx, diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 7267152e7dc7..6685259927f8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -423,13 +423,19 @@ static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, { const struct iwl_ucode_api *ucode_api = (void *)data; u32 api_index = le32_to_cpu(ucode_api->api_index); + u32 api_flags = le32_to_cpu(ucode_api->api_flags); + int i; - if (api_index >= IWL_API_ARRAY_SIZE) { + if (api_index >= IWL_API_MAX_BITS / 32) { IWL_ERR(drv, "api_index larger than supported by driver\n"); - return -EINVAL; + /* don't return an error so we can load FW that has more bits */ + return 0; } - capa->api[api_index] = le32_to_cpu(ucode_api->api_flags); + for (i = 0; i < 32; i++) { + if (api_flags & BIT(i)) + __set_bit(i + 32 * api_index, capa->_api); + } return 0; } @@ -439,13 +445,19 @@ static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, { const struct iwl_ucode_capa *ucode_capa = (void *)data; u32 api_index = le32_to_cpu(ucode_capa->api_index); + u32 api_flags = le32_to_cpu(ucode_capa->api_capa); + int i; - if (api_index >= IWL_CAPABILITIES_ARRAY_SIZE) { + if (api_index >= IWL_CAPABILITIES_MAX_BITS / 32) { IWL_ERR(drv, "api_index larger than supported by driver\n"); - return -EINVAL; + /* don't return an error so we can load FW that has more bits */ + return 0; } - capa->capa[api_index] = le32_to_cpu(ucode_capa->api_capa); + for (i = 0; i < 32; i++) { + if (api_flags & BIT(i)) + __set_bit(i + 32 * api_index, capa->_capa); + } return 0; } @@ -1148,7 +1160,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) if (err) goto try_again; - if (drv->fw.ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION) + if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) api_ver = drv->fw.ucode_ver; else api_ver = IWL_UCODE_API(drv->fw.ucode_ver); @@ -1239,6 +1251,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) sizeof(struct iwl_fw_dbg_trigger_txq_timer); trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] = sizeof(struct iwl_fw_dbg_trigger_time_event); + trigger_tlv_sz[FW_DBG_TRIGGER_BA] = + sizeof(struct iwl_fw_dbg_trigger_ba); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) { if (pieces->dbg_trigger_tlv[i]) { diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c index 41ff85de7334..21302b6f2bfd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -748,6 +750,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, return; } + if (data->sku_cap_mimo_disabled) + rx_chains = 1; + ht_info->ht_supported = true; ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40; diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h index 5234a0bf11e4..750c8c9ee70d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -84,6 +86,7 @@ struct iwl_nvm_data { bool sku_cap_11ac_enable; bool sku_cap_amt_enable; bool sku_cap_ipan_enable; + bool sku_cap_mimo_disabled; u16 radio_cfg_type; u8 radio_cfg_step; diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h index d45dc021cda2..d56064861a9c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-fh.h @@ -438,12 +438,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define RX_QUEUE_MASK 255 #define RX_QUEUE_SIZE_LOG 8 -/* - * RX related structures and functions - */ -#define RX_FREE_BUFFERS 64 -#define RX_LOW_WATERMARK 8 - /** * struct iwl_rb_status - reserve buffer status * host memory mapped FH registers diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h index 251bf8dc4a12..e57dbd0ef2e1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2014 Intel Mobile Communications GmbH + * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2014 Intel Mobile Communications GmbH + * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -254,6 +254,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) * detection. * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related * events. + * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events. 
*/ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_INVALID = 0, @@ -267,6 +268,7 @@ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_RSSI, FW_DBG_TRIGGER_TXQ_TIMERS, FW_DBG_TRIGGER_TIME_EVENT, + FW_DBG_TRIGGER_BA, /* must be last */ FW_DBG_TRIGGER_MAX, diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index c7cfc38a2644..a9b5ae4ebec0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -237,6 +237,8 @@ enum iwl_ucode_tlv_flag { IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30), }; +typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t; + /** * enum iwl_ucode_tlv_api - ucode api * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex @@ -255,22 +257,27 @@ enum iwl_ucode_tlv_flag { * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params * @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10 * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format + * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority + * instead of 3. */ enum iwl_ucode_tlv_api { - IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3), - IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), - IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9), - IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10), - IWL_UCODE_TLV_API_TX_POWER_DEV = BIT(11), - IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), - IWL_UCODE_TLV_API_SCD_CFG = BIT(15), - IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16), - IWL_UCODE_TLV_API_ASYNC_DTM = BIT(17), - IWL_UCODE_TLV_API_LQ_SS_PARAMS = BIT(18), - IWL_UCODE_TLV_API_STATS_V10 = BIT(19), - IWL_UCODE_TLV_API_NEW_VERSION = BIT(20), + IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3, + IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8, + IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9, + IWL_UCODE_TLV_API_HDC_PHASE_0 = (__force iwl_ucode_tlv_api_t)10, + IWL_UCODE_TLV_API_TX_POWER_DEV = (__force iwl_ucode_tlv_api_t)11, + IWL_UCODE_TLV_API_BASIC_DWELL = (__force iwl_ucode_tlv_api_t)13, + IWL_UCODE_TLV_API_SCD_CFG = (__force iwl_ucode_tlv_api_t)15, + IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = (__force iwl_ucode_tlv_api_t)16, + IWL_UCODE_TLV_API_ASYNC_DTM = (__force iwl_ucode_tlv_api_t)17, + IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18, + IWL_UCODE_TLV_API_STATS_V10 = (__force iwl_ucode_tlv_api_t)19, + IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, + IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24, }; +typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; + /** * enum iwl_ucode_tlv_capa - ucode capabilities * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3 @@ -290,6 +297,7 @@ enum iwl_ucode_tlv_api { * which also implies support for the scheduler configuration command * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command + * @IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT: supports DC2DC Command * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different @@ -299,22 +307,23 @@ enum iwl_ucode_tlv_api { * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC */ enum iwl_ucode_tlv_capa { - IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), - IWL_UCODE_TLV_CAPA_LAR_SUPPORT = BIT(1), - IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2), - IWL_UCODE_TLV_CAPA_BEAMFORMER = BIT(3), - IWL_UCODE_TLV_CAPA_TDLS_SUPPORT =
BIT(6), - IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8), - IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9), - IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10), - IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11), - IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12), - IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13), - IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18), - IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = BIT(22), - IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = BIT(28), - IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = BIT(29), - IWL_UCODE_TLV_CAPA_BT_COEX_RRC = BIT(30), + IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1, + IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2, + IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3, + IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6, + IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8, + IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9, + IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)10, + IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11, + IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12, + IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13, + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18, + IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19, + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, + IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28, + IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, + IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, }; /* The default calibrate table size if not specified by firmware file */ @@ -325,13 +334,14 @@ enum iwl_ucode_tlv_capa { /* The default max probe length if not specified by the firmware file */ #define IWL_DEFAULT_MAX_PROBE_LENGTH 200 +#define IWL_API_MAX_BITS 64 +#define IWL_CAPABILITIES_MAX_BITS 64 + /* * For 16.0 uCode and above, there is no differentiation between sections, * just an offset to the HW address. */ #define IWL_UCODE_SECTION_MAX 12 -#define IWL_API_ARRAY_SIZE 1 -#define IWL_CAPABILITIES_ARRAY_SIZE 1 #define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC /* uCode version contains 4 values: Major/Minor/API/Serial */ @@ -424,11 +434,13 @@ struct iwl_fw_dbg_reg_op { * @SMEM_MODE: monitor stores the data in SMEM * @EXTERNAL_MODE: monitor stores the data in allocated DRAM * @MARBH_MODE: monitor stores the data in MARBH buffer + * @MIPI_MODE: monitor outputs the data through the MIPI interface */ enum iwl_fw_dbg_monitor_mode { SMEM_MODE = 0, EXTERNAL_MODE = 1, MARBH_MODE = 2, + MIPI_MODE = 3, }; /** @@ -660,6 +672,33 @@ struct iwl_fw_dbg_trigger_time_event { } __packed time_events[16]; } __packed; +/** + * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger + * rx_ba_start: tid bitmap to configure on what tid the trigger should occur + * when an Rx BlockAck session is started. + * rx_ba_stop: tid bitmap to configure on what tid the trigger should occur + * when an Rx BlockAck session is stopped. + * tx_ba_start: tid bitmap to configure on what tid the trigger should occur + * when a Tx BlockAck session is started. + * tx_ba_stop: tid bitmap to configure on what tid the trigger should occur + * when a Tx BlockAck session is stopped. 
+ * rx_bar: tid bitmap to configure on what tid the trigger should occur + * when a BAR is received (for a Tx BlockAck session). + * tx_bar: tid bitmap to configure on what tid the trigger should occur + * when a BAR is sent (for an Rx BlockAck session). + * frame_timeout: tid bitmap to configure on what tid the trigger should occur + * when a frame times out in the reordering buffer. + */ +struct iwl_fw_dbg_trigger_ba { + __le16 rx_ba_start; + __le16 rx_ba_stop; + __le16 tx_ba_start; + __le16 tx_ba_stop; + __le16 rx_bar; + __le16 tx_bar; + __le16 frame_timeout; +} __packed; + /** * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration. * @id: conf id diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index cf75bafae51d..3e3c9d8b3c37 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -105,10 +105,24 @@ struct iwl_ucode_capabilities { u32 n_scan_channels; u32 standard_phy_calibration_size; u32 flags; - u32 api[IWL_API_ARRAY_SIZE]; - u32 capa[IWL_CAPABILITIES_ARRAY_SIZE]; + unsigned long _api[BITS_TO_LONGS(IWL_API_MAX_BITS)]; + unsigned long _capa[BITS_TO_LONGS(IWL_CAPABILITIES_MAX_BITS)]; }; +static inline bool +fw_has_api(const struct iwl_ucode_capabilities *capabilities, + iwl_ucode_tlv_api_t api) +{ + return test_bit((__force long)api, capabilities->_api); +} + +static inline bool +fw_has_capa(const struct iwl_ucode_capabilities *capabilities, + iwl_ucode_tlv_capa_t capa) +{ + return test_bit((__force long)capa, capabilities->_capa); +} + /* one for each uCode image (inst/data, init/runtime/wowlan) */ struct fw_desc { const void *data; /* vmalloc'ed data */ @@ -205,6 +219,8 @@ static inline const char *get_fw_dbg_mode_string(int mode) return "EXTERNAL_DRAM"; case MARBH_MODE: return "MARBH"; + case MIPI_MODE: + return "MIPI"; default: return "UNKNOWN"; } diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 0b5a81d52a3e..80fefe7d7b8c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved.
* * Redistribution and use in source and binary forms, with or without @@ -116,10 +116,11 @@ enum family_8000_nvm_offsets { /* SKU Capabilities (actual values from NVM definition) */ enum nvm_sku_bits { - NVM_SKU_CAP_BAND_24GHZ = BIT(0), - NVM_SKU_CAP_BAND_52GHZ = BIT(1), - NVM_SKU_CAP_11N_ENABLE = BIT(2), - NVM_SKU_CAP_11AC_ENABLE = BIT(3), + NVM_SKU_CAP_BAND_24GHZ = BIT(0), + NVM_SKU_CAP_BAND_52GHZ = BIT(1), + NVM_SKU_CAP_11N_ENABLE = BIT(2), + NVM_SKU_CAP_11AC_ENABLE = BIT(3), + NVM_SKU_CAP_MIMO_DISABLE = BIT(5), }; /* @@ -368,6 +369,11 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, if (cfg->ht_params->ldpc) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; + if (data->sku_cap_mimo_disabled) { + num_rx_ants = 1; + num_tx_ants = 1; + } + if (num_tx_ants > 1) vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; else @@ -465,7 +471,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, if (cfg->device_family != IWL_DEVICE_FAMILY_8000) return le16_to_cpup(nvm_sw + RADIO_CFG); - return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000)); + return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000)); } @@ -527,6 +533,10 @@ static void iwl_set_hw_address_family_8000(struct device *dev, const u8 *hw_addr; if (mac_override) { + static const u8 reserved_mac[] = { + 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 + }; + hw_addr = (const u8 *)(mac_override + MAC_ADDRESS_OVERRIDE_FAMILY_8000); @@ -538,7 +548,12 @@ static void iwl_set_hw_address_family_8000(struct device *dev, data->hw_addr[4] = hw_addr[5]; data->hw_addr[5] = hw_addr[4]; - if (is_valid_ether_addr(data->hw_addr)) + /* + * Force the use of the OTP MAC address in case of reserved MAC + * address in the NVM, or if address is given but invalid. + */ + if (is_valid_ether_addr(data->hw_addr) && + memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0) return; IWL_ERR_DEV(dev, @@ -610,6 +625,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, data->sku_cap_11n_enable = false; data->sku_cap_11ac_enable = data->sku_cap_11n_enable && (sku & NVM_SKU_CAP_11AC_ENABLE); + data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE; data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c new file mode 100644 index 000000000000..9f8bcefc04c5 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-trans.c @@ -0,0 +1,113 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. 
+ * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include <linux/kernel.h> +#include "iwl-trans.h" + +struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, + struct device *dev, + const struct iwl_cfg *cfg, + const struct iwl_trans_ops *ops, + size_t dev_cmd_headroom) +{ + struct iwl_trans *trans; +#ifdef CONFIG_LOCKDEP + static struct lock_class_key __key; +#endif + + trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL); + if (!trans) + return NULL; + +#ifdef CONFIG_LOCKDEP + lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", + &__key, 0); +#endif + + trans->dev = dev; + trans->cfg = cfg; + trans->ops = ops; + trans->dev_cmd_headroom = dev_cmd_headroom; + + snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), + "iwl_cmd_pool:%s", dev_name(trans->dev)); + trans->dev_cmd_pool = + kmem_cache_create(trans->dev_cmd_pool_name, + sizeof(struct iwl_device_cmd) + + trans->dev_cmd_headroom, + sizeof(void *), + SLAB_HWCACHE_ALIGN, + NULL); + if (!trans->dev_cmd_pool) + goto free; + + return trans; + free: + kfree(trans); + return NULL; +} + +void iwl_trans_free(struct iwl_trans *trans) +{ + kmem_cache_destroy(trans->dev_cmd_pool); + kfree(trans); +} diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 56254a837214..87a230a7f4b6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -641,6 +641,8 @@ struct iwl_trans { enum iwl_d0i3_mode d0i3_mode; + bool wowlan_d0i3; + /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ char trans_specific[0] __aligned(sizeof(void *)); @@ -1010,20 +1012,20 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
iwl_op_mode_nic_error(trans->op_mode); } +/***************************************************** + * transport helper functions + *****************************************************/ +struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, + struct device *dev, + const struct iwl_cfg *cfg, + const struct iwl_trans_ops *ops, + size_t dev_cmd_headroom); +void iwl_trans_free(struct iwl_trans *trans); + /***************************************************** * driver (transport) register/unregister functions ******************************************************/ int __must_check iwl_pci_register_driver(void); void iwl_pci_unregister_driver(void); -static inline void trans_lockdep_init(struct iwl_trans *trans) -{ -#ifdef CONFIG_LOCKDEP - static struct lock_class_key __key; - - lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", - &__key, 0); -#endif -} - #endif /* __iwl_trans_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 13a0a03158de..b4737e296c92 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -408,23 +408,12 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) int iwl_send_bt_init_conf(struct iwl_mvm *mvm) { - struct iwl_bt_coex_cmd *bt_cmd; - struct iwl_host_cmd cmd = { - .id = BT_CONFIG, - .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - int ret; + struct iwl_bt_coex_cmd bt_cmd = {}; u32 mode; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_send_bt_init_conf_old(mvm); - bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); - if (!bt_cmd) - return -ENOMEM; - cmd.data[0] = bt_cmd; - lockdep_assert_held(&mvm->mutex); if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { @@ -440,36 +429,33 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) mode = 0; } - bt_cmd->mode = cpu_to_le32(mode); + bt_cmd.mode = cpu_to_le32(mode); goto send_cmd; } mode = iwlwifi_mod_params.bt_coex_active ? 
BT_COEX_NW : BT_COEX_DISABLE; - bt_cmd->mode = cpu_to_le32(mode); + bt_cmd.mode = cpu_to_le32(mode); if (IWL_MVM_BT_COEX_SYNC2SCO) - bt_cmd->enabled_modules |= + bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED); if (iwl_mvm_bt_is_plcr_supported(mvm)) - bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED); + bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED); if (IWL_MVM_BT_COEX_MPLUT) { - bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED); - bt_cmd->enabled_modules |= + bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED); + bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED); } - bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET); + bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET); send_cmd: memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); - ret = iwl_mvm_send_cmd(mvm, &cmd); - - kfree(bt_cmd); - return ret; + return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd); } static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, @@ -746,7 +732,7 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd); IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); @@ -770,52 +756,14 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, return 0; } -static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - - struct ieee80211_chanctx_conf *chanctx_conf; - - rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); - /* If channel context is invalid or not on 2.4GHz - don't count it */ - if (!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { - rcu_read_unlock(); - return; - } - rcu_read_unlock(); - - if (vif->type != NL80211_IFTYPE_STATION || - mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) - return; - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], - lockdep_is_held(&mvm->mutex)); - - /* This can happen if the station has been removed right now */ - if (IS_ERR_OR_NULL(sta)) - return; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); -} - void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event_data rssi_event) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data data = { - .mvm = mvm, - }; int ret; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event); return; } @@ -853,10 +801,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (ret) IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n"); - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bt_rssi_iterator, &data); } #define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) @@ -870,7 +814,7 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *phy_ctxt = 
mvmvif->phy_ctxt; enum iwl_bt_coex_lut_type lut_type; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_coex_agg_time_limit_old(mvm, sta); if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) @@ -897,7 +841,7 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; enum iwl_bt_coex_lut_type lut_type; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta); if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) @@ -927,7 +871,7 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant) if (ant & mvm->cfg->non_shared_ant) return true; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < @@ -940,10 +884,10 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm) if (mvm->cfg->bt_shared_single_ant) return true; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); - return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF; + return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC; } bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, @@ -951,7 +895,7 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, { u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading); - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band); if (band != IEEE80211_BAND_2GHZ) @@ -994,7 +938,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) { - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { iwl_mvm_bt_coex_vif_change_old(mvm); return; } @@ -1012,7 +957,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, u8 __maybe_unused lower_bound, upper_bound; u8 lut; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd); if (!iwl_mvm_bt_is_plcr_supported(mvm)) diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index d954591e0be5..6ac6de2af977 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c @@ -776,7 +776,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, struct iwl_host_cmd cmd = { .id = BT_CONFIG, .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + .dataflags = { IWL_HCMD_DFL_DUP, }, .flags = CMD_ASYNC, }; struct iwl_mvm_sta *mvmsta; diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 36bf6a87fb26..4165d104e4c3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ 
b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -761,7 +761,7 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) { - iwl_mvm_cancel_scan(mvm); + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); iwl_trans_stop_device(mvm->trans); @@ -1170,7 +1170,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); iwl_trans_suspend(mvm->trans); - if (wowlan->any) { + mvm->trans->wowlan_d0i3 = wowlan->any; + if (mvm->trans->wowlan_d0i3) { /* 'any' trigger means d0i3 usage */ if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) { int ret = iwl_mvm_enter_d0i3_sync(mvm); @@ -1751,8 +1752,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, int i, j, n_matches, ret; fw_status = iwl_mvm_get_wakeup_status(mvm, vif); - if (!IS_ERR_OR_NULL(fw_status)) + if (!IS_ERR_OR_NULL(fw_status)) { reasons = le32_to_cpu(fw_status->wakeup_reasons); + kfree(fw_status); + } if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) wakeup.rfkill_release = true; @@ -1783,7 +1786,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) { struct iwl_scan_offload_profile_match *fw_match; struct cfg80211_wowlan_nd_match *match; - int n_channels = 0; + int idx, n_channels = 0; fw_match = &query.matches[i]; @@ -1798,8 +1801,12 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, net_detect->matches[net_detect->n_matches++] = match; - match->ssid.ssid_len = mvm->nd_match_sets[i].ssid.ssid_len; - memcpy(match->ssid.ssid, mvm->nd_match_sets[i].ssid.ssid, + /* We inverted the order of the SSIDs in the scan + * request, so invert the index here. + */ + idx = mvm->n_nd_match_sets - i - 1; + match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len; + memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid, match->ssid.ssid_len); if (mvm->n_nd_channels < n_channels) @@ -1869,15 +1876,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) /* get the BSS vif pointer again */ vif = iwl_mvm_get_bss_vif(mvm); if (IS_ERR_OR_NULL(vif)) - goto out_unlock; + goto err; ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); if (ret) - goto out_unlock; + goto err; if (d3_status != IWL_D3_STATUS_ALIVE) { IWL_INFO(mvm, "Device was reset during suspend\n"); - goto out_unlock; + goto err; } /* query SRAM first in case we want event logging */ @@ -1903,7 +1910,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) goto out_iterate; } - out_unlock: +err: + iwl_mvm_free_nd(mvm); mutex_unlock(&mvm->mutex); out_iterate: @@ -1916,6 +1924,14 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) /* return 1 to reconfigure the device */ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); + + /* We always return 1, which causes mac80211 to do a reconfig + * with IEEE80211_RECONFIG_TYPE_RESTART. This type of + * reconfig calls iwl_mvm_restart_complete(), where we unref + * the IWL_MVM_REF_UCODE_DOWN, so we need to take the + * reference here. 
+ */ + iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); return 1; } @@ -2022,7 +2038,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) __iwl_mvm_resume(mvm, true); rtnl_unlock(); iwl_abort_notification_waits(&mvm->notif_wait); - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); ieee80211_restart_hw(mvm->hw); /* wait for restart and disconnect all interfaces */ diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c index 5f37eab5008d..5c8a65de0e77 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -190,6 +190,21 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf, return ret ?: count; } +static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + char buf[64]; + int bufsz = sizeof(buf); + int pos; + + pos = scnprintf(buf, bufsz, "bss limit = %d\n", + vif->bss_conf.txpower); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + static ssize_t iwl_dbgfs_pm_params_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -607,6 +622,7 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file, } while (0) MVM_DEBUGFS_READ_FILE_OPS(mac_params); +MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt); MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); @@ -641,6 +657,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR | S_IRUSR); + MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, S_IRUSR | S_IWUSR); diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 9ac04c1ea706..ffb4b5cef275 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -493,7 +493,8 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, mutex_lock(&mvm->mutex); - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old; @@ -550,7 +551,8 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf, mutex_lock(&mvm->mutex); - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old; pos += scnprintf(buf+pos, bufsz-pos, @@ -916,7 +918,8 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf, if (mvm->scan_rx_ant != scan_rx_ant) { mvm->scan_rx_ant = scan_rx_ant; - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); } @@ -1356,6 +1359,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file, PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN); PRINT_MVM_REF(IWL_MVM_REF_SCAN); PRINT_MVM_REF(IWL_MVM_REF_ROC); + PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX); PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT); PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS); PRINT_MVM_REF(IWL_MVM_REF_USER); diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index be1a0a127077..5e4cbdb44c60 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -294,6 +294,7 @@ enum iwl_scan_ebs_status { IWL_SCAN_EBS_SUCCESS, IWL_SCAN_EBS_FAILED, IWL_SCAN_EBS_CHAN_NOT_FOUND, + IWL_SCAN_EBS_INACTIVE, }; /** @@ -431,6 +432,17 @@ enum iwl_scan_priority { IWL_SCAN_PRIORITY_HIGH, }; +enum iwl_scan_priority_ext { + IWL_SCAN_PRIORITY_EXT_0_LOWEST, + IWL_SCAN_PRIORITY_EXT_1, + IWL_SCAN_PRIORITY_EXT_2, + IWL_SCAN_PRIORITY_EXT_3, + IWL_SCAN_PRIORITY_EXT_4, + IWL_SCAN_PRIORITY_EXT_5, + IWL_SCAN_PRIORITY_EXT_6, + IWL_SCAN_PRIORITY_EXT_7_HIGHEST, +}; + /** * iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1 * @reserved1: for alignment and future use @@ -837,4 +849,27 @@ struct iwl_scan_offload_profiles_query { struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES]; } __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */ +/** + * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration + * @uid: scan id, &enum iwl_umac_scan_uid_offsets + * @scanned_channels: number of channels scanned and number of valid elements in + * results array + * @status: one of SCAN_COMP_STATUS_* + * @bt_status: BT on/off status + * @last_channel: last channel that was scanned + * @tsf_low: TSF timer (lower half) in usecs + * @tsf_high: TSF timer (higher half) in usecs + * @results: array of scan results, only "scanned_channels" of them are valid + */ +struct iwl_umac_scan_iter_complete_notif { + __le32 uid; + u8 scanned_channels; + u8 status; + u8 bt_status; + u8 last_channel; + __le32 tsf_low; + __le32 tsf_high; + struct iwl_scan_results_notif results[]; +} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */ + #endif diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 56db2ba52da0..16e9ef49397f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -6,7 +6,7 @@ * GPL 
LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -108,6 +108,7 @@ enum { ANTENNA_COUPLING_NOTIFICATION = 0xa, /* UMAC scan commands */ + SCAN_ITERATION_COMPLETE_UMAC = 0xb5, SCAN_CFG_CMD = 0xc, SCAN_REQ_UMAC = 0xd, SCAN_ABORT_UMAC = 0xe, @@ -170,12 +171,8 @@ enum { /* Thermal Throttling*/ REPLY_THERMAL_MNG_BACKOFF = 0x7e, - /* Scanning */ - SCAN_REQUEST_CMD = 0x80, - SCAN_ABORT_CMD = 0x81, - SCAN_START_NOTIFICATION = 0x82, - SCAN_RESULTS_NOTIFICATION = 0x83, - SCAN_COMPLETE_NOTIFICATION = 0x84, + /* Set/Get DC2DC frequency tune */ + DC2DC_CONFIG_CMD = 0x83, /* NVM */ NVM_ACCESS_CMD = 0x88, @@ -1395,6 +1392,49 @@ struct iwl_mvm_marker { __le32 metadata[0]; } __packed; /* MARKER_API_S_VER_1 */ +/* + * enum iwl_dc2dc_config_id - flag ids + * + * Ids of dc2dc configuration flags + */ +enum iwl_dc2dc_config_id { + DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */ + DCDC_FREQ_TUNE_SET = 0x2, +}; /* MARKER_ID_API_E_VER_1 */ + +/** + * struct iwl_dc2dc_config_cmd - configure dc2dc values + * + * (DC2DC_CONFIG_CMD = 0x83) + * + * Set/Get & configure dc2dc values. + * The command always returns the current dc2dc values. + * + * @flags: set/get dc2dc + * @enable_low_power_mode: not used. + * @dc2dc_freq_tune0: frequency divider - digital domain + * @dc2dc_freq_tune1: frequency divider - analog domain + */ +struct iwl_dc2dc_config_cmd { + __le32 flags; + __le32 enable_low_power_mode; /* not used */ + __le32 dc2dc_freq_tune0; + __le32 dc2dc_freq_tune1; +} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */ + +/** + * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd + * + * Current dc2dc values returned by the FW. + * + * @dc2dc_freq_tune0: frequency divider - digital domain + * @dc2dc_freq_tune1: frequency divider - analog domain + */ +struct iwl_dc2dc_config_resp { + __le32 dc2dc_freq_tune0; + __le32 dc2dc_freq_tune1; +} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */ + /*********************************** * Smart Fifo API ***********************************/ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index 0601445599f2..eb10c5ee4a14 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -623,7 +623,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) if (!mvm->trans->ltr_enabled) return 0; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_HDC_PHASE_0)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_HDC_PHASE_0)) return iwl_mvm_config_ltr_v1(mvm); return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, @@ -662,9 +662,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm) * device that are triggered by the INIT firwmare (MFUART). 
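The DC2DC_CONFIG_CMD added to fw-api.h above is a combined set/get: the flags word says which values to apply, and the response always echoes the firmware's current dividers, so a plain get is just a command with no set flags. A hedged userspace model of that exchange; send_cmd() here is a stand-in for the real iwlwifi host-command transport, and the structs drop the __le32/__packed annotations a real wire struct needs:

#include <stdint.h>
#include <stdio.h>

#define DCDC_FREQ_TUNE_SET 0x2  /* mirrors enum iwl_dc2dc_config_id */

struct dc2dc_config_cmd {       /* models iwl_dc2dc_config_cmd */
        uint32_t flags;
        uint32_t enable_low_power_mode; /* not used */
        uint32_t dc2dc_freq_tune0;
        uint32_t dc2dc_freq_tune1;
};

struct dc2dc_config_resp {      /* models iwl_dc2dc_config_resp */
        uint32_t dc2dc_freq_tune0;
        uint32_t dc2dc_freq_tune1;
};

/* Stand-in transport: apply the request if asked, then echo the
 * now-current dividers, as the command always does. */
static void send_cmd(const struct dc2dc_config_cmd *cmd,
                     struct dc2dc_config_resp *resp)
{
        static uint32_t cur0 = 5, cur1 = 7;

        if (cmd->flags & DCDC_FREQ_TUNE_SET) {
                cur0 = cmd->dc2dc_freq_tune0;
                cur1 = cmd->dc2dc_freq_tune1;
        }
        resp->dc2dc_freq_tune0 = cur0;
        resp->dc2dc_freq_tune1 = cur1;
}

int main(void)
{
        struct dc2dc_config_cmd get = { .flags = 0 };
        struct dc2dc_config_cmd set = {
                .flags = DCDC_FREQ_TUNE_SET,
                .dc2dc_freq_tune0 = 9,
                .dc2dc_freq_tune1 = 11,
        };
        struct dc2dc_config_resp resp;

        send_cmd(&get, &resp);  /* no set flag: response still reports state */
        printf("get: tune0=%u tune1=%u\n",
               resp.dc2dc_freq_tune0, resp.dc2dc_freq_tune1);

        send_cmd(&set, &resp);  /* set: read back the echoed values */
        printf("set: tune0=%u tune1=%u\n",
               resp.dc2dc_freq_tune0, resp.dc2dc_freq_tune1);
        return 0;
}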
*/ _iwl_trans_stop_device(mvm->trans, false); - _iwl_trans_start_hw(mvm->trans, false); + ret = _iwl_trans_start_hw(mvm->trans, false); if (ret) - return ret; + goto error; } if (iwlmvm_mod_params.init_dbg) @@ -754,7 +754,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { ret = iwl_mvm_config_scan(mvm); if (ret) goto error; diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index b56a445b5d0c..08367fbc3bc4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -318,7 +318,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, resp = iwl_mvm_update_mcc(mvm, alpha2, src_id); if (IS_ERR_OR_NULL(resp)) { IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", - PTR_RET(resp)); + PTR_ERR_OR_ZERO(resp)); goto out; } @@ -334,7 +334,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, kfree(resp); if (IS_ERR_OR_NULL(regd)) { IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", - PTR_RET(regd)); + PTR_ERR_OR_ZERO(regd)); goto out; } @@ -415,6 +415,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) { struct ieee80211_hw *hw = mvm->hw; int num_mac, ret, i; + static const u32 mvm_ciphers[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + }; /* Tell mac80211 our characteristics */ hw->flags = IEEE80211_HW_SIGNAL_DBM | @@ -428,6 +434,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IEEE80211_HW_TIMING_BEACON_ONLY | IEEE80211_HW_CONNECTION_MONITOR | IEEE80211_HW_CHANCTX_STA_CSA | + IEEE80211_HW_SUPPORT_FAST_XMIT | IEEE80211_HW_SUPPORTS_CLONED_SKBS; hw->queues = mvm->first_agg_queue; @@ -440,19 +447,38 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; + BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2); + memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); + hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); + hw->wiphy->cipher_suites = mvm->ciphers; + /* * Enable 11w if advertised by firmware and software crypto * is not enabled (as the firmware will interpret some mgmt * packets, so enabling it with software crypto isn't safe) */ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP && - !iwlwifi_mod_params.sw_crypto) + !iwlwifi_mod_params.sw_crypto) { hw->flags |= IEEE80211_HW_MFP_CAPABLE; + mvm->ciphers[hw->wiphy->n_cipher_suites] = + WLAN_CIPHER_SUITE_AES_CMAC; + hw->wiphy->n_cipher_suites++; + } + + /* currently FW API supports only one optional cipher scheme */ + if (mvm->fw->cs[0].cipher) { + mvm->hw->n_cipher_schemes = 1; + mvm->hw->cipher_schemes = &mvm->fw->cs[0]; + mvm->ciphers[hw->wiphy->n_cipher_suites] = + mvm->fw->cs[0].cipher; + hw->wiphy->n_cipher_suites++; + } hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS; hw->wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | - NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | + NL80211_FEATURE_ND_RANDOM_MAC_ADDR; hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); @@ -509,10 +535,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; + BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); 
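The BUILD_BUG_ON just added guards an invariant the rest of the rework leans on: the low byte of the scan status holds the active-scan bits and the next byte holds their stopping counterparts, so the two masks must never overlap. The same check in plain C11, with _Static_assert standing in for the kernel's BUILD_BUG_ON and values mirroring the mvm.h definitions later in this series:

#include <stdio.h>

#define SCAN_STOPPING_SHIFT 8
#define SCAN_MASK           0xff
#define SCAN_STOPPING_MASK  (0xff << SCAN_STOPPING_SHIFT)

/* Reject at compile time any layout where an "active" bit could be
 * mistaken for a "stopping" bit. */
_Static_assert((SCAN_STOPPING_MASK & SCAN_MASK) == 0,
               "stopping and active scan bits must not overlap");

int main(void)
{
        unsigned int status = 0x2;                   /* sched scan active  */

        status = (status << SCAN_STOPPING_SHIFT) |   /* sched now stopping */
                 0x1;                                /* regular now active */
        printf("active=0x%x stopping=0x%x\n",
               status & SCAN_MASK,
               (status & SCAN_STOPPING_MASK) >> SCAN_STOPPING_SHIFT);
        return 0;
}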
BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; else mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; @@ -524,10 +551,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; - if ((mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_BEAMFORMER) && - (mvm->fw->ucode_capa.api[0] & - IWL_UCODE_TLV_API_LQ_SS_PARAMS)) + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BEAMFORMER) && + fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_LQ_SS_PARAMS)) hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; } @@ -553,30 +580,24 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) NL80211_FEATURE_STATIC_SMPS | NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; - if (mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; - if (mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT) + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_QUIET; - if (mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES; - if (mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT) + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; - /* currently FW API supports only one optional cipher scheme */ - if (mvm->fw->cs[0].cipher) { - mvm->hw->n_cipher_schemes = 1; - mvm->hw->cipher_schemes = &mvm->fw->cs[0]; - } - #ifdef CONFIG_PM_SLEEP if (iwl_mvm_is_d0i3_supported(mvm) && device_can_wakeup(mvm->trans->dev)) { @@ -616,13 +637,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (ret) return ret; - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_TDLS_SUPPORT) { + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) { IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; } - if (mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH) { + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) { IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n"); hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; } @@ -735,6 +757,60 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) return true; } +#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) 
\ + do { \ + if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ + break; \ + iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \ + } while (0) + +static void +iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn, + enum ieee80211_ampdu_mlme_action action) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_ba *ba_trig; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); + ba_trig = (void *)trig->data; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + return; + + switch (action) { + case IEEE80211_AMPDU_TX_OPERATIONAL: { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + + CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid, + "TX AGG START: MAC %pM tid %d ssn %d\n", + sta->addr, tid, tid_data->ssn); + break; + } + case IEEE80211_AMPDU_TX_STOP_CONT: + CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid, + "TX AGG STOP: MAC %pM tid %d\n", + sta->addr, tid); + break; + case IEEE80211_AMPDU_RX_START: + CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid, + "RX AGG START: MAC %pM tid %d ssn %d\n", + sta->addr, tid, rx_ba_ssn); + break; + case IEEE80211_AMPDU_RX_STOP: + CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid, + "RX AGG STOP: MAC %pM tid %d\n", + sta->addr, tid); + break; + default: + break; + } +} + static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, @@ -811,6 +887,16 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, ret = -EINVAL; break; } + + if (!ret) { + u16 rx_ba_ssn = 0; + + if (action == IEEE80211_AMPDU_RX_START) + rx_ba_ssn = *ssn; + + iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid, + rx_ba_ssn, action); + } mutex_unlock(&mvm->mutex); /* @@ -1410,7 +1496,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) * The work item could be running or queued if the * ROC time event stops just as we get here. */ - cancel_work_sync(&mvm->roc_done_wk); + flush_work(&mvm->roc_done_wk); iwl_trans_stop_device(mvm->trans); @@ -1423,20 +1509,24 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) /* * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() * won't be called in this case). + * But make sure to cleanup interfaces that have gone down before/during + * HW restart was requested. */ - clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + ieee80211_iterate_interfaces(mvm->hw, 0, + iwl_mvm_cleanup_iterator, mvm); /* We shouldn't have any UIDs still set. Loop over all the UIDs to * make sure there's nothing left there and warn if any is found. 
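Each arm of iwl_mvm_ampdu_check_trigger() above funnels into CHECK_BA_TRIGGER, which is at heart a per-TID gate: the trigger TLV carries a little-endian bitmap of TIDs to trace, and nothing is collected unless BIT(tid) is set in it. A compact userspace rendering of the gate; the check_stop filtering and the real dump collection are stubbed out, and le16_to_cpu() is an identity on the assumption of a little-endian host:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* identity on a little-endian host; the kernel helper is portable */
static uint16_t le16_to_cpu(uint16_t v) { return v; }

static void check_ba_trigger(uint16_t tid_bitmap_le, int tid,
                             const char *event)
{
        if (!(le16_to_cpu(tid_bitmap_le) & BIT(tid)))
                return;         /* this TID is not being traced */
        printf("collect fw dump: %s, tid %d\n", event, tid);
}

int main(void)
{
        uint16_t rx_ba_start = BIT(0) | BIT(5); /* trace TIDs 0 and 5 */

        check_ba_trigger(rx_ba_start, 0, "RX AGG START");  /* fires   */
        check_ba_trigger(rx_ba_start, 3, "RX AGG START");  /* ignored */
        return 0;
}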
*/ - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { int i; for (i = 0; i < mvm->max_scans; i++) { - if (WARN_ONCE(mvm->scan_uid[i], - "UMAC scan UID %d was not cleaned\n", - mvm->scan_uid[i])) - mvm->scan_uid[i] = 0; + if (WARN_ONCE(mvm->scan_uid_status[i], + "UMAC scan UID %d status was not cleaned\n", + i)) + mvm->scan_uid_status[i] = 0; } } @@ -1501,7 +1591,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, .pwr_restriction = cpu_to_le16(8 * tx_power), }; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_TX_POWER_DEV)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV)) return iwl_mvm_set_tx_power_old(mvm, vif, tx_power); if (tx_power == IWL_DEFAULT_MAX_TX_POWER) @@ -2360,7 +2450,7 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) - iwl_mvm_scan_offload_stop(mvm, true); + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); switch (vif->type) { case NL80211_IFTYPE_STATION: @@ -2411,12 +2501,8 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, * cancel scan scan before ieee80211_scan_work() could run. * To handle that, simply return if the scan is not running. */ - /* FIXME: for now, we ignore this race for UMAC scans, since - * they don't set the scan_status. - */ - if ((mvm->scan_status & IWL_MVM_SCAN_REGULAR) || - (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - iwl_mvm_cancel_scan(mvm); + if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); mutex_unlock(&mvm->mutex); } @@ -2765,16 +2851,12 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, * could run. To handle this, simply return if the scan is * not running. */ - /* FIXME: for now, we ignore this race for UMAC scans, since - * they don't set the scan_status. 
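Both cancel paths touched above share one race-tolerant shape: the firmware may complete a scan, and the async handler may clear the status bit, before mac80211's cancel request arrives, so a cancel for a scan that is no longer marked running simply returns. A minimal sketch of that idempotent-cancel pattern with an invented status word:

#include <stdio.h>

#define SCAN_REGULAR 0x1   /* invented stand-in for the mvm status bit */

static unsigned int scan_status;

/* Safe to call even if the scan already completed: the completion
 * handler clears the bit first, so a late cancel is a quiet no-op. */
static int cancel_hw_scan(void)
{
        if (!(scan_status & SCAN_REGULAR))
                return 0;               /* the race, handled */

        scan_status &= ~SCAN_REGULAR;
        printf("aborting scan in firmware\n");
        return 0;
}

int main(void)
{
        scan_status = SCAN_REGULAR;
        cancel_hw_scan();               /* sends the abort */
        cancel_hw_scan();               /* already gone: no-op */
        return 0;
}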
- */ - if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED) && - !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { mutex_unlock(&mvm->mutex); return 0; } - ret = iwl_mvm_scan_offload_stop(mvm, false); + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); mutex_unlock(&mvm->mutex); iwl_mvm_wait_for_async_handlers(mvm); @@ -3039,8 +3121,8 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, switch (vif->type) { case NL80211_IFTYPE_STATION: - if (mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) { + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { /* Use aux roc framework (HS20) */ ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration); @@ -3832,7 +3914,7 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, if (idx != 0) return -ENOENT; - if (!(mvm->fw->ucode_capa.capa[0] & + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return -ENOENT; @@ -3879,8 +3961,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (!(mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return; /* if beacon filtering isn't on mac80211 does it anyway */ @@ -3910,9 +3992,9 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); } -static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const struct ieee80211_event *event) +static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) { #define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...)
\ do { \ @@ -3921,16 +4003,12 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\ } while (0) - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_mlme *trig_mlme; if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) return; - if (event->u.mlme.status == MLME_SUCCESS) - return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); trig_mlme = (void *)trig->data; if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) @@ -3968,6 +4046,75 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, #undef CHECK_MLME_TRIGGER } +static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_ba *ba_trig; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); + ba_trig = (void *)trig->data; + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + return; + + if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "BAR received from %pM, tid %d, ssn %d", + event->u.ba.sta->addr, event->u.ba.tid, + event->u.ba.ssn); +} + +static void +iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_ba *ba_trig; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); + ba_trig = (void *)trig->data; + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + return; + + if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid))) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "Frame from %pM timed out, tid %d", + event->u.ba.sta->addr, event->u.ba.tid); +} + +static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + switch (event->type) { + case MLME_EVENT: + iwl_mvm_event_mlme_callback(mvm, vif, event); + break; + case BAR_RX_EVENT: + iwl_mvm_event_bar_rx_callback(mvm, vif, event); + break; + case BA_FRAME_TIMEOUT: + iwl_mvm_event_frame_timeout_callback(mvm, vif, event); + break; + default: + break; + } +} + const struct ieee80211_ops iwl_mvm_hw_ops = { .tx = iwl_mvm_mac_tx, .ampdu_action = iwl_mvm_mac_ampdu_action, diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 6d332348dca6..2d4bad5fe825 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -276,6 +276,7 @@ enum iwl_mvm_ref_type { IWL_MVM_REF_UCODE_DOWN, IWL_MVM_REF_SCAN, IWL_MVM_REF_ROC, + IWL_MVM_REF_ROC_AUX, IWL_MVM_REF_P2P_CLIENT, IWL_MVM_REF_AP_IBSS, IWL_MVM_REF_USER, @@ -446,6 +447,8 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) extern const u8 tid_to_mac80211_ac[]; +#define IWL_MVM_SCAN_STOPPING_SHIFT 8 + enum iwl_scan_status { IWL_MVM_SCAN_REGULAR = BIT(0), IWL_MVM_SCAN_SCHED = BIT(1), @@ -462,8 +465,8 @@ enum iwl_scan_status { IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT | IWL_MVM_SCAN_STOPPING_NETDETECT, - IWL_MVM_SCAN_STOPPING_MASK = 0xff00, - IWL_MVM_SCAN_MASK = 0x00ff, + IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT, + IWL_MVM_SCAN_MASK = 
0xff, }; /** @@ -627,8 +630,7 @@ struct iwl_mvm { unsigned int max_scans; /* UMAC scan tracking */ - u32 scan_uid[IWL_MVM_MAX_UMAC_SCANS]; - u8 scan_seq_num, sched_scan_seq_num; + u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; /* rx chain antennas set through debugfs for the scan command */ u8 scan_rx_ant; @@ -818,6 +820,8 @@ struct iwl_mvm { } tdls_cs; struct iwl_mvm_shared_mem_cfg shared_mem_cfg; + + u32 ciphers[6]; }; /* Extract MVM priv from op_mode and _hw */ @@ -887,14 +891,15 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) return mvm->trans->cfg->d0i3 && mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF && !iwlwifi_mod_params.d0i3_disable && - (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); } static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) { bool nvm_lar = mvm->nvm_data->lar_enabled; - bool tlv_lar = mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_LAR_SUPPORT; + bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT); if (iwlwifi_mod_params.lar_disable) return false; @@ -911,24 +916,28 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm) { - return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE || - mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC; + return fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) || + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC); } static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm) { - return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG; + return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCD_CFG); } static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm) { - return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) && + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) && IWL_MVM_BT_COEX_CORUNNING; } static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) { - return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_RRC) && + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BT_COEX_RRC) && IWL_MVM_BT_COEX_RRC; } @@ -1121,34 +1130,34 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies); int iwl_mvm_scan_size(struct iwl_mvm *mvm); -int iwl_mvm_cancel_scan(struct iwl_mvm *mvm); +int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); /* Scheduled scan */ -int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, - struct cfg80211_sched_scan_request *req); +int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); +int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies, int type); -int 
iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify); -int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); /* UMAC scan */ int iwl_mvm_config_scan(struct iwl_mvm *mvm); int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); +int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c index 87b2a30a2308..2a6be350704a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -316,8 +316,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data; lar_enabled = !iwlwifi_mod_params.lar_disable && - (mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT); return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib, regulatory, mac_override, phy_sku, @@ -583,9 +583,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic) kfree(nvm_buffer); } - /* load external NVM if configured */ + /* Only if PNVM selected in the mod param - load external NVM */ if (mvm->nvm_file_name) { - /* read External NVM file - take the default */ + /* read External NVM file from the mod param */ ret = iwl_mvm_read_external_nvm(mvm); if (ret) { /* choose the nvm_file name according to the @@ -792,8 +792,8 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm) char mcc[3]; if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) { - tlv_lar = mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_LAR_SUPPORT; + tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT); nvm_lar = mvm->nvm_data->lar_enabled; if (tlv_lar != nvm_lar) IWL_INFO(mvm, diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 02028bcb3ff6..e4fa50075ffd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
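From here on the series is largely the mechanical move from open-coded `ucode_capa.capa[0] & FLAG` tests to fw_has_capa()/fw_has_api(). Besides reading better, the helpers hide which 32-bit word a bit lives in, so the capability space can grow past the first word without touching the callers. One plausible shape for such a helper, written as an illustrative reimplementation rather than the driver's actual code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CAPA_WORDS 4

struct ucode_capabilities {
        uint32_t capa[CAPA_WORDS];
};

/* Treat the capability as a flat bit index so callers never need to
 * know which 32-bit word a given capability landed in. */
static bool fw_has_capa(const struct ucode_capabilities *c,
                        unsigned int capa)
{
        return c->capa[capa / 32] & (1u << (capa % 32));
}

int main(void)
{
        struct ucode_capabilities caps = { .capa = { 0, 0x4, 0, 0 } };

        /* bit 34 = word 1, bit 2 */
        printf("capa 34: %s\n", fw_has_capa(&caps, 34) ? "yes" : "no");
        printf("capa 7:  %s\n", fw_has_capa(&caps, 7) ? "yes" : "no");
        return 0;
}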
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -194,7 +194,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) * (PCIe power is lost before PERST# is asserted), causing ME FW * to lose ownership and not being able to obtain it back. */ - if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) + if (!mvm->trans->cfg->apmg_not_supported) iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); @@ -238,13 +238,15 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false), RX_HANDLER(SCAN_ITERATION_COMPLETE, - iwl_mvm_rx_scan_offload_iter_complete_notif, false), + iwl_mvm_rx_lmac_scan_iter_complete_notif, false), RX_HANDLER(SCAN_OFFLOAD_COMPLETE, - iwl_mvm_rx_scan_offload_complete_notif, true), - RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results, + iwl_mvm_rx_lmac_scan_complete_notif, true), + RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found, false), RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif, true), + RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC, + iwl_mvm_rx_umac_scan_iter_complete_notif, false), RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false), @@ -279,11 +281,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(BINDING_CONTEXT_CMD), CMD(TIME_QUOTA_CMD), CMD(NON_QOS_TX_COUNTER_CMD), - CMD(SCAN_REQUEST_CMD), - CMD(SCAN_ABORT_CMD), - CMD(SCAN_START_NOTIFICATION), - CMD(SCAN_RESULTS_NOTIFICATION), - CMD(SCAN_COMPLETE_NOTIFICATION), + CMD(DC2DC_CONFIG_CMD), CMD(NVM_ACCESS_CMD), CMD(PHY_CONFIGURATION_CMD), CMD(CALIB_RES_NOTIF_PHY_DB), @@ -356,6 +354,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION), CMD(TDLS_CONFIG_CMD), CMD(MCC_UPDATE_CMD), + CMD(SCAN_ITERATION_COMPLETE_UMAC), }; #undef CMD @@ -517,15 +516,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, min_backoff = calc_min_backoff(trans, cfg); iwl_mvm_tt_initialize(mvm, min_backoff); - /* set the nvm_file_name according to priority */ - if (iwlwifi_mod_params.nvm_file) { + + if (iwlwifi_mod_params.nvm_file) mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; - } else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { - if (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP) - mvm->nvm_file_name = mvm->cfg->default_nvm_file_B_step; - else - mvm->nvm_file_name = mvm->cfg->default_nvm_file_C_step; - } + else + IWL_DEBUG_EEPROM(mvm->trans->dev, + "working without external nvm file\n"); if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name, "not allowing power-up and not having nvm_file\n")) diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 044014276550..daff1d0a8e4a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -138,7 +138,7 @@ struct rs_tx_column; typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, + struct rs_rate *rate, const struct rs_tx_column *next_col); struct rs_tx_column { @@ -150,14 +150,14 @@ struct rs_tx_column { }; static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, + struct rs_rate *rate, const struct 
rs_tx_column *next_col) { return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant); } static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, + struct rs_rate *rate, const struct rs_tx_column *next_col) { struct iwl_mvm_sta *mvmsta; @@ -180,11 +180,14 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p) return false; + if (mvm->nvm_data->sku_cap_mimo_disabled) + return false; + return true; } static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, + struct rs_rate *rate, const struct rs_tx_column *next_col) { if (!sta->ht_cap.ht_supported) @@ -194,10 +197,9 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, + struct rs_rate *rate, const struct rs_tx_column *next_col) { - struct rs_rate *rate = &tbl->rate; struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; @@ -1125,8 +1127,8 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; - bool allow_ant_mismatch = mvm->fw->ucode_capa.api[0] & - IWL_UCODE_TLV_API_LQ_SS_PARAMS; + bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_LQ_SS_PARAMS); /* Treat uninitialized rate scaling data same as non-existing. */ if (!lq_sta) { @@ -1656,7 +1658,8 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, for (j = 0; j < MAX_COLUMN_CHECKS; j++) { allow_func = next_col->checks[j]; - if (allow_func && !allow_func(mvm, sta, tbl, next_col)) + if (allow_func && !allow_func(mvm, sta, &tbl->rate, + next_col)) break; } @@ -2711,7 +2714,7 @@ static void rs_vht_init(struct iwl_mvm *mvm, (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)) lq_sta->stbc_capable = true; - if ((mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) && + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) lq_sta->bfer_capable = true; @@ -2995,7 +2998,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm, valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); /* TODO: remove old API when min FW API hits 14 */ - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) && + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) && rs_stbc_allow(mvm, sta, lq_sta)) rate.stbc = true; @@ -3209,7 +3212,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm, rs_build_rates_table(mvm, sta, lq_sta, initial_rate); - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS)) rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate); mvmsta = iwl_mvm_sta_from_mac80211(sta); diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index d6314ddf57b5..8f1d93b7a13a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -570,7 +570,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, }; u32 temperature; - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_STATS_V10) { + if (fw_has_api(&mvm->fw->ucode_capa, 
IWL_UCODE_TLV_API_STATS_V10)) { struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data; if (iwl_rx_packet_payload_len(pkt) != v10_len) @@ -610,7 +610,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, /* Only handle rx statistics temperature changes if async temp * notifications are not supported */ - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ASYNC_DTM)) iwl_mvm_tt_temp_changed(mvm, temperature); ieee80211_iterate_active_interfaces(mvm->hw, diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index e50fd3fd8ab0..5de144968723 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -101,14 +101,6 @@ struct iwl_mvm_scan_params { } schedule[2]; }; -enum iwl_umac_scan_uid_type { - IWL_UMAC_SCAN_UID_REG_SCAN = BIT(0), - IWL_UMAC_SCAN_UID_SCHED_SCAN = BIT(1), -}; - -static int iwl_umac_scan_stop(struct iwl_mvm *mvm, - enum iwl_umac_scan_uid_type type, bool notify); - static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) { if (mvm->scan_rx_ant != ANT_NONE) @@ -168,7 +160,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band, static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm, enum ieee80211_band band, int n_ssids) { - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL) + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL)) return 10; if (band == IEEE80211_BAND_2GHZ) return 20 + 3 * (n_ssids + 1); @@ -178,7 +170,7 @@ static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm, static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm, enum ieee80211_band band) { - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL) + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL)) return 110; return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10; } @@ -213,8 +205,9 @@ static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm, params->max_out_time = 120; if (iwl_mvm_low_latency(mvm)) { - if (mvm->fw->ucode_capa.api[0] & - IWL_UCODE_TLV_API_FRAGMENTED_SCAN) { + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { + params->suspend_time = 105; /* * If there is more than one active interface make @@ -228,8 +221,9 @@ static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm, } } - if (frag_passive_dwell && (mvm->fw->ucode_capa.api[0] & - IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { + if (frag_passive_dwell && + fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { /* * P2P device scan should not be fragmented to avoid negative * impact on P2P device discovery. 
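The dwell helpers above reduce to a small decision table: firmware with the BASIC_DWELL API takes a flat per-channel value, while older firmware scales the active dwell with the number of probed SSIDs, 2.4 GHz dwelling a bit longer than 5 GHz. The table as standalone C; the 5 GHz fallback is outside the visible hunk, so the value used here is an assumption for illustration:

#include <stdbool.h>
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

/* Mirrors iwl_mvm_get_active_dwell(): flat 10 with the BASIC_DWELL
 * API, otherwise a per-band base plus a per-SSID increment. */
static unsigned int active_dwell(bool basic_dwell_api, enum band band,
                                 int n_ssids)
{
        if (basic_dwell_api)
                return 10;
        if (band == BAND_2GHZ)
                return 20 + 3 * (n_ssids + 1);
        return 10 + 2 * (n_ssids + 1);  /* assumed 5 GHz fallback */
}

int main(void)
{
        printf("new fw:        %u\n", active_dwell(true, BAND_2GHZ, 2));
        printf("old fw, 2 GHz: %u\n", active_dwell(false, BAND_2GHZ, 2));
        printf("old fw, 5 GHz: %u\n", active_dwell(false, BAND_5GHZ, 2));
        return 0;
}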
Configure max_out_time to be @@ -281,8 +275,8 @@ static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm, static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm) { /* require rrm scan whenever the fw supports it */ - return mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT; + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT); } static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm) @@ -318,22 +312,41 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm) return max_ie_len; } -int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res, + int num_res, u8 *buf, size_t buf_size) +{ + int i; + u8 *pos = buf, *end = buf + buf_size; + + for (i = 0; pos < end && i < num_res; i++) + pos += snprintf(pos, end - pos, " %u", res[i].channel); + + /* terminate the string in case the buffer was too short */ + *(buf + buf_size - 1) = '\0'; + + return buf; +} + +int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data; + u8 buf[256]; IWL_DEBUG_SCAN(mvm, - "Scan offload iteration complete: status=0x%x scanned channels=%d\n", - notif->status, notif->scanned_channels); + "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n", + notif->status, notif->scanned_channels, + iwl_mvm_dump_channel_list(notif->results, + notif->scanned_channels, buf, + sizeof(buf))); return 0; } -int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) { IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n"); ieee80211_sched_scan_results(mvm->hw); @@ -341,14 +354,27 @@ int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm, return 0; } -int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status) +{ + switch (status) { + case IWL_SCAN_EBS_SUCCESS: + return "successful"; + case IWL_SCAN_EBS_INACTIVE: + return "inactive"; + case IWL_SCAN_EBS_FAILED: + case IWL_SCAN_EBS_CHAN_NOT_FOUND: + default: + return "failed"; + } +} + +int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data; bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); - bool ebs_successful = (scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS); /* scan status must be locked for proper checking */ lockdep_assert_held(&mvm->mutex); @@ -368,13 +394,13 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", aborted ? "aborted" : "completed", - ebs_successful ? "successful" : "failed"); + iwl_mvm_ebs_status_str(scan_notif->ebs_status)); mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED; } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) { IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n", aborted ? "aborted" : "completed", - ebs_successful ? 
"successful" : "failed"); + iwl_mvm_ebs_status_str(scan_notif->ebs_status)); mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR; } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) { @@ -382,14 +408,14 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n", aborted ? "aborted" : "completed", - ebs_successful ? "successful" : "failed"); + iwl_mvm_ebs_status_str(scan_notif->ebs_status)); mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; ieee80211_sched_scan_stopped(mvm->hw); } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n", aborted ? "aborted" : "completed", - ebs_successful ? "successful" : "failed"); + iwl_mvm_ebs_status_str(scan_notif->ebs_status)); mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; ieee80211_scan_completed(mvm->hw, @@ -397,7 +423,9 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); } - mvm->last_ebs_successful = ebs_successful; + mvm->last_ebs_successful = + scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS || + scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE; return 0; } @@ -463,8 +491,9 @@ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params, } } -int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, - struct cfg80211_sched_scan_request *req) +static int +iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, + struct cfg80211_sched_scan_request *req) { struct iwl_scan_offload_profile *profile; struct iwl_scan_offload_profile_cfg *profile_cfg; @@ -545,7 +574,7 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm, return true; } -static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm) +static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm) { int ret; struct iwl_host_cmd cmd = { @@ -553,12 +582,6 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm) }; u32 status; - /* Exit instantly with error when device is not ready - * to receive scan abort command or it does not perform - * scheduled scan currently */ - if (!mvm->scan_status) - return -EIO; - ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); if (ret) return ret; @@ -578,73 +601,6 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm) return ret; } -int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) -{ - int ret; - struct iwl_notification_wait wait_scan_done; - static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, }; - bool sched = !!(mvm->scan_status & IWL_MVM_SCAN_SCHED); - - lockdep_assert_held(&mvm->mutex); - - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) - return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN, - notify); - - /* FIXME: For now we only check if no scan is set here, since - * we only support LMAC in this flow and it doesn't support - * multiple scans. - */ - if (!mvm->scan_status) - return 0; - - if (iwl_mvm_is_radio_killed(mvm)) { - ret = 0; - goto out; - } - - iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, - scan_done_notif, - ARRAY_SIZE(scan_done_notif), - NULL, NULL); - - ret = iwl_mvm_send_scan_offload_abort(mvm); - if (ret) { - IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n", - sched ? "offloaded " : "", ret); - iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); - goto out; - } - - IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n", - sched ? 
"scheduled " : ""); - - ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); -out: - /* Clear the scan status so the next scan requests will - * succeed and mark the scan as stopping, so that the Rx - * handler doesn't do anything, as the scan was stopped from - * above. Since the rx handler won't do anything now, we have - * to release the scan reference here. - */ - if (mvm->scan_status == IWL_MVM_SCAN_REGULAR) - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - - if (sched) { - mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; - mvm->scan_status |= IWL_MVM_SCAN_STOPPING_SCHED; - if (notify) - ieee80211_sched_scan_stopped(mvm->hw); - } else { - mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; - mvm->scan_status |= IWL_MVM_SCAN_STOPPING_REGULAR; - if (notify) - ieee80211_scan_completed(mvm->hw, true); - } - - return ret; -} - static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm, struct iwl_scan_req_tx_cmd *tx_cmd, bool no_cck) @@ -775,6 +731,22 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, params->preq.common_data.len = cpu_to_le16(ies->common_ie_len); } +static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm, + enum iwl_scan_priority_ext prio) +{ + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY)) + return cpu_to_le32(prio); + + if (prio <= IWL_SCAN_PRIORITY_EXT_2) + return cpu_to_le32(IWL_SCAN_PRIORITY_LOW); + + if (prio <= IWL_SCAN_PRIORITY_EXT_4) + return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM); + + return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); +} + static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_lmac *cmd, struct iwl_mvm_scan_params *params) @@ -786,7 +758,7 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, params->dwell[IEEE80211_BAND_2GHZ].fragmented; cmd->max_out_time = cpu_to_le32(params->max_out_time); cmd->suspend_time = cpu_to_le32(params->suspend_time); - cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); + cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); } static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids, @@ -801,19 +773,23 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids, iwl_mvm_max_scan_ie_fw_cmd_room(mvm))); } -static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, int n_iterations) +static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + int n_iterations) { const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa; /* We can only use EBS if: * 1. the feature is supported; * 2. the last EBS was successful; - * 3. if only single scan, the single scan EBS API is supported. + * 3. if only single scan, the single scan EBS API is supported; + * 4. it's not a p2p find operation. 
*/ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && mvm->last_ebs_successful && (n_iterations > 1 || - (capa->api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS))); + fw_has_api(capa, IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)) && + vif->type != NL80211_IFTYPE_P2P_DEVICE); } static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params) @@ -891,7 +867,7 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->schedule[1].iterations = params->schedule[1].iterations; cmd->schedule[1].full_scan_mul = params->schedule[1].iterations; - if (iwl_mvm_scan_use_ebs(mvm, n_iterations)) { + if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) { cmd->channel_opt[0].flags = cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | @@ -914,32 +890,6 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return 0; } -int iwl_mvm_cancel_scan(struct iwl_mvm *mvm) -{ - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) - return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN, - true); - - if (!(mvm->scan_status & IWL_MVM_SCAN_REGULAR)) - return 0; - - if (iwl_mvm_is_radio_killed(mvm)) { - ieee80211_scan_completed(mvm->hw, true); - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; - return 0; - } - - return iwl_mvm_scan_offload_stop(mvm, true); -} - -/* UMAC scan API */ - -struct iwl_umac_scan_done { - struct iwl_mvm *mvm; - enum iwl_umac_scan_uid_type type; -}; - static int rate_to_scan_rate_flag(unsigned int rate) { static const int rate_to_scan_rate[IWL_RATE_COUNT] = { @@ -1048,68 +998,15 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) return ret; } -static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid) +static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status) { int i; for (i = 0; i < mvm->max_scans; i++) - if (mvm->scan_uid[i] == uid) + if (mvm->scan_uid_status[i] == status) return i; - return i; -} - -static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm) -{ - return iwl_mvm_find_scan_uid(mvm, 0); -} - -static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm, - enum iwl_umac_scan_uid_type type) -{ - int i; - - for (i = 0; i < mvm->max_scans; i++) - if (mvm->scan_uid[i] & type) - return true; - - return false; -} - -static int iwl_mvm_find_first_scan(struct iwl_mvm *mvm, - enum iwl_umac_scan_uid_type type) -{ - int i; - - for (i = 0; i < mvm->max_scans; i++) - if (mvm->scan_uid[i] & type) - return i; - - return i; -} - -static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm, - enum iwl_umac_scan_uid_type type) -{ - u32 uid; - - /* make sure exactly one bit is on in scan type */ - WARN_ON(hweight8(type) != 1); - - /* - * Make sure scan uids are unique. If one scan lasts long time while - * others are completing frequently, the seq number will wrap up and - * we may have more than one scan with the same uid. 
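The replacement of iwl_mvm_find_scan_uid() with iwl_mvm_scan_uid_by_status() above is the heart of the UID rework: the array index itself becomes the UID, each slot stores the scan's status, and a search for status 0 doubles as free-slot allocation, so the old sequence-number generator and its wrap-around worry disappear. A sketch of the lookup:

#include <stdio.h>

#define MAX_SCANS 4

static int scan_uid_status[MAX_SCANS];

/* Mirrors iwl_mvm_scan_uid_by_status(): the array index *is* the
 * UID, and searching for status 0 doubles as free-slot allocation.
 * Returns -1 when nothing matches (-ENOENT in the driver). */
static int scan_uid_by_status(int status)
{
        int i;

        for (i = 0; i < MAX_SCANS; i++)
                if (scan_uid_status[i] == status)
                        return i;
        return -1;
}

int main(void)
{
        int uid = scan_uid_by_status(0);        /* grab a free slot */

        if (uid < 0)
                return 1;
        scan_uid_status[uid] = 0x1;             /* mark: regular scan */

        printf("regular scan runs under uid %d\n", scan_uid_by_status(0x1));
        return 0;
}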
- */ - do { - uid = type | (mvm->scan_seq_num << - IWL_UMAC_SCAN_UID_SEQ_OFFSET); - mvm->scan_seq_num++; - } while (iwl_mvm_find_scan_uid(mvm, uid) < mvm->max_scans); - - IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid); - - return uid; + return -ENOENT; } static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, @@ -1123,12 +1020,15 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, params->dwell[IEEE80211_BAND_2GHZ].fragmented; cmd->max_out_time = cpu_to_le32(params->max_out_time); cmd->suspend_time = cpu_to_le32(params->suspend_time); - cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); + cmd->scan_priority = + iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); if (iwl_mvm_scan_total_iterations(params) == 0) - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); + cmd->ooc_priority = + iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); else - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW); + cmd->ooc_priority = + iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2); } static void @@ -1173,26 +1073,30 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, if (iwl_mvm_scan_total_iterations(params) > 1) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC; +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvm->scan_iter_notif_enabled) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; +#endif return flags; } static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_scan_params *params) + struct iwl_mvm_scan_params *params, + int type) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels; - u32 uid; + int uid; u32 ssid_bitmap = 0; int n_iterations = iwl_mvm_scan_total_iterations(params); - int uid_idx; lockdep_assert_held(&mvm->mutex); - uid_idx = iwl_mvm_find_free_scan_uid(mvm); - if (uid_idx >= mvm->max_scans) - return -EBUSY; + uid = iwl_mvm_scan_uid_by_status(mvm, 0); + if (uid < 0) + return uid; memset(cmd, 0, ksize(cmd)); cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) - @@ -1200,17 +1104,12 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_scan_umac_dwell(mvm, cmd, params); - if (n_iterations == 1) - uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN); - else - uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN); + mvm->scan_uid_status[uid] = type; - mvm->scan_uid[uid_idx] = uid; cmd->uid = cpu_to_le32(uid); - cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); - if (iwl_mvm_scan_use_ebs(mvm, n_iterations)) + if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; @@ -1222,8 +1121,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_umac_scan_cfg_channels(mvm, params->channels, params->n_channels, ssid_bitmap, cmd); - /* With UMAC we can have only one schedule, so use the sum of - * the iterations (with a a maximum of 255). + /* With UMAC we use only one schedule for now, so use the sum + * of the iterations (with a a maximum of 255). */ sec_part->schedule[0].iter_count = (n_iterations > 255) ? 
255 : n_iterations; @@ -1262,11 +1161,11 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) case IWL_MVM_SCAN_REGULAR: if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) return -EBUSY; - return iwl_mvm_scan_offload_stop(mvm, true); + return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); case IWL_MVM_SCAN_SCHED: if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) return -EBUSY; - return iwl_mvm_cancel_scan(mvm); + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); case IWL_MVM_SCAN_NETDETECT: /* No need to stop anything for net-detect since the * firmware is restarted anyway. This way, any sched @@ -1337,9 +1236,10 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_build_scan_probe(mvm, vif, ies, &params); - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { hcmd.id = SCAN_REQ_UMAC; - ret = iwl_mvm_scan_umac(mvm, vif, &params); + ret = iwl_mvm_scan_umac(mvm, vif, &params, + IWL_MVM_SCAN_REGULAR); } else { hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; ret = iwl_mvm_scan_lmac(mvm, vif, &params); } @@ -1444,9 +1344,9 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, iwl_mvm_build_scan_probe(mvm, vif, ies, &params); - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { hcmd.id = SCAN_REQ_UMAC; - ret = iwl_mvm_scan_umac(mvm, vif, &params); + ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED); } else { hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; ret = iwl_mvm_scan_lmac(mvm, vif, &params); } @@ -1478,144 +1378,118 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_umac_scan_complete *notif = (void *)pkt->data; u32 uid = __le32_to_cpu(notif->uid); - bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN); - int uid_idx = iwl_mvm_find_scan_uid(mvm, uid); + bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED); - /* - * Scan uid may be set to zero in case of scan abort request from above. - */ - if (uid_idx >= mvm->max_scans) + if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status))) return 0; + /* if the scan is already stopping, we don't need to notify mac80211 */ + if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) { + ieee80211_scan_completed(mvm->hw, aborted); + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { + ieee80211_sched_scan_stopped(mvm->hw); + } + + mvm->scan_status &= ~mvm->scan_uid_status[uid]; + IWL_DEBUG_SCAN(mvm, - "Scan completed, uid %u type %s, status %s, EBS status %s\n", - uid, sched ? "sched" : "regular", + "Scan completed, uid %u type %u, status %s, EBS status %s\n", + uid, mvm->scan_uid_status[uid], notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? "completed" : "aborted", - notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
- "success" : "failed"); + iwl_mvm_ebs_status_str(notif->ebs_status)); - if (notif->ebs_status) + if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS && + notif->ebs_status != IWL_SCAN_EBS_INACTIVE) mvm->last_ebs_successful = false; - mvm->scan_uid[uid_idx] = 0; - - if (!sched) { - ieee80211_scan_completed(mvm->hw, - notif->status == - IWL_SCAN_OFFLOAD_ABORTED); - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - } else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) { - ieee80211_sched_scan_stopped(mvm->hw); - } else { - IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n"); - } + mvm->scan_uid_status[uid] = 0; return 0; } -static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, void *data) +int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) { - struct iwl_umac_scan_done *scan_done = data; - struct iwl_umac_scan_complete *notif = (void *)pkt->data; - u32 uid = __le32_to_cpu(notif->uid); - int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid); - - if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC)) - return false; - - if (uid_idx >= scan_done->mvm->max_scans) - return false; - - /* - * Clear scan uid of scans that was aborted from above and completed - * in FW so the RX handler does nothing. Set last_ebs_successful here if - * needed. - */ - scan_done->mvm->scan_uid[uid_idx] = 0; - - if (notif->ebs_status) - scan_done->mvm->last_ebs_successful = false; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data; + u8 buf[256]; - return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type); + IWL_DEBUG_SCAN(mvm, + "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n", + notif->status, notif->scanned_channels, + iwl_mvm_dump_channel_list(notif->results, + notif->scanned_channels, buf, + sizeof(buf))); + return 0; } -static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid) +static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) { struct iwl_umac_scan_abort cmd = { .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) - sizeof(struct iwl_mvm_umac_cmd_hdr)), - .uid = cpu_to_le32(uid), }; + int uid, ret; lockdep_assert_held(&mvm->mutex); + /* We should always get a valid index here, because we already + * checked that this type of scan was running in the generic + * code. 
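For orientation, the stop/abort bookkeeping used in these hunks can be condensed into a short sketch. The flag values below are assumptions for illustration only (the real definitions of IWL_MVM_SCAN_REGULAR, IWL_MVM_SCAN_SCHED and IWL_MVM_SCAN_STOPPING_SHIFT live in mvm.h, outside this excerpt); the point is that a stopped scan's bit moves from the running half of scan_status to a shifted "stopping" copy, and the same shifted value is parked in scan_uid_status[uid] so the late completion notification can be recognized and ignored:

    /* Illustrative sketch only; flag values assumed, see mvm.h for the real ones. */
    #define IWL_MVM_SCAN_REGULAR            BIT(0)
    #define IWL_MVM_SCAN_SCHED              BIT(1)
    #define IWL_MVM_SCAN_STOPPING_SHIFT     8

    /* Aborting a running sched scan that holds uid 3 then amounts to: */
    mvm->scan_uid_status[3] = IWL_MVM_SCAN_SCHED << IWL_MVM_SCAN_STOPPING_SHIFT;
    mvm->scan_status |= IWL_MVM_SCAN_SCHED << IWL_MVM_SCAN_STOPPING_SHIFT;
    mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;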
+ */ + uid = iwl_mvm_scan_uid_by_status(mvm, type); + if (WARN_ON_ONCE(uid < 0)) + return uid; + + cmd.uid = cpu_to_le32(uid); + IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid); - return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd); + if (!ret) + mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; + + return ret; } -static int iwl_umac_scan_stop(struct iwl_mvm *mvm, - enum iwl_umac_scan_uid_type type, bool notify) +static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) { struct iwl_notification_wait wait_scan_done; - static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, }; - struct iwl_umac_scan_done scan_done = { - .mvm = mvm, - .type = type, - }; - int i, ret = -EIO; + static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, + SCAN_OFFLOAD_COMPLETE, }; + int ret; + + lockdep_assert_held(&mvm->mutex); iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, scan_done_notif, ARRAY_SIZE(scan_done_notif), - iwl_scan_umac_done_check, &scan_done); + NULL, NULL); IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type); - for (i = 0; i < mvm->max_scans; i++) { - if (mvm->scan_uid[i] & type) { - int err; - - if (iwl_mvm_is_radio_killed(mvm) && - (type & IWL_UMAC_SCAN_UID_REG_SCAN)) { - ieee80211_scan_completed(mvm->hw, true); - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - break; - } - - err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]); - if (!err) - ret = 0; - } - } + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + ret = iwl_mvm_umac_scan_abort(mvm, type); + else + ret = iwl_mvm_lmac_scan_abort(mvm); if (ret) { - IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n"); + IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type); iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); return ret; } ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); - if (ret) - return ret; - - if (notify) { - if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN) - ieee80211_sched_scan_stopped(mvm->hw); - if (type & IWL_UMAC_SCAN_UID_REG_SCAN) { - ieee80211_scan_completed(mvm->hw, true); - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - } - } return ret; } int iwl_mvm_scan_size(struct iwl_mvm *mvm) { - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) return sizeof(struct iwl_scan_req_umac) + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels + @@ -1633,19 +1507,18 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm) */ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) { - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { - u32 uid, i; + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + int uid, i; - uid = iwl_mvm_find_first_scan(mvm, IWL_UMAC_SCAN_UID_REG_SCAN); - if (uid < mvm->max_scans) { + uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR); + if (uid >= 0) { ieee80211_scan_completed(mvm->hw, true); - mvm->scan_uid[uid] = 0; + mvm->scan_uid_status[uid] = 0; } - uid = iwl_mvm_find_first_scan(mvm, - IWL_UMAC_SCAN_UID_SCHED_SCAN); - if (uid < mvm->max_scans && !mvm->restart_fw) { + uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED); + if (uid >= 0 && !mvm->restart_fw) { ieee80211_sched_scan_stopped(mvm->hw); - mvm->scan_uid[uid] = 0; + mvm->scan_uid_status[uid] = 0; } /* We shouldn't have any UIDs still set. 
Loop over all the @@ -1653,10 +1526,10 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) * any is found. */ for (i = 0; i < mvm->max_scans; i++) { - if (WARN_ONCE(mvm->scan_uid[i], - "UMAC scan UID %d was not cleaned\n", - mvm->scan_uid[i])) - mvm->scan_uid[i] = 0; + if (WARN_ONCE(mvm->scan_uid_status[i], + "UMAC scan UID %d status was not cleaned\n", + i)) + mvm->scan_uid_status[i] = 0; } } else { if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) @@ -1670,3 +1543,40 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) ieee80211_sched_scan_stopped(mvm->hw); } } + +int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify) +{ + int ret; + + if (!(mvm->scan_status & type)) + return 0; + + if (iwl_mvm_is_radio_killed(mvm)) { + ret = 0; + goto out; + } + + ret = iwl_mvm_scan_stop_wait(mvm, type); + if (!ret) + mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT; +out: + /* Clear the scan status so the next scan requests will + * succeed and mark the scan as stopping, so that the Rx + * handler doesn't do anything, as the scan was stopped from + * above. + */ + mvm->scan_status &= ~type; + + if (type == IWL_MVM_SCAN_REGULAR) { + /* Since the rx handler won't do anything now, we have + * to release the scan reference here. + */ + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + if (notify) + ieee80211_scan_completed(mvm->hw, true); + } else if (notify) { + ieee80211_sched_scan_stopped(mvm->hw); + } + + return ret; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 1845b79487c8..d68dc697a4a0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -5,8 +5,8 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,8 +31,8 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1000,13 +1000,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; + iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid, + buf_size, ssn, wdg_timeout); + ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) return -EIO; - iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid, - buf_size, ssn, wdg_timeout); - /* * Even though in theory the peer could have different * aggregation reorder buffer sizes for different sessions, diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index fd7b0d36f9a6..d24b6a83e68c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -108,12 +108,14 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) * in the case that the time event actually completed in the firmware * (which is handled in iwl_mvm_te_handle_notif). */ - if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) { queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE); - if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) + iwl_mvm_unref(mvm, IWL_MVM_REF_ROC); + } + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) { queues |= BIT(mvm->aux_queue); - - iwl_mvm_unref(mvm, IWL_MVM_REF_ROC); + iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX); + } synchronize_net(); @@ -393,6 +395,7 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) { set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); te_data->running = true; + iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX); ieee80211_ready_on_channel(mvm->hw); /* Start TE */ } else { IWL_DEBUG_TE(mvm, @@ -794,13 +797,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, void iwl_mvm_stop_roc(struct iwl_mvm *mvm) { - struct iwl_mvm_vif *mvmvif; + struct iwl_mvm_vif *mvmvif = NULL; struct iwl_mvm_time_event_data *te_data; bool is_p2p = false; lockdep_assert_held(&mvm->mutex); - mvmvif = NULL; spin_lock_bh(&mvm->time_event_lock); /* @@ -818,17 +820,14 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) } } - /* - * Iterate over the list of aux roc time events and find the time - * event that is associated with a BSS interface. - * This assumes that a BSS interface can have only a single time - * event at any given time and this time event corresponds to a ROC - * request + /* There can be at most one AUX ROC time event; we just use the + * list to simplify/unify the code. Remove it if it exists. */ - list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { + te_data = list_first_entry_or_null(&mvm->aux_roc_te_list, + struct iwl_mvm_time_event_data, + list); + if (te_data) mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); - goto remove_te; - } remove_te: spin_unlock_bh(&mvm->time_event_lock); diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index ef32e177f662..7ba7a118ff5c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -70,6 +70,30 @@ #include "mvm.h" #include "sta.h" +static void +iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, + u16 tid, u16 ssn) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_ba *ba_trig; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); + ba_trig = (void *)trig->data; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + return; + + if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "BAR sent to %pM, tid %d, ssn %d", + addr, tid, ssn); +} + /* * Sets most of the Tx cmd's fields */ @@ -101,12 +125,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, } else if (ieee80211_is_back_req(fc)) { struct ieee80211_bar *bar = (void *)skb->data; u16 control = le16_to_cpu(bar->control); + u16 ssn = le16_to_cpu(bar->start_seq_num); tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; tx_cmd->tid_tspec = (control & IEEE80211_BAR_CTRL_TID_INFO_MASK) >> IEEE80211_BAR_CTRL_TID_INFO_SHIFT; WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); + iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec, + ssn); } else { tx_cmd->tid_tspec = IWL_TID_NON_QOS; if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) @@ -144,8 +171,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, !is_multicast_ether_addr(ieee80211_get_DA(hdr))) tx_flags |= TX_CMD_FLG_PROT_REQUIRE; - if ((mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) && + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) && ieee80211_action_contains_tpc(skb)) tx_flags |= TX_CMD_FLG_WRITE_TX_POWER; diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index bc55a8b82db6..03f8e06dded7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -584,7 +584,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) struct iwl_error_event_table table; u32 base; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)) { + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) { iwl_mvm_dump_nic_error_log_old(mvm); return; } diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index b18569734922..2ed1e4d2774d 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -629,7 +629,18 @@ static int iwl_pci_resume(struct device *device) if (!trans->op_mode) return 0; - iwl_enable_rfkill_int(trans); + /* + * On suspend, ict is disabled, and the interrupt mask + * gets cleared. Reconfigure them both in case of d0i3 + * image. Otherwise, only enable rfkill interrupt (in + * order to keep track of the rfkill status) + */ + if (trans->wowlan_d0i3) { + iwl_pcie_reset_ict(trans); + iwl_enable_interrupts(trans); + } else { + iwl_enable_rfkill_int(trans); + } hw_rfkill = iwl_is_rfkill_set(trans); iwl_trans_pcie_rf_kill(trans, hw_rfkill); diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 01996c9d98a7..31f72a61cc3f 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -1,7 +1,7 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -44,6 +44,15 @@ #include "iwl-io.h" #include "iwl-op-mode.h" +/* + * RX related structures and functions + */ +#define RX_NUM_QUEUES 1 +#define RX_POST_REQ_ALLOC 2 +#define RX_CLAIM_REQ_ALLOC 8 +#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) +#define RX_LOW_WATERMARK 8 + struct iwl_host_cmd; /*This file includes the declaration that are internal to the @@ -77,29 +86,29 @@ struct isr_statistics { * struct iwl_rxq - Rx queue * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) - * @pool: - * @queue: * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free + * @used_count: Number of RBDs handed to the allocator to use for allocation * @write_actual: - * @rx_free: list of free SKBs for use - * @rx_used: List of Rx buffers with no SKB + * @rx_free: list of RBDs with allocated RB ready for use + * @rx_used: list of RBDs with no RB attached * @need_update: flag to indicate we need to update read/write index * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status * @lock: + * @pool: initial pool of iwl_rx_mem_buffer for the queue + * @queue: actual rx queue * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ struct iwl_rxq { __le32 *bd; dma_addr_t bd_dma; - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; - struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; u32 read; u32 write; u32 free_count; + u32 used_count; u32 write_actual; struct list_head rx_free; struct list_head rx_used; @@ -107,6 +116,32 @@ struct iwl_rxq { struct iwl_rb_status *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; +}; + +/** + * struct iwl_rb_allocator - Rx allocator + * @pool: initial pool of allocator + * @req_pending: number of requests the allocator has not processed yet + * @req_ready: number of requests honored and ready for claiming + * @rbd_allocated: RBDs with pages
allocated and ready to be handed to + * the queue. This is a list of &struct iwl_rx_mem_buffer + * @rbd_empty: RBDs with no page attached for allocator use. This is a list + * of &struct iwl_rx_mem_buffer + * @lock: protects the rbd_allocated and rbd_empty lists + * @alloc_wq: work queue for background calls + * @rx_alloc: work struct for background calls + */ +struct iwl_rb_allocator { + struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; + atomic_t req_pending; + atomic_t req_ready; + struct list_head rbd_allocated; + struct list_head rbd_empty; + spinlock_t lock; + struct workqueue_struct *alloc_wq; + struct work_struct rx_alloc; +}; struct iwl_dma_ptr { @@ -250,7 +285,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data - * @rx_replenish: work that will be called when buffers need to be allocated + * @rba: allocator for RX replenishing * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM @@ -273,7 +308,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) */ struct iwl_trans_pcie { struct iwl_rxq rxq; - struct work_struct rx_replenish; + struct iwl_rb_allocator rba; struct iwl_trans *trans; struct iwl_drv *drv; @@ -320,7 +355,7 @@ struct iwl_trans_pcie { /*protect hw register */ spinlock_t reg_lock; - bool cmd_in_flight; + bool cmd_hold_nic_awake; bool ref_cmd_in_flight; /* protect ref counter */ diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 7ff69c642103..a3fbaa0ef5e0 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -1,7 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -74,16 +74,29 @@ * resets the Rx queue buffers with new memory. * * The management in the driver is as follows: - * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When - * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled - * to replenish the iwl->rxq->rx_free. - * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the - * iwl->rxq is replenished and the READ INDEX is updated (updating the - * 'processed' and 'read' driver indexes as well) + * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. + * When the interrupt handler is called, the request is processed. + * The page is either stolen - transferred to the upper layer + * or reused - added immediately to the iwl->rxq->rx_free list. + * + When the page is stolen - the driver updates the matching queue's used + * count, detaches the RBD and transfers it to the queue used list. + * When there are two used RBDs - they are transferred to the allocator empty + * list. Work is then scheduled for the allocator to start allocating + * eight buffers. + * When there are another 6 used RBDs - they are transferred to the allocator + * empty list and the driver tries to claim the pre-allocated buffers and + * add them to iwl->rxq->rx_free. If it fails - it continues to claim them + * until ready.
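The batching described above (two used RBDs post a request, eight trigger a claim) is easier to see with concrete numbers. The small standalone program below (userspace, illustrative only, reusing the RX_POST_REQ_ALLOC and RX_CLAIM_REQ_ALLOC values added to internal.h above) simulates the used-RBD counter as pages get stolen:

    #include <stdio.h>

    #define RX_POST_REQ_ALLOC 2
    #define RX_CLAIM_REQ_ALLOC 8

    int main(void)
    {
            unsigned int used_count = 0;
            int page;

            for (page = 1; page <= 16; page++) {
                    used_count++;   /* one more page stolen by the stack */
                    if ((used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC)
                            printf("page %2d: post allocation request\n", page);
                    if (used_count >= RX_CLAIM_REQ_ALLOC) {
                            printf("page %2d: claim %d RBDs back\n",
                                   page, RX_CLAIM_REQ_ALLOC);
                            used_count -= RX_CLAIM_REQ_ALLOC;
                    }
            }
            return 0;
    }

Requests fire at pages 2 and 10, claims at pages 8 and 16, matching the two-plus-six flow the comment describes.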
+ * When there are 8+ buffers in the free list - either from allocation or from + * 8 reused unstolen pages - restock is called to update the FW and indexes. + * + In order to make sure the allocator always has RBDs to use for allocation + * the allocator has an initial pool of size num_queues*(8-2) - the + * maximum missing RBDs per allocation request (request posted with 2 + * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). + * The queues supply the recycling of the rest of the RBDs. * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' index is updated. - * + The Host/Firmware iwl->rxq is replenished at irq thread time from the - * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free, + * + If there are no allocated buffers in iwl->rxq->rx_free, * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. * If there were enough free buffers and RX_STALLED is set it is cleared. * @@ -92,18 +105,32 @@ * * iwl_rxq_alloc() Allocates rx_free * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls - * iwl_pcie_rxq_restock + * iwl_pcie_rxq_restock. + * Used only during initialization. * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx * queue, updates firmware pointers, and updates - * the WRITE index. If insufficient rx_free buffers - * are available, schedules iwl_pcie_rx_replenish + * the WRITE index. + * iwl_pcie_rx_allocator() Background work for allocating pages. * * -- enable interrupts -- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the * READ INDEX, detaching the SKB from the pool. * Moves the packet buffer from queue to rx_used. + * Posts and claims requests to the allocator. * Calls iwl_pcie_rxq_restock to refill any empty * slots. + * + * RBD life-cycle: + * + * Init: + * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue + * + * Regular Receive interrupt: + * Page Stolen: + * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> + * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue + * Page not Stolen: + * rxq.queue -> rxq.rx_free -> rxq.queue * ... * */ @@ -240,10 +267,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) rxq->free_count--; } spin_unlock(&rxq->lock); - /* If the pre-allocated buffer pool is dropping low, schedule to - * refill it */ - if (rxq->free_count <= RX_LOW_WATERMARK) - schedule_work(&trans_pcie->rx_replenish); /* If we've added more space for the firmware to place data, tell it. * Increment device's write pointer in multiples of 8. */ @@ -254,6 +277,44 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) } } +/* + * iwl_pcie_rx_alloc_page - allocates and returns a page. + * + */ +static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + struct page *page; + gfp_t gfp_mask = GFP_KERNEL; + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (trans_pcie->rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", + trans_pcie->rx_page_order); + /* Issue an error if the hardware has consumed more than half + * of its free buffer list and we don't have enough + * pre-allocated buffers.
+ */ + if (rxq->free_count <= RX_LOW_WATERMARK && + iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && + net_ratelimit()) + IWL_CRIT(trans, + "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n", + rxq->free_count); + return NULL; + } + return page; +} + /* * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD * @@ -263,13 +324,12 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly * allocated buffers. */ -static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) +static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rx_mem_buffer *rxb; struct page *page; - gfp_t gfp_mask = priority; while (1) { spin_lock(&rxq->lock); @@ -279,32 +339,10 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) } spin_unlock(&rxq->lock); - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - - if (trans_pcie->rx_page_order > 0) - gfp_mask |= __GFP_COMP; - /* Alloc a new receive buffer */ - page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); - if (!page) { - if (net_ratelimit()) - IWL_DEBUG_INFO(trans, "alloc_pages failed, " - "order: %d\n", - trans_pcie->rx_page_order); - - if ((rxq->free_count <= RX_LOW_WATERMARK) && - net_ratelimit()) - IWL_CRIT(trans, "Failed to alloc_pages with %s." - "Only %u free buffers remaining.\n", - priority == GFP_ATOMIC ? - "GFP_ATOMIC" : "GFP_KERNEL", - rxq->free_count); - /* We don't reschedule replenish work here -- we will - * call the restock method and if it still needs - * more buffers it will schedule replenish */ + page = iwl_pcie_rx_alloc_page(trans); + if (!page) return; - } spin_lock(&rxq->lock); @@ -355,7 +393,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) lockdep_assert_held(&rxq->lock); - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + for (i = 0; i < RX_QUEUE_SIZE; i++) { if (!rxq->pool[i].page) continue; dma_unmap_page(trans->dev, rxq->pool[i].page_dma, @@ -372,32 +410,144 @@ * When moving to rx_free an page is allocated for the slot. * * Also restock the Rx queue via iwl_pcie_rxq_restock. - * This is called as a scheduled work item (except for during initialization) + * This is called only during initialization */ -static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp) +static void iwl_pcie_rx_replenish(struct iwl_trans *trans) { - iwl_pcie_rxq_alloc_rbs(trans, gfp); + iwl_pcie_rxq_alloc_rbs(trans); iwl_pcie_rxq_restock(trans); } -static void iwl_pcie_rx_replenish_work(struct work_struct *data) +/* + * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues + * + * Allocates for each received request 8 pages + * Called as a scheduled work item.
+ */ +static void iwl_pcie_rx_allocator(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + + while (atomic_read(&rba->req_pending)) { + int i; + struct list_head local_empty; + struct list_head local_allocated; + + INIT_LIST_HEAD(&local_allocated); + spin_lock(&rba->lock); + /* swap out the entire rba->rbd_empty to a local list */ + list_replace_init(&rba->rbd_empty, &local_empty); + spin_unlock(&rba->lock); + + for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { + struct iwl_rx_mem_buffer *rxb; + struct page *page; + + /* List should never be empty - each reused RBD is + * returned to the list, and initial pool covers any + * possible gap between the time the page is allocated + * and the time the RBD is added. + */ + BUG_ON(list_empty(&local_empty)); + /* Get the first rxb from the rbd list */ + rxb = list_first_entry(&local_empty, + struct iwl_rx_mem_buffer, list); + BUG_ON(rxb->page); + + /* Alloc a new receive buffer */ + page = iwl_pcie_rx_alloc_page(trans); + if (!page) + continue; + rxb->page = page; + + /* Get physical address of the RB */ + rxb->page_dma = dma_map_page(trans->dev, page, 0, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + rxb->page = NULL; + __free_pages(page, trans_pcie->rx_page_order); + continue; + } + /* dma address must be no more than 36 bits */ + BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); + /* and also 256 byte aligned! */ + BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); + + /* move the allocated entry to the out list */ + list_move(&rxb->list, &local_allocated); + i++; + } + + spin_lock(&rba->lock); + /* add the allocated rbds to the allocator allocated list */ + list_splice_tail(&local_allocated, &rba->rbd_allocated); + /* add the unused rbds back to the allocator empty list */ + list_splice_tail(&local_empty, &rba->rbd_empty); + spin_unlock(&rba->lock); + + atomic_dec(&rba->req_pending); + atomic_inc(&rba->req_ready); + } +} + +/* + * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages + * + * Called by the queue when it has posted an allocation request and + * has freed 8 RBDs in order to restock itself.
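Condensing the pending/ready accounting from iwl_pcie_rx_allocator() above and the claim path that follows, the queue and the background worker hand requests to each other with two atomics. A sketch only, with the list locking omitted:

    /* Queue side, on the second used RBD of a batch: */
    atomic_inc(&rba->req_pending);
    queue_work(rba->alloc_wq, &rba->rx_alloc);

    /* Worker side, one pass per outstanding request: */
    while (atomic_read(&rba->req_pending)) {
            /* ... move RX_CLAIM_REQ_ALLOC freshly paged RBDs to rbd_allocated ... */
            atomic_dec(&rba->req_pending);
            atomic_inc(&rba->req_ready);
    }

    /* Queue side again, once eight RBDs are used: */
    if (atomic_dec_return(&rba->req_ready) < 0) {
            atomic_inc(&rba->req_ready);    /* no request completed yet, undo */
            return -ENOMEM;
    }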
+ */ +static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, + struct iwl_rx_mem_buffer + *out[RX_CLAIM_REQ_ALLOC]) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; + + if (atomic_dec_return(&rba->req_ready) < 0) { + atomic_inc(&rba->req_ready); + IWL_DEBUG_RX(trans, + "Allocation request not ready, pending requests = %d\n", + atomic_read(&rba->req_pending)); + return -ENOMEM; + } + + spin_lock(&rba->lock); + for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { + /* Get next free Rx buffer, remove it from free list */ + out[i] = list_first_entry(&rba->rbd_allocated, + struct iwl_rx_mem_buffer, list); + list_del(&out[i]->list); + } + spin_unlock(&rba->lock); + + return 0; +} + +static void iwl_pcie_rx_allocator_work(struct work_struct *data) { + struct iwl_rb_allocator *rba_p = + container_of(data, struct iwl_rb_allocator, rx_alloc); struct iwl_trans_pcie *trans_pcie = - container_of(data, struct iwl_trans_pcie, rx_replenish); + container_of(rba_p, struct iwl_trans_pcie, rba); - iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL); + iwl_pcie_rx_allocator(trans_pcie->trans); } static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; struct device *dev = trans->dev; memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); spin_lock_init(&rxq->lock); + spin_lock_init(&rba->lock); if (WARN_ON(rxq->bd || rxq->rb_stts)) return -EINVAL; @@ -487,15 +637,49 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) INIT_LIST_HEAD(&rxq->rx_free); INIT_LIST_HEAD(&rxq->rx_used); rxq->free_count = 0; + rxq->used_count = 0; - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + for (i = 0; i < RX_QUEUE_SIZE; i++) list_add(&rxq->pool[i].list, &rxq->rx_used); } +static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) +{ + int i; + + lockdep_assert_held(&rba->lock); + + INIT_LIST_HEAD(&rba->rbd_allocated); + INIT_LIST_HEAD(&rba->rbd_empty); + + for (i = 0; i < RX_POOL_SIZE; i++) + list_add(&rba->pool[i].list, &rba->rbd_empty); +} + +static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; + + lockdep_assert_held(&rba->lock); + + for (i = 0; i < RX_POOL_SIZE; i++) { + if (!rba->pool[i].page) + continue; + dma_unmap_page(trans->dev, rba->pool[i].page_dma, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); + rba->pool[i].page = NULL; + } +} + int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, err; if (!rxq->bd) { @@ -503,11 +687,21 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) if (err) return err; } + if (!rba->alloc_wq) + rba->alloc_wq = alloc_workqueue("rb_allocator", + WQ_HIGHPRI | WQ_UNBOUND, 1); + INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); + + spin_lock(&rba->lock); + atomic_set(&rba->req_pending, 0); + atomic_set(&rba->req_ready, 0); + /* free all first - we might be reconfigured for a different size */ + iwl_pcie_rx_free_rba(trans); + iwl_pcie_rx_init_rba(rba); + spin_unlock(&rba->lock); spin_lock(&rxq->lock); - INIT_WORK(&trans_pcie->rx_replenish, 
 iwl_pcie_rx_replenish_work); - /* free all first - we might be reconfigured for a different size */ iwl_pcie_rxq_free_rbs(trans); iwl_pcie_rx_init_rxb_lists(rxq); @@ -522,7 +716,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); spin_unlock(&rxq->lock); - iwl_pcie_rx_replenish(trans, GFP_KERNEL); + iwl_pcie_rx_replenish(trans); iwl_pcie_rx_hw_init(trans, rxq); @@ -537,6 +731,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; /*if rxq->bd is NULL, it means that nothing has been allocated, * exit now */ @@ -545,7 +740,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) return; } - cancel_work_sync(&trans_pcie->rx_replenish); + cancel_work_sync(&rba->rx_alloc); + if (rba->alloc_wq) { + destroy_workqueue(rba->alloc_wq); + rba->alloc_wq = NULL; + } + + spin_lock(&rba->lock); + iwl_pcie_rx_free_rba(trans); + spin_unlock(&rba->lock); spin_lock(&rxq->lock); iwl_pcie_rxq_free_rbs(trans); @@ -566,6 +769,43 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) rxq->rb_stts = NULL; } +/* + * iwl_pcie_rx_reuse_rbd - Recycle used RBDs + * + * Called when an RBD can be reused. The RBD is transferred to the allocator. + * When there are 2 empty RBDs - a request for allocation is posted + */ +static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, + struct iwl_rx_mem_buffer *rxb, + struct iwl_rxq *rxq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + + /* Count the used RBDs */ + rxq->used_count++; + + /* Move the RBD to the used list, will be moved to the allocator in + * batches before claiming or posting a request */ + list_add_tail(&rxb->list, &rxq->rx_used); + + /* If we have RX_POST_REQ_ALLOC new released rx buffers - + * issue a request for the allocator. Modulo RX_CLAIM_REQ_ALLOC is + * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC + * buffers but still need to post another request afterwards. + */ + if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { + /* Move the 2 RBDs to the allocator ownership.
+ * The allocator has another 6 from the pool for the request completion. */ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); + spin_unlock(&rba->lock); + + atomic_inc(&rba->req_pending); + queue_work(rba->alloc_wq, &rba->rx_alloc); + } +} + static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb) { @@ -688,13 +928,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, */ __free_pages(rxb->page, trans_pcie->rx_page_order); rxb->page = NULL; - list_add_tail(&rxb->list, &rxq->rx_used); + iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); } else { list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; } } else - list_add_tail(&rxb->list, &rxq->rx_used); + iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); } /* @@ -704,10 +944,7 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - u32 r, i; - u8 fill_rx = 0; - u32 count = 8; - int total_empty; + u32 r, i, j; restart: spin_lock(&rxq->lock); @@ -720,14 +957,6 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans) if (i == r) IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); - /* calculate total frames need to be restock after handling RX */ - total_empty = r - rxq->write_actual; - if (total_empty < 0) - total_empty += RX_QUEUE_SIZE; - - if (total_empty > (RX_QUEUE_SIZE / 2)) - fill_rx = 1; - while (i != r) { struct iwl_rx_mem_buffer *rxb; @@ -739,29 +968,48 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans) iwl_pcie_rx_handle_rb(trans, rxb); i = (i + 1) & RX_QUEUE_MASK; + + /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - + * try to claim the pre-allocated buffers from the allocator */ + if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { + struct iwl_rb_allocator *rba = &trans_pcie->rba; + struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; + + /* Add the remaining 6 empty RBDs for allocator use */ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); + spin_unlock(&rba->lock); + + /* If not ready - continue, will try to reclaim later.
+ * No need to reschedule work - allocator exits only on + * success */ + if (!iwl_pcie_rx_allocator_get(trans, out)) { + /* If success - then RX_CLAIM_REQ_ALLOC + * buffers were retrieved and should be added + * to free list */ + rxq->used_count -= RX_CLAIM_REQ_ALLOC; + for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { + list_add_tail(&out[j]->list, + &rxq->rx_free); + rxq->free_count++; + } } } + /* handle restock for two cases: + * - we just pulled buffers from the allocator + * - we have 8+ unstolen pages accumulated */ + if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { + rxq->read = i; + spin_unlock(&rxq->lock); + iwl_pcie_rxq_restock(trans); + goto restart; + } } /* Backtrack one entry */ rxq->read = i; spin_unlock(&rxq->lock); - if (fill_rx) - iwl_pcie_rx_replenish(trans, GFP_ATOMIC); - else - iwl_pcie_rxq_restock(trans); - if (trans_pcie->napi.poll) napi_gro_flush(&trans_pcie->napi, false); } @@ -775,6 +1023,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ if (trans->cfg->internal_wimax_coex && + !trans->cfg->apmg_not_supported && (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & APMS_CLK_VAL_MRB_FUNC_MODE) || (iwl_read_prph(trans, APMG_PS_CTRL_REG) & diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 4526336672c5..43ae658af6ec 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -182,6 +182,9 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) { + if (!trans->cfg->apmg_not_supported) + return; + if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VAUX, @@ -315,7 +318,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) * bits do not disable clocks. This preserves any hardware * bits already set by default in "CLK_CTRL_REG" after reset. 
*/ - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { + if (!trans->cfg->apmg_not_supported) { iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(20); @@ -515,8 +518,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans) spin_unlock(&trans_pcie->irq_lock); - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) - iwl_pcie_set_pwr(trans, false); + iwl_pcie_set_pwr(trans, false); iwl_op_mode_nic_config(trans->op_mode); @@ -973,12 +975,8 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, return ret; /* load to FW the binary sections of CPU2 */ - ret = iwl_pcie_load_cpu_sections_8000(trans, image, 2, - &first_ucode_section); - if (ret) - return ret; - - return 0; + return iwl_pcie_load_cpu_sections_8000(trans, image, 2, + &first_ucode_section); } static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, @@ -1067,9 +1065,11 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) iwl_pcie_rx_stop(trans); /* Power-down device's busmaster DMA clocks */ - iwl_write_prph(trans, APMG_CLK_DIS_REG, - APMG_CLK_VAL_DMA_CLK_RQT); - udelay(5); + if (!trans->cfg->apmg_not_supported) { + iwl_write_prph(trans, APMG_CLK_DIS_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + udelay(5); + } } /* Make sure (redundant) we've released our request to stay awake */ @@ -1362,14 +1362,13 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iounmap(trans_pcie->hw_base); pci_release_regions(trans_pcie->pci_dev); pci_disable_device(trans_pcie->pci_dev); - kmem_cache_destroy(trans->dev_cmd_pool); if (trans_pcie->napi.poll) netif_napi_del(&trans_pcie->napi); iwl_pcie_free_fw_monitor(trans); - kfree(trans); + iwl_trans_free(trans); } static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) @@ -1388,7 +1387,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, spin_lock_irqsave(&trans_pcie->reg_lock, *flags); - if (trans_pcie->cmd_in_flight) + if (trans_pcie->cmd_hold_nic_awake) goto out; /* this bit wakes up the NIC */ @@ -1454,7 +1453,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, */ __acquire(&trans_pcie->reg_lock); - if (trans_pcie->cmd_in_flight) + if (trans_pcie->cmd_hold_nic_awake) goto out; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, @@ -2462,18 +2461,13 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, u16 pci_cmd; int err; - trans = kzalloc(sizeof(struct iwl_trans) + - sizeof(struct iwl_trans_pcie), GFP_KERNEL); - if (!trans) { - err = -ENOMEM; - goto out; - } + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), + &pdev->dev, cfg, &trans_ops_pcie, 0); + if (!trans) + return ERR_PTR(-ENOMEM); trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - trans->ops = &trans_ops_pcie; - trans->cfg = cfg; - trans_lockdep_init(trans); trans_pcie->trans = trans; spin_lock_init(&trans_pcie->irq_lock); spin_lock_init(&trans_pcie->reg_lock); @@ -2597,25 +2591,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, /* Initialize the wait queue for commands */ init_waitqueue_head(&trans_pcie->wait_command_queue); - snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), - "iwl_cmd_pool:%s", dev_name(trans->dev)); - - trans->dev_cmd_headroom = 0; - trans->dev_cmd_pool = - kmem_cache_create(trans->dev_cmd_pool_name, - sizeof(struct iwl_device_cmd) - + trans->dev_cmd_headroom, - sizeof(void *), - SLAB_HWCACHE_ALIGN, - NULL); - - if (!trans->dev_cmd_pool) { - err = -ENOMEM; - goto out_pci_disable_msi; - } - if (iwl_pcie_alloc_ict(trans)) - goto 
out_free_cmd_pool; + goto out_pci_disable_msi; err = request_threaded_irq(pdev->irq, iwl_pcie_isr, iwl_pcie_irq_handler, @@ -2632,8 +2609,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, out_free_ict: iwl_pcie_free_ict(trans); -out_free_cmd_pool: - kmem_cache_destroy(trans->dev_cmd_pool); out_pci_disable_msi: pci_disable_msi(pdev); out_pci_release_regions: @@ -2641,7 +2616,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, out_pci_disable_device: pci_disable_device(pdev); out_no_pci: - kfree(trans); -out: + iwl_trans_free(trans); return ERR_PTR(err); } diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 06952aadfd7b..2b86c2135de3 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1039,22 +1039,16 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, iwl_trans_pcie_ref(trans); } - if (trans_pcie->cmd_in_flight) - return 0; - - trans_pcie->cmd_in_flight = true; - /* * wake up the NIC to make sure that the firmware will see the host * command - we will let the NIC sleep once all the host commands * returned. This needs to be done only on NICs that have * apmg_wake_up_wa set. */ - if (trans->cfg->base_params->apmg_wake_up_wa) { + if (trans->cfg->base_params->apmg_wake_up_wa && + !trans_pcie->cmd_hold_nic_awake) { __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) - udelay(2); ret = iwl_poll_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, @@ -1064,10 +1058,10 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, if (ret < 0) { __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - trans_pcie->cmd_in_flight = false; IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); return -EIO; } + trans_pcie->cmd_hold_nic_awake = true; } return 0; @@ -1085,15 +1079,14 @@ static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) iwl_trans_pcie_unref(trans); } - if (WARN_ON(!trans_pcie->cmd_in_flight)) - return 0; - - trans_pcie->cmd_in_flight = false; + if (trans->cfg->base_params->apmg_wake_up_wa) { + if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) + return 0; - if (trans->cfg->base_params->apmg_wake_up_wa) + trans_pcie->cmd_hold_nic_awake = false; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + } return 0; } diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 1a4d558022d8..8317afd065b4 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -835,14 +835,13 @@ static int lbs_cfg_scan(struct wiphy *wiphy, * Events */ -void lbs_send_disconnect_notification(struct lbs_private *priv) +void lbs_send_disconnect_notification(struct lbs_private *priv, + bool locally_generated) { lbs_deb_enter(LBS_DEB_CFG80211); - cfg80211_disconnected(priv->dev, - 0, - NULL, 0, - GFP_KERNEL); + cfg80211_disconnected(priv->dev, 0, NULL, 0, locally_generated, + GFP_KERNEL); lbs_deb_leave(LBS_DEB_CFG80211); } @@ -1458,7 +1457,7 @@ int lbs_disconnect(struct lbs_private *priv, u16 reason) cfg80211_disconnected(priv->dev, reason, - NULL, 0, + NULL, 0, true, GFP_KERNEL); priv->connect_status = LBS_DISCONNECTED; @@ -2031,7 +2030,7 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev) ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd); /* TODO: consider doing 
this at MACREG_INT_CODE_ADHOC_BCN_LOST time */ - lbs_mac_event_disconnected(priv); + lbs_mac_event_disconnected(priv, true); lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h index 10995f59fe34..acccc2922401 100644 --- a/drivers/net/wireless/libertas/cfg.h +++ b/drivers/net/wireless/libertas/cfg.h @@ -10,7 +10,8 @@ struct wireless_dev *lbs_cfg_alloc(struct device *dev); int lbs_cfg_register(struct lbs_private *priv); void lbs_cfg_free(struct lbs_private *priv); -void lbs_send_disconnect_notification(struct lbs_private *priv); +void lbs_send_disconnect_notification(struct lbs_private *priv, + bool locally_generated); void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event); void lbs_scan_done(struct lbs_private *priv); diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h index 4279e8ab95f2..0c5444b02c64 100644 --- a/drivers/net/wireless/libertas/cmd.h +++ b/drivers/net/wireless/libertas/cmd.h @@ -68,7 +68,8 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len); /* From cmdresp.c */ -void lbs_mac_event_disconnected(struct lbs_private *priv); +void lbs_mac_event_disconnected(struct lbs_private *priv, + bool locally_generated); diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c index 65f18f1e869c..e5442e8956f7 100644 --- a/drivers/net/wireless/libertas/cmdresp.c +++ b/drivers/net/wireless/libertas/cmdresp.c @@ -19,10 +19,13 @@ * reset link state etc. * * @priv: A pointer to struct lbs_private structure + * @locally_generated: indicates disconnect was requested locally + * (usually by userspace) * * returns: n/a */ -void lbs_mac_event_disconnected(struct lbs_private *priv) +void lbs_mac_event_disconnected(struct lbs_private *priv, + bool locally_generated) { if (priv->connect_status != LBS_CONNECTED) return; @@ -36,7 +39,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv) msleep_interruptible(1000); if (priv->wdev->iftype == NL80211_IFTYPE_STATION) - lbs_send_disconnect_notification(priv); + lbs_send_disconnect_notification(priv, locally_generated); /* report disconnect to upper layer */ netif_stop_queue(priv->dev); @@ -229,17 +232,17 @@ int lbs_process_event(struct lbs_private *priv, u32 event) case MACREG_INT_CODE_DEAUTHENTICATED: lbs_deb_cmd("EVENT: deauthenticated\n"); - lbs_mac_event_disconnected(priv); + lbs_mac_event_disconnected(priv, false); break; case MACREG_INT_CODE_DISASSOCIATED: lbs_deb_cmd("EVENT: disassociated\n"); - lbs_mac_event_disconnected(priv); + lbs_mac_event_disconnected(priv, false); break; case MACREG_INT_CODE_LINK_LOST_NO_SCAN: lbs_deb_cmd("EVENT: link lost\n"); - lbs_mac_event_disconnected(priv); + lbs_mac_event_disconnected(priv, true); break; case MACREG_INT_CODE_PS_SLEEP: diff --git a/drivers/net/wireless/mediatek/Kconfig b/drivers/net/wireless/mediatek/Kconfig new file mode 100644 index 000000000000..cba300c6b5da --- /dev/null +++ b/drivers/net/wireless/mediatek/Kconfig @@ -0,0 +1,10 @@ +menuconfig WL_MEDIATEK + bool "Mediatek Wireless LAN support" + ---help--- + Enable community drivers for MediaTek WiFi devices. + Those drivers make use of the Linux mac80211 stack. 
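As a usage note, building the driver introduced here as a module comes down to a configuration fragment like the following (the symbol names are the ones these Kconfig entries define; the MAC80211 and USB dependencies named in the Kconfig must of course be enabled as well):

    CONFIG_WL_MEDIATEK=y
    CONFIG_MT7601U=m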
+ + +if WL_MEDIATEK +source "drivers/net/wireless/mediatek/mt7601u/Kconfig" +endif # WL_MEDIATEK diff --git a/drivers/net/wireless/mediatek/Makefile b/drivers/net/wireless/mediatek/Makefile new file mode 100644 index 000000000000..9d5f182fd7fd --- /dev/null +++ b/drivers/net/wireless/mediatek/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_MT7601U) += mt7601u/ diff --git a/drivers/net/wireless/mediatek/mt7601u/Kconfig b/drivers/net/wireless/mediatek/mt7601u/Kconfig new file mode 100644 index 000000000000..f46bed92796b --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/Kconfig @@ -0,0 +1,6 @@ +config MT7601U + tristate "MediaTek MT7601U (USB) support" + depends on MAC80211 + depends on USB + ---help--- + This adds support for MT7601U-based wireless USB dongles. diff --git a/drivers/net/wireless/mediatek/mt7601u/Makefile b/drivers/net/wireless/mediatek/mt7601u/Makefile new file mode 100644 index 000000000000..ea9ed8a5db4d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/Makefile @@ -0,0 +1,9 @@ +ccflags-y += -D__CHECK_ENDIAN__ + +obj-$(CONFIG_MT7601U) += mt7601u.o + +mt7601u-objs = \ + usb.o init.o main.o mcu.o trace.o dma.o core.o eeprom.o phy.o \ + mac.o util.o debugfs.o tx.o + +CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/mediatek/mt7601u/core.c b/drivers/net/wireless/mediatek/mt7601u/core.c new file mode 100644 index 000000000000..0aabd790f985 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/core.c @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "mt7601u.h" + +int mt7601u_wait_asic_ready(struct mt7601u_dev *dev) +{ + int i = 100; + u32 val; + + do { + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return -EIO; + + val = mt7601u_rr(dev, MT_MAC_CSR0); + if (val && ~val) + return 0; + + udelay(10); + } while (i--); + + return -EIO; +} + +bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val, + int timeout) +{ + u32 cur; + + timeout /= 10; + do { + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return false; + + cur = mt7601u_rr(dev, offset) & mask; + if (cur == val) + return true; + + udelay(10); + } while (timeout-- > 0); + + dev_err(dev->dev, "Error: Time out with reg %08x\n", offset); + + return false; +} + +bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val, + int timeout) +{ + u32 cur; + + timeout /= 10; + do { + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return false; + + cur = mt7601u_rr(dev, offset) & mask; + if (cur == val) + return true; + + msleep(10); + } while (timeout-- > 0); + + dev_err(dev->dev, "Error: Time out with reg %08x\n", offset); + + return false; +} diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c new file mode 100644 index 000000000000..fc008475a03b --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
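A note on the two pollers defined in core.c above: both divide their timeout argument into steps of 10, so mt76_poll() takes its timeout in microseconds and mt76_poll_msec() in milliseconds. A caller would look roughly like this (the register and bit names here are invented for illustration and are not taken from the driver):

    /* Hypothetical example: wait up to 1 ms for a busy bit to clear. */
    if (!mt76_poll(dev, MT_EXAMPLE_STATUS, MT_EXAMPLE_BUSY, 0, 1000))
            return -ETIMEDOUT;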
+ */ + +#include <linux/debugfs.h> + +#include "mt7601u.h" +#include "eeprom.h" + +static int +mt76_reg_set(void *data, u64 val) +{ + struct mt7601u_dev *dev = data; + + mt76_wr(dev, dev->debugfs_reg, val); + return 0; +} + +static int +mt76_reg_get(void *data, u64 *val) +{ + struct mt7601u_dev *dev = data; + + *val = mt76_rr(dev, dev->debugfs_reg); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n"); + +static int +mt7601u_ampdu_stat_read(struct seq_file *file, void *data) +{ + struct mt7601u_dev *dev = file->private; + int i, j; + +#define stat_printf(grp, off, name) \ + seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off]) + + stat_printf(rx_stat, 0, rx_crc_err); + stat_printf(rx_stat, 1, rx_phy_err); + stat_printf(rx_stat, 2, rx_false_cca); + stat_printf(rx_stat, 3, rx_plcp_err); + stat_printf(rx_stat, 4, rx_fifo_overflow); + stat_printf(rx_stat, 5, rx_duplicate); + + stat_printf(tx_stat, 0, tx_fail_cnt); + stat_printf(tx_stat, 1, tx_bcn_cnt); + stat_printf(tx_stat, 2, tx_success); + stat_printf(tx_stat, 3, tx_retransmit); + stat_printf(tx_stat, 4, tx_zero_len); + stat_printf(tx_stat, 5, tx_underflow); + + stat_printf(aggr_stat, 0, non_aggr_tx); + stat_printf(aggr_stat, 1, aggr_tx); + + stat_printf(zero_len_del, 0, tx_zero_len_del); + stat_printf(zero_len_del, 1, rx_zero_len_del); +#undef stat_printf + + seq_puts(file, "Aggregations stats:\n"); + for (i = 0; i < 4; i++) { + for (j = 0; j < 8; j++) + seq_printf(file, "%08llx ", + dev->stats.aggr_n[i * 8 + j]); + seq_putc(file, '\n'); + } + + seq_printf(file, "recent average AMPDU len: %d\n", + atomic_read(&dev->avg_ampdu_len)); + + return 0; +} + +static int +mt7601u_ampdu_stat_open(struct inode *inode, struct file *f) +{ + return single_open(f, mt7601u_ampdu_stat_read, inode->i_private); +} + +static const struct file_operations fops_ampdu_stat = { + .open = mt7601u_ampdu_stat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int +mt7601u_eeprom_param_read(struct seq_file *file, void *data) +{ + struct mt7601u_dev *dev = file->private; + struct mt7601u_rate_power *rp = &dev->ee->power_rate_table; + struct tssi_data *td = &dev->ee->tssi_data; + int i; + + seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off); + seq_printf(file, "RSSI offset: %hhx %hhx\n", + dev->ee->rssi_offset[0], dev->ee->rssi_offset[1]); + seq_printf(file, "Reference temp: %hhx\n", dev->ee->ref_temp); + seq_printf(file, "LNA gain: %hhx\n", dev->ee->lna_gain); + seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start, + dev->ee->reg.start + dev->ee->reg.num - 1); + + seq_puts(file, "Per rate power:\n"); + for (i = 0; i < 2; i++) + seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n", + rp->cck[i].raw, rp->cck[i].bw20, rp->cck[i].bw40); + for (i = 0; i < 4; i++) + seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n", + rp->ofdm[i].raw, rp->ofdm[i].bw20, rp->ofdm[i].bw40); + for (i = 0; i < 4; i++) + seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n", + rp->ht[i].raw, rp->ht[i].bw20, rp->ht[i].bw40); + + seq_puts(file, "Per channel power:\n"); + for (i = 0; i < 7; i++) + seq_printf(file, "\t tx_power ch%u:%02hhx ch%u:%02hhx\n", + i * 2 + 1, dev->ee->chan_pwr[i * 2], + i * 2 + 2, dev->ee->chan_pwr[i * 2 + 1]); + + if (!dev->ee->tssi_enabled) + return 0; + + seq_puts(file, "TSSI:\n"); + seq_printf(file, "\t slope:%02hhx\n", td->slope); + seq_printf(file, "\t offset=%02hhx %02hhx %02hhx\n", + td->offset[0], td->offset[1], td->offset[2]); + seq_printf(file, "\t
delta_off:%08x\n", td->tx0_delta_offset); + + return 0; +} + +static int +mt7601u_eeprom_param_open(struct inode *inode, struct file *f) +{ + return single_open(f, mt7601u_eeprom_param_read, inode->i_private); +} + +static const struct file_operations fops_eeprom_param = { + .open = mt7601u_eeprom_param_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void mt7601u_init_debugfs(struct mt7601u_dev *dev) +{ + struct dentry *dir; + + dir = debugfs_create_dir("mt7601u", dev->hw->wiphy->debugfsdir); + if (!dir) + return; + + debugfs_create_u8("temperature", S_IRUSR, dir, &dev->raw_temp); + debugfs_create_u32("temp_mode", S_IRUSR, dir, &dev->temp_mode); + + debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg); + debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev, + &fops_regval); + debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat); + debugfs_create_file("eeprom_param", S_IRUSR, dir, dev, + &fops_eeprom_param); +} diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c new file mode 100644 index 000000000000..9c9e1288644b --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c @@ -0,0 +1,533 @@ +/* + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "mt7601u.h" +#include "dma.h" +#include "usb.h" +#include "trace.h" + +static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev, + struct mt7601u_dma_buf_rx *e, gfp_t gfp); + +static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len) +{ + const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data; + unsigned int hdrlen; + + if (unlikely(len < 10)) + return 0; + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (unlikely(hdrlen > len)) + return 0; + return hdrlen; +} + +static struct sk_buff * +mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, + u8 *data, u32 seg_len) +{ + struct sk_buff *skb; + u32 true_len; + + if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) + seg_len -= 2; + + skb = alloc_skb(seg_len, GFP_ATOMIC); + if (!skb) + return NULL; + + if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) { + int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len); + + memcpy(skb_put(skb, hdr_len), data, hdr_len); + data += hdr_len + 2; + seg_len -= hdr_len; + } + + memcpy(skb_put(skb, seg_len), data, seg_len); + + true_len = mt76_mac_process_rx(dev, skb, skb->data, rxwi); + skb_trim(skb, true_len); + + return skb; +} + +static struct sk_buff * +mt7601u_rx_skb_from_seg_paged(struct mt7601u_dev *dev, + struct mt7601u_rxwi *rxwi, void *data, + u32 seg_len, u32 truesize, struct page *p) +{ + unsigned int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len); + unsigned int true_len, copy, frag; + struct sk_buff *skb; + + skb = alloc_skb(128, GFP_ATOMIC); + if (!skb) + return NULL; + + true_len = mt76_mac_process_rx(dev, skb, data, rxwi); + + if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) { + memcpy(skb_put(skb, hdr_len), data, hdr_len); + data += hdr_len + 2; + true_len -= hdr_len; + hdr_len = 0; + } + + copy = (true_len <= 
skb_tailroom(skb)) ? true_len : hdr_len + 8; + frag = true_len - copy; + + memcpy(skb_put(skb, copy), data, copy); + data += copy; + + if (frag) { + skb_add_rx_frag(skb, 0, p, data - page_address(p), + frag, truesize); + get_page(p); + } + + return skb; +} + +static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data, + u32 seg_len, struct page *p, bool paged) +{ + struct sk_buff *skb; + struct mt7601u_rxwi *rxwi; + u32 fce_info, truesize = seg_len; + + /* DMA_INFO field at the beginning of the segment contains only some of + * the information, we need to read the FCE descriptor from the end. + */ + fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN); + seg_len -= MT_FCE_INFO_LEN; + + data += MT_DMA_HDR_LEN; + seg_len -= MT_DMA_HDR_LEN; + + rxwi = (struct mt7601u_rxwi *) data; + data += sizeof(struct mt7601u_rxwi); + seg_len -= sizeof(struct mt7601u_rxwi); + + if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2])) + dev_err_once(dev->dev, "Error: RXWI zero fields are set\n"); + if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info))) + dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n"); + + trace_mt_rx(dev, rxwi, fce_info); + + if (paged) + skb = mt7601u_rx_skb_from_seg_paged(dev, rxwi, data, seg_len, + truesize, p); + else + skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len); + if (!skb) + return; + + ieee80211_rx_ni(dev->hw, skb); +} + +static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len) +{ + u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN + + sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN; + u16 dma_len = get_unaligned_le16(data); + + if (data_len < min_seg_len || + WARN_ON(!dma_len) || + WARN_ON(dma_len + MT_DMA_HDRS > data_len) || + WARN_ON(dma_len & 0x3)) + return 0; + + return MT_DMA_HDRS + dma_len; +} + +static void +mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e) +{ + u32 seg_len, data_len = e->urb->actual_length; + u8 *data = page_address(e->p); + struct page *new_p = NULL; + bool paged = true; + int cnt = 0; + + if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state)) + return; + + /* Copy if there is very little data in the buffer. 
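+	 * Small URBs are copied into freshly allocated skbs so the page can
+	 * be resubmitted to USB right away; larger ones donate the page to
+	 * the stack via skb_add_rx_frag(), so a replacement page has to be
+	 * allocated first.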
*/ + if (data_len < 512) { + paged = false; + } else { + new_p = dev_alloc_pages(MT_RX_ORDER); + if (!new_p) + paged = false; + } + + while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) { + mt7601u_rx_process_seg(dev, data, seg_len, e->p, paged); + + data_len -= seg_len; + data += seg_len; + cnt++; + } + + if (cnt > 1) + trace_mt_rx_dma_aggr(dev, cnt, paged); + + if (paged) { + /* we have one extra ref from the allocator */ + __free_pages(e->p, MT_RX_ORDER); + + e->p = new_p; + } +} + +static struct mt7601u_dma_buf_rx * +mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev) +{ + struct mt7601u_rx_queue *q = &dev->rx_q; + struct mt7601u_dma_buf_rx *buf = NULL; + unsigned long flags; + + spin_lock_irqsave(&dev->rx_lock, flags); + + if (!q->pending) + goto out; + + buf = &q->e[q->start]; + q->pending--; + q->start = (q->start + 1) % q->entries; +out: + spin_unlock_irqrestore(&dev->rx_lock, flags); + + return buf; +} + +static void mt7601u_complete_rx(struct urb *urb) +{ + struct mt7601u_dev *dev = urb->context; + struct mt7601u_rx_queue *q = &dev->rx_q; + unsigned long flags; + + spin_lock_irqsave(&dev->rx_lock, flags); + + if (mt7601u_urb_has_error(urb)) + dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status); + if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch")) + goto out; + + q->end = (q->end + 1) % q->entries; + q->pending++; + tasklet_schedule(&dev->rx_tasklet); +out: + spin_unlock_irqrestore(&dev->rx_lock, flags); +} + +static void mt7601u_rx_tasklet(unsigned long data) +{ + struct mt7601u_dev *dev = (struct mt7601u_dev *) data; + struct mt7601u_dma_buf_rx *e; + + while ((e = mt7601u_rx_get_pending_entry(dev))) { + if (e->urb->status) + continue; + + mt7601u_rx_process_entry(dev, e); + mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC); + } +} + +static void mt7601u_complete_tx(struct urb *urb) +{ + struct mt7601u_tx_queue *q = urb->context; + struct mt7601u_dev *dev = q->dev; + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&dev->tx_lock, flags); + + if (mt7601u_urb_has_error(urb)) + dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status); + if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch")) + goto out; + + skb = q->e[q->start].skb; + trace_mt_tx_dma_done(dev, skb); + + mt7601u_tx_status(dev, skb); + + if (q->used == q->entries - q->entries / 8) + ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb)); + + q->start = (q->start + 1) % q->entries; + q->used--; + + if (urb->status) + goto out; + + set_bit(MT7601U_STATE_MORE_STATS, &dev->state); + if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state)) + queue_delayed_work(dev->stat_wq, &dev->stat_work, + msecs_to_jiffies(10)); +out: + spin_unlock_irqrestore(&dev->tx_lock, flags); +} + +static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, + struct sk_buff *skb, u8 ep) +{ + struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); + unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]); + struct mt7601u_dma_buf_tx *e; + struct mt7601u_tx_queue *q = &dev->tx_q[ep]; + unsigned long flags; + int ret; + + spin_lock_irqsave(&dev->tx_lock, flags); + + if (WARN_ON(q->entries <= q->used)) { + ret = -ENOSPC; + goto out; + } + + e = &q->e[q->end]; + e->skb = skb; + usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len, + mt7601u_complete_tx, q); + ret = usb_submit_urb(e->urb, GFP_ATOMIC); + if (ret) { + /* Special-handle ENODEV from TX urb submission because it will + * often be the first ENODEV we see after device is removed. 
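+		 * On unplug this submission path usually fails first, so the
+		 * REMOVED bit set here is what lets the polling helpers in
+		 * util.c bail out instead of spinning on a dead device.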
+ */ + if (ret == -ENODEV) + set_bit(MT7601U_STATE_REMOVED, &dev->state); + else + dev_err(dev->dev, "Error: TX urb submit failed:%d\n", + ret); + goto out; + } + + q->end = (q->end + 1) % q->entries; + q->used++; + + if (q->used >= q->entries) + ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); +out: + spin_unlock_irqrestore(&dev->tx_lock, flags); + + return ret; +} + +/* Map hardware Q to USB endpoint number */ +static u8 q2ep(u8 qid) +{ + /* TODO: take management packets to queue 5 */ + return qid + 1; +} + +/* Map USB endpoint number to Q id in the DMA engine */ +static enum mt76_qsel ep2dmaq(u8 ep) +{ + if (ep == 5) + return MT_QSEL_MGMT; + return MT_QSEL_EDCA; +} + +int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb, + struct mt76_wcid *wcid, int hw_q) +{ + u8 ep = q2ep(hw_q); + u32 dma_flags; + int ret; + + dma_flags = MT_TXD_PKT_INFO_80211; + if (wcid->hw_key_idx == 0xff) + dma_flags |= MT_TXD_PKT_INFO_WIV; + + ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags); + if (ret) + return ret; + + ret = mt7601u_dma_submit_tx(dev, skb, ep); + if (ret) { + ieee80211_free_txskb(dev->hw, skb); + return ret; + } + + return 0; +} + +static void mt7601u_kill_rx(struct mt7601u_dev *dev) +{ + int i; + unsigned long flags; + + spin_lock_irqsave(&dev->rx_lock, flags); + + for (i = 0; i < dev->rx_q.entries; i++) { + int next = dev->rx_q.end; + + spin_unlock_irqrestore(&dev->rx_lock, flags); + usb_poison_urb(dev->rx_q.e[next].urb); + spin_lock_irqsave(&dev->rx_lock, flags); + } + + spin_unlock_irqrestore(&dev->rx_lock, flags); +} + +static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev, + struct mt7601u_dma_buf_rx *e, gfp_t gfp) +{ + struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); + u8 *buf = page_address(e->p); + unsigned pipe; + int ret; + + pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]); + + usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE, + mt7601u_complete_rx, dev); + + trace_mt_submit_urb(dev, e->urb); + ret = usb_submit_urb(e->urb, gfp); + if (ret) + dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret); + + return ret; +} + +static int mt7601u_submit_rx(struct mt7601u_dev *dev) +{ + int i, ret; + + for (i = 0; i < dev->rx_q.entries; i++) { + ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL); + if (ret) + return ret; + } + + return 0; +} + +static void mt7601u_free_rx(struct mt7601u_dev *dev) +{ + int i; + + for (i = 0; i < dev->rx_q.entries; i++) { + __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER); + usb_free_urb(dev->rx_q.e[i].urb); + } +} + +static int mt7601u_alloc_rx(struct mt7601u_dev *dev) +{ + int i; + + memset(&dev->rx_q, 0, sizeof(dev->rx_q)); + dev->rx_q.dev = dev; + dev->rx_q.entries = N_RX_ENTRIES; + + for (i = 0; i < N_RX_ENTRIES; i++) { + dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL); + dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER); + + if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p) + return -ENOMEM; + } + + return 0; +} + +static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q) +{ + int i; + + WARN_ON(q->used); + + for (i = 0; i < q->entries; i++) { + usb_poison_urb(q->e[i].urb); + usb_free_urb(q->e[i].urb); + } +} + +static void mt7601u_free_tx(struct mt7601u_dev *dev) +{ + int i; + + for (i = 0; i < __MT_EP_OUT_MAX; i++) + mt7601u_free_tx_queue(&dev->tx_q[i]); +} + +static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev, + struct mt7601u_tx_queue *q) +{ + int i; + + q->dev = dev; + q->entries = N_TX_ENTRIES; + + for (i = 0; i < N_TX_ENTRIES; i++) { + q->e[i].urb = 
usb_alloc_urb(0, GFP_KERNEL);
+		if (!q->e[i].urb)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
+{
+	int i;
+
+	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
+				 sizeof(*dev->tx_q), GFP_KERNEL);
+
+	for (i = 0; i < __MT_EP_OUT_MAX; i++)
+		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
+			return -ENOMEM;
+
+	return 0;
+}
+
+int mt7601u_dma_init(struct mt7601u_dev *dev)
+{
+	int ret = -ENOMEM;
+
+	tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
+
+	ret = mt7601u_alloc_tx(dev);
+	if (ret)
+		goto err;
+	ret = mt7601u_alloc_rx(dev);
+	if (ret)
+		goto err;
+
+	ret = mt7601u_submit_rx(dev);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	mt7601u_dma_cleanup(dev);
+	return ret;
+}
+
+void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
+{
+	mt7601u_kill_rx(dev);
+
+	tasklet_kill(&dev->rx_tasklet);
+
+	mt7601u_free_rx(dev);
+	mt7601u_free_tx(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.h b/drivers/net/wireless/mediatek/mt7601u/dma.h
new file mode 100644
index 000000000000..978e8a90b87f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau
+ * Copyright (C) 2015 Jakub Kicinski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_DMA_H
+#define __MT7601U_DMA_H
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+
+#include "util.h"
+
+#define MT_DMA_HDR_LEN			4
+#define MT_RX_INFO_LEN			4
+#define MT_FCE_INFO_LEN			4
+#define MT_DMA_HDRS			(MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
+
+/* Common Tx DMA descriptor fields */
+#define MT_TXD_INFO_LEN			GENMASK(15, 0)
+#define MT_TXD_INFO_D_PORT		GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE		GENMASK(31, 30)
+
+enum mt76_msg_port {
+	WLAN_PORT,
+	CPU_RX_PORT,
+	CPU_TX_PORT,
+	HOST_PORT,
+	VIRTUAL_CPU_RX_PORT,
+	VIRTUAL_CPU_TX_PORT,
+	DISCARD,
+};
+
+enum mt76_info_type {
+	DMA_PACKET,
+	DMA_COMMAND,
+};
+
+/* Tx DMA packet specific flags */
+#define MT_TXD_PKT_INFO_NEXT_VLD	BIT(16)
+#define MT_TXD_PKT_INFO_TX_BURST	BIT(17)
+#define MT_TXD_PKT_INFO_80211		BIT(19)
+#define MT_TXD_PKT_INFO_TSO		BIT(20)
+#define MT_TXD_PKT_INFO_CSO		BIT(21)
+#define MT_TXD_PKT_INFO_WIV		BIT(24)
+#define MT_TXD_PKT_INFO_QSEL		GENMASK(26, 25)
+
+enum mt76_qsel {
+	MT_QSEL_MGMT,
+	MT_QSEL_HCCA,
+	MT_QSEL_EDCA,
+	MT_QSEL_EDCA_2,
+};
+
+/* Tx DMA MCU command specific flags */
+#define MT_TXD_CMD_INFO_SEQ		GENMASK(19, 16)
+#define MT_TXD_CMD_INFO_TYPE		GENMASK(26, 20)
+
+static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
+				       enum mt76_msg_port d_port,
+				       enum mt76_info_type type, u32 flags)
+{
+	u32 info;
+
+	/* Buffer layout:
+	 *	|   4B   | xfer len |      pad       |  4B  |
+	 *	| TXINFO | pkt/cmd  | zero pad to 4B | zero |
+	 *
+	 * length field of TXINFO should be set to 'xfer len'.
+	 */
+
+	info = flags |
+		MT76_SET(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+		MT76_SET(MT_TXD_INFO_D_PORT, d_port) |
+		MT76_SET(MT_TXD_INFO_TYPE, type);
+
+	put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+	return skb_put_padto(skb, round_up(skb->len, 4) + 4);
+}
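+
+/* Worked example of the layout above (illustrative numbers only): for a
+ * 61-byte command the TXINFO LEN field is round_up(61, 4) = 64; pushing
+ * the 4-byte TXINFO makes the skb 65 bytes, and skb_put_padto() then
+ * grows it to round_up(65, 4) + 4 = 72, i.e. 3 bytes of zero padding
+ * plus the trailing 4 zero bytes from the diagram.
+ */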
+
+static inline int
+mt7601u_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
+{
+	flags |= MT76_SET(MT_TXD_PKT_INFO_QSEL, qsel);
+	return mt7601u_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
+}
+
+/* Common Rx DMA descriptor fields */
+#define MT_RXD_INFO_LEN			GENMASK(13, 0)
+#define MT_RXD_INFO_PCIE_INTR		BIT(24)
+#define MT_RXD_INFO_QSEL		GENMASK(26, 25)
+#define MT_RXD_INFO_PORT		GENMASK(29, 27)
+#define MT_RXD_INFO_TYPE		GENMASK(31, 30)
+
+/* Rx DMA packet specific flags */
+#define MT_RXD_PKT_INFO_UDP_ERR		BIT(16)
+#define MT_RXD_PKT_INFO_TCP_ERR		BIT(17)
+#define MT_RXD_PKT_INFO_IP_ERR		BIT(18)
+#define MT_RXD_PKT_INFO_PKT_80211	BIT(19)
+#define MT_RXD_PKT_INFO_L3L4_DONE	BIT(20)
+#define MT_RXD_PKT_INFO_MAC_LEN		GENMASK(23, 21)
+
+/* Rx DMA MCU command specific flags */
+#define MT_RXD_CMD_INFO_SELF_GEN	BIT(15)
+#define MT_RXD_CMD_INFO_CMD_SEQ		GENMASK(19, 16)
+#define MT_RXD_CMD_INFO_EVT_TYPE	GENMASK(23, 20)
+
+enum mt76_evt_type {
+	CMD_DONE,
+	CMD_ERROR,
+	CMD_RETRY,
+	EVENT_PWR_RSP,
+	EVENT_WOW_RSP,
+	EVENT_CARRIER_DETECT_RSP,
+	EVENT_DFS_DETECT_RSP,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
new file mode 100644
index 000000000000..ce3837f270f0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau
+ * Copyright (C) 2015 Jakub Kicinski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "mt7601u.h"
+#include "eeprom.h"
+
+static bool
+field_valid(u8 val)
+{
+	return val != 0xff;
+}
+
+static s8
+field_validate(u8 val)
+{
+	if (!field_valid(val))
+		return 0;
+
+	return val;
+}
+
+static int
+mt7601u_efuse_read(struct mt7601u_dev *dev, u16 addr, u8 *data,
+		   enum mt7601u_eeprom_access_modes mode)
+{
+	u32 val;
+	int i;
+
+	val = mt76_rr(dev, MT_EFUSE_CTRL);
+	val &= ~(MT_EFUSE_CTRL_AIN |
+		 MT_EFUSE_CTRL_MODE);
+	val |= MT76_SET(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+	       MT76_SET(MT_EFUSE_CTRL_MODE, mode) |
+	       MT_EFUSE_CTRL_KICK;
+	mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+	if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+		return -ETIMEDOUT;
+
+	val = mt76_rr(dev, MT_EFUSE_CTRL);
+	if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+		/* Parts of eeprom not in the usage map (0x80-0xc0,0xf0)
+		 * will not return valid data but it's ok.
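+		 * Callers simply see 0xff bytes for such addresses, which
+		 * field_valid() above treats as not-programmed.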
+ */ + memset(data, 0xff, 16); + return 0; + } + + for (i = 0; i < 4; i++) { + val = mt76_rr(dev, MT_EFUSE_DATA(i)); + put_unaligned_le32(val, data + 4 * i); + } + + return 0; +} + +static int +mt7601u_efuse_physical_size_check(struct mt7601u_dev *dev) +{ + const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16); + u8 data[map_reads * 16]; + int ret, i; + u32 start = 0, end = 0, cnt_free; + + for (i = 0; i < map_reads; i++) { + ret = mt7601u_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16, + data + i * 16, MT_EE_PHYSICAL_READ); + if (ret) + return ret; + } + + for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++) + if (!data[i]) { + if (!start) + start = MT_EE_USAGE_MAP_START + i; + end = MT_EE_USAGE_MAP_START + i; + } + cnt_free = end - start + 1; + + if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) { + dev_err(dev->dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n"); + return -EINVAL; + } + + return 0; +} + +static bool +mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom) +{ + u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1); + + return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN); +} + +static void +mt7601u_set_chip_cap(struct mt7601u_dev *dev, u8 *eeprom) +{ + u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0); + u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1); + + if (!field_valid(nic_conf1 & 0xff)) + nic_conf1 &= 0xff00; + + dev->ee->tssi_enabled = mt7601u_has_tssi(dev, eeprom) && + !(nic_conf1 & MT_EE_NIC_CONF_1_TEMP_TX_ALC); + + if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL) + dev_err(dev->dev, + "Error: this driver does not support HW RF ctrl\n"); + + if (!field_valid(nic_conf0 >> 8)) + return; + + if (MT76_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 || + MT76_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1) + dev_err(dev->dev, + "Error: device has more than 1 RX/TX stream!\n"); +} + +static int +mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *eeprom) +{ + const void *src = eeprom + MT_EE_MAC_ADDR; + + ether_addr_copy(dev->macaddr, src); + + if (!is_valid_ether_addr(dev->macaddr)) { + eth_random_addr(dev->macaddr); + dev_info(dev->dev, + "Invalid MAC address, using random address %pM\n", + dev->macaddr); + } + + mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr)); + mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) | + MT76_SET(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); + + return 0; +} + +static void mt7601u_set_channel_target_power(struct mt7601u_dev *dev, + u8 *eeprom, u8 max_pwr) +{ + u8 trgt_pwr = eeprom[MT_EE_TX_TSSI_TARGET_POWER]; + + if (trgt_pwr > max_pwr || !trgt_pwr) { + dev_warn(dev->dev, "Error: EEPROM trgt power invalid %hhx!\n", + trgt_pwr); + trgt_pwr = 0x20; + } + + memset(dev->ee->chan_pwr, trgt_pwr, sizeof(dev->ee->chan_pwr)); +} + +static void +mt7601u_set_channel_power(struct mt7601u_dev *dev, u8 *eeprom) +{ + u32 i, val; + u8 max_pwr; + + val = mt7601u_rr(dev, MT_TX_ALC_CFG_0); + max_pwr = MT76_GET(MT_TX_ALC_CFG_0_LIMIT_0, val); + + if (mt7601u_has_tssi(dev, eeprom)) { + mt7601u_set_channel_target_power(dev, eeprom, max_pwr); + return; + } + + for (i = 0; i < 14; i++) { + s8 power = field_validate(eeprom[MT_EE_TX_POWER_OFFSET + i]); + + if (power > max_pwr || power < 0) + power = MT7601U_DEFAULT_TX_POWER; + + dev->ee->chan_pwr[i] = power; + } +} + +static void +mt7601u_set_country_reg(struct mt7601u_dev *dev, u8 *eeprom) +{ + /* Note: - region 31 is not valid for mt7601u (see rtmp_init.c) + * - comments in rtmp_def.h are incorrect (see rt_channel.c) + */ 
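+	/* Examples, using the table below: EEPROM value 0x00 selects
+	 * channels 1-11, value 0x20 (region 32) also selects 1-11, and
+	 * any unrecognized value falls back to idx 5, i.e. channels 1-14.
+	 */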
+	static const struct reg_channel_bounds chan_bounds[] = {
+		/* EEPROM country regions 0 - 7 */
+		{  1, 11 },	{  1, 13 },	{ 10,  2 },	{ 10,  4 },
+		{ 14,  1 },	{  1, 14 },	{  3,  7 },	{  5,  9 },
+		/* EEPROM country regions 32 - 33 */
+		{  1, 11 },	{  1, 14 }
+	};
+	u8 val = eeprom[MT_EE_COUNTRY_REGION];
+	int idx = -1;
+
+	if (val < 8)
+		idx = val;
+	if (val > 31 && val < 34)
+		idx = val - 32 + 8;
+
+	if (idx != -1)
+		dev_info(dev->dev,
+			 "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
+			 val, chan_bounds[idx].start,
+			 chan_bounds[idx].start + chan_bounds[idx].num - 1);
+	else
+		idx = 5; /* channels 1 - 14 */
+
+	dev->ee->reg = chan_bounds[idx];
+
+	/* TODO: country region 33 is special - phy should be set to B-mode
+	 *	 before entering channel 14 (see sta/connect.c)
+	 */
+}
+
+static void
+mt7601u_set_rf_freq_off(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	u8 comp;
+
+	dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
+	comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
+
+	if (comp & BIT(7))
+		dev->ee->rf_freq_off -= comp & 0x7f;
+	else
+		dev->ee->rf_freq_off += comp;
+}
+
+static void
+mt7601u_set_rssi_offset(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	int i;
+	s8 *rssi_offset = dev->ee->rssi_offset;
+
+	for (i = 0; i < 2; i++) {
+		rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
+
+		if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+			dev_warn(dev->dev,
+				 "Warning: EEPROM RSSI is invalid %02hhx\n",
+				 rssi_offset[i]);
+			rssi_offset[i] = 0;
+		}
+	}
+}
+
+static void
+mt7601u_extra_power_over_mac(struct mt7601u_dev *dev)
+{
+	u32 val;
+
+	val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_1) & 0x0000ff00) >> 8);
+	val |= ((mt7601u_rr(dev, MT_TX_PWR_CFG_2) & 0x0000ff00) << 8);
+	mt7601u_wr(dev, MT_TX_PWR_CFG_7, val);
+
+	val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
+	mt7601u_wr(dev, MT_TX_PWR_CFG_9, val);
+}
+
+static void
+mt7601u_set_power_rate(struct power_per_rate *rate, s8 delta, u8 value)
+{
+	rate->raw = s6_validate(value);
+	rate->bw20 = s6_to_int(value);
+	/* Note: vendor driver does cap the value to s6 right away */
+	rate->bw40 = rate->bw20 + delta;
+}
+
+static void
+mt7601u_save_power_rate(struct mt7601u_dev *dev, s8 delta, u32 val, int i)
+{
+	struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+
+	switch (i) {
+	case 0:
+		mt7601u_set_power_rate(&t->cck[0], delta, (val >> 0) & 0xff);
+		mt7601u_set_power_rate(&t->cck[1], delta, (val >> 8) & 0xff);
+		/* Save cck bw20 for fixups of channel 14 */
+		dev->ee->real_cck_bw20[0] = t->cck[0].bw20;
+		dev->ee->real_cck_bw20[1] = t->cck[1].bw20;
+
+		mt7601u_set_power_rate(&t->ofdm[0], delta, (val >> 16) & 0xff);
+		mt7601u_set_power_rate(&t->ofdm[1], delta, (val >> 24) & 0xff);
+		break;
+	case 1:
+		mt7601u_set_power_rate(&t->ofdm[2], delta, (val >> 0) & 0xff);
+		mt7601u_set_power_rate(&t->ofdm[3], delta, (val >> 8) & 0xff);
+		mt7601u_set_power_rate(&t->ht[0], delta, (val >> 16) & 0xff);
+		mt7601u_set_power_rate(&t->ht[1], delta, (val >> 24) & 0xff);
+		break;
+	case 2:
+		mt7601u_set_power_rate(&t->ht[2], delta, (val >> 0) & 0xff);
+		mt7601u_set_power_rate(&t->ht[3], delta, (val >> 8) & 0xff);
+		break;
+	}
+}
+
+static s8
+get_delta(u8 val)
+{
+	s8 ret;
+
+	if (!field_valid(val) || !(val & BIT(7)))
+		return 0;
+
+	ret = val & 0x1f;
+	if (ret > 8)
+		ret = 8;
+	if (val & BIT(6))
+		ret = -ret;
+
+	return ret;
+}
+
+static void
+mt7601u_config_tx_power_per_rate(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	u32 val;
+	s8 bw40_delta;
+	int i;
+
+	bw40_delta = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
+
+	for (i = 0; i < 
5; i++) { + val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i)); + + mt7601u_save_power_rate(dev, bw40_delta, val, i); + + if (~val) + mt7601u_wr(dev, MT_TX_PWR_CFG_0 + i * 4, val); + } + + mt7601u_extra_power_over_mac(dev); +} + +static void +mt7601u_init_tssi_params(struct mt7601u_dev *dev, u8 *eeprom) +{ + struct tssi_data *d = &dev->ee->tssi_data; + + if (!dev->ee->tssi_enabled) + return; + + d->slope = eeprom[MT_EE_TX_TSSI_SLOPE]; + d->tx0_delta_offset = eeprom[MT_EE_TX_TSSI_OFFSET] * 1024; + d->offset[0] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP]; + d->offset[1] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 1]; + d->offset[2] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 2]; +} + +int +mt7601u_eeprom_init(struct mt7601u_dev *dev) +{ + u8 *eeprom; + int i, ret; + + ret = mt7601u_efuse_physical_size_check(dev); + if (ret) + return ret; + + dev->ee = devm_kzalloc(dev->dev, sizeof(*dev->ee), GFP_KERNEL); + if (!dev->ee) + return -ENOMEM; + + eeprom = kmalloc(MT7601U_EEPROM_SIZE, GFP_KERNEL); + if (!eeprom) + return -ENOMEM; + + for (i = 0; i + 16 <= MT7601U_EEPROM_SIZE; i += 16) { + ret = mt7601u_efuse_read(dev, i, eeprom + i, MT_EE_READ); + if (ret) + goto out; + } + + if (eeprom[MT_EE_VERSION_EE] > MT7601U_EE_MAX_VER) + dev_warn(dev->dev, + "Warning: unsupported EEPROM version %02hhx\n", + eeprom[MT_EE_VERSION_EE]); + dev_info(dev->dev, "EEPROM ver:%02hhx fae:%02hhx\n", + eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]); + + mt7601u_set_macaddr(dev, eeprom); + mt7601u_set_chip_cap(dev, eeprom); + mt7601u_set_channel_power(dev, eeprom); + mt7601u_set_country_reg(dev, eeprom); + mt7601u_set_rf_freq_off(dev, eeprom); + mt7601u_set_rssi_offset(dev, eeprom); + dev->ee->ref_temp = eeprom[MT_EE_REF_TEMP]; + dev->ee->lna_gain = eeprom[MT_EE_LNA_GAIN]; + + mt7601u_config_tx_power_per_rate(dev, eeprom); + + mt7601u_init_tssi_params(dev, eeprom); +out: + kfree(eeprom); + return ret; +} diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h new file mode 100644 index 000000000000..662d12703b69 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h @@ -0,0 +1,151 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MT7601U_EEPROM_H +#define __MT7601U_EEPROM_H + +struct mt7601u_dev; + +#define MT7601U_EE_MAX_VER 0x0c +#define MT7601U_EEPROM_SIZE 256 + +#define MT7601U_DEFAULT_TX_POWER 6 + +enum mt76_eeprom_field { + MT_EE_CHIP_ID = 0x00, + MT_EE_VERSION_FAE = 0x02, + MT_EE_VERSION_EE = 0x03, + MT_EE_MAC_ADDR = 0x04, + MT_EE_NIC_CONF_0 = 0x34, + MT_EE_NIC_CONF_1 = 0x36, + MT_EE_COUNTRY_REGION = 0x39, + MT_EE_FREQ_OFFSET = 0x3a, + MT_EE_NIC_CONF_2 = 0x42, + + MT_EE_LNA_GAIN = 0x44, + MT_EE_RSSI_OFFSET = 0x46, + + MT_EE_TX_POWER_DELTA_BW40 = 0x50, + MT_EE_TX_POWER_OFFSET = 0x52, + + MT_EE_TX_TSSI_SLOPE = 0x6e, + MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f, + MT_EE_TX_TSSI_OFFSET = 0x76, + + MT_EE_TX_TSSI_TARGET_POWER = 0xd0, + MT_EE_REF_TEMP = 0xd1, + MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb, + MT_EE_TX_POWER_BYRATE_BASE = 0xde, + + MT_EE_USAGE_MAP_START = 0x1e0, + MT_EE_USAGE_MAP_END = 0x1fc, +}; + +#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0) +#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4) +#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12) + +#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0) +#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1) +#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2) +#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3) +#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13) + +#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0) +#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4) +#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8) +#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9) +#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11) +#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13) + +#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \ + (i) * 4) + +#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \ + MT_EE_USAGE_MAP_START + 1) + +enum mt7601u_eeprom_access_modes { + MT_EE_READ = 0, + MT_EE_PHYSICAL_READ = 1, +}; + +struct power_per_rate { + u8 raw; /* validated s6 value */ + s8 bw20; /* sign-extended int */ + s8 bw40; /* sign-extended int */ +}; + +/* Power per rate - one value per two rates */ +struct mt7601u_rate_power { + struct power_per_rate cck[2]; + struct power_per_rate ofdm[4]; + struct power_per_rate ht[4]; +}; + +struct reg_channel_bounds { + u8 start; + u8 num; +}; + +struct mt7601u_eeprom_params { + bool tssi_enabled; + u8 rf_freq_off; + s8 rssi_offset[2]; + s8 ref_temp; + s8 lna_gain; + + u8 chan_pwr[14]; + struct mt7601u_rate_power power_rate_table; + s8 real_cck_bw20[2]; + + /* TSSI stuff - only with internal TX ALC */ + struct tssi_data { + int tx0_delta_offset; + u8 slope; + u8 offset[3]; + } tssi_data; + + struct reg_channel_bounds reg; +}; + +int mt7601u_eeprom_init(struct mt7601u_dev *dev); + +static inline u32 s6_validate(u32 reg) +{ + WARN_ON(reg & ~GENMASK(5, 0)); + return reg & GENMASK(5, 0); +} + +static inline int s6_to_int(u32 reg) +{ + int s6; + + s6 = s6_validate(reg); + if (s6 & BIT(5)) + s6 -= BIT(6); + + return s6; +} + +static inline u32 int_to_s6(int val) +{ + if (val < -0x20) + return 0x20; + if (val > 0x1f) + return 0x1f; + + return val & 0x3f; +} + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c new file mode 100644 index 000000000000..1fc86e865c8c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/init.c @@ -0,0 +1,625 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. 
+ * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "mt7601u.h" +#include "eeprom.h" +#include "trace.h" +#include "mcu.h" + +#include "initvals.h" + +static void +mt7601u_set_wlan_state(struct mt7601u_dev *dev, u32 val, bool enable) +{ + int i; + + /* Note: we don't turn off WLAN_CLK because that makes the device + * not respond properly on the probe path. + * In case anyone (PSM?) wants to use this function we can + * bring the clock stuff back and fixup the probe path. + */ + + if (enable) + val |= (MT_WLAN_FUN_CTRL_WLAN_EN | + MT_WLAN_FUN_CTRL_WLAN_CLK_EN); + else + val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN); + + mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); + + if (enable) { + set_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state); + } else { + clear_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state); + return; + } + + for (i = 200; i; i--) { + val = mt7601u_rr(dev, MT_CMB_CTRL); + + if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD) + break; + + udelay(20); + } + + /* Note: vendor driver tries to disable/enable wlan here and retry + * but the code which does it is so buggy it must have never + * triggered, so don't bother. + */ + if (!i) + dev_err(dev->dev, "Error: PLL and XTAL check failed!\n"); +} + +static void mt7601u_chip_onoff(struct mt7601u_dev *dev, bool enable, bool reset) +{ + u32 val; + + mutex_lock(&dev->hw_atomic_mutex); + + val = mt7601u_rr(dev, MT_WLAN_FUN_CTRL); + + if (reset) { + val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN; + val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL; + + if (val & MT_WLAN_FUN_CTRL_WLAN_EN) { + val |= (MT_WLAN_FUN_CTRL_WLAN_RESET | + MT_WLAN_FUN_CTRL_WLAN_RESET_RF); + mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); + + val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET | + MT_WLAN_FUN_CTRL_WLAN_RESET_RF); + } + } + + mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); + + mt7601u_set_wlan_state(dev, val, enable); + + mutex_unlock(&dev->hw_atomic_mutex); +} + +static void mt7601u_reset_csr_bbp(struct mt7601u_dev *dev) +{ + mt7601u_wr(dev, MT_MAC_SYS_CTRL, (MT_MAC_SYS_CTRL_RESET_CSR | + MT_MAC_SYS_CTRL_RESET_BBP)); + mt7601u_wr(dev, MT_USB_DMA_CFG, 0); + msleep(1); + mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0); +} + +static void mt7601u_init_usb_dma(struct mt7601u_dev *dev) +{ + u32 val; + + val = MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) | + MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) | + MT_USB_DMA_CFG_RX_BULK_EN | + MT_USB_DMA_CFG_TX_BULK_EN; + if (dev->in_max_packet == 512) + val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN; + mt7601u_wr(dev, MT_USB_DMA_CFG, val); + + val |= MT_USB_DMA_CFG_UDMA_RX_WL_DROP; + mt7601u_wr(dev, MT_USB_DMA_CFG, val); + val &= ~MT_USB_DMA_CFG_UDMA_RX_WL_DROP; + mt7601u_wr(dev, MT_USB_DMA_CFG, val); +} + +static int mt7601u_init_bbp(struct mt7601u_dev *dev) +{ + int ret; + + ret = mt7601u_wait_bbp_ready(dev); + if (ret) + return ret; + + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_common_vals, + ARRAY_SIZE(bbp_common_vals)); + if (ret) + return ret; + + return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_chip_vals, + 
ARRAY_SIZE(bbp_chip_vals)); +} + +static void +mt76_init_beacon_offsets(struct mt7601u_dev *dev) +{ + u16 base = MT_BEACON_BASE; + u32 regs[4] = {}; + int i; + + for (i = 0; i < 16; i++) { + u16 addr = dev->beacon_offsets[i]; + + regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4)); + } + + for (i = 0; i < 4; i++) + mt7601u_wr(dev, MT_BCN_OFFSET(i), regs[i]); +} + +static int mt7601u_write_mac_initvals(struct mt7601u_dev *dev) +{ + int ret; + + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, mac_common_vals, + ARRAY_SIZE(mac_common_vals)); + if (ret) + return ret; + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, + mac_chip_vals, ARRAY_SIZE(mac_chip_vals)); + if (ret) + return ret; + + mt76_init_beacon_offsets(dev); + + mt7601u_wr(dev, MT_AUX_CLK_CFG, 0); + + return 0; +} + +static int mt7601u_init_wcid_mem(struct mt7601u_dev *dev) +{ + u32 *vals; + int i, ret; + + vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL); + if (!vals) + return -ENOMEM; + + for (i = 0; i < N_WCIDS; i++) { + vals[i * 2] = 0xffffffff; + vals[i * 2 + 1] = 0x00ffffff; + } + + ret = mt7601u_burst_write_regs(dev, MT_WCID_ADDR_BASE, + vals, N_WCIDS * 2); + kfree(vals); + + return ret; +} + +static int mt7601u_init_key_mem(struct mt7601u_dev *dev) +{ + u32 vals[4] = {}; + + return mt7601u_burst_write_regs(dev, MT_SKEY_MODE_BASE_0, + vals, ARRAY_SIZE(vals)); +} + +static int mt7601u_init_wcid_attr_mem(struct mt7601u_dev *dev) +{ + u32 *vals; + int i, ret; + + vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL); + if (!vals) + return -ENOMEM; + + for (i = 0; i < N_WCIDS * 2; i++) + vals[i] = 1; + + ret = mt7601u_burst_write_regs(dev, MT_WCID_ATTR_BASE, + vals, N_WCIDS * 2); + kfree(vals); + + return ret; +} + +static void mt7601u_reset_counters(struct mt7601u_dev *dev) +{ + mt7601u_rr(dev, MT_RX_STA_CNT0); + mt7601u_rr(dev, MT_RX_STA_CNT1); + mt7601u_rr(dev, MT_RX_STA_CNT2); + mt7601u_rr(dev, MT_TX_STA_CNT0); + mt7601u_rr(dev, MT_TX_STA_CNT1); + mt7601u_rr(dev, MT_TX_STA_CNT2); +} + +int mt7601u_mac_start(struct mt7601u_dev *dev) +{ + mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); + + if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000)) + return -ETIMEDOUT; + + dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR | + MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC | + MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP | + MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND | + MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS | + MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL | + MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV; + mt7601u_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + + mt7601u_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); + + if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50)) + return -ETIMEDOUT; + + return 0; +} + +static void mt7601u_mac_stop_hw(struct mt7601u_dev *dev) +{ + int i, ok; + + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return; + + mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN | + MT_BEACON_TIME_CFG_BEACON_TX); + + if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000)) + dev_warn(dev->dev, "Warning: TX DMA did not stop!\n"); + + /* Page count on TxQ */ + i = 200; + while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) || + (mt76_rr(dev, 0x0a30) & 0x000000ff) || + (mt76_rr(dev, 0x0a34) & 0x00ff00ff))) + msleep(10); + + if (!mt76_poll(dev, 
MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000)) + dev_warn(dev->dev, "Warning: MAC TX did not stop!\n"); + + mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX | + MT_MAC_SYS_CTRL_ENABLE_TX); + + /* Page count on RxQ */ + ok = 0; + i = 200; + while (i--) { + if ((mt76_rr(dev, 0x0430) & 0x00ff0000) || + (mt76_rr(dev, 0x0a30) & 0xffffffff) || + (mt76_rr(dev, 0x0a34) & 0xffffffff)) + ok++; + if (ok > 6) + break; + + msleep(1); + } + + if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000)) + dev_warn(dev->dev, "Warning: MAC RX did not stop!\n"); + + if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000)) + dev_warn(dev->dev, "Warning: RX DMA did not stop!\n"); +} + +void mt7601u_mac_stop(struct mt7601u_dev *dev) +{ + mt7601u_mac_stop_hw(dev); + flush_delayed_work(&dev->stat_work); + cancel_delayed_work_sync(&dev->stat_work); +} + +static void mt7601u_stop_hardware(struct mt7601u_dev *dev) +{ + mt7601u_chip_onoff(dev, false, false); +} + +int mt7601u_init_hardware(struct mt7601u_dev *dev) +{ + static const u16 beacon_offsets[16] = { + /* 512 byte per beacon */ + 0xc000, 0xc200, 0xc400, 0xc600, + 0xc800, 0xca00, 0xcc00, 0xce00, + 0xd000, 0xd200, 0xd400, 0xd600, + 0xd800, 0xda00, 0xdc00, 0xde00 + }; + int ret; + + dev->beacon_offsets = beacon_offsets; + + mt7601u_chip_onoff(dev, true, false); + + ret = mt7601u_wait_asic_ready(dev); + if (ret) + goto err; + ret = mt7601u_mcu_init(dev); + if (ret) + goto err; + + if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) { + ret = -EIO; + goto err; + } + + /* Wait for ASIC ready after FW load. */ + ret = mt7601u_wait_asic_ready(dev); + if (ret) + goto err; + + mt7601u_reset_csr_bbp(dev); + mt7601u_init_usb_dma(dev); + + ret = mt7601u_mcu_cmd_init(dev); + if (ret) + goto err; + ret = mt7601u_dma_init(dev); + if (ret) + goto err_mcu; + ret = mt7601u_write_mac_initvals(dev); + if (ret) + goto err_rx; + + if (!mt76_poll_msec(dev, MT_MAC_STATUS, + MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 100)) { + ret = -EIO; + goto err_rx; + } + + ret = mt7601u_init_bbp(dev); + if (ret) + goto err_rx; + ret = mt7601u_init_wcid_mem(dev); + if (ret) + goto err_rx; + ret = mt7601u_init_key_mem(dev); + if (ret) + goto err_rx; + ret = mt7601u_init_wcid_attr_mem(dev); + if (ret) + goto err_rx; + + mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_SYNC_MODE | + MT_BEACON_TIME_CFG_TBTT_EN | + MT_BEACON_TIME_CFG_BEACON_TX)); + + mt7601u_reset_counters(dev); + + mt7601u_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); + + mt7601u_wr(dev, MT_TXOP_CTRL_CFG, MT76_SET(MT_TXOP_TRUN_EN, 0x3f) | + MT76_SET(MT_TXOP_EXT_CCA_DLY, 0x58)); + + ret = mt7601u_eeprom_init(dev); + if (ret) + goto err_rx; + + ret = mt7601u_phy_init(dev); + if (ret) + goto err_rx; + + mt7601u_set_rx_path(dev, 0); + mt7601u_set_tx_dac(dev, 0); + + mt7601u_mac_set_ctrlch(dev, false); + mt7601u_bbp_set_ctrlch(dev, false); + mt7601u_bbp_set_bw(dev, MT_BW_20); + + return 0; + +err_rx: + mt7601u_dma_cleanup(dev); +err_mcu: + mt7601u_mcu_cmd_deinit(dev); +err: + mt7601u_chip_onoff(dev, false, false); + return ret; +} + +void mt7601u_cleanup(struct mt7601u_dev *dev) +{ + mt7601u_stop_hardware(dev); + mt7601u_dma_cleanup(dev); + mt7601u_mcu_cmd_deinit(dev); +} + +struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev) +{ + struct ieee80211_hw *hw; + struct mt7601u_dev *dev; + + hw = ieee80211_alloc_hw(sizeof(*dev), &mt7601u_ops); + if (!hw) + return NULL; + + dev = hw->priv; + dev->dev = pdev; + 
dev->hw = hw; + mutex_init(&dev->vendor_req_mutex); + mutex_init(&dev->reg_atomic_mutex); + mutex_init(&dev->hw_atomic_mutex); + mutex_init(&dev->mutex); + spin_lock_init(&dev->tx_lock); + spin_lock_init(&dev->rx_lock); + spin_lock_init(&dev->lock); + spin_lock_init(&dev->con_mon_lock); + atomic_set(&dev->avg_ampdu_len, 1); + + dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0); + if (!dev->stat_wq) { + ieee80211_free_hw(hw); + return NULL; + } + + return dev; +} + +#define CHAN2G(_idx, _freq) { \ + .band = IEEE80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_idx), \ + .max_power = 30, \ +} + +static const struct ieee80211_channel mt76_channels_2ghz[] = { + CHAN2G(1, 2412), + CHAN2G(2, 2417), + CHAN2G(3, 2422), + CHAN2G(4, 2427), + CHAN2G(5, 2432), + CHAN2G(6, 2437), + CHAN2G(7, 2442), + CHAN2G(8, 2447), + CHAN2G(9, 2452), + CHAN2G(10, 2457), + CHAN2G(11, 2462), + CHAN2G(12, 2467), + CHAN2G(13, 2472), + CHAN2G(14, 2484), +}; + +#define CCK_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ + .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \ + .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \ +} + +#define OFDM_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \ + .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \ +} + +static struct ieee80211_rate mt76_rates[] = { + CCK_RATE(0, 10), + CCK_RATE(1, 20), + CCK_RATE(2, 55), + CCK_RATE(3, 110), + OFDM_RATE(0, 60), + OFDM_RATE(1, 90), + OFDM_RATE(2, 120), + OFDM_RATE(3, 180), + OFDM_RATE(4, 240), + OFDM_RATE(5, 360), + OFDM_RATE(6, 480), + OFDM_RATE(7, 540), +}; + +static int +mt76_init_sband(struct mt7601u_dev *dev, struct ieee80211_supported_band *sband, + const struct ieee80211_channel *chan, int n_chan, + struct ieee80211_rate *rates, int n_rates) +{ + struct ieee80211_sta_ht_cap *ht_cap; + void *chanlist; + int size; + + size = n_chan * sizeof(*chan); + chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL); + if (!chanlist) + return -ENOMEM; + + sband->channels = chanlist; + sband->n_channels = n_chan; + sband->bitrates = rates; + sband->n_bitrates = n_rates; + + ht_cap = &sband->ht_cap; + ht_cap->ht_supported = true; + ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_GRN_FLD | + IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40 | + (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); + + ht_cap->mcs.rx_mask[0] = 0xff; + ht_cap->mcs.rx_mask[4] = 0x1; + ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2; + + dev->chandef.chan = &sband->channels[0]; + + return 0; +} + +static int +mt76_init_sband_2g(struct mt7601u_dev *dev) +{ + dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g), + GFP_KERNEL); + dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = dev->sband_2g; + + WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num > + ARRAY_SIZE(mt76_channels_2ghz)); + + return mt76_init_sband(dev, dev->sband_2g, + &mt76_channels_2ghz[dev->ee->reg.start - 1], + dev->ee->reg.num, + mt76_rates, ARRAY_SIZE(mt76_rates)); +} + +int mt7601u_register_device(struct mt7601u_dev *dev) +{ + struct ieee80211_hw *hw = dev->hw; + struct wiphy *wiphy = hw->wiphy; + int ret; + + /* Reserve WCID 0 for mcast - thanks to this APs WCID will go to + * entry no. 1 like it does in the vendor driver. 
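+	 * (dev->wcid_mask is the allocation bitmap for WCID slots; setting
+	 * bit zero below permanently claims entry 0.)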
+ */ + dev->wcid_mask[0] |= 1; + + /* init fake wcid for monitor interfaces */ + dev->mon_wcid = devm_kmalloc(dev->dev, sizeof(*dev->mon_wcid), + GFP_KERNEL); + if (!dev->mon_wcid) + return -ENOMEM; + dev->mon_wcid->idx = 0xff; + dev->mon_wcid->hw_key_idx = -1; + + SET_IEEE80211_DEV(hw, dev->dev); + + hw->queues = 4; + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_PS_NULLFUNC_STACK | + IEEE80211_HW_SUPPORTS_HT_CCK_RATES | + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_SUPPORTS_RC_TABLE; + hw->max_rates = 1; + hw->max_report_rates = 7; + hw->max_rate_tries = 1; + + hw->sta_data_size = sizeof(struct mt76_sta); + hw->vif_data_size = sizeof(struct mt76_vif); + + SET_IEEE80211_PERM_ADDR(hw, dev->macaddr); + + wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + + ret = mt76_init_sband_2g(dev); + if (ret) + return ret; + + INIT_DELAYED_WORK(&dev->mac_work, mt7601u_mac_work); + INIT_DELAYED_WORK(&dev->stat_work, mt7601u_tx_stat); + + ret = ieee80211_register_hw(hw); + if (ret) + return ret; + + mt7601u_init_debugfs(dev); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt7601u/initvals.h b/drivers/net/wireless/mediatek/mt7601u/initvals.h new file mode 100644 index 000000000000..ec11ff66969d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/initvals.h @@ -0,0 +1,164 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT7601U_INITVALS_H +#define __MT7601U_INITVALS_H + +static const struct mt76_reg_pair bbp_common_vals[] = { + { 65, 0x2c }, + { 66, 0x38 }, + { 68, 0x0b }, + { 69, 0x12 }, + { 70, 0x0a }, + { 73, 0x10 }, + { 81, 0x37 }, + { 82, 0x62 }, + { 83, 0x6a }, + { 84, 0x99 }, + { 86, 0x00 }, + { 91, 0x04 }, + { 92, 0x00 }, + { 103, 0x00 }, + { 105, 0x05 }, + { 106, 0x35 }, +}; + +static const struct mt76_reg_pair bbp_chip_vals[] = { + { 1, 0x04 }, { 4, 0x40 }, { 20, 0x06 }, { 31, 0x08 }, + /* CCK Tx Control */ + { 178, 0xff }, + /* AGC/Sync controls */ + { 66, 0x14 }, { 68, 0x8b }, { 69, 0x12 }, { 70, 0x09 }, + { 73, 0x11 }, { 75, 0x60 }, { 76, 0x44 }, { 84, 0x9a }, + { 86, 0x38 }, { 91, 0x07 }, { 92, 0x02 }, + /* Rx Path Controls */ + { 99, 0x50 }, { 101, 0x00 }, { 103, 0xc0 }, { 104, 0x92 }, + { 105, 0x3c }, { 106, 0x03 }, { 128, 0x12 }, + /* Change RXWI content: Gain Report */ + { 142, 0x04 }, { 143, 0x37 }, + /* Change RXWI content: Antenna Report */ + { 142, 0x03 }, { 143, 0x99 }, + /* Calibration Index Register */ + /* CCK Receiver Control */ + { 160, 0xeb }, { 161, 0xc4 }, { 162, 0x77 }, { 163, 0xf9 }, + { 164, 0x88 }, { 165, 0x80 }, { 166, 0xff }, { 167, 0xe4 }, + /* Added AGC controls - these AGC/GLRT registers are accessed + * through R195 and R196. 
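+	 * For example the pair { 195, 0x01 }, { 196, 0x04 } below stores
+	 * 0x04 in AGC/GLRT table entry 0x01.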
+ */ + { 195, 0x00 }, { 196, 0x00 }, + { 195, 0x01 }, { 196, 0x04 }, + { 195, 0x02 }, { 196, 0x20 }, + { 195, 0x03 }, { 196, 0x0a }, + { 195, 0x06 }, { 196, 0x16 }, + { 195, 0x07 }, { 196, 0x05 }, + { 195, 0x08 }, { 196, 0x37 }, + { 195, 0x0a }, { 196, 0x15 }, + { 195, 0x0b }, { 196, 0x17 }, + { 195, 0x0c }, { 196, 0x06 }, + { 195, 0x0d }, { 196, 0x09 }, + { 195, 0x0e }, { 196, 0x05 }, + { 195, 0x0f }, { 196, 0x09 }, + { 195, 0x10 }, { 196, 0x20 }, + { 195, 0x20 }, { 196, 0x17 }, + { 195, 0x21 }, { 196, 0x06 }, + { 195, 0x22 }, { 196, 0x09 }, + { 195, 0x23 }, { 196, 0x17 }, + { 195, 0x24 }, { 196, 0x06 }, + { 195, 0x25 }, { 196, 0x09 }, + { 195, 0x26 }, { 196, 0x17 }, + { 195, 0x27 }, { 196, 0x06 }, + { 195, 0x28 }, { 196, 0x09 }, + { 195, 0x29 }, { 196, 0x05 }, + { 195, 0x2a }, { 196, 0x09 }, + { 195, 0x80 }, { 196, 0x8b }, + { 195, 0x81 }, { 196, 0x12 }, + { 195, 0x82 }, { 196, 0x09 }, + { 195, 0x83 }, { 196, 0x17 }, + { 195, 0x84 }, { 196, 0x11 }, + { 195, 0x85 }, { 196, 0x00 }, + { 195, 0x86 }, { 196, 0x00 }, + { 195, 0x87 }, { 196, 0x18 }, + { 195, 0x88 }, { 196, 0x60 }, + { 195, 0x89 }, { 196, 0x44 }, + { 195, 0x8a }, { 196, 0x8b }, + { 195, 0x8b }, { 196, 0x8b }, + { 195, 0x8c }, { 196, 0x8b }, + { 195, 0x8d }, { 196, 0x8b }, + { 195, 0x8e }, { 196, 0x09 }, + { 195, 0x8f }, { 196, 0x09 }, + { 195, 0x90 }, { 196, 0x09 }, + { 195, 0x91 }, { 196, 0x09 }, + { 195, 0x92 }, { 196, 0x11 }, + { 195, 0x93 }, { 196, 0x11 }, + { 195, 0x94 }, { 196, 0x11 }, + { 195, 0x95 }, { 196, 0x11 }, + /* PPAD */ + { 47, 0x80 }, { 60, 0x80 }, { 150, 0xd2 }, { 151, 0x32 }, + { 152, 0x23 }, { 153, 0x41 }, { 154, 0x00 }, { 155, 0x4f }, + { 253, 0x7e }, { 195, 0x30 }, { 196, 0x32 }, { 195, 0x31 }, + { 196, 0x23 }, { 195, 0x32 }, { 196, 0x45 }, { 195, 0x35 }, + { 196, 0x4a }, { 195, 0x36 }, { 196, 0x5a }, { 195, 0x37 }, + { 196, 0x5a }, +}; + +static const struct mt76_reg_pair mac_common_vals[] = { + { MT_LEGACY_BASIC_RATE, 0x0000013f }, + { MT_HT_BASIC_RATE, 0x00008003 }, + { MT_MAC_SYS_CTRL, 0x00000000 }, + { MT_RX_FILTR_CFG, 0x00017f97 }, + { MT_BKOFF_SLOT_CFG, 0x00000209 }, + { MT_TX_SW_CFG0, 0x00000000 }, + { MT_TX_SW_CFG1, 0x00080606 }, + { MT_TX_LINK_CFG, 0x00001020 }, + { MT_TX_TIMEOUT_CFG, 0x000a2090 }, + { MT_MAX_LEN_CFG, 0x00003fff }, + { MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f }, + { MT_PBF_RX_MAX_PCNT, 0x0000009f }, + { MT_TX_RETRY_CFG, 0x47d01f0f }, + { MT_AUTO_RSP_CFG, 0x00000013 }, + { MT_CCK_PROT_CFG, 0x05740003 }, + { MT_OFDM_PROT_CFG, 0x05740003 }, + { MT_MM40_PROT_CFG, 0x03f44084 }, + { MT_GF20_PROT_CFG, 0x01744004 }, + { MT_GF40_PROT_CFG, 0x03f44084 }, + { MT_MM20_PROT_CFG, 0x01744004 }, + { MT_TXOP_CTRL_CFG, 0x0000583f }, + { MT_TX_RTS_CFG, 0x01092b20 }, + { MT_EXP_ACK_TIME, 0x002400ca }, + { MT_TXOP_HLDR_ET, 0x00000002 }, + { MT_XIFS_TIME_CFG, 0x33a41010 }, + { MT_PWR_PIN_CFG, 0x00000000 }, +}; + +static const struct mt76_reg_pair mac_chip_vals[] = { + { MT_TSO_CTRL, 0x00006050 }, + { MT_BCN_OFFSET(0), 0x18100800 }, + { MT_BCN_OFFSET(1), 0x38302820 }, + { MT_PBF_SYS_CTRL, 0x00080c00 }, + { MT_PBF_CFG, 0x7f723c1f }, + { MT_FCE_PSE_CTRL, 0x00000001 }, + { MT_PAUSE_ENABLE_CONTROL1, 0x00000000 }, + { MT_TX0_RF_GAIN_CORR, 0x003b0005 }, + { MT_TX0_RF_GAIN_ATTEN, 0x00006900 }, + { MT_TX0_BB_GAIN_ATTEN, 0x00000400 }, + { MT_TX_ALC_VGA3, 0x00060006 }, + { MT_TX_SW_CFG0, 0x00000402 }, + { MT_TX_SW_CFG1, 0x00000000 }, + { MT_TX_SW_CFG2, 0x00000000 }, + { MT_HEADER_TRANS_CTRL_REG, 0x00000000 }, + { MT_FCE_CSO, 0x0000030f }, + { MT_FCE_PARAMETERS, 0x00256f0f }, +}; + +#endif diff --git 
a/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h b/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h new file mode 100644 index 000000000000..a2bdc3e322bf --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h @@ -0,0 +1,291 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT7601U_PHY_INITVALS_H +#define __MT7601U_PHY_INITVALS_H + +#define RF_REG_PAIR(bank, reg, value) \ + { MT_MCU_MEMMAP_RF | (bank) << 16 | (reg), value } + +static const struct mt76_reg_pair rf_central[] = { + /* Bank 0 - for central blocks: BG, PLL, XTAL, LO, ADC/DAC */ + RF_REG_PAIR(0, 0, 0x02), + RF_REG_PAIR(0, 1, 0x01), + RF_REG_PAIR(0, 2, 0x11), + RF_REG_PAIR(0, 3, 0xff), + RF_REG_PAIR(0, 4, 0x0a), + RF_REG_PAIR(0, 5, 0x20), + RF_REG_PAIR(0, 6, 0x00), + /* B/G */ + RF_REG_PAIR(0, 7, 0x00), + RF_REG_PAIR(0, 8, 0x00), + RF_REG_PAIR(0, 9, 0x00), + RF_REG_PAIR(0, 10, 0x00), + RF_REG_PAIR(0, 11, 0x21), + /* XO */ + RF_REG_PAIR(0, 13, 0x00), /* 40mhz xtal */ + /* RF_REG_PAIR(0, 13, 0x13), */ /* 20mhz xtal */ + RF_REG_PAIR(0, 14, 0x7c), + RF_REG_PAIR(0, 15, 0x22), + RF_REG_PAIR(0, 16, 0x80), + /* PLL */ + RF_REG_PAIR(0, 17, 0x99), + RF_REG_PAIR(0, 18, 0x99), + RF_REG_PAIR(0, 19, 0x09), + RF_REG_PAIR(0, 20, 0x50), + RF_REG_PAIR(0, 21, 0xb0), + RF_REG_PAIR(0, 22, 0x00), + RF_REG_PAIR(0, 23, 0xc5), + RF_REG_PAIR(0, 24, 0xfc), + RF_REG_PAIR(0, 25, 0x40), + RF_REG_PAIR(0, 26, 0x4d), + RF_REG_PAIR(0, 27, 0x02), + RF_REG_PAIR(0, 28, 0x72), + RF_REG_PAIR(0, 29, 0x01), + RF_REG_PAIR(0, 30, 0x00), + RF_REG_PAIR(0, 31, 0x00), + /* test ports */ + RF_REG_PAIR(0, 32, 0x00), + RF_REG_PAIR(0, 33, 0x00), + RF_REG_PAIR(0, 34, 0x23), + RF_REG_PAIR(0, 35, 0x01), /* change setting to reduce spurs */ + RF_REG_PAIR(0, 36, 0x00), + RF_REG_PAIR(0, 37, 0x00), + /* ADC/DAC */ + RF_REG_PAIR(0, 38, 0x00), + RF_REG_PAIR(0, 39, 0x20), + RF_REG_PAIR(0, 40, 0x00), + RF_REG_PAIR(0, 41, 0xd0), + RF_REG_PAIR(0, 42, 0x1b), + RF_REG_PAIR(0, 43, 0x02), + RF_REG_PAIR(0, 44, 0x00), +}; + +static const struct mt76_reg_pair rf_channel[] = { + RF_REG_PAIR(4, 0, 0x01), + RF_REG_PAIR(4, 1, 0x00), + RF_REG_PAIR(4, 2, 0x00), + RF_REG_PAIR(4, 3, 0x00), + /* LDO */ + RF_REG_PAIR(4, 4, 0x00), + RF_REG_PAIR(4, 5, 0x08), + RF_REG_PAIR(4, 6, 0x00), + /* RX */ + RF_REG_PAIR(4, 7, 0x5b), + RF_REG_PAIR(4, 8, 0x52), + RF_REG_PAIR(4, 9, 0xb6), + RF_REG_PAIR(4, 10, 0x57), + RF_REG_PAIR(4, 11, 0x33), + RF_REG_PAIR(4, 12, 0x22), + RF_REG_PAIR(4, 13, 0x3d), + RF_REG_PAIR(4, 14, 0x3e), + RF_REG_PAIR(4, 15, 0x13), + RF_REG_PAIR(4, 16, 0x22), + RF_REG_PAIR(4, 17, 0x23), + RF_REG_PAIR(4, 18, 0x02), + RF_REG_PAIR(4, 19, 0xa4), + RF_REG_PAIR(4, 20, 0x01), + RF_REG_PAIR(4, 21, 0x12), + RF_REG_PAIR(4, 22, 0x80), + RF_REG_PAIR(4, 23, 0xb3), + RF_REG_PAIR(4, 24, 0x00), /* reserved */ + RF_REG_PAIR(4, 25, 0x00), /* reserved */ + RF_REG_PAIR(4, 26, 0x00), /* reserved */ + RF_REG_PAIR(4, 27, 0x00), /* reserved */ + /* LOGEN */ + RF_REG_PAIR(4, 28, 0x18), + RF_REG_PAIR(4, 29, 0xee), + RF_REG_PAIR(4, 30, 0x6b), + RF_REG_PAIR(4, 31, 0x31), + RF_REG_PAIR(4, 32, 0x5d), + 
RF_REG_PAIR(4, 33, 0x00), /* reserved */ + /* TX */ + RF_REG_PAIR(4, 34, 0x96), + RF_REG_PAIR(4, 35, 0x55), + RF_REG_PAIR(4, 36, 0x08), + RF_REG_PAIR(4, 37, 0xbb), + RF_REG_PAIR(4, 38, 0xb3), + RF_REG_PAIR(4, 39, 0xb3), + RF_REG_PAIR(4, 40, 0x03), + RF_REG_PAIR(4, 41, 0x00), /* reserved */ + RF_REG_PAIR(4, 42, 0x00), /* reserved */ + RF_REG_PAIR(4, 43, 0xc5), + RF_REG_PAIR(4, 44, 0xc5), + RF_REG_PAIR(4, 45, 0xc5), + RF_REG_PAIR(4, 46, 0x07), + RF_REG_PAIR(4, 47, 0xa8), + RF_REG_PAIR(4, 48, 0xef), + RF_REG_PAIR(4, 49, 0x1a), + /* PA */ + RF_REG_PAIR(4, 54, 0x07), + RF_REG_PAIR(4, 55, 0xa7), + RF_REG_PAIR(4, 56, 0xcc), + RF_REG_PAIR(4, 57, 0x14), + RF_REG_PAIR(4, 58, 0x07), + RF_REG_PAIR(4, 59, 0xa8), + RF_REG_PAIR(4, 60, 0xd7), + RF_REG_PAIR(4, 61, 0x10), + RF_REG_PAIR(4, 62, 0x1c), + RF_REG_PAIR(4, 63, 0x00), /* reserved */ +}; + +static const struct mt76_reg_pair rf_vga[] = { + RF_REG_PAIR(5, 0, 0x47), + RF_REG_PAIR(5, 1, 0x00), + RF_REG_PAIR(5, 2, 0x00), + RF_REG_PAIR(5, 3, 0x08), + RF_REG_PAIR(5, 4, 0x04), + RF_REG_PAIR(5, 5, 0x20), + RF_REG_PAIR(5, 6, 0x3a), + RF_REG_PAIR(5, 7, 0x3a), + RF_REG_PAIR(5, 8, 0x00), + RF_REG_PAIR(5, 9, 0x00), + RF_REG_PAIR(5, 10, 0x10), + RF_REG_PAIR(5, 11, 0x10), + RF_REG_PAIR(5, 12, 0x10), + RF_REG_PAIR(5, 13, 0x10), + RF_REG_PAIR(5, 14, 0x10), + RF_REG_PAIR(5, 15, 0x20), + RF_REG_PAIR(5, 16, 0x22), + RF_REG_PAIR(5, 17, 0x7c), + RF_REG_PAIR(5, 18, 0x00), + RF_REG_PAIR(5, 19, 0x00), + RF_REG_PAIR(5, 20, 0x00), + RF_REG_PAIR(5, 21, 0xf1), + RF_REG_PAIR(5, 22, 0x11), + RF_REG_PAIR(5, 23, 0x02), + RF_REG_PAIR(5, 24, 0x41), + RF_REG_PAIR(5, 25, 0x20), + RF_REG_PAIR(5, 26, 0x00), + RF_REG_PAIR(5, 27, 0xd7), + RF_REG_PAIR(5, 28, 0xa2), + RF_REG_PAIR(5, 29, 0x20), + RF_REG_PAIR(5, 30, 0x49), + RF_REG_PAIR(5, 31, 0x20), + RF_REG_PAIR(5, 32, 0x04), + RF_REG_PAIR(5, 33, 0xf1), + RF_REG_PAIR(5, 34, 0xa1), + RF_REG_PAIR(5, 35, 0x01), + RF_REG_PAIR(5, 41, 0x00), + RF_REG_PAIR(5, 42, 0x00), + RF_REG_PAIR(5, 43, 0x00), + RF_REG_PAIR(5, 44, 0x00), + RF_REG_PAIR(5, 45, 0x00), + RF_REG_PAIR(5, 46, 0x00), + RF_REG_PAIR(5, 47, 0x00), + RF_REG_PAIR(5, 48, 0x00), + RF_REG_PAIR(5, 49, 0x00), + RF_REG_PAIR(5, 50, 0x00), + RF_REG_PAIR(5, 51, 0x00), + RF_REG_PAIR(5, 52, 0x00), + RF_REG_PAIR(5, 53, 0x00), + RF_REG_PAIR(5, 54, 0x00), + RF_REG_PAIR(5, 55, 0x00), + RF_REG_PAIR(5, 56, 0x00), + RF_REG_PAIR(5, 57, 0x00), + RF_REG_PAIR(5, 58, 0x31), + RF_REG_PAIR(5, 59, 0x31), + RF_REG_PAIR(5, 60, 0x0a), + RF_REG_PAIR(5, 61, 0x02), + RF_REG_PAIR(5, 62, 0x00), + RF_REG_PAIR(5, 63, 0x00), +}; + +/* TODO: BBP178 is set to 0xff for "CCK CH14 OBW" which overrides the settings + * from channel switching. Seems stupid at best. 
+ */ +static const struct mt76_reg_pair bbp_high_temp[] = { + { 75, 0x60 }, + { 92, 0x02 }, + { 178, 0xff }, /* For CCK CH14 OBW */ + { 195, 0x88 }, { 196, 0x60 }, +}, bbp_high_temp_bw20[] = { + { 69, 0x12 }, + { 91, 0x07 }, + { 195, 0x23 }, { 196, 0x17 }, + { 195, 0x24 }, { 196, 0x06 }, + { 195, 0x81 }, { 196, 0x12 }, + { 195, 0x83 }, { 196, 0x17 }, +}, bbp_high_temp_bw40[] = { + { 69, 0x15 }, + { 91, 0x04 }, + { 195, 0x23 }, { 196, 0x12 }, + { 195, 0x24 }, { 196, 0x08 }, + { 195, 0x81 }, { 196, 0x15 }, + { 195, 0x83 }, { 196, 0x16 }, +}, bbp_low_temp[] = { + { 178, 0xff }, /* For CCK CH14 OBW */ +}, bbp_low_temp_bw20[] = { + { 69, 0x12 }, + { 75, 0x5e }, + { 91, 0x07 }, + { 92, 0x02 }, + { 195, 0x23 }, { 196, 0x17 }, + { 195, 0x24 }, { 196, 0x06 }, + { 195, 0x81 }, { 196, 0x12 }, + { 195, 0x83 }, { 196, 0x17 }, + { 195, 0x88 }, { 196, 0x5e }, +}, bbp_low_temp_bw40[] = { + { 69, 0x15 }, + { 75, 0x5c }, + { 91, 0x04 }, + { 92, 0x03 }, + { 195, 0x23 }, { 196, 0x10 }, + { 195, 0x24 }, { 196, 0x08 }, + { 195, 0x81 }, { 196, 0x15 }, + { 195, 0x83 }, { 196, 0x16 }, + { 195, 0x88 }, { 196, 0x5b }, +}, bbp_normal_temp[] = { + { 75, 0x60 }, + { 92, 0x02 }, + { 178, 0xff }, /* For CCK CH14 OBW */ + { 195, 0x88 }, { 196, 0x60 }, +}, bbp_normal_temp_bw20[] = { + { 69, 0x12 }, + { 91, 0x07 }, + { 195, 0x23 }, { 196, 0x17 }, + { 195, 0x24 }, { 196, 0x06 }, + { 195, 0x81 }, { 196, 0x12 }, + { 195, 0x83 }, { 196, 0x17 }, +}, bbp_normal_temp_bw40[] = { + { 69, 0x15 }, + { 91, 0x04 }, + { 195, 0x23 }, { 196, 0x12 }, + { 195, 0x24 }, { 196, 0x08 }, + { 195, 0x81 }, { 196, 0x15 }, + { 195, 0x83 }, { 196, 0x16 }, +}; + +#define BBP_TABLE(arr) { arr, ARRAY_SIZE(arr), } + +static const struct reg_table { + const struct mt76_reg_pair *regs; + size_t n; +} bbp_mode_table[3][3] = { + { + BBP_TABLE(bbp_normal_temp_bw20), + BBP_TABLE(bbp_normal_temp_bw40), + BBP_TABLE(bbp_normal_temp), + }, { + BBP_TABLE(bbp_high_temp_bw20), + BBP_TABLE(bbp_high_temp_bw40), + BBP_TABLE(bbp_high_temp), + }, { + BBP_TABLE(bbp_low_temp_bw20), + BBP_TABLE(bbp_low_temp_bw40), + BBP_TABLE(bbp_low_temp), + } +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c new file mode 100644 index 000000000000..c161bcc6a7fa --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/mac.c @@ -0,0 +1,569 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "mt7601u.h" +#include "trace.h" +#include + +static void +mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate) +{ + u8 idx = MT76_GET(MT_TXWI_RATE_MCS, rate); + + txrate->idx = 0; + txrate->flags = 0; + txrate->count = 1; + + switch (MT76_GET(MT_TXWI_RATE_PHY_MODE, rate)) { + case MT_PHY_TYPE_OFDM: + txrate->idx = idx + 4; + return; + case MT_PHY_TYPE_CCK: + if (idx >= 8) + idx -= 8; + + txrate->idx = idx; + return; + case MT_PHY_TYPE_HT_GF: + txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD; + /* fall through */ + case MT_PHY_TYPE_HT: + txrate->flags |= IEEE80211_TX_RC_MCS; + txrate->idx = idx; + break; + default: + WARN_ON(1); + return; + } + + if (MT76_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40) + txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + + if (rate & MT_TXWI_RATE_SGI) + txrate->flags |= IEEE80211_TX_RC_SHORT_GI; +} + +static void +mt76_mac_fill_tx_status(struct mt7601u_dev *dev, struct ieee80211_tx_info *info, + struct mt76_tx_status *st) +{ + struct ieee80211_tx_rate *rate = info->status.rates; + int cur_idx, last_rate; + int i; + + last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1); + mt76_mac_process_tx_rate(&rate[last_rate], st->rate); + if (last_rate < IEEE80211_TX_MAX_RATES - 1) + rate[last_rate + 1].idx = -1; + + cur_idx = rate[last_rate].idx + st->retry; + for (i = 0; i <= last_rate; i++) { + rate[i].flags = rate[last_rate].flags; + rate[i].idx = max_t(int, 0, cur_idx - i); + rate[i].count = 1; + } + + if (last_rate > 0) + rate[last_rate - 1].count = st->retry + 1 - last_rate; + + info->status.ampdu_len = 1; + info->status.ampdu_ack_len = st->success; + + if (st->is_probe) + info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; + + if (st->aggr) + info->flags |= IEEE80211_TX_CTL_AMPDU | + IEEE80211_TX_STAT_AMPDU; + + if (!st->ack_req) + info->flags |= IEEE80211_TX_CTL_NO_ACK; + else if (st->success) + info->flags |= IEEE80211_TX_STAT_ACK; +} + +u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev, + const struct ieee80211_tx_rate *rate, u8 *nss_val) +{ + u16 rateval; + u8 phy, rate_idx; + u8 nss = 1; + u8 bw = 0; + + if (rate->flags & IEEE80211_TX_RC_MCS) { + rate_idx = rate->idx; + nss = 1 + (rate->idx >> 3); + phy = MT_PHY_TYPE_HT; + if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) + phy = MT_PHY_TYPE_HT_GF; + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + bw = 1; + } else { + const struct ieee80211_rate *r; + int band = dev->chandef.chan->band; + u16 val; + + r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx]; + if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + val = r->hw_value_short; + else + val = r->hw_value; + + phy = val >> 8; + rate_idx = val & 0xff; + bw = 0; + } + + rateval = MT76_SET(MT_RXWI_RATE_MCS, rate_idx); + rateval |= MT76_SET(MT_RXWI_RATE_PHY, phy); + rateval |= MT76_SET(MT_RXWI_RATE_BW, bw); + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + rateval |= MT_RXWI_RATE_SGI; + + *nss_val = nss; + return rateval; +} + +void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid, + const struct ieee80211_tx_rate *rate) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + wcid->tx_rate = mt76_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss); + wcid->tx_rate_set = true; + spin_unlock_irqrestore(&dev->lock, flags); +} + +struct mt76_tx_status mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev) +{ + struct mt76_tx_status stat = {}; + u32 val; + + val = mt7601u_rr(dev, MT_TX_STAT_FIFO); + stat.valid = !!(val & MT_TX_STAT_FIFO_VALID); + stat.success = !!(val & MT_TX_STAT_FIFO_SUCCESS); + 
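+	/* The remaining status fields below are unpacked from the same
+	 * TX_STAT_FIFO dword read above.
+	 */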
stat.aggr = !!(val & MT_TX_STAT_FIFO_AGGR); + stat.ack_req = !!(val & MT_TX_STAT_FIFO_ACKREQ); + stat.pktid = MT76_GET(MT_TX_STAT_FIFO_PID_TYPE, val); + stat.wcid = MT76_GET(MT_TX_STAT_FIFO_WCID, val); + stat.rate = MT76_GET(MT_TX_STAT_FIFO_RATE, val); + + return stat; +} + +void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat) +{ + struct ieee80211_tx_info info = {}; + struct ieee80211_sta *sta = NULL; + struct mt76_wcid *wcid = NULL; + void *msta; + + rcu_read_lock(); + if (stat->wcid < ARRAY_SIZE(dev->wcid)) + wcid = rcu_dereference(dev->wcid[stat->wcid]); + + if (wcid) { + msta = container_of(wcid, struct mt76_sta, wcid); + sta = container_of(msta, struct ieee80211_sta, + drv_priv); + } + + mt76_mac_fill_tx_status(dev, &info, stat); + ieee80211_tx_status_noskb(dev->hw, sta, &info); + rcu_read_unlock(); +} + +void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot, + int ht_mode) +{ + int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION; + bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + u32 prot[6]; + bool ht_rts[4] = {}; + int i; + + prot[0] = MT_PROT_NAV_SHORT | + MT_PROT_TXOP_ALLOW_ALL | + MT_PROT_RTS_THR_EN; + prot[1] = prot[0]; + if (legacy_prot) + prot[1] |= MT_PROT_CTRL_CTS2SELF; + + prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20; + prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL; + + if (legacy_prot) { + prot[2] |= MT_PROT_RATE_CCK_11; + prot[3] |= MT_PROT_RATE_CCK_11; + prot[4] |= MT_PROT_RATE_CCK_11; + prot[5] |= MT_PROT_RATE_CCK_11; + } else { + prot[2] |= MT_PROT_RATE_OFDM_24; + prot[3] |= MT_PROT_RATE_DUP_OFDM_24; + prot[4] |= MT_PROT_RATE_OFDM_24; + prot[5] |= MT_PROT_RATE_DUP_OFDM_24; + } + + switch (mode) { + case IEEE80211_HT_OP_MODE_PROTECTION_NONE: + break; + + case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: + ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true; + break; + + case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: + ht_rts[1] = ht_rts[3] = true; + break; + + case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: + ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true; + break; + } + + if (non_gf) + ht_rts[2] = ht_rts[3] = true; + + for (i = 0; i < 4; i++) + if (ht_rts[i]) + prot[i + 2] |= MT_PROT_CTRL_RTS_CTS; + + for (i = 0; i < 6; i++) + mt7601u_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]); +} + +void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb) +{ + if (short_preamb) + mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); + else + mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); +} + +void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval) +{ + u32 val = mt7601u_rr(dev, MT_BEACON_TIME_CFG); + + val &= ~(MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_SYNC_MODE | + MT_BEACON_TIME_CFG_TBTT_EN); + + if (!enable) { + mt7601u_wr(dev, MT_BEACON_TIME_CFG, val); + return; + } + + val &= ~MT_BEACON_TIME_CFG_INTVAL; + val |= MT76_SET(MT_BEACON_TIME_CFG_INTVAL, interval << 4) | + MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_SYNC_MODE | + MT_BEACON_TIME_CFG_TBTT_EN; +} + +static void mt7601u_check_mac_err(struct mt7601u_dev *dev) +{ + u32 val = mt7601u_rr(dev, 0x10f4); + + if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5)))) + return; + + dev_err(dev->dev, "Error: MAC specific condition occurred\n"); + + mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR); + udelay(10); + mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR); +} + +void mt7601u_mac_work(struct work_struct *work) +{ + struct 
mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+					       mac_work.work);
+	struct {
+		u32 addr_base;
+		u32 span;
+		u64 *stat_base;
+	} spans[] = {
+		{ MT_RX_STA_CNT0,	3,	dev->stats.rx_stat },
+		{ MT_TX_STA_CNT0,	3,	dev->stats.tx_stat },
+		{ MT_TX_AGG_STAT,	1,	dev->stats.aggr_stat },
+		{ MT_MPDU_DENSITY_CNT,	1,	dev->stats.zero_len_del },
+		{ MT_TX_AGG_CNT_BASE0,	8,	&dev->stats.aggr_n[0] },
+		{ MT_TX_AGG_CNT_BASE1,	8,	&dev->stats.aggr_n[16] },
+	};
+	u32 sum, n;
+	int i, j, k;
+
+	/* Note: using MCU_RANDOM_READ is actually slower than reading all the
+	 * registers by hand.  MCU takes ca. 20ms to complete read of 24
+	 * registers while reading them one by one will take roughly
+	 * 24*200us =~ 5ms.
+	 */
+
+	k = 0;
+	n = 0;
+	sum = 0;
+	for (i = 0; i < ARRAY_SIZE(spans); i++)
+		for (j = 0; j < spans[i].span; j++) {
+			u32 val = mt7601u_rr(dev, spans[i].addr_base + j * 4);
+
+			spans[i].stat_base[j * 2] += val & 0xffff;
+			spans[i].stat_base[j * 2 + 1] += val >> 16;
+
+			/* Calculate average AMPDU length */
+			if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
+			    spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
+				continue;
+
+			n += (val >> 16) + (val & 0xffff);
+			sum += (val & 0xffff) * (1 + k * 2) +
+				(val >> 16) * (2 + k * 2);
+			k++;
+		}
+
+	atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
+
+	mt7601u_check_mac_err(dev);
+
+	ieee80211_queue_delayed_work(dev->hw, &dev->mac_work, 10 * HZ);
+}
+
+void
+mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+	u8 zmac[ETH_ALEN] = {};
+	u32 attr;
+
+	attr = MT76_SET(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+	       MT76_SET(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+	mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+	if (mac)
+		memcpy(zmac, mac, sizeof(zmac));
+
+	mt7601u_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
+}
+
+void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev)
+{
+	struct ieee80211_sta *sta;
+	struct mt76_wcid *wcid;
+	void *msta;
+	u8 min_factor = 3;
+	int i;
+
+	rcu_read_lock();
+	for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
+		wcid = rcu_dereference(dev->wcid[i]);
+		if (!wcid)
+			continue;
+
+		msta = container_of(wcid, struct mt76_sta, wcid);
+		sta = container_of(msta, struct ieee80211_sta, drv_priv);
+
+		min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
+	}
+	rcu_read_unlock();
+
+	mt7601u_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
+		   MT76_SET(MT_MAX_LEN_CFG_AMPDU, min_factor));
+}
+
+static void
+mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
+{
+	u8 idx = MT76_GET(MT_RXWI_RATE_MCS, rate);
+
+	switch (MT76_GET(MT_RXWI_RATE_PHY, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		if (WARN_ON(idx >= 8))
+			idx = 0;
+		idx += 4;
+
+		status->rate_idx = idx;
+		return;
+	case MT_PHY_TYPE_CCK:
+		if (idx >= 8) {
+			idx -= 8;
+			status->flag |= RX_FLAG_SHORTPRE;
+		}
+
+		if (WARN_ON(idx >= 4))
+			idx = 0;
+
+		status->rate_idx = idx;
+		return;
+	case MT_PHY_TYPE_HT_GF:
+		status->flag |= RX_FLAG_HT_GF;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		status->flag |= RX_FLAG_HT;
+		status->rate_idx = idx;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	if (rate & MT_RXWI_RATE_SGI)
+		status->flag |= RX_FLAG_SHORT_GI;
+
+	if (rate & MT_RXWI_RATE_STBC)
+		status->flag |= 1 << RX_FLAG_STBC_SHIFT;
+
+	if (rate & MT_RXWI_RATE_BW)
+		status->flag |= RX_FLAG_40MHZ;
+}
+
+static void
+mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
+			  u16 rate, int rssi)
+{
+	dev->bcn_freq_off = rxwi->freq_off;
+	dev->bcn_phy_mode = MT76_GET(MT_RXWI_RATE_PHY, rate);
+	dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
+}
+
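+/* The avg_rssi update just above is a fixed-point exponential moving
+ * average: the accumulator keeps 8 fractional bits (hence rssi << 8)
+ * and old samples decay with weight 15/16.  A minimal sketch of the
+ * same filter (ewma_rssi_step() is illustrative only, not part of
+ * this driver):
+ *
+ *	static int ewma_rssi_step(int avg, int rssi)
+ *	{
+ *		return (avg * 15) / 16 + (rssi << 8);
+ *	}
+ *
+ * At steady state avg == avg * 15 / 16 + (rssi << 8), so the stored
+ * value converges to 16 * (rssi << 8) == rssi << 12 and needs scaling
+ * back down wherever a plain signal value is wanted.
+ */
+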
+static int +mt7601u_rx_is_our_beacon(struct mt7601u_dev *dev, u8 *data) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data; + + return ieee80211_is_beacon(hdr->frame_control) && + ether_addr_equal(hdr->addr2, dev->ap_bssid); +} + +u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb, + u8 *data, void *rxi) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct mt7601u_rxwi *rxwi = rxi; + u32 ctl = le32_to_cpu(rxwi->ctl); + u16 rate = le16_to_cpu(rxwi->rate); + int rssi; + + if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) { + status->flag |= RX_FLAG_DECRYPTED; + status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; + } + + status->chains = BIT(0); + rssi = mt7601u_phy_get_rssi(dev, rxwi, rate); + status->chain_signal[0] = status->signal = rssi; + status->freq = dev->chandef.chan->center_freq; + status->band = dev->chandef.chan->band; + + mt76_mac_process_rate(status, rate); + + spin_lock_bh(&dev->con_mon_lock); + if (mt7601u_rx_is_our_beacon(dev, data)) + mt7601u_rx_monitor_beacon(dev, rxwi, rate, rssi); + else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) + dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8); + spin_unlock_bh(&dev->con_mon_lock); + + return MT76_GET(MT_RXWI_CTL_MPDU_LEN, ctl); +} + +static enum mt76_cipher_type +mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) +{ + memset(key_data, 0, 32); + if (!key) + return MT_CIPHER_NONE; + + if (key->keylen > 32) + return MT_CIPHER_NONE; + + memcpy(key_data, key->key, key->keylen); + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + return MT_CIPHER_WEP40; + case WLAN_CIPHER_SUITE_WEP104: + return MT_CIPHER_WEP104; + case WLAN_CIPHER_SUITE_TKIP: + return MT_CIPHER_TKIP; + case WLAN_CIPHER_SUITE_CCMP: + return MT_CIPHER_AES_CCMP; + default: + return MT_CIPHER_NONE; + } +} + +int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx, + struct ieee80211_key_conf *key) +{ + enum mt76_cipher_type cipher; + u8 key_data[32]; + u8 iv_data[8]; + u32 val; + + cipher = mt76_mac_get_key_info(key, key_data); + if (cipher == MT_CIPHER_NONE && key) + return -EINVAL; + + trace_set_key(dev, idx); + + mt7601u_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); + + memset(iv_data, 0, sizeof(iv_data)); + if (key) { + iv_data[3] = key->keyidx << 6; + if (cipher >= MT_CIPHER_TKIP) { + /* Note: start with 1 to comply with spec, + * (see comment on common/cmm_wpa.c:4291). 
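+			 * Byte 3 of the IV carries the key index in bits
+			 * 6-7 (keyidx << 6 above); the 0x20 set below is
+			 * the Ext IV flag which TKIP and CCMP require.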
+ */ + iv_data[0] |= 1; + iv_data[3] |= 0x20; + } + } + mt7601u_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); + + val = mt7601u_rr(dev, MT_WCID_ATTR(idx)); + val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT; + val |= MT76_SET(MT_WCID_ATTR_PKEY_MODE, cipher & 7) | + MT76_SET(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3); + val &= ~MT_WCID_ATTR_PAIRWISE; + val |= MT_WCID_ATTR_PAIRWISE * + !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE); + mt7601u_wr(dev, MT_WCID_ATTR(idx), val); + + return 0; +} + +int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx, + struct ieee80211_key_conf *key) +{ + enum mt76_cipher_type cipher; + u8 key_data[32]; + u32 val; + + cipher = mt76_mac_get_key_info(key, key_data); + if (cipher == MT_CIPHER_NONE && key) + return -EINVAL; + + trace_set_shared_key(dev, vif_idx, key_idx); + + mt7601u_wr_copy(dev, MT_SKEY(vif_idx, key_idx), + key_data, sizeof(key_data)); + + val = mt76_rr(dev, MT_SKEY_MODE(vif_idx)); + val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx)); + val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx); + mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.h b/drivers/net/wireless/mediatek/mt7601u/mac.h new file mode 100644 index 000000000000..2c22d63c63a2 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/mac.h @@ -0,0 +1,178 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT76_MAC_H +#define __MT76_MAC_H + +struct mt76_tx_status { + u8 valid:1; + u8 success:1; + u8 aggr:1; + u8 ack_req:1; + u8 is_probe:1; + u8 wcid; + u8 pktid; + u8 retry; + u16 rate; +} __packed __aligned(2); + +/* Note: values in original "RSSI" and "SNR" fields are not actually what they + * are called for MT7601U, names used by this driver are educated guesses + * (see vendor mac/ral_omac.c). 
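+ *
+ * One such descriptor accompanies every received frame and is decoded
+ * by mt76_mac_process_rx() in mac.c.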
+ */ +struct mt7601u_rxwi { + __le32 rxinfo; + + __le32 ctl; + + __le16 frag_sn; + __le16 rate; + + u8 unknown; + u8 zero[3]; + + u8 snr; + u8 ant; + u8 gain; + u8 freq_off; + + __le32 resv2; + __le32 expert_ant; +} __packed __aligned(4); + +#define MT_RXINFO_BA BIT(0) +#define MT_RXINFO_DATA BIT(1) +#define MT_RXINFO_NULL BIT(2) +#define MT_RXINFO_FRAG BIT(3) +#define MT_RXINFO_U2M BIT(4) +#define MT_RXINFO_MULTICAST BIT(5) +#define MT_RXINFO_BROADCAST BIT(6) +#define MT_RXINFO_MYBSS BIT(7) +#define MT_RXINFO_CRCERR BIT(8) +#define MT_RXINFO_ICVERR BIT(9) +#define MT_RXINFO_MICERR BIT(10) +#define MT_RXINFO_AMSDU BIT(11) +#define MT_RXINFO_HTC BIT(12) +#define MT_RXINFO_RSSI BIT(13) +#define MT_RXINFO_L2PAD BIT(14) +#define MT_RXINFO_AMPDU BIT(15) +#define MT_RXINFO_DECRYPT BIT(16) +#define MT_RXINFO_BSSIDX3 BIT(17) +#define MT_RXINFO_WAPI_KEY BIT(18) +#define MT_RXINFO_PN_LEN GENMASK(21, 19) +#define MT_RXINFO_SW_PKT_80211 BIT(22) +#define MT_RXINFO_TCP_SUM_BYPASS BIT(28) +#define MT_RXINFO_IP_SUM_BYPASS BIT(29) +#define MT_RXINFO_TCP_SUM_ERR BIT(30) +#define MT_RXINFO_IP_SUM_ERR BIT(31) + +#define MT_RXWI_CTL_WCID GENMASK(7, 0) +#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8) +#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10) +#define MT_RXWI_CTL_UDF GENMASK(15, 13) +#define MT_RXWI_CTL_MPDU_LEN GENMASK(27, 16) +#define MT_RXWI_CTL_TID GENMASK(31, 28) + +#define MT_RXWI_FRAG GENMASK(3, 0) +#define MT_RXWI_SN GENMASK(15, 4) + +#define MT_RXWI_RATE_MCS GENMASK(6, 0) +#define MT_RXWI_RATE_BW BIT(7) +#define MT_RXWI_RATE_SGI BIT(8) +#define MT_RXWI_RATE_STBC GENMASK(10, 9) +#define MT_RXWI_RATE_ETXBF BIT(11) +#define MT_RXWI_RATE_SND BIT(12) +#define MT_RXWI_RATE_ITXBF BIT(13) +#define MT_RXWI_RATE_PHY GENMASK(15, 14) + +#define MT_RXWI_GAIN_RSSI_VAL GENMASK(5, 0) +#define MT_RXWI_GAIN_RSSI_LNA_ID GENMASK(7, 6) +#define MT_RXWI_ANT_AUX_LNA BIT(7) + +#define MT_RXWI_EANT_ENC_ANT_ID GENMASK(7, 0) + +enum mt76_phy_type { + MT_PHY_TYPE_CCK, + MT_PHY_TYPE_OFDM, + MT_PHY_TYPE_HT, + MT_PHY_TYPE_HT_GF, +}; + +enum mt76_phy_bandwidth { + MT_PHY_BW_20, + MT_PHY_BW_40, +}; + +struct mt76_txwi { + __le16 flags; + __le16 rate_ctl; + + u8 ack_ctl; + u8 wcid; + __le16 len_ctl; + + __le32 iv; + + __le32 eiv; + + u8 aid; + u8 txstream; + __le16 ctl; +} __packed __aligned(4); + +#define MT_TXWI_FLAGS_FRAG BIT(0) +#define MT_TXWI_FLAGS_MMPS BIT(1) +#define MT_TXWI_FLAGS_CFACK BIT(2) +#define MT_TXWI_FLAGS_TS BIT(3) +#define MT_TXWI_FLAGS_AMPDU BIT(4) +#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5) +#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8) +#define MT_TXWI_FLAGS_CWMIN GENMASK(12, 10) +#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13) +#define MT_TXWI_FLAGS_TX_RPT BIT(14) +#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15) + +#define MT_TXWI_RATE_MCS GENMASK(6, 0) +#define MT_TXWI_RATE_BW BIT(7) +#define MT_TXWI_RATE_SGI BIT(8) +#define MT_TXWI_RATE_STBC GENMASK(10, 9) +#define MT_TXWI_RATE_PHY_MODE GENMASK(15, 14) + +#define MT_TXWI_ACK_CTL_REQ BIT(0) +#define MT_TXWI_ACK_CTL_NSEQ BIT(1) +#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2) + +#define MT_TXWI_LEN_BYTE_CNT GENMASK(11, 0) +#define MT_TXWI_LEN_PKTID GENMASK(15, 12) + +#define MT_TXWI_CTL_TX_POWER_ADJ GENMASK(3, 0) +#define MT_TXWI_CTL_CHAN_CHECK_PKT BIT(4) +#define MT_TXWI_CTL_PIFS_REV BIT(6) + +u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb, + u8 *data, void *rxi); +int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx, + struct ieee80211_key_conf *key); +void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid, + const struct 
ieee80211_tx_rate *rate); + +int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx, + struct ieee80211_key_conf *key); +u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev, + const struct ieee80211_tx_rate *rate, u8 *nss_val); +struct mt76_tx_status +mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev); +void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat); + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c new file mode 100644 index 000000000000..ced82abb414f --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/main.c @@ -0,0 +1,412 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "mt7601u.h" +#include "mac.h" +#include +#include + +static int mt7601u_start(struct ieee80211_hw *hw) +{ + struct mt7601u_dev *dev = hw->priv; + int ret; + + mutex_lock(&dev->mutex); + + ret = mt7601u_mac_start(dev); + if (ret) + goto out; + + ieee80211_queue_delayed_work(dev->hw, &dev->mac_work, + MT_CALIBRATE_INTERVAL); + ieee80211_queue_delayed_work(dev->hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); +out: + mutex_unlock(&dev->mutex); + return ret; +} + +static void mt7601u_stop(struct ieee80211_hw *hw) +{ + struct mt7601u_dev *dev = hw->priv; + + mutex_lock(&dev->mutex); + + cancel_delayed_work_sync(&dev->cal_work); + cancel_delayed_work_sync(&dev->mac_work); + mt7601u_mac_stop(dev); + + mutex_unlock(&dev->mutex); +} + +static int mt7601u_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7601u_dev *dev = hw->priv; + struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; + unsigned int idx = 0; + unsigned int wcid = GROUP_WCID(idx); + + /* Note: for AP do the AP-STA things mt76 does: + * - beacon offsets + * - do mac address tricks + * - shift vif idx + */ + mvif->idx = idx; + + if (dev->wcid_mask[wcid / BITS_PER_LONG] & BIT(wcid % BITS_PER_LONG)) + return -ENOSPC; + dev->wcid_mask[wcid / BITS_PER_LONG] |= BIT(wcid % BITS_PER_LONG); + mvif->group_wcid.idx = wcid; + mvif->group_wcid.hw_key_idx = -1; + + return 0; +} + +static void mt7601u_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7601u_dev *dev = hw->priv; + struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; + unsigned int wcid = mvif->group_wcid.idx; + + dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG); +} + +static int mt7601u_config(struct ieee80211_hw *hw, u32 changed) +{ + struct mt7601u_dev *dev = hw->priv; + int ret = 0; + + mutex_lock(&dev->mutex); + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + ieee80211_stop_queues(hw); + ret = mt7601u_phy_set_channel(dev, &hw->conf.chandef); + ieee80211_wake_queues(hw); + } + + mutex_unlock(&dev->mutex); + + return ret; +} + +static void +mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, + unsigned int *total_flags, u64 multicast) +{ + struct mt7601u_dev *dev = hw->priv; + u32 flags = 0; + +#define MT76_FILTER(_flag, _hw) do { \ + flags |= *total_flags & FIF_##_flag; \ + 
dev->rxfilter &= ~(_hw); \ + dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ + } while (0) + + mutex_lock(&dev->mutex); + + dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; + + MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR); + MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR); + MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK | + MT_RX_FILTR_CFG_CTS | + MT_RX_FILTR_CFG_CFEND | + MT_RX_FILTR_CFG_CFACK | + MT_RX_FILTR_CFG_BA | + MT_RX_FILTR_CFG_CTRL_RSV); + MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL); + + *total_flags = flags; + mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + + mutex_unlock(&dev->mutex); +} + +static void +mt7601u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed) +{ + struct mt7601u_dev *dev = hw->priv; + + mutex_lock(&dev->mutex); + + if (changed & BSS_CHANGED_ASSOC) + mt7601u_phy_con_cal_onoff(dev, info); + + if (changed & BSS_CHANGED_BSSID) { + mt7601u_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid); + + /* Note: this is a hack because beacon_int is not changed + * on leave nor is any more appropriate event generated. + * rt2x00 doesn't seem to be bothered though. + */ + if (is_zero_ether_addr(info->bssid)) + mt7601u_mac_config_tsf(dev, false, 0); + } + + if (changed & BSS_CHANGED_BASIC_RATES) { + mt7601u_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates); + mt7601u_wr(dev, MT_HT_FBK_CFG0, 0x65432100); + mt7601u_wr(dev, MT_HT_FBK_CFG1, 0xedcba980); + mt7601u_wr(dev, MT_LG_FBK_CFG0, 0xedcba988); + mt7601u_wr(dev, MT_LG_FBK_CFG1, 0x00002100); + } + + if (changed & BSS_CHANGED_BEACON_INT) + mt7601u_mac_config_tsf(dev, true, info->beacon_int); + + if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT) + mt7601u_mac_set_protection(dev, info->use_cts_prot, + info->ht_operation_mode); + + if (changed & BSS_CHANGED_ERP_PREAMBLE) + mt7601u_mac_set_short_preamble(dev, info->use_short_preamble); + + if (changed & BSS_CHANGED_ERP_SLOT) { + int slottime = info->use_short_slot ? 
9 : 20; + + mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, + MT_BKOFF_SLOT_CFG_SLOTTIME, slottime); + } + + if (changed & BSS_CHANGED_ASSOC) + mt7601u_phy_recalibrate_after_assoc(dev); + + mutex_unlock(&dev->mutex); +} + +static int +mt76_wcid_alloc(struct mt7601u_dev *dev) +{ + int i, idx = 0; + + for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) { + idx = ffs(~dev->wcid_mask[i]); + if (!idx) + continue; + + idx--; + dev->wcid_mask[i] |= BIT(idx); + break; + } + + idx = i * BITS_PER_LONG + idx; + if (idx > 119) + return -1; + + return idx; +} + +static int +mt7601u_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7601u_dev *dev = hw->priv; + struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; + struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; + int ret = 0; + int idx = 0; + + mutex_lock(&dev->mutex); + + idx = mt76_wcid_alloc(dev); + if (idx < 0) { + ret = -ENOSPC; + goto out; + } + + msta->wcid.idx = idx; + msta->wcid.hw_key_idx = -1; + mt7601u_mac_wcid_setup(dev, idx, mvif->idx, sta->addr); + mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx)); + rcu_assign_pointer(dev->wcid[idx], &msta->wcid); + mt7601u_mac_set_ampdu_factor(dev); + +out: + mutex_unlock(&dev->mutex); + + return ret; +} + +static int +mt7601u_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7601u_dev *dev = hw->priv; + struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; + int idx = msta->wcid.idx; + + mutex_lock(&dev->mutex); + rcu_assign_pointer(dev->wcid[idx], NULL); + mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx)); + dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG); + mt7601u_mac_wcid_setup(dev, idx, 0, NULL); + mt7601u_mac_set_ampdu_factor(dev); + mutex_unlock(&dev->mutex); + + return 0; +} + +static void +mt7601u_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, struct ieee80211_sta *sta) +{ +} + +static void +mt7601u_sw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) +{ + struct mt7601u_dev *dev = hw->priv; + + mt7601u_agc_save(dev); + set_bit(MT7601U_STATE_SCANNING, &dev->state); +} + +static void +mt7601u_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7601u_dev *dev = hw->priv; + + mt7601u_agc_restore(dev); + clear_bit(MT7601U_STATE_SCANNING, &dev->state); +} + +static int +mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt7601u_dev *dev = hw->priv; + struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; + struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL; + struct mt76_wcid *wcid = msta ? 
&msta->wcid : &mvif->group_wcid; + int idx = key->keyidx; + int ret; + + if (cmd == SET_KEY) { + key->hw_key_idx = wcid->idx; + wcid->hw_key_idx = idx; + } else { + if (idx == wcid->hw_key_idx) + wcid->hw_key_idx = -1; + + key = NULL; + } + + if (!msta) { + if (key || wcid->hw_key_idx == idx) { + ret = mt76_mac_wcid_set_key(dev, wcid->idx, key); + if (ret) + return ret; + } + + return mt76_mac_shared_key_setup(dev, mvif->idx, idx, key); + } + + return mt76_mac_wcid_set_key(dev, msta->wcid.idx, key); +} + +static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct mt7601u_dev *dev = hw->priv; + + mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value); + + return 0; +} + +static int +mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size) +{ + struct mt7601u_dev *dev = hw->priv; + struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; + + WARN_ON(msta->wcid.idx > GROUP_WCID(0)); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); + break; + case IEEE80211_AMPDU_RX_STOP: + mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, + BIT(16 + tid)); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + break; + case IEEE80211_AMPDU_TX_START: + msta->agg_ssn[tid] = *ssn << 4; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + } + + return 0; +} + +static void +mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7601u_dev *dev = hw->priv; + struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; + struct ieee80211_sta_rates *rates; + struct ieee80211_tx_rate rate = {}; + + rcu_read_lock(); + rates = rcu_dereference(sta->rates); + + if (!rates) + goto out; + + rate.idx = rates->rate[0].idx; + rate.flags = rates->rate[0].flags; + mt76_mac_wcid_set_rate(dev, &msta->wcid, &rate); + +out: + rcu_read_unlock(); +} + +const struct ieee80211_ops mt7601u_ops = { + .tx = mt7601u_tx, + .start = mt7601u_start, + .stop = mt7601u_stop, + .add_interface = mt7601u_add_interface, + .remove_interface = mt7601u_remove_interface, + .config = mt7601u_config, + .configure_filter = mt76_configure_filter, + .bss_info_changed = mt7601u_bss_info_changed, + .sta_add = mt7601u_sta_add, + .sta_remove = mt7601u_sta_remove, + .sta_notify = mt7601u_sta_notify, + .set_key = mt7601u_set_key, + .conf_tx = mt7601u_conf_tx, + .sw_scan_start = mt7601u_sw_scan, + .sw_scan_complete = mt7601u_sw_scan_complete, + .ampdu_action = mt76_ampdu_action, + .sta_rate_tbl_update = mt76_sta_rate_tbl_update, + .set_rts_threshold = mt7601u_set_rts_threshold, +}; diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c new file mode 100644 index 000000000000..fbb1986eda3c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c @@ -0,0 +1,534 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. 
+ * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include + +#include "mt7601u.h" +#include "dma.h" +#include "mcu.h" +#include "usb.h" +#include "trace.h" + +#define MCU_FW_URB_MAX_PAYLOAD 0x3800 +#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12) +#define MCU_RESP_URB_SIZE 1024 + +static inline int firmware_running(struct mt7601u_dev *dev) +{ + return mt7601u_rr(dev, MT_MCU_COM_REG0) == 1; +} + +static inline void skb_put_le32(struct sk_buff *skb, u32 val) +{ + put_unaligned_le32(val, skb_put(skb, 4)); +} + +static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb, + u8 seq, enum mcu_cmd cmd) +{ + WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND, + MT76_SET(MT_TXD_CMD_INFO_SEQ, seq) | + MT76_SET(MT_TXD_CMD_INFO_TYPE, cmd))); +} + +static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev, + struct sk_buff *skb, bool need_resp) +{ + u32 i, csum = 0; + + for (i = 0; i < skb->len / 4; i++) + csum ^= get_unaligned_le32(skb->data + i * 4); + + trace_mt_mcu_msg_send(dev, skb, csum, need_resp); +} + +static struct sk_buff * +mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len) +{ + struct sk_buff *skb; + + WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */ + + skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL); + skb_reserve(skb, MT_DMA_HDR_LEN); + memcpy(skb_put(skb, len), data, len); + + return skb; +} + +static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq) +{ + struct urb *urb = dev->mcu.resp.urb; + u32 rxfce; + int urb_status, ret, i = 5; + + while (i--) { + if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl, + msecs_to_jiffies(300))) { + dev_warn(dev->dev, "Warning: %s retrying\n", __func__); + continue; + } + + /* Make copies of important data before reusing the urb */ + rxfce = get_unaligned_le32(dev->mcu.resp.buf); + urb_status = urb->status * mt7601u_urb_has_error(urb); + + ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP, + &dev->mcu.resp, GFP_KERNEL, + mt7601u_complete_urb, + &dev->mcu.resp_cmpl); + if (ret) + return ret; + + if (urb_status) + dev_err(dev->dev, "Error: MCU resp urb failed:%d\n", + urb_status); + + if (MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq && + MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE) + return 0; + + dev_err(dev->dev, "Error: MCU resp evt:%hhx seq:%hhx-%hhx!\n", + MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce), + seq, MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce)); + } + + dev_err(dev->dev, "Error: %s timed out\n", __func__); + return -ETIMEDOUT; +} + +static int +mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb, + enum mcu_cmd cmd, bool wait_resp) +{ + struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); + unsigned cmd_pipe = usb_sndbulkpipe(usb_dev, + dev->out_eps[MT_EP_OUT_INBAND_CMD]); + int sent, ret; + u8 seq = 0; + + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return 0; + + mutex_lock(&dev->mcu.mutex); + + if (wait_resp) + while (!seq) + seq = ++dev->mcu.msg_seq & 0xf; + + mt7601u_dma_skb_wrap_cmd(skb, seq, 
cmd); + + if (dev->mcu.resp_cmpl.done) + dev_err(dev->dev, "Error: MCU response pre-completed!\n"); + + trace_mt_mcu_msg_send_cs(dev, skb, wait_resp); + trace_mt_submit_urb_sync(dev, cmd_pipe, skb->len); + ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500); + if (ret) { + dev_err(dev->dev, "Error: send MCU cmd failed:%d\n", ret); + goto out; + } + if (sent != skb->len) + dev_err(dev->dev, "Error: %s sent != skb->len\n", __func__); + + if (wait_resp) + ret = mt7601u_mcu_wait_resp(dev, seq); +out: + mutex_unlock(&dev->mcu.mutex); + + consume_skb(skb); + + return ret; +} + +static int mt7601u_mcu_function_select(struct mt7601u_dev *dev, + enum mcu_function func, u32 val) +{ + struct sk_buff *skb; + struct { + __le32 id; + __le32 value; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(func), + .value = cpu_to_le32(val), + }; + + skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); + return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5); +} + +int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga) +{ + int ret; + + if (!test_bit(MT7601U_STATE_MCU_RUNNING, &dev->state)) + return 0; + + ret = mt7601u_mcu_function_select(dev, ATOMIC_TSSI_SETTING, + use_hvga); + if (ret) { + dev_warn(dev->dev, "Warning: MCU TSSI read kick failed\n"); + return ret; + } + + dev->tssi_read_trig = true; + + return 0; +} + +int +mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val) +{ + struct sk_buff *skb; + struct { + __le32 id; + __le32 value; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(cal), + .value = cpu_to_le32(val), + }; + + skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); + return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true); +} + +int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base, + const struct mt76_reg_pair *data, int n) +{ + const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8; + struct sk_buff *skb; + int cnt, i, ret; + + if (!n) + return 0; + + cnt = min(max_vals_per_cmd, n); + + skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); + if (!skb) + return -ENOMEM; + skb_reserve(skb, MT_DMA_HDR_LEN); + + for (i = 0; i < cnt; i++) { + skb_put_le32(skb, base + data[i].reg); + skb_put_le32(skb, data[i].value); + } + + ret = mt7601u_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n); + if (ret) + return ret; + + return mt7601u_write_reg_pairs(dev, base, data + cnt, n - cnt); +} + +int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset, + const u32 *data, int n) +{ + const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1; + struct sk_buff *skb; + int cnt, i, ret; + + if (!n) + return 0; + + cnt = min(max_regs_per_cmd, n); + + skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); + if (!skb) + return -ENOMEM; + skb_reserve(skb, MT_DMA_HDR_LEN); + + skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset); + for (i = 0; i < cnt; i++) + skb_put_le32(skb, data[i]); + + ret = mt7601u_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n); + if (ret) + return ret; + + return mt7601u_burst_write_regs(dev, offset + cnt * 4, + data + cnt, n - cnt); +} + +struct mt76_fw_header { + __le32 ilm_len; + __le32 dlm_len; + __le16 build_ver; + __le16 fw_ver; + u8 pad[4]; + char build_time[16]; +}; + +struct mt76_fw { + struct mt76_fw_header hdr; + u8 ivb[MT_MCU_IVB_SIZE]; + u8 ilm[]; +}; + +static int __mt7601u_dma_fw(struct mt7601u_dev *dev, + const struct mt7601u_dma_buf *dma_buf, + const void *data, u32 len, u32 dst_addr) +{ + DECLARE_COMPLETION_ONSTACK(cmpl); + struct mt7601u_dma_buf buf = *dma_buf; /* 
we need to fake length */ + __le32 reg; + u32 val; + int ret; + + reg = cpu_to_le32(MT76_SET(MT_TXD_INFO_TYPE, DMA_PACKET) | + MT76_SET(MT_TXD_INFO_D_PORT, CPU_TX_PORT) | + MT76_SET(MT_TXD_INFO_LEN, len)); + memcpy(buf.buf, ®, sizeof(reg)); + memcpy(buf.buf + sizeof(reg), data, len); + memset(buf.buf + sizeof(reg) + len, 0, 8); + + ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE, + MT_FCE_DMA_ADDR, dst_addr); + if (ret) + return ret; + len = roundup(len, 4); + ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE, + MT_FCE_DMA_LEN, len << 16); + if (ret) + return ret; + + buf.len = MT_DMA_HDR_LEN + len + 4; + ret = mt7601u_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD, + &buf, GFP_KERNEL, + mt7601u_complete_urb, &cmpl); + if (ret) + return ret; + + if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) { + dev_err(dev->dev, "Error: firmware upload timed out\n"); + usb_kill_urb(buf.urb); + return -ETIMEDOUT; + } + if (mt7601u_urb_has_error(buf.urb)) { + dev_err(dev->dev, "Error: firmware upload urb failed:%d\n", + buf.urb->status); + return buf.urb->status; + } + + val = mt7601u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX); + val++; + mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val); + + return 0; +} + +static int +mt7601u_dma_fw(struct mt7601u_dev *dev, struct mt7601u_dma_buf *dma_buf, + const void *data, int len, u32 dst_addr) +{ + int n, ret; + + if (len == 0) + return 0; + + n = min(MCU_FW_URB_MAX_PAYLOAD, len); + ret = __mt7601u_dma_fw(dev, dma_buf, data, n, dst_addr); + if (ret) + return ret; + + if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500)) + return -ETIMEDOUT; + + return mt7601u_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n); +} + +static int +mt7601u_upload_firmware(struct mt7601u_dev *dev, const struct mt76_fw *fw) +{ + struct mt7601u_dma_buf dma_buf; + void *ivb; + u32 ilm_len, dlm_len; + int i, ret; + + ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL); + if (!ivb || mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) { + ret = -ENOMEM; + goto error; + } + + ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb); + dev_dbg(dev->dev, "loading FW - ILM %u + IVB %zu\n", + ilm_len, sizeof(fw->ivb)); + ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb)); + if (ret) + goto error; + + dlm_len = le32_to_cpu(fw->hdr.dlm_len); + dev_dbg(dev->dev, "loading FW - DLM %u\n", dlm_len); + ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm + ilm_len, + dlm_len, MT_MCU_DLM_OFFSET); + if (ret) + goto error; + + ret = mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT, + 0x12, 0, ivb, sizeof(fw->ivb)); + if (ret < 0) + goto error; + ret = 0; + + for (i = 100; i && !firmware_running(dev); i--) + msleep(10); + if (!i) { + ret = -ETIMEDOUT; + goto error; + } + + dev_dbg(dev->dev, "Firmware running!\n"); +error: + kfree(ivb); + mt7601u_usb_free_buf(dev, &dma_buf); + + return ret; +} + +static int mt7601u_load_firmware(struct mt7601u_dev *dev) +{ + const struct firmware *fw; + const struct mt76_fw_header *hdr; + int len, ret; + u32 val; + + mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN | + MT_USB_DMA_CFG_TX_BULK_EN)); + + if (firmware_running(dev)) + return 0; + + ret = request_firmware(&fw, MT7601U_FIRMWARE, dev->dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) + goto err_inv_fw; + + hdr = (const struct mt76_fw_header *) fw->data; + + if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE) + goto err_inv_fw; + + len = sizeof(*hdr); + len += le32_to_cpu(hdr->ilm_len); + len += 
le32_to_cpu(hdr->dlm_len); + + if (fw->size != len) + goto err_inv_fw; + + val = le16_to_cpu(hdr->fw_ver); + dev_info(dev->dev, + "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n", + (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf, + le16_to_cpu(hdr->build_ver), hdr->build_time); + + len = le32_to_cpu(hdr->ilm_len); + + mt7601u_wr(dev, 0x94c, 0); + mt7601u_wr(dev, MT_FCE_PSE_CTRL, 0); + + mt7601u_vendor_reset(dev); + msleep(5); + + mt7601u_wr(dev, 0xa44, 0); + mt7601u_wr(dev, 0x230, 0x84210); + mt7601u_wr(dev, 0x400, 0x80c00); + mt7601u_wr(dev, 0x800, 1); + + mt7601u_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN | + MT_PBF_CFG_TX1Q_EN | + MT_PBF_CFG_TX2Q_EN | + MT_PBF_CFG_TX3Q_EN)); + + mt7601u_wr(dev, MT_FCE_PSE_CTRL, 1); + + mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN | + MT_USB_DMA_CFG_TX_BULK_EN)); + val = mt76_set(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_CLR); + val &= ~MT_USB_DMA_CFG_TX_CLR; + mt7601u_wr(dev, MT_USB_DMA_CFG, val); + + /* FCE tx_fs_base_ptr */ + mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230); + /* FCE tx_fs_max_cnt */ + mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1); + /* FCE pdma enable */ + mt7601u_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44); + /* FCE skip_fs_en */ + mt7601u_wr(dev, MT_FCE_SKIP_FS, 3); + + ret = mt7601u_upload_firmware(dev, (const struct mt76_fw *)fw->data); + + release_firmware(fw); + + return ret; + +err_inv_fw: + dev_err(dev->dev, "Invalid firmware image\n"); + release_firmware(fw); + return -ENOENT; +} + +int mt7601u_mcu_init(struct mt7601u_dev *dev) +{ + int ret; + + mutex_init(&dev->mcu.mutex); + + ret = mt7601u_load_firmware(dev); + if (ret) + return ret; + + set_bit(MT7601U_STATE_MCU_RUNNING, &dev->state); + + return 0; +} + +int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev) +{ + int ret; + + ret = mt7601u_mcu_function_select(dev, Q_SELECT, 1); + if (ret) + return ret; + + init_completion(&dev->mcu.resp_cmpl); + if (mt7601u_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) { + mt7601u_usb_free_buf(dev, &dev->mcu.resp); + return -ENOMEM; + } + + ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP, + &dev->mcu.resp, GFP_KERNEL, + mt7601u_complete_urb, &dev->mcu.resp_cmpl); + if (ret) { + mt7601u_usb_free_buf(dev, &dev->mcu.resp); + return ret; + } + + return 0; +} + +void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev) +{ + usb_kill_urb(dev->mcu.resp.urb); + mt7601u_usb_free_buf(dev, &dev->mcu.resp); +} diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.h b/drivers/net/wireless/mediatek/mt7601u/mcu.h new file mode 100644 index 000000000000..4a66d1092a18 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/mcu.h @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MT7601U_MCU_H +#define __MT7601U_MCU_H + +struct mt7601u_dev; + +/* Register definitions */ +#define MT_MCU_RESET_CTL 0x070C +#define MT_MCU_INT_LEVEL 0x0718 +#define MT_MCU_COM_REG0 0x0730 +#define MT_MCU_COM_REG1 0x0734 +#define MT_MCU_COM_REG2 0x0738 +#define MT_MCU_COM_REG3 0x073C + +#define MT_MCU_IVB_SIZE 0x40 +#define MT_MCU_DLM_OFFSET 0x80000 + +#define MT_MCU_MEMMAP_WLAN 0x00410000 +#define MT_MCU_MEMMAP_BBP 0x40000000 +#define MT_MCU_MEMMAP_RF 0x80000000 + +#define INBAND_PACKET_MAX_LEN 192 + +enum mcu_cmd { + CMD_FUN_SET_OP = 1, + CMD_LOAD_CR = 2, + CMD_INIT_GAIN_OP = 3, + CMD_DYNC_VGA_OP = 6, + CMD_TDLS_CH_SW = 7, + CMD_BURST_WRITE = 8, + CMD_READ_MODIFY_WRITE = 9, + CMD_RANDOM_READ = 10, + CMD_BURST_READ = 11, + CMD_RANDOM_WRITE = 12, + CMD_LED_MODE_OP = 16, + CMD_POWER_SAVING_OP = 20, + CMD_WOW_CONFIG = 21, + CMD_WOW_QUERY = 22, + CMD_WOW_FEATURE = 24, + CMD_CARRIER_DETECT_OP = 28, + CMD_RADOR_DETECT_OP = 29, + CMD_SWITCH_CHANNEL_OP = 30, + CMD_CALIBRATION_OP = 31, + CMD_BEACON_OP = 32, + CMD_ANTENNA_OP = 33, +}; + +enum mcu_function { + Q_SELECT = 1, + ATOMIC_TSSI_SETTING = 5, +}; + +enum mcu_power_mode { + RADIO_OFF = 0x30, + RADIO_ON = 0x31, + RADIO_OFF_AUTO_WAKEUP = 0x32, + RADIO_OFF_ADVANCE = 0x33, + RADIO_ON_ADVANCE = 0x34, +}; + +enum mcu_calibrate { + MCU_CAL_R = 1, + MCU_CAL_DCOC, + MCU_CAL_LC, + MCU_CAL_LOFT, + MCU_CAL_TXIQ, + MCU_CAL_BW, + MCU_CAL_DPD, + MCU_CAL_RXIQ, + MCU_CAL_TXDCOC, +}; + +int mt7601u_mcu_init(struct mt7601u_dev *dev); +int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev); +void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev); + +int +mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val); +int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga); + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h new file mode 100644 index 000000000000..9102be6b95cb --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h @@ -0,0 +1,390 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef MT7601U_H +#define MT7601U_H + +#include +#include +#include +#include +#include +#include +#include + +#include "regs.h" +#include "util.h" + +#define MT_CALIBRATE_INTERVAL (4 * HZ) + +#define MT_FREQ_CAL_INIT_DELAY (30 * HZ) +#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ) +#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2) + +#define MT_BBP_REG_VERSION 0x00 + +#define MT_USB_AGGR_SIZE_LIMIT 28 /* * 1024B */ +#define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */ +#define MT_RX_ORDER 3 +#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER) + +struct mt7601u_dma_buf { + struct urb *urb; + void *buf; + dma_addr_t dma; + size_t len; +}; + +struct mt7601u_mcu { + struct mutex mutex; + + u8 msg_seq; + + struct mt7601u_dma_buf resp; + struct completion resp_cmpl; +}; + +struct mt7601u_freq_cal { + struct delayed_work work; + u8 freq; + bool enabled; + bool adjusting; +}; + +struct mac_stats { + u64 rx_stat[6]; + u64 tx_stat[6]; + u64 aggr_stat[2]; + u64 aggr_n[32]; + u64 zero_len_del[2]; +}; + +#define N_RX_ENTRIES 16 +struct mt7601u_rx_queue { + struct mt7601u_dev *dev; + + struct mt7601u_dma_buf_rx { + struct urb *urb; + struct page *p; + } e[N_RX_ENTRIES]; + + unsigned int start; + unsigned int end; + unsigned int entries; + unsigned int pending; +}; + +#define N_TX_ENTRIES 64 + +struct mt7601u_tx_queue { + struct mt7601u_dev *dev; + + struct mt7601u_dma_buf_tx { + struct urb *urb; + struct sk_buff *skb; + } e[N_TX_ENTRIES]; + + unsigned int start; + unsigned int end; + unsigned int entries; + unsigned int used; + unsigned int fifo_seq; +}; + +/* WCID allocation: + * 0: mcast wcid + * 1: bssid wcid + * 1...: STAs + * ...7e: group wcids + * 7f: reserved + */ +#define N_WCIDS 128 +#define GROUP_WCID(idx) (N_WCIDS - 2 - idx) + +struct mt7601u_eeprom_params; + +#define MT_EE_TEMPERATURE_SLOPE 39 +#define MT_FREQ_OFFSET_INVALID -128 + +enum mt_temp_mode { + MT_TEMP_MODE_NORMAL, + MT_TEMP_MODE_HIGH, + MT_TEMP_MODE_LOW, +}; + +enum mt_bw { + MT_BW_20, + MT_BW_40, +}; + +enum { + MT7601U_STATE_INITIALIZED, + MT7601U_STATE_REMOVED, + MT7601U_STATE_WLAN_RUNNING, + MT7601U_STATE_MCU_RUNNING, + MT7601U_STATE_SCANNING, + MT7601U_STATE_READING_STATS, + MT7601U_STATE_MORE_STATS, +}; + +/** + * struct mt7601u_dev - adapter structure + * @lock: protects @wcid->tx_rate. + * @tx_lock: protects @tx_q and changes of MT7601U_STATE_*_STATS + flags in @state. + * @rx_lock: protects @rx_q. + * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi. + * @mutex: ensures exclusive access from mac80211 callbacks. + * @vendor_req_mutex: ensures atomicity of vendor requests. + * @reg_atomic_mutex: ensures atomicity of indirect register accesses + * (accesses to RF and BBP). + * @hw_atomic_mutex: ensures exclusive access to HW during critical + * operations (power management, channel switch). 
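+ * @avg_rssi: running average of RSSI reported for own beacons and
+ *	unicast frames, kept as a fixed-point value with 8 fractional
+ *	bits (see mt76_mac_process_rx()).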
+ */ +struct mt7601u_dev { + struct ieee80211_hw *hw; + struct device *dev; + + unsigned long state; + + struct mutex mutex; + + unsigned long wcid_mask[N_WCIDS / BITS_PER_LONG]; + + struct cfg80211_chan_def chandef; + struct ieee80211_supported_band *sband_2g; + + struct mt7601u_mcu mcu; + + struct delayed_work cal_work; + struct delayed_work mac_work; + + struct workqueue_struct *stat_wq; + struct delayed_work stat_work; + + struct mt76_wcid *mon_wcid; + struct mt76_wcid __rcu *wcid[N_WCIDS]; + + spinlock_t lock; + + const u16 *beacon_offsets; + + u8 macaddr[ETH_ALEN]; + struct mt7601u_eeprom_params *ee; + + struct mutex vendor_req_mutex; + struct mutex reg_atomic_mutex; + struct mutex hw_atomic_mutex; + + u32 rxfilter; + u32 debugfs_reg; + + u8 out_eps[8]; + u8 in_eps[8]; + u16 out_max_packet; + u16 in_max_packet; + + /* TX */ + spinlock_t tx_lock; + struct mt7601u_tx_queue *tx_q; + + atomic_t avg_ampdu_len; + + /* RX */ + spinlock_t rx_lock; + struct tasklet_struct rx_tasklet; + struct mt7601u_rx_queue rx_q; + + /* Connection monitoring things */ + spinlock_t con_mon_lock; + u8 ap_bssid[ETH_ALEN]; + + s8 bcn_freq_off; + u8 bcn_phy_mode; + + int avg_rssi; /* starts at 0 and converges */ + + u8 agc_save; + + struct mt7601u_freq_cal freq_cal; + + bool tssi_read_trig; + + s8 tssi_init; + s8 tssi_init_hvga; + s16 tssi_init_hvga_offset_db; + + int prev_pwr_diff; + + enum mt_temp_mode temp_mode; + int curr_temp; + int dpd_temp; + s8 raw_temp; + bool pll_lock_protect; + + u8 bw; + bool chan_ext_below; + + /* PA mode */ + u32 rf_pa_mode[2]; + + struct mac_stats stats; +}; + +struct mt7601u_tssi_params { + char tssi0; + int trgt_power; +}; + +struct mt76_wcid { + u8 idx; + u8 hw_key_idx; + + u16 tx_rate; + bool tx_rate_set; + u8 tx_rate_nss; +}; + +struct mt76_vif { + u8 idx; + + struct mt76_wcid group_wcid; +}; + +struct mt76_sta { + struct mt76_wcid wcid; + u16 agg_ssn[IEEE80211_NUM_TIDS]; +}; + +struct mt76_reg_pair { + u32 reg; + u32 value; +}; + +struct mt7601u_rxwi; + +extern const struct ieee80211_ops mt7601u_ops; + +void mt7601u_init_debugfs(struct mt7601u_dev *dev); + +u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset); +void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val); +u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val); +u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val); +void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset, + const void *data, int len); + +int mt7601u_wait_asic_ready(struct mt7601u_dev *dev); +bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val, + int timeout); +bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val, + int timeout); + +/* Compatibility with mt76 */ +#define mt76_rmw_field(_dev, _reg, _field, _val) \ + mt76_rmw(_dev, _reg, _field, MT76_SET(_field, _val)) + +static inline u32 mt76_rr(struct mt7601u_dev *dev, u32 offset) +{ + return mt7601u_rr(dev, offset); +} + +static inline void mt76_wr(struct mt7601u_dev *dev, u32 offset, u32 val) +{ + return mt7601u_wr(dev, offset, val); +} + +static inline u32 +mt76_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val) +{ + return mt7601u_rmw(dev, offset, mask, val); +} + +static inline u32 mt76_set(struct mt7601u_dev *dev, u32 offset, u32 val) +{ + return mt76_rmw(dev, offset, 0, val); +} + +static inline u32 mt76_clear(struct mt7601u_dev *dev, u32 offset, u32 val) +{ + return mt76_rmw(dev, offset, val, 0); +} + +int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base, + const struct mt76_reg_pair *data, 
int len); +int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset, + const u32 *data, int n); +void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr); + +/* Init */ +struct mt7601u_dev *mt7601u_alloc_device(struct device *dev); +int mt7601u_init_hardware(struct mt7601u_dev *dev); +int mt7601u_register_device(struct mt7601u_dev *dev); +void mt7601u_cleanup(struct mt7601u_dev *dev); + +int mt7601u_mac_start(struct mt7601u_dev *dev); +void mt7601u_mac_stop(struct mt7601u_dev *dev); + +/* PHY */ +int mt7601u_phy_init(struct mt7601u_dev *dev); +int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev); +void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path); +void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 path); +int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw); +void mt7601u_agc_save(struct mt7601u_dev *dev); +void mt7601u_agc_restore(struct mt7601u_dev *dev); +int mt7601u_phy_set_channel(struct mt7601u_dev *dev, + struct cfg80211_chan_def *chandef); +void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev); +int mt7601u_phy_get_rssi(struct mt7601u_dev *dev, + struct mt7601u_rxwi *rxwi, u16 rate); +void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev, + struct ieee80211_bss_conf *info); + +/* MAC */ +void mt7601u_mac_work(struct work_struct *work); +void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot, + int ht_mode); +void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb); +void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval); +void +mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac); +void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev); + +/* TX */ +void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb); +int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params); +void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb); +void mt7601u_tx_stat(struct work_struct *work); + +/* util */ +void mt76_remove_hdr_pad(struct sk_buff *skb); +int mt76_insert_hdr_pad(struct sk_buff *skb); + +u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below); + +static inline u32 mt7601u_mac_set_ctrlch(struct mt7601u_dev *dev, bool below) +{ + return mt7601u_rmc(dev, MT_TX_BAND_CFG, 1, below); +} + +int mt7601u_dma_init(struct mt7601u_dev *dev); +void mt7601u_dma_cleanup(struct mt7601u_dev *dev); + +int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb, + struct mt76_wcid *wcid, int hw_q); + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c new file mode 100644 index 000000000000..1908af6add87 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/phy.c @@ -0,0 +1,1251 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "mt7601u.h" +#include "mcu.h" +#include "eeprom.h" +#include "trace.h" +#include "initvals_phy.h" + +#include + +static void mt7601u_agc_reset(struct mt7601u_dev *dev); + +static int +mt7601u_rf_wr(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 value) +{ + int ret = 0; + + if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) || + WARN_ON(offset > 63)) + return -EINVAL; + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return 0; + + mutex_lock(&dev->reg_atomic_mutex); + + if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) { + ret = -ETIMEDOUT; + goto out; + } + + mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_DATA, value) | + MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) | + MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) | + MT_RF_CSR_CFG_WR | + MT_RF_CSR_CFG_KICK); + trace_rf_write(dev, bank, offset, value); +out: + mutex_unlock(&dev->reg_atomic_mutex); + + if (ret < 0) + dev_err(dev->dev, "Error: RF write %02hhx:%02hhx failed:%d!!\n", + bank, offset, ret); + + return ret; +} + +static int +mt7601u_rf_rr(struct mt7601u_dev *dev, u8 bank, u8 offset) +{ + int ret = -ETIMEDOUT; + u32 val; + + if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) || + WARN_ON(offset > 63)) + return -EINVAL; + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return 0xff; + + mutex_lock(&dev->reg_atomic_mutex); + + if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) + goto out; + + mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) | + MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) | + MT_RF_CSR_CFG_KICK); + + if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) + goto out; + + val = mt7601u_rr(dev, MT_RF_CSR_CFG); + if (MT76_GET(MT_RF_CSR_CFG_REG_ID, val) == offset && + MT76_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) { + ret = MT76_GET(MT_RF_CSR_CFG_DATA, val); + trace_rf_read(dev, bank, offset, ret); + } +out: + mutex_unlock(&dev->reg_atomic_mutex); + + if (ret < 0) + dev_err(dev->dev, "Error: RF read %02hhx:%02hhx failed:%d!!\n", + bank, offset, ret); + + return ret; +} + +static int +mt7601u_rf_rmw(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask, u8 val) +{ + int ret; + + ret = mt7601u_rf_rr(dev, bank, offset); + if (ret < 0) + return ret; + val |= ret & ~mask; + ret = mt7601u_rf_wr(dev, bank, offset, val); + if (ret) + return ret; + + return val; +} + +static int +mt7601u_rf_set(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 val) +{ + return mt7601u_rf_rmw(dev, bank, offset, 0, val); +} + +static int +mt7601u_rf_clear(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask) +{ + return mt7601u_rf_rmw(dev, bank, offset, mask, 0); +} + +static void mt7601u_bbp_wr(struct mt7601u_dev *dev, u8 offset, u8 val) +{ + if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) || + test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return; + + mutex_lock(&dev->reg_atomic_mutex); + + if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) { + dev_err(dev->dev, "Error: BBP write %02hhx failed!!\n", offset); + goto out; + } + + mt7601u_wr(dev, MT_BBP_CSR_CFG, + MT76_SET(MT_BBP_CSR_CFG_VAL, val) | + MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) | + MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY); + trace_bbp_write(dev, offset, val); +out: + mutex_unlock(&dev->reg_atomic_mutex); +} + +static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset) +{ + u32 val; + int ret = -ETIMEDOUT; + + if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state))) + return -EINVAL; + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return 
0xff; + + mutex_lock(&dev->reg_atomic_mutex); + + if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) + goto out; + + mt7601u_wr(dev, MT_BBP_CSR_CFG, + MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) | + MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY | + MT_BBP_CSR_CFG_READ); + + if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) + goto out; + + val = mt7601u_rr(dev, MT_BBP_CSR_CFG); + if (MT76_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) { + ret = MT76_GET(MT_BBP_CSR_CFG_VAL, val); + trace_bbp_read(dev, offset, ret); + } +out: + mutex_unlock(&dev->reg_atomic_mutex); + + if (ret < 0) + dev_err(dev->dev, "Error: BBP read %02hhx failed:%d!!\n", + offset, ret); + + return ret; +} + +static int mt7601u_bbp_rmw(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val) +{ + int ret; + + ret = mt7601u_bbp_rr(dev, offset); + if (ret < 0) + return ret; + val |= ret & ~mask; + mt7601u_bbp_wr(dev, offset, val); + + return val; +} + +static u8 mt7601u_bbp_rmc(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val) +{ + int ret; + + ret = mt7601u_bbp_rr(dev, offset); + if (ret < 0) + return ret; + val |= ret & ~mask; + if (ret != val) + mt7601u_bbp_wr(dev, offset, val); + + return val; +} + +int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev) +{ + int i = 20; + u8 val; + + do { + val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION); + if (val && ~val) + break; + } while (--i); + + if (!i) { + dev_err(dev->dev, "Error: BBP is not ready\n"); + return -EIO; + } + + return 0; +} + +u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below) +{ + return mt7601u_bbp_rmc(dev, 3, 0x20, below ? 0x20 : 0); +} + +int mt7601u_phy_get_rssi(struct mt7601u_dev *dev, + struct mt7601u_rxwi *rxwi, u16 rate) +{ + static const s8 lna[2][2][3] = { + /* main LNA */ { + /* bw20 */ { -2, 15, 33 }, + /* bw40 */ { 0, 16, 34 } + }, + /* aux LNA */ { + /* bw20 */ { -2, 15, 33 }, + /* bw40 */ { -2, 16, 34 } + } + }; + int bw = MT76_GET(MT_RXWI_RATE_BW, rate); + int aux_lna = MT76_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant); + int lna_id = MT76_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain); + int val; + + if (lna_id) /* LNA id can be 0, 2, 3. 
*/ + lna_id--; + + val = 8; + val -= lna[aux_lna][bw][lna_id]; + val -= MT76_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain); + val -= dev->ee->lna_gain; + val -= dev->ee->rssi_offset[0]; + + return val; +} + +static void mt7601u_vco_cal(struct mt7601u_dev *dev) +{ + mt7601u_rf_wr(dev, 0, 4, 0x0a); + mt7601u_rf_wr(dev, 0, 5, 0x20); + mt7601u_rf_set(dev, 0, 4, BIT(7)); + msleep(2); +} + +static int mt7601u_set_bw_filter(struct mt7601u_dev *dev, bool cal) +{ + u32 filter = 0; + int ret; + + if (!cal) + filter |= 0x10000; + if (dev->bw != MT_BW_20) + filter |= 0x00100; + + /* TX */ + ret = mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter | 1); + if (ret) + return ret; + /* RX */ + return mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter); +} + +static int mt7601u_load_bbp_temp_table_bw(struct mt7601u_dev *dev) +{ + const struct reg_table *t; + + if (WARN_ON(dev->temp_mode > MT_TEMP_MODE_LOW)) + return -EINVAL; + + t = &bbp_mode_table[dev->temp_mode][dev->bw]; + + return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t->regs, t->n); +} + +static int mt7601u_bbp_temp(struct mt7601u_dev *dev, int mode, const char *name) +{ + const struct reg_table *t; + int ret; + + if (dev->temp_mode == mode) + return 0; + + dev->temp_mode = mode; + trace_temp_mode(dev, mode); + + t = bbp_mode_table[dev->temp_mode]; + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, + t[2].regs, t[2].n); + if (ret) + return ret; + + return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, + t[dev->bw].regs, t[dev->bw].n); +} + +static void mt7601u_apply_ch14_fixup(struct mt7601u_dev *dev, int hw_chan) +{ + struct mt7601u_rate_power *t = &dev->ee->power_rate_table; + + if (hw_chan != 14 || dev->bw != MT_BW_20) { + mt7601u_bbp_rmw(dev, 4, 0x20, 0); + mt7601u_bbp_wr(dev, 178, 0xff); + + t->cck[0].bw20 = dev->ee->real_cck_bw20[0]; + t->cck[1].bw20 = dev->ee->real_cck_bw20[1]; + } else { /* Apply CH14 OBW fixup */ + mt7601u_bbp_wr(dev, 4, 0x60); + mt7601u_bbp_wr(dev, 178, 0); + + /* Note: vendor code is buggy here for negative values */ + t->cck[0].bw20 = dev->ee->real_cck_bw20[0] - 2; + t->cck[1].bw20 = dev->ee->real_cck_bw20[1] - 2; + } +} + +static int __mt7601u_phy_set_channel(struct mt7601u_dev *dev, + struct cfg80211_chan_def *chandef) +{ +#define FREQ_PLAN_REGS 4 + static const u8 freq_plan[14][FREQ_PLAN_REGS] = { + { 0x99, 0x99, 0x09, 0x50 }, + { 0x46, 0x44, 0x0a, 0x50 }, + { 0xec, 0xee, 0x0a, 0x50 }, + { 0x99, 0x99, 0x0b, 0x50 }, + { 0x46, 0x44, 0x08, 0x51 }, + { 0xec, 0xee, 0x08, 0x51 }, + { 0x99, 0x99, 0x09, 0x51 }, + { 0x46, 0x44, 0x0a, 0x51 }, + { 0xec, 0xee, 0x0a, 0x51 }, + { 0x99, 0x99, 0x0b, 0x51 }, + { 0x46, 0x44, 0x08, 0x52 }, + { 0xec, 0xee, 0x08, 0x52 }, + { 0x99, 0x99, 0x09, 0x52 }, + { 0x33, 0x33, 0x0b, 0x52 }, + }; + struct mt76_reg_pair channel_freq_plan[FREQ_PLAN_REGS] = { + { 17, 0 }, { 18, 0 }, { 19, 0 }, { 20, 0 }, + }; + struct mt76_reg_pair bbp_settings[3] = { + { 62, 0x37 - dev->ee->lna_gain }, + { 63, 0x37 - dev->ee->lna_gain }, + { 64, 0x37 - dev->ee->lna_gain }, + }; + + struct ieee80211_channel *chan = chandef->chan; + enum nl80211_channel_type chan_type = + cfg80211_get_chandef_type(chandef); + struct mt7601u_rate_power *t = &dev->ee->power_rate_table; + int chan_idx; + bool chan_ext_below; + u8 bw; + int i, ret; + + bw = MT_BW_20; + chan_ext_below = (chan_type == NL80211_CHAN_HT40MINUS); + chan_idx = chan->hw_value - 1; + + if (chandef->width == NL80211_CHAN_WIDTH_40) { + bw = MT_BW_40; + + if (chan_idx > 1 && chan_type == NL80211_CHAN_HT40MINUS) + chan_idx -= 2; + else if (chan_idx < 12 && chan_type == 
NL80211_CHAN_HT40PLUS) + chan_idx += 2; + else + dev_err(dev->dev, "Error: invalid 40MHz channel!!\n"); + } + + if (bw != dev->bw || chan_ext_below != dev->chan_ext_below) { + dev_dbg(dev->dev, "Info: switching HT mode bw:%d below:%d\n", + bw, chan_ext_below); + + mt7601u_bbp_set_bw(dev, bw); + + mt7601u_bbp_set_ctrlch(dev, chan_ext_below); + mt7601u_mac_set_ctrlch(dev, chan_ext_below); + dev->chan_ext_below = chan_ext_below; + } + + for (i = 0; i < FREQ_PLAN_REGS; i++) + channel_freq_plan[i].value = freq_plan[chan_idx][i]; + + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, + channel_freq_plan, FREQ_PLAN_REGS); + if (ret) + return ret; + + mt7601u_rmw(dev, MT_TX_ALC_CFG_0, 0x3f3f, + dev->ee->chan_pwr[chan_idx] & 0x3f); + + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, + bbp_settings, ARRAY_SIZE(bbp_settings)); + if (ret) + return ret; + + mt7601u_vco_cal(dev); + mt7601u_bbp_set_bw(dev, bw); + ret = mt7601u_set_bw_filter(dev, false); + if (ret) + return ret; + + mt7601u_apply_ch14_fixup(dev, chan->hw_value); + mt7601u_wr(dev, MT_TX_PWR_CFG_0, int_to_s6(t->ofdm[1].bw20) << 24 | + int_to_s6(t->ofdm[0].bw20) << 16 | + int_to_s6(t->cck[1].bw20) << 8 | + int_to_s6(t->cck[0].bw20)); + + if (test_bit(MT7601U_STATE_SCANNING, &dev->state)) + mt7601u_agc_reset(dev); + + dev->chandef = *chandef; + + return 0; +} + +int mt7601u_phy_set_channel(struct mt7601u_dev *dev, + struct cfg80211_chan_def *chandef) +{ + int ret; + + cancel_delayed_work_sync(&dev->cal_work); + cancel_delayed_work_sync(&dev->freq_cal.work); + + mutex_lock(&dev->hw_atomic_mutex); + ret = __mt7601u_phy_set_channel(dev, chandef); + mutex_unlock(&dev->hw_atomic_mutex); + if (ret) + return ret; + + if (test_bit(MT7601U_STATE_SCANNING, &dev->state)) + return 0; + + ieee80211_queue_delayed_work(dev->hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); + if (dev->freq_cal.enabled) + ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, + MT_FREQ_CAL_INIT_DELAY); + return 0; +} + +#define BBP_R47_FLAG GENMASK(2, 0) +#define BBP_R47_F_TSSI 0 +#define BBP_R47_F_PKT_T 1 +#define BBP_R47_F_TX_RATE 2 +#define BBP_R47_F_TEMP 4 +/** + * mt7601u_bbp_r47_get - read value through BBP R47/R49 pair + * @dev: pointer to adapter structure + * @reg: value of BBP R47 before the operation + * @flag: one of the BBP_R47_F_* flags + * + * Convenience helper for reading values through BBP R47/R49 pair. + * Takes old value of BBP R47 as @reg, because callers usually have it + * cached already. + * + * Return: value of BBP R49. 
+ */ +static u8 mt7601u_bbp_r47_get(struct mt7601u_dev *dev, u8 reg, u8 flag) +{ + flag |= reg & ~BBP_R47_FLAG; + mt7601u_bbp_wr(dev, 47, flag); + usleep_range(500, 700); + return mt7601u_bbp_rr(dev, 49); +} + +static s8 mt7601u_read_bootup_temp(struct mt7601u_dev *dev) +{ + u8 bbp_val, temp; + u32 rf_bp, rf_set; + int i; + + rf_set = mt7601u_rr(dev, MT_RF_SETTING_0); + rf_bp = mt7601u_rr(dev, MT_RF_BYPASS_0); + + mt7601u_wr(dev, MT_RF_BYPASS_0, 0); + mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000010); + mt7601u_wr(dev, MT_RF_BYPASS_0, 0x00000010); + + bbp_val = mt7601u_bbp_rmw(dev, 47, 0, 0x10); + + mt7601u_bbp_wr(dev, 22, 0x40); + + for (i = 100; i && (bbp_val & 0x10); i--) + bbp_val = mt7601u_bbp_rr(dev, 47); + + temp = mt7601u_bbp_r47_get(dev, bbp_val, BBP_R47_F_TEMP); + + mt7601u_bbp_wr(dev, 22, 0); + + bbp_val = mt7601u_bbp_rr(dev, 21); + bbp_val |= 0x02; + mt7601u_bbp_wr(dev, 21, bbp_val); + bbp_val &= ~0x02; + mt7601u_bbp_wr(dev, 21, bbp_val); + + mt7601u_wr(dev, MT_RF_BYPASS_0, 0); + mt7601u_wr(dev, MT_RF_SETTING_0, rf_set); + mt7601u_wr(dev, MT_RF_BYPASS_0, rf_bp); + + trace_read_temp(dev, temp); + return temp; +} + +static s8 mt7601u_read_temp(struct mt7601u_dev *dev) +{ + int i; + u8 val; + s8 temp; + + val = mt7601u_bbp_rmw(dev, 47, 0x7f, 0x10); + + /* Note: this rarely succeeds, temp can change even if it fails. */ + for (i = 100; i && (val & 0x10); i--) + val = mt7601u_bbp_rr(dev, 47); + + temp = mt7601u_bbp_r47_get(dev, val, BBP_R47_F_TEMP); + + trace_read_temp(dev, temp); + return temp; +} + +static void mt7601u_rxdc_cal(struct mt7601u_dev *dev) +{ + static const struct mt76_reg_pair intro[] = { + { 158, 0x8d }, { 159, 0xfc }, + { 158, 0x8c }, { 159, 0x4c }, + }, outro[] = { + { 158, 0x8d }, { 159, 0xe0 }, + }; + u32 mac_ctrl; + int i, ret; + + mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL); + mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX); + + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, + intro, ARRAY_SIZE(intro)); + if (ret) + dev_err(dev->dev, "%s intro failed:%d\n", __func__, ret); + + for (i = 20; i; i--) { + usleep_range(300, 500); + + mt7601u_bbp_wr(dev, 158, 0x8c); + if (mt7601u_bbp_rr(dev, 159) == 0x0c) + break; + } + if (!i) + dev_err(dev->dev, "%s timed out\n", __func__); + + mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0); + + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, + outro, ARRAY_SIZE(outro)); + if (ret) + dev_err(dev->dev, "%s outro failed:%d\n", __func__, ret); + + mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl); +} + +void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev) +{ + mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->curr_temp); + + mt7601u_rxdc_cal(dev); +} + +/* Note: function copied from vendor driver */ +static s16 lin2dBd(u16 linear) +{ + short exp = 0; + unsigned int mantisa; + int app, dBd; + + if (WARN_ON(!linear)) + return -10000; + + mantisa = linear; + + exp = fls(mantisa) - 16; + if (exp > 0) + mantisa >>= exp; + else + mantisa <<= abs(exp); + + if (mantisa <= 0xb800) + app = (mantisa + (mantisa >> 3) + (mantisa >> 4) - 0x9600); + else + app = (mantisa - (mantisa >> 3) - (mantisa >> 6) - 0x5a00); + if (app < 0) + app = 0; + + dBd = ((15 + exp) << 15) + app; + dBd = (dBd << 2) + (dBd << 1) + (dBd >> 6) + (dBd >> 7); + dBd = (dBd >> 10); + + return dBd; +} + +static void +mt7601u_set_initial_tssi(struct mt7601u_dev *dev, s16 tssi_db, s16 tssi_hvga_db) +{ + struct tssi_data *d = &dev->ee->tssi_data; + int init_offset; + + init_offset = -((tssi_db * d->slope + d->offset[1]) / 4096) + 10; + + mt76_rmw(dev, MT_TX_ALC_CFG_1, 
MT_TX_ALC_CFG_1_TEMP_COMP, + int_to_s6(init_offset) & MT_TX_ALC_CFG_1_TEMP_COMP); +} + +static void mt7601u_tssi_dc_gain_cal(struct mt7601u_dev *dev) +{ + u8 rf_vga, rf_mixer, bbp_r47; + int i, j; + s8 res[4]; + s16 tssi_init_db, tssi_init_hvga_db; + + mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000030); + mt7601u_wr(dev, MT_RF_BYPASS_0, 0x000c0030); + mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0); + + mt7601u_bbp_wr(dev, 58, 0); + mt7601u_bbp_wr(dev, 241, 0x2); + mt7601u_bbp_wr(dev, 23, 0x8); + bbp_r47 = mt7601u_bbp_rr(dev, 47); + + /* Set VGA gain */ + rf_vga = mt7601u_rf_rr(dev, 5, 3); + mt7601u_rf_wr(dev, 5, 3, 8); + + /* Mixer disable */ + rf_mixer = mt7601u_rf_rr(dev, 4, 39); + mt7601u_rf_wr(dev, 4, 39, 0); + + for (i = 0; i < 4; i++) { + mt7601u_rf_wr(dev, 4, 39, (i & 1) ? rf_mixer : 0); + + mt7601u_bbp_wr(dev, 23, (i < 2) ? 0x08 : 0x02); + mt7601u_rf_wr(dev, 5, 3, (i < 2) ? 0x08 : 0x11); + + /* BBP TSSI initial and soft reset */ + mt7601u_bbp_wr(dev, 22, 0); + mt7601u_bbp_wr(dev, 244, 0); + + mt7601u_bbp_wr(dev, 21, 1); + udelay(1); + mt7601u_bbp_wr(dev, 21, 0); + + /* TSSI measurement */ + mt7601u_bbp_wr(dev, 47, 0x50); + mt7601u_bbp_wr(dev, (i & 1) ? 244 : 22, (i & 1) ? 0x31 : 0x40); + + for (j = 20; j; j--) + if (!(mt7601u_bbp_rr(dev, 47) & 0x10)) + break; + if (!j) + dev_err(dev->dev, "%s timed out\n", __func__); + + /* TSSI read */ + mt7601u_bbp_wr(dev, 47, 0x40); + res[i] = mt7601u_bbp_rr(dev, 49); + } + + tssi_init_db = lin2dBd((short)res[1] - res[0]); + tssi_init_hvga_db = lin2dBd(((short)res[3] - res[2]) * 4); + dev->tssi_init = res[0]; + dev->tssi_init_hvga = res[2]; + dev->tssi_init_hvga_offset_db = tssi_init_hvga_db - tssi_init_db; + + dev_dbg(dev->dev, + "TSSI_init:%hhx db:%hx hvga:%hhx hvga_db:%hx off_db:%hx\n", + dev->tssi_init, tssi_init_db, dev->tssi_init_hvga, + tssi_init_hvga_db, dev->tssi_init_hvga_offset_db); + + mt7601u_bbp_wr(dev, 22, 0); + mt7601u_bbp_wr(dev, 244, 0); + + mt7601u_bbp_wr(dev, 21, 1); + udelay(1); + mt7601u_bbp_wr(dev, 21, 0); + + mt7601u_wr(dev, MT_RF_BYPASS_0, 0); + mt7601u_wr(dev, MT_RF_SETTING_0, 0); + + mt7601u_rf_wr(dev, 5, 3, rf_vga); + mt7601u_rf_wr(dev, 4, 39, rf_mixer); + mt7601u_bbp_wr(dev, 47, bbp_r47); + + mt7601u_set_initial_tssi(dev, tssi_init_db, tssi_init_hvga_db); +} + +static int mt7601u_temp_comp(struct mt7601u_dev *dev, bool on) +{ + int ret, temp, hi_temp = 400, lo_temp = -200; + + temp = (dev->raw_temp - dev->ee->ref_temp) * MT_EE_TEMPERATURE_SLOPE; + dev->curr_temp = temp; + + /* DPD Calibration */ + if (temp - dev->dpd_temp > 450 || temp - dev->dpd_temp < -450) { + dev->dpd_temp = temp; + + ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp); + if (ret) + return ret; + + mt7601u_vco_cal(dev); + + dev_dbg(dev->dev, "Recalibrate DPD\n"); + } + + /* PLL Lock Protect */ + if (temp < -50 && !dev->pll_lock_protect) { /* < 20C */ + dev->pll_lock_protect = true; + + mt7601u_rf_wr(dev, 4, 4, 6); + mt7601u_rf_clear(dev, 4, 10, 0x30); + + dev_dbg(dev->dev, "PLL lock protect on - too cold\n"); + } else if (temp > 50 && dev->pll_lock_protect) { /* > 30C */ + dev->pll_lock_protect = false; + + mt7601u_rf_wr(dev, 4, 4, 0); + mt7601u_rf_rmw(dev, 4, 10, 0x30, 0x10); + + dev_dbg(dev->dev, "PLL lock protect off\n"); + } + + if (on) { + hi_temp -= 50; + lo_temp -= 50; + } + + /* BBP CR for H, L, N temperature */ + if (temp > hi_temp) + return mt7601u_bbp_temp(dev, MT_TEMP_MODE_HIGH, "high"); + else if (temp > lo_temp) + return mt7601u_bbp_temp(dev, MT_TEMP_MODE_NORMAL, "normal"); + else + return mt7601u_bbp_temp(dev, MT_TEMP_MODE_LOW, "low"); +} 
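+
+/* Illustrative sketch, not from the original patch: the 6-bit signed
+ * ("s6") values written to fields such as MT_TX_ALC_CFG_1_TEMP_COMP
+ * above and below are converted with int_to_s6()/s6_to_int(), which
+ * live elsewhere in the driver. Assuming the usual 6-bit
+ * two's-complement encoding, saturated on the way to hardware,
+ * equivalent helpers would be:
+ *
+ *	static inline u8 example_int_to_s6(int i)
+ *	{
+ *		return clamp(i, -32, 31) & 0x3f;	/* saturate, keep 6 bits */
+ *	}
+ *
+ *	static inline int example_s6_to_int(u8 s6)
+ *	{
+ *		return (s6 & 0x3f) - ((s6 & 0x20) << 1);	/* sign-extend bit 5 */
+ *	}
+ *
+ * e.g. example_int_to_s6(-2) == 0x3e and example_s6_to_int(0x3e) == -2.
+ */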
+ +/* Note: this is used only with TSSI, we can just use trgt_pwr from eeprom. */ +static int mt7601u_current_tx_power(struct mt7601u_dev *dev) +{ + return dev->ee->chan_pwr[dev->chandef.chan->hw_value - 1]; +} + +static bool mt7601u_use_hvga(struct mt7601u_dev *dev) +{ + return !(mt7601u_current_tx_power(dev) > 20); +} + +static s16 +mt7601u_phy_rf_pa_mode_val(struct mt7601u_dev *dev, int phy_mode, int tx_rate) +{ + static const s16 decode_tb[] = { 0, 8847, -5734, -5734 }; + u32 reg; + + switch (phy_mode) { + case MT_PHY_TYPE_OFDM: + tx_rate += 4; + case MT_PHY_TYPE_CCK: + reg = dev->rf_pa_mode[0]; + break; + default: + reg = dev->rf_pa_mode[1]; + break; + } + + return decode_tb[(reg >> (tx_rate * 2)) & 0x3]; +} + +static struct mt7601u_tssi_params +mt7601u_tssi_params_get(struct mt7601u_dev *dev) +{ + static const u8 ofdm_pkt2rate[8] = { 6, 4, 2, 0, 7, 5, 3, 1 }; + static const int static_power[4] = { 0, -49152, -98304, 49152 }; + struct mt7601u_tssi_params p; + u8 bbp_r47, pkt_type, tx_rate; + struct power_per_rate *rate_table; + + bbp_r47 = mt7601u_bbp_rr(dev, 47); + + p.tssi0 = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TSSI); + dev->raw_temp = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TEMP); + pkt_type = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_PKT_T); + + p.trgt_power = mt7601u_current_tx_power(dev); + + switch (pkt_type & 0x03) { + case MT_PHY_TYPE_CCK: + tx_rate = (pkt_type >> 4) & 0x03; + rate_table = dev->ee->power_rate_table.cck; + break; + + case MT_PHY_TYPE_OFDM: + tx_rate = ofdm_pkt2rate[(pkt_type >> 4) & 0x07]; + rate_table = dev->ee->power_rate_table.ofdm; + break; + + default: + tx_rate = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TX_RATE); + tx_rate &= 0x7f; + rate_table = dev->ee->power_rate_table.ht; + break; + } + + if (dev->bw == MT_BW_20) + p.trgt_power += rate_table[tx_rate / 2].bw20; + else + p.trgt_power += rate_table[tx_rate / 2].bw40; + + p.trgt_power <<= 12; + + dev_dbg(dev->dev, "tx_rate:%02hhx pwr:%08x\n", tx_rate, p.trgt_power); + + p.trgt_power += mt7601u_phy_rf_pa_mode_val(dev, pkt_type & 0x03, + tx_rate); + + /* Channel 14, cck, bw20 */ + if ((pkt_type & 0x03) == MT_PHY_TYPE_CCK) { + if (mt7601u_bbp_rr(dev, 4) & 0x20) + p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 18022 : 9830; + else + p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 819 : 24576; + } + + p.trgt_power += static_power[mt7601u_bbp_rr(dev, 1) & 0x03]; + + p.trgt_power += dev->ee->tssi_data.tx0_delta_offset; + + dev_dbg(dev->dev, + "tssi:%02hhx t_power:%08x temp:%02hhx pkt_type:%02hhx\n", + p.tssi0, p.trgt_power, dev->raw_temp, pkt_type); + + return p; +} + +static bool mt7601u_tssi_read_ready(struct mt7601u_dev *dev) +{ + return !(mt7601u_bbp_rr(dev, 47) & 0x10); +} + +static int mt7601u_tssi_cal(struct mt7601u_dev *dev) +{ + struct mt7601u_tssi_params params; + int curr_pwr, diff_pwr; + char tssi_offset; + s8 tssi_init; + s16 tssi_m_dc, tssi_db; + bool hvga; + u32 val; + + if (!dev->ee->tssi_enabled) + return 0; + + hvga = mt7601u_use_hvga(dev); + if (!dev->tssi_read_trig) + return mt7601u_mcu_tssi_read_kick(dev, hvga); + + if (!mt7601u_tssi_read_ready(dev)) + return 0; + + params = mt7601u_tssi_params_get(dev); + + tssi_init = (hvga ? 
dev->tssi_init_hvga : dev->tssi_init); + tssi_m_dc = params.tssi0 - tssi_init; + tssi_db = lin2dBd(tssi_m_dc); + dev_dbg(dev->dev, "tssi dc:%04hx db:%04hx hvga:%d\n", + tssi_m_dc, tssi_db, hvga); + + if (dev->chandef.chan->hw_value < 5) + tssi_offset = dev->ee->tssi_data.offset[0]; + else if (dev->chandef.chan->hw_value < 9) + tssi_offset = dev->ee->tssi_data.offset[1]; + else + tssi_offset = dev->ee->tssi_data.offset[2]; + + if (hvga) + tssi_db -= dev->tssi_init_hvga_offset_db; + + curr_pwr = tssi_db * dev->ee->tssi_data.slope + (tssi_offset << 9); + diff_pwr = params.trgt_power - curr_pwr; + dev_dbg(dev->dev, "Power curr:%08x diff:%08x\n", curr_pwr, diff_pwr); + + if (params.tssi0 > 126 && diff_pwr > 0) { + dev_err(dev->dev, "Error: TSSI upper saturation\n"); + diff_pwr = 0; + } + if (params.tssi0 - tssi_init < 1 && diff_pwr < 0) { + dev_err(dev->dev, "Error: TSSI lower saturation\n"); + diff_pwr = 0; + } + + if ((dev->prev_pwr_diff ^ diff_pwr) < 0 && abs(diff_pwr) < 4096 && + (abs(diff_pwr) > abs(dev->prev_pwr_diff) || + (diff_pwr > 0 && diff_pwr == -dev->prev_pwr_diff))) + diff_pwr = 0; + else + dev->prev_pwr_diff = diff_pwr; + + diff_pwr += (diff_pwr > 0) ? 2048 : -2048; + diff_pwr /= 4096; + + dev_dbg(dev->dev, "final diff: %08x\n", diff_pwr); + + val = mt7601u_rr(dev, MT_TX_ALC_CFG_1); + curr_pwr = s6_to_int(MT76_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val)); + diff_pwr += curr_pwr; + val = (val & ~MT_TX_ALC_CFG_1_TEMP_COMP) | int_to_s6(diff_pwr); + mt7601u_wr(dev, MT_TX_ALC_CFG_1, val); + + return mt7601u_mcu_tssi_read_kick(dev, hvga); +} + +static u8 mt7601u_agc_default(struct mt7601u_dev *dev) +{ + return (dev->ee->lna_gain - 8) * 2 + 0x34; +} + +static void mt7601u_agc_reset(struct mt7601u_dev *dev) +{ + u8 agc = mt7601u_agc_default(dev); + + mt7601u_bbp_wr(dev, 66, agc); +} + +void mt7601u_agc_save(struct mt7601u_dev *dev) +{ + dev->agc_save = mt7601u_bbp_rr(dev, 66); +} + +void mt7601u_agc_restore(struct mt7601u_dev *dev) +{ + mt7601u_bbp_wr(dev, 66, dev->agc_save); +} + +static void mt7601u_agc_tune(struct mt7601u_dev *dev) +{ + u8 val = mt7601u_agc_default(dev); + + if (test_bit(MT7601U_STATE_SCANNING, &dev->state)) + return; + + /* Note: only in STA mode and not dozing; perhaps do this only if + * there is enough rssi updates since last run? + * Rssi updates are only on beacons and U2M so should work... + */ + spin_lock_bh(&dev->con_mon_lock); + if (dev->avg_rssi <= -70) + val -= 0x20; + else if (dev->avg_rssi <= -60) + val -= 0x10; + spin_unlock_bh(&dev->con_mon_lock); + + if (val != mt7601u_bbp_rr(dev, 66)) + mt7601u_bbp_wr(dev, 66, val); + + /* TODO: also if lost a lot of beacons try resetting + * (see RTMPSetAGCInitValue() call in mlme.c). + */ +} + +static void mt7601u_phy_calibrate(struct work_struct *work) +{ + struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev, + cal_work.work); + + mt7601u_agc_tune(dev); + mt7601u_tssi_cal(dev); + /* If TSSI calibration was run it already updated temperature. 
*/ + if (!dev->ee->tssi_enabled) + dev->raw_temp = mt7601u_read_temp(dev); + mt7601u_temp_comp(dev, true); /* TODO: find right value for @on */ + + ieee80211_queue_delayed_work(dev->hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); +} + +static unsigned long +__mt7601u_phy_freq_cal(struct mt7601u_dev *dev, s8 last_offset, u8 phy_mode) +{ + u8 activate_threshold, deactivate_threshold; + + trace_freq_cal_offset(dev, phy_mode, last_offset); + + /* No beacons received - reschedule soon */ + if (last_offset == MT_FREQ_OFFSET_INVALID) + return MT_FREQ_CAL_ADJ_INTERVAL; + + switch (phy_mode) { + case MT_PHY_TYPE_CCK: + activate_threshold = 19; + deactivate_threshold = 5; + break; + case MT_PHY_TYPE_OFDM: + activate_threshold = 102; + deactivate_threshold = 32; + break; + case MT_PHY_TYPE_HT: + case MT_PHY_TYPE_HT_GF: + activate_threshold = 82; + deactivate_threshold = 20; + break; + default: + WARN_ON(1); + return MT_FREQ_CAL_CHECK_INTERVAL; + } + + if (abs(last_offset) >= activate_threshold) + dev->freq_cal.adjusting = true; + else if (abs(last_offset) <= deactivate_threshold) + dev->freq_cal.adjusting = false; + + if (!dev->freq_cal.adjusting) + return MT_FREQ_CAL_CHECK_INTERVAL; + + if (last_offset > deactivate_threshold) { + if (dev->freq_cal.freq > 0) + dev->freq_cal.freq--; + else + dev->freq_cal.adjusting = false; + } else if (last_offset < -deactivate_threshold) { + if (dev->freq_cal.freq < 0xbf) + dev->freq_cal.freq++; + else + dev->freq_cal.adjusting = false; + } + + trace_freq_cal_adjust(dev, dev->freq_cal.freq); + mt7601u_rf_wr(dev, 0, 12, dev->freq_cal.freq); + mt7601u_vco_cal(dev); + + return dev->freq_cal.adjusting ? MT_FREQ_CAL_ADJ_INTERVAL : + MT_FREQ_CAL_CHECK_INTERVAL; +} + +static void mt7601u_phy_freq_cal(struct work_struct *work) +{ + struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev, + freq_cal.work.work); + s8 last_offset; + u8 phy_mode; + unsigned long delay; + + spin_lock_bh(&dev->con_mon_lock); + last_offset = dev->bcn_freq_off; + phy_mode = dev->bcn_phy_mode; + spin_unlock_bh(&dev->con_mon_lock); + + delay = __mt7601u_phy_freq_cal(dev, last_offset, phy_mode); + ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, delay); + + spin_lock_bh(&dev->con_mon_lock); + dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID; + spin_unlock_bh(&dev->con_mon_lock); +} + +void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev, + struct ieee80211_bss_conf *info) +{ + if (!info->assoc) + cancel_delayed_work_sync(&dev->freq_cal.work); + + /* Start/stop collecting beacon data */ + spin_lock_bh(&dev->con_mon_lock); + ether_addr_copy(dev->ap_bssid, info->bssid); + dev->avg_rssi = 0; + dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID; + spin_unlock_bh(&dev->con_mon_lock); + + dev->freq_cal.freq = dev->ee->rf_freq_off; + dev->freq_cal.enabled = info->assoc; + dev->freq_cal.adjusting = false; + + if (info->assoc) + ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, + MT_FREQ_CAL_INIT_DELAY); +} + +static int mt7601u_init_cal(struct mt7601u_dev *dev) +{ + u32 mac_ctrl; + int ret; + + dev->raw_temp = mt7601u_read_bootup_temp(dev); + dev->curr_temp = (dev->raw_temp - dev->ee->ref_temp) * + MT_EE_TEMPERATURE_SLOPE; + dev->dpd_temp = dev->curr_temp; + + mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL); + + ret = mt7601u_mcu_calibrate(dev, MCU_CAL_R, 0); + if (ret) + return ret; + + ret = mt7601u_rf_rr(dev, 0, 4); + if (ret < 0) + return ret; + ret |= 0x80; + ret = mt7601u_rf_wr(dev, 0, 4, ret); + if (ret) + return ret; + msleep(2); + + ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXDCOC, 0); + if 
(ret)
+		return ret;
+
+	mt7601u_rxdc_cal(dev);
+
+	ret = mt7601u_set_bw_filter(dev, true);
+	if (ret)
+		return ret;
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_LOFT, 0);
+	if (ret)
+		return ret;
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXIQ, 0);
+	if (ret)
+		return ret;
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_RXIQ, 0);
+	if (ret)
+		return ret;
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp);
+	if (ret)
+		return ret;
+
+	mt7601u_rxdc_cal(dev);
+
+	mt7601u_tssi_dc_gain_cal(dev);
+
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl);
+
+	mt7601u_temp_comp(dev, true);
+
+	return 0;
+}
+
+int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw)
+{
+	u32 val, old;
+
+	if (bw == dev->bw) {
+		/* Vendor driver does the rmc even when no change is needed. */
+		mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10);
+
+		return 0;
+	}
+	dev->bw = bw;
+
+	/* Stop the MAC while the bandwidth is being changed */
+	old = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+	val = old & ~(MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, val);
+	mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+		  0, 500000);
+
+	mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10);
+
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, old);
+
+	return mt7601u_load_bbp_temp_table_bw(dev);
+}
+
+/**
+ * mt7601u_set_rx_path - set rx path in BBP
+ * @dev: pointer to adapter structure
+ * @path: rx path to set, values are 0-based
+ */
+void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path)
+{
+	mt7601u_bbp_rmw(dev, 3, 0x18, path << 3);
+}
+
+/**
+ * mt7601u_set_tx_dac - set which tx DAC to use
+ * @dev: pointer to adapter structure
+ * @dac: DAC index, values are 0-based
+ */
+void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 dac)
+{
+	mt7601u_bbp_rmc(dev, 1, 0x18, dac << 3);
+}
+
+int mt7601u_phy_init(struct mt7601u_dev *dev)
+{
+	int ret;
+
+	dev->rf_pa_mode[0] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG0);
+	dev->rf_pa_mode[1] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG1);
+
+	ret = mt7601u_rf_wr(dev, 0, 12, dev->ee->rf_freq_off);
+	if (ret)
+		return ret;
+	ret = mt7601u_write_reg_pairs(dev, 0, rf_central,
+				      ARRAY_SIZE(rf_central));
+	if (ret)
+		return ret;
+	ret = mt7601u_write_reg_pairs(dev, 0, rf_channel,
+				      ARRAY_SIZE(rf_channel));
+	if (ret)
+		return ret;
+	ret = mt7601u_write_reg_pairs(dev, 0, rf_vga, ARRAY_SIZE(rf_vga));
+	if (ret)
+		return ret;
+
+	ret = mt7601u_init_cal(dev);
+	if (ret)
+		return ret;
+
+	dev->prev_pwr_diff = 100;
+
+	INIT_DELAYED_WORK(&dev->cal_work, mt7601u_phy_calibrate);
+	INIT_DELAYED_WORK(&dev->freq_cal.work, mt7601u_phy_freq_cal);
+
+	return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/regs.h b/drivers/net/wireless/mediatek/mt7601u/regs.h
new file mode 100644
index 000000000000..afd8978e83fa
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/regs.h
@@ -0,0 +1,636 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau
+ * Copyright (C) 2015 Jakub Kicinski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */ + +#ifndef __MT76_REGS_H +#define __MT76_REGS_H + +#include + +#ifndef GENMASK +#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) +#endif + +#define MT_ASIC_VERSION 0x0000 + +#define MT76XX_REV_E3 0x22 +#define MT76XX_REV_E4 0x33 + +#define MT_CMB_CTRL 0x0020 +#define MT_CMB_CTRL_XTAL_RDY BIT(22) +#define MT_CMB_CTRL_PLL_LD BIT(23) + +#define MT_EFUSE_CTRL 0x0024 +#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0) +#define MT_EFUSE_CTRL_MODE GENMASK(7, 6) +#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8) +#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14) +#define MT_EFUSE_CTRL_AIN GENMASK(25, 16) +#define MT_EFUSE_CTRL_KICK BIT(30) +#define MT_EFUSE_CTRL_SEL BIT(31) + +#define MT_EFUSE_DATA_BASE 0x0028 +#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2)) + +#define MT_COEXCFG0 0x0040 +#define MT_COEXCFG0_COEX_EN BIT(0) + +#define MT_WLAN_FUN_CTRL 0x0080 +#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0) +#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1) +#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2) + +#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */ +#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */ + +#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4) +#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5) +#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6) +#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7) + +#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */ +#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */ + +#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */ +#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */ +#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */ + +#define MT_XO_CTRL0 0x0100 +#define MT_XO_CTRL1 0x0104 +#define MT_XO_CTRL2 0x0108 +#define MT_XO_CTRL3 0x010c +#define MT_XO_CTRL4 0x0110 + +#define MT_XO_CTRL5 0x0114 +#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8) + +#define MT_XO_CTRL6 0x0118 +#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8) + +#define MT_XO_CTRL7 0x011c + +#define MT_WLAN_MTC_CTRL 0x10148 +#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0) +#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12) +#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13) +#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16) +#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20) +#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21) +#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22) +#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24) +#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25) +#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26) +#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27) +#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28) + +#define MT_INT_SOURCE_CSR 0x0200 +#define MT_INT_MASK_CSR 0x0204 + +#define MT_INT_RX_DONE(_n) BIT(_n) +#define MT_INT_RX_DONE_ALL GENMASK(1, 0) +#define MT_INT_TX_DONE_ALL GENMASK(13, 4) +#define MT_INT_TX_DONE(_n) BIT(_n + 4) +#define MT_INT_RX_COHERENT BIT(16) +#define MT_INT_TX_COHERENT BIT(17) +#define MT_INT_ANY_COHERENT BIT(18) +#define MT_INT_MCU_CMD BIT(19) +#define MT_INT_TBTT BIT(20) +#define MT_INT_PRE_TBTT BIT(21) +#define MT_INT_TX_STAT BIT(22) +#define MT_INT_AUTO_WAKEUP BIT(23) +#define MT_INT_GPTIMER BIT(24) +#define MT_INT_RXDELAYINT BIT(26) +#define MT_INT_TXDELAYINT BIT(27) + +#define MT_WPDMA_GLO_CFG 0x0208 +#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0) +#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1) +#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2) +#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3) +#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4) +#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6) +#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7) +#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8) 
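+
+/* Illustrative aside, not from the original patch: fields declared with
+ * GENMASK() in this file are packed and extracted with the MT76_SET()
+ * and MT76_GET() helpers used throughout the driver (defined in its
+ * utility header). Assuming the usual __ffs()-based shifting, they
+ * would look roughly like:
+ *
+ *	#define MT76_SET(_mask, _val) (((u32)(_val) << __ffs(_mask)) & (_mask))
+ *	#define MT76_GET(_mask, _val) ((u32)(((_val) & (_mask)) >> __ffs(_mask)))
+ *
+ * e.g. MT76_SET(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) shifts 3 into
+ * bits 5:4, yielding 0x30.
+ */
+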
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30) +#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31) + +#define MT_WPDMA_RST_IDX 0x020c + +#define MT_WPDMA_DELAY_INT_CFG 0x0210 + +#define MT_WMM_AIFSN 0x0214 +#define MT_WMM_AIFSN_MASK GENMASK(3, 0) +#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4) + +#define MT_WMM_CWMIN 0x0218 +#define MT_WMM_CWMIN_MASK GENMASK(3, 0) +#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4) + +#define MT_WMM_CWMAX 0x021c +#define MT_WMM_CWMAX_MASK GENMASK(3, 0) +#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4) + +#define MT_WMM_TXOP_BASE 0x0220 +#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2)) +#define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16) +#define MT_WMM_TXOP_MASK GENMASK(15, 0) + +#define MT_FCE_DMA_ADDR 0x0230 +#define MT_FCE_DMA_LEN 0x0234 + +#define MT_USB_DMA_CFG 0x238 +#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0) +#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8) +#define MT_USB_DMA_CFG_PHY_CLR BIT(16) +#define MT_USB_DMA_CFG_TX_CLR BIT(19) +#define MT_USB_DMA_CFG_TXOP_HALT BIT(20) +#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21) +#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22) +#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23) +#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25) +#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 27) +#define MT_USB_DMA_CFG_RX_BUSY BIT(30) +#define MT_USB_DMA_CFG_TX_BUSY BIT(31) + +#define MT_TSO_CTRL 0x0250 +#define MT_HEADER_TRANS_CTRL_REG 0x0260 + +#define MT_US_CYC_CFG 0x02a4 +#define MT_US_CYC_CNT GENMASK(7, 0) + +#define MT_TX_RING_BASE 0x0300 +#define MT_RX_RING_BASE 0x03c0 +#define MT_RING_SIZE 0x10 + +#define MT_TX_HW_QUEUE_MCU 8 +#define MT_TX_HW_QUEUE_MGMT 9 + +#define MT_PBF_SYS_CTRL 0x0400 +#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0) +#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1) +#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2) +#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3) +#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4) + +#define MT_PBF_CFG 0x0404 +#define MT_PBF_CFG_TX0Q_EN BIT(0) +#define MT_PBF_CFG_TX1Q_EN BIT(1) +#define MT_PBF_CFG_TX2Q_EN BIT(2) +#define MT_PBF_CFG_TX3Q_EN BIT(3) +#define MT_PBF_CFG_RX0Q_EN BIT(4) +#define MT_PBF_CFG_RX_DROP_EN BIT(8) + +#define MT_PBF_TX_MAX_PCNT 0x0408 +#define MT_PBF_RX_MAX_PCNT 0x040c + +#define MT_BCN_OFFSET_BASE 0x041c +#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2)) + +#define MT_RF_CSR_CFG 0x0500 +#define MT_RF_CSR_CFG_DATA GENMASK(7, 0) +#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8) +#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14) +#define MT_RF_CSR_CFG_WR BIT(30) +#define MT_RF_CSR_CFG_KICK BIT(31) + +#define MT_RF_BYPASS_0 0x0504 +#define MT_RF_BYPASS_1 0x0508 +#define MT_RF_SETTING_0 0x050c + +#define MT_RF_DATA_WRITE 0x0524 + +#define MT_RF_CTRL 0x0528 +#define MT_RF_CTRL_ADDR GENMASK(11, 0) +#define MT_RF_CTRL_WRITE BIT(12) +#define MT_RF_CTRL_BUSY BIT(13) +#define MT_RF_CTRL_IDX BIT(16) + +#define MT_RF_DATA_READ 0x052c + +#define MT_FCE_PSE_CTRL 0x0800 +#define MT_FCE_PARAMETERS 0x0804 +#define MT_FCE_CSO 0x0808 + +#define MT_FCE_L2_STUFF 0x080c +#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0) +#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1) +#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2) +#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3) +#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4) +#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5) +#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8) +#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16) +#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24) + +#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824 + +#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0 +#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4 
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8 + +#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4 + +#define MT_PAUSE_ENABLE_CONTROL1 0x0a38 + +#define MT_FCE_SKIP_FS 0x0a6c + +#define MT_MAC_CSR0 0x1000 + +#define MT_MAC_SYS_CTRL 0x1004 +#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0) +#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1) +#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2) +#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3) + +#define MT_MAC_ADDR_DW0 0x1008 +#define MT_MAC_ADDR_DW1 0x100c +#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16) + +#define MT_MAC_BSSID_DW0 0x1010 +#define MT_MAC_BSSID_DW1 0x1014 +#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0) +#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16) +#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18) +#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21) +#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22) +#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23) +#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24) + +#define MT_MAX_LEN_CFG 0x1018 +#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12) + +#define MT_BBP_CSR_CFG 0x101c +#define MT_BBP_CSR_CFG_VAL GENMASK(7, 0) +#define MT_BBP_CSR_CFG_REG_NUM GENMASK(15, 8) +#define MT_BBP_CSR_CFG_READ BIT(16) +#define MT_BBP_CSR_CFG_BUSY BIT(17) +#define MT_BBP_CSR_CFG_PAR_DUR BIT(18) +#define MT_BBP_CSR_CFG_RW_MODE BIT(19) + +#define MT_AMPDU_MAX_LEN_20M1S 0x1030 +#define MT_AMPDU_MAX_LEN_20M2S 0x1034 +#define MT_AMPDU_MAX_LEN_40M1S 0x1038 +#define MT_AMPDU_MAX_LEN_40M2S 0x103c +#define MT_AMPDU_MAX_LEN 0x1040 + +#define MT_WCID_DROP_BASE 0x106c +#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4) +#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32) + +#define MT_BCN_BYPASS_MASK 0x108c + +#define MT_MAC_APC_BSSID_BASE 0x1090 +#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8)) +#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4)) +#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0) +#define MT_MAC_APC_BSSID0_H_EN BIT(16) + +#define MT_XIFS_TIME_CFG 0x1100 +#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0) +#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8) +#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16) +#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20) +#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29) + +#define MT_BKOFF_SLOT_CFG 0x1104 +#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0) +#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8) + +#define MT_BEACON_TIME_CFG 0x1114 +#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0) +#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16) +#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17) +#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19) +#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20) +#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24) + +#define MT_TBTT_SYNC_CFG 0x1118 +#define MT_TBTT_TIMER_CFG 0x1124 + +#define MT_INT_TIMER_CFG 0x1128 +#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0) +#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16) + +#define MT_INT_TIMER_EN 0x112c +#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0) +#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1) + +#define MT_MAC_STATUS 0x1200 +#define MT_MAC_STATUS_TX BIT(0) +#define MT_MAC_STATUS_RX BIT(1) + +#define MT_PWR_PIN_CFG 0x1204 +#define MT_AUX_CLK_CFG 0x120c + +#define MT_BB_PA_MODE_CFG0 0x1214 +#define MT_BB_PA_MODE_CFG1 0x1218 +#define MT_RF_PA_MODE_CFG0 0x121c +#define MT_RF_PA_MODE_CFG1 0x1220 + +#define MT_RF_PA_MODE_ADJ0 0x1228 +#define MT_RF_PA_MODE_ADJ1 0x122c + +#define MT_DACCLK_EN_DLY_CFG 0x1264 + +#define MT_EDCA_CFG_BASE 0x1300 +#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2)) +#define MT_EDCA_CFG_TXOP GENMASK(7, 0) 
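+
+/* Illustrative aside, not from the original patch: the four EDCA fields
+ * declared around this comment are typically composed into a single
+ * per-AC register. A sketch, assuming mac80211's
+ * ieee80211_tx_queue_params as the source (the CWMIN/CWMAX fields hold
+ * exponents, hence the fls()):
+ *
+ *	u32 val = MT76_SET(MT_EDCA_CFG_TXOP, params->txop) |
+ *		  MT76_SET(MT_EDCA_CFG_AIFSN, params->aifs) |
+ *		  MT76_SET(MT_EDCA_CFG_CWMIN, fls(params->cw_min)) |
+ *		  MT76_SET(MT_EDCA_CFG_CWMAX, fls(params->cw_max));
+ *	mt7601u_wr(dev, MT_EDCA_CFG_AC(queue), val);
+ */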
+#define MT_EDCA_CFG_AIFSN GENMASK(11, 8) +#define MT_EDCA_CFG_CWMIN GENMASK(15, 12) +#define MT_EDCA_CFG_CWMAX GENMASK(19, 16) + +#define MT_TX_PWR_CFG_0 0x1314 +#define MT_TX_PWR_CFG_1 0x1318 +#define MT_TX_PWR_CFG_2 0x131c +#define MT_TX_PWR_CFG_3 0x1320 +#define MT_TX_PWR_CFG_4 0x1324 + +#define MT_TX_BAND_CFG 0x132c +#define MT_TX_BAND_CFG_UPPER_40M BIT(0) +#define MT_TX_BAND_CFG_5G BIT(1) +#define MT_TX_BAND_CFG_2G BIT(2) + +#define MT_HT_FBK_TO_LEGACY 0x1384 +#define MT_TX_MPDU_ADJ_INT 0x1388 + +#define MT_TX_PWR_CFG_7 0x13d4 +#define MT_TX_PWR_CFG_8 0x13d8 +#define MT_TX_PWR_CFG_9 0x13dc + +#define MT_TX_SW_CFG0 0x1330 +#define MT_TX_SW_CFG1 0x1334 +#define MT_TX_SW_CFG2 0x1338 + +#define MT_TXOP_CTRL_CFG 0x1340 +#define MT_TXOP_TRUN_EN GENMASK(5, 0) +#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8) +#define MT_TXOP_CTRL + +#define MT_TX_RTS_CFG 0x1344 +#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0) +#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8) +#define MT_TX_RTS_FALLBACK BIT(24) + +#define MT_TX_TIMEOUT_CFG 0x1348 +#define MT_TX_RETRY_CFG 0x134c +#define MT_TX_LINK_CFG 0x1350 +#define MT_HT_FBK_CFG0 0x1354 +#define MT_HT_FBK_CFG1 0x1358 +#define MT_LG_FBK_CFG0 0x135c +#define MT_LG_FBK_CFG1 0x1360 + +#define MT_CCK_PROT_CFG 0x1364 +#define MT_OFDM_PROT_CFG 0x1368 +#define MT_MM20_PROT_CFG 0x136c +#define MT_MM40_PROT_CFG 0x1370 +#define MT_GF20_PROT_CFG 0x1374 +#define MT_GF40_PROT_CFG 0x1378 + +#define MT_PROT_RATE GENMASK(15, 0) +#define MT_PROT_CTRL_RTS_CTS BIT(16) +#define MT_PROT_CTRL_CTS2SELF BIT(17) +#define MT_PROT_NAV_SHORT BIT(18) +#define MT_PROT_NAV_LONG BIT(19) +#define MT_PROT_TXOP_ALLOW_CCK BIT(20) +#define MT_PROT_TXOP_ALLOW_OFDM BIT(21) +#define MT_PROT_TXOP_ALLOW_MM20 BIT(22) +#define MT_PROT_TXOP_ALLOW_MM40 BIT(23) +#define MT_PROT_TXOP_ALLOW_GF20 BIT(24) +#define MT_PROT_TXOP_ALLOW_GF40 BIT(25) +#define MT_PROT_RTS_THR_EN BIT(26) +#define MT_PROT_RATE_CCK_11 0x0003 +#define MT_PROT_RATE_OFDM_6 0x4000 +#define MT_PROT_RATE_OFDM_24 0x4004 +#define MT_PROT_RATE_DUP_OFDM_24 0x4084 +#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20) +#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \ + ~MT_PROT_TXOP_ALLOW_MM40 & \ + ~MT_PROT_TXOP_ALLOW_GF40) + +#define MT_EXP_ACK_TIME 0x1380 + +#define MT_TX_PWR_CFG_0_EXT 0x1390 +#define MT_TX_PWR_CFG_1_EXT 0x1394 + +#define MT_TX_FBK_LIMIT 0x1398 +#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0) +#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8) +#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16) +#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17) +#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18) + +#define MT_TX0_RF_GAIN_CORR 0x13a0 +#define MT_TX1_RF_GAIN_CORR 0x13a4 +#define MT_TX0_RF_GAIN_ATTEN 0x13a8 + +#define MT_TX_ALC_CFG_0 0x13b0 +#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0) +#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8) +#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16) +#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24) + +#define MT_TX_ALC_CFG_1 0x13b4 +#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0) + +#define MT_TX_ALC_CFG_2 0x13a8 +#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0) + +#define MT_TX0_BB_GAIN_ATTEN 0x13c0 + +#define MT_TX_ALC_VGA3 0x13c8 + +#define MT_TX_PROT_CFG6 0x13e0 +#define MT_TX_PROT_CFG7 0x13e4 +#define MT_TX_PROT_CFG8 0x13e8 + +#define MT_PIFS_TX_CFG 0x13ec + +#define MT_RX_FILTR_CFG 0x1400 + +#define MT_RX_FILTR_CFG_CRC_ERR BIT(0) +#define MT_RX_FILTR_CFG_PHY_ERR BIT(1) +#define MT_RX_FILTR_CFG_PROMISC BIT(2) +#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3) +#define MT_RX_FILTR_CFG_VER_ERR BIT(4) +#define 
MT_RX_FILTR_CFG_MCAST BIT(5) +#define MT_RX_FILTR_CFG_BCAST BIT(6) +#define MT_RX_FILTR_CFG_DUP BIT(7) +#define MT_RX_FILTR_CFG_CFACK BIT(8) +#define MT_RX_FILTR_CFG_CFEND BIT(9) +#define MT_RX_FILTR_CFG_ACK BIT(10) +#define MT_RX_FILTR_CFG_CTS BIT(11) +#define MT_RX_FILTR_CFG_RTS BIT(12) +#define MT_RX_FILTR_CFG_PSPOLL BIT(13) +#define MT_RX_FILTR_CFG_BA BIT(14) +#define MT_RX_FILTR_CFG_BAR BIT(15) +#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16) + +#define MT_AUTO_RSP_CFG 0x1404 + +#define MT_AUTO_RSP_PREAMB_SHORT BIT(4) + +#define MT_LEGACY_BASIC_RATE 0x1408 +#define MT_HT_BASIC_RATE 0x140c + +#define MT_RX_PARSER_CFG 0x1418 +#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0) + +#define MT_EXT_CCA_CFG 0x141c +#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0) +#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2) +#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4) +#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6) +#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8) +#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12) + +#define MT_TX_SW_CFG3 0x1478 + +#define MT_PN_PAD_MODE 0x150c + +#define MT_TXOP_HLDR_ET 0x1608 + +#define MT_PROT_AUTO_TX_CFG 0x1648 + +#define MT_RX_STA_CNT0 0x1700 +#define MT_RX_STA_CNT1 0x1704 +#define MT_RX_STA_CNT2 0x1708 +#define MT_TX_STA_CNT0 0x170c +#define MT_TX_STA_CNT1 0x1710 +#define MT_TX_STA_CNT2 0x1714 + +/* Vendor driver defines content of the second word of STAT_FIFO as follows: + * MT_TX_STAT_FIFO_RATE GENMASK(26, 16) + * MT_TX_STAT_FIFO_ETXBF BIT(27) + * MT_TX_STAT_FIFO_SND BIT(28) + * MT_TX_STAT_FIFO_ITXBF BIT(29) + * However, tests show that b16-31 have the same layout as TXWI rate_ctl + * with rate set to rate at which frame was acked. + */ +#define MT_TX_STAT_FIFO 0x1718 +#define MT_TX_STAT_FIFO_VALID BIT(0) +#define MT_TX_STAT_FIFO_PID_TYPE GENMASK(4, 1) +#define MT_TX_STAT_FIFO_SUCCESS BIT(5) +#define MT_TX_STAT_FIFO_AGGR BIT(6) +#define MT_TX_STAT_FIFO_ACKREQ BIT(7) +#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8) +#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16) + +#define MT_TX_AGG_STAT 0x171c + +#define MT_TX_AGG_CNT_BASE0 0x1720 + +#define MT_MPDU_DENSITY_CNT 0x1740 + +#define MT_TX_AGG_CNT_BASE1 0x174c + +#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? 
\ + MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \ + MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2)) + +#define MT_TX_STAT_FIFO_EXT 0x1798 +#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0) + +#define MT_BBP_CORE_BASE 0x2000 +#define MT_BBP_IBI_BASE 0x2100 +#define MT_BBP_AGC_BASE 0x2300 +#define MT_BBP_TXC_BASE 0x2400 +#define MT_BBP_RXC_BASE 0x2500 +#define MT_BBP_TXO_BASE 0x2600 +#define MT_BBP_TXBE_BASE 0x2700 +#define MT_BBP_RXFE_BASE 0x2800 +#define MT_BBP_RXO_BASE 0x2900 +#define MT_BBP_DFS_BASE 0x2a00 +#define MT_BBP_TR_BASE 0x2b00 +#define MT_BBP_CAL_BASE 0x2c00 +#define MT_BBP_DSC_BASE 0x2e00 +#define MT_BBP_PFMU_BASE 0x2f00 + +#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2)) + +#define MT_BBP_CORE_R1_BW GENMASK(4, 3) + +#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8) +#define MT_BBP_AGC_R0_BW GENMASK(14, 12) + +/* AGC, R4/R5 */ +#define MT_BBP_AGC_LNA_GAIN GENMASK(21, 16) + +/* AGC, R8/R9 */ +#define MT_BBP_AGC_GAIN GENMASK(14, 8) + +#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0) +#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8) + +#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0) + +#define MT_WCID_ADDR_BASE 0x1800 +#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8) + +#define MT_SRAM_BASE 0x4000 + +#define MT_WCID_KEY_BASE 0x8000 +#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32) + +#define MT_WCID_IV_BASE 0xa000 +#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8) + +#define MT_WCID_ATTR_BASE 0xa800 +#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4) + +#define MT_WCID_ATTR_PAIRWISE BIT(0) +#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1) +#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4) +#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7) +#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10) +#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11) +#define MT_WCID_ATTR_WAPI_MCBC BIT(15) +#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24) + +#define MT_SKEY_BASE_0 0xac00 +#define MT_SKEY_BASE_1 0xb400 +#define MT_SKEY_0(_bss, _idx) \ + (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32) +#define MT_SKEY_1(_bss, _idx) \ + (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32) +#define MT_SKEY(_bss, _idx) \ + ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx)) + +#define MT_SKEY_MODE_BASE_0 0xb000 +#define MT_SKEY_MODE_BASE_1 0xb3f0 +#define MT_SKEY_MODE_0(_bss) \ + (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2)) +#define MT_SKEY_MODE_1(_bss) \ + (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2)) +#define MT_SKEY_MODE(_bss) \ + ((_bss & 8) ? 
MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss)) +#define MT_SKEY_MODE_MASK GENMASK(3, 0) +#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1))) + +#define MT_BEACON_BASE 0xc000 + +#define MT_TEMP_SENSOR 0x1d000 +#define MT_TEMP_SENSOR_VAL GENMASK(6, 0) + +enum mt76_cipher_type { + MT_CIPHER_NONE, + MT_CIPHER_WEP40, + MT_CIPHER_WEP104, + MT_CIPHER_TKIP, + MT_CIPHER_AES_CCMP, + MT_CIPHER_CKIP40, + MT_CIPHER_CKIP104, + MT_CIPHER_CKIP128, + MT_CIPHER_WAPI, +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.c b/drivers/net/wireless/mediatek/mt7601u/trace.c new file mode 100644 index 000000000000..8abdd3cd546d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/trace.c @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "trace.h" + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.h b/drivers/net/wireless/mediatek/mt7601u/trace.h new file mode 100644 index 000000000000..289897300ef0 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/trace.h @@ -0,0 +1,400 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#if !defined(__MT7601U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __MT7601U_TRACE_H + +#include +#include "mt7601u.h" +#include "mac.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mt7601u + +#define MAXNAME 32 +#define DEV_ENTRY __array(char, wiphy_name, 32) +#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \ + wiphy_name(dev->hw->wiphy), MAXNAME) +#define DEV_PR_FMT "%s " +#define DEV_PR_ARG __entry->wiphy_name + +#define REG_ENTRY __field(u32, reg) __field(u32, val) +#define REG_ASSIGN __entry->reg = reg; __entry->val = val +#define REG_PR_FMT "%04x=%08x" +#define REG_PR_ARG __entry->reg, __entry->val + +DECLARE_EVENT_CLASS(dev_reg_evt, + TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val), + TP_STRUCT__entry( + DEV_ENTRY + REG_ENTRY + ), + TP_fast_assign( + DEV_ASSIGN; + REG_ASSIGN; + ), + TP_printk( + DEV_PR_FMT REG_PR_FMT, + DEV_PR_ARG, REG_PR_ARG + ) +); + +DEFINE_EVENT(dev_reg_evt, reg_read, + TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val) +); + +DEFINE_EVENT(dev_reg_evt, reg_write, + TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val) +); + +TRACE_EVENT(mt_submit_urb, + TP_PROTO(struct mt7601u_dev *dev, struct urb *u), + TP_ARGS(dev, u), + TP_STRUCT__entry( + DEV_ENTRY __field(unsigned, pipe) __field(u32, len) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->pipe = u->pipe; + __entry->len = u->transfer_buffer_length; + ), + TP_printk(DEV_PR_FMT "p:%08x len:%u", + DEV_PR_ARG, __entry->pipe, __entry->len) +); + +#define trace_mt_submit_urb_sync(__dev, __pipe, __len) ({ \ + struct urb u; \ + u.pipe = __pipe; \ + u.transfer_buffer_length = __len; \ + trace_mt_submit_urb(__dev, &u); \ +}) + +TRACE_EVENT(mt_mcu_msg_send, + TP_PROTO(struct mt7601u_dev *dev, + struct sk_buff *skb, u32 csum, bool resp), + TP_ARGS(dev, skb, csum, resp), + TP_STRUCT__entry( + DEV_ENTRY + __field(u32, info) + __field(u32, csum) + __field(bool, resp) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->info = *(u32 *)skb->data; + __entry->csum = csum; + __entry->resp = resp; + ), + TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d", + DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp) +); + +TRACE_EVENT(mt_vend_req, + TP_PROTO(struct mt7601u_dev *dev, unsigned pipe, u8 req, u8 req_type, + u16 val, u16 offset, void *buf, size_t buflen, int ret), + TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret), + TP_STRUCT__entry( + DEV_ENTRY + __field(unsigned, pipe) __field(u8, req) __field(u8, req_type) + __field(u16, val) __field(u16, offset) __field(void*, buf) + __field(int, buflen) __field(int, ret) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->pipe = pipe; + __entry->req = req; + __entry->req_type = req_type; + __entry->val = val; + __entry->offset = offset; + __entry->buf = buf; + __entry->buflen = buflen; + __entry->ret = ret; + ), + TP_printk(DEV_PR_FMT + "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d", + DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req, + __entry->req_type, __entry->val, __entry->offset, + !!__entry->buf, __entry->buflen) +); + +TRACE_EVENT(ee_read, + TP_PROTO(struct mt7601u_dev *dev, int offset, u16 val), + TP_ARGS(dev, offset, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(int, o) __field(u16, v) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->o = offset; + __entry->v = val; + ), + TP_printk(DEV_PR_FMT "%04x=%04x", DEV_PR_ARG, __entry->o, __entry->v) +); + +DECLARE_EVENT_CLASS(dev_rf_reg_evt, + TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val), + TP_ARGS(dev, bank, 
reg, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, bank) + __field(u8, reg) + __field(u8, val) + ), + TP_fast_assign( + DEV_ASSIGN; + REG_ASSIGN; + __entry->bank = bank; + ), + TP_printk( + DEV_PR_FMT "%02hhx:%02hhx=%02hhx", + DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val + ) +); + +DEFINE_EVENT(dev_rf_reg_evt, rf_read, + TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val), + TP_ARGS(dev, bank, reg, val) +); + +DEFINE_EVENT(dev_rf_reg_evt, rf_write, + TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val), + TP_ARGS(dev, bank, reg, val) +); + +DECLARE_EVENT_CLASS(dev_bbp_reg_evt, + TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val), + TP_ARGS(dev, reg, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, reg) + __field(u8, val) + ), + TP_fast_assign( + DEV_ASSIGN; + REG_ASSIGN; + ), + TP_printk( + DEV_PR_FMT "%02hhx=%02hhx", + DEV_PR_ARG, __entry->reg, __entry->val + ) +); + +DEFINE_EVENT(dev_bbp_reg_evt, bbp_read, + TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val), + TP_ARGS(dev, reg, val) +); + +DEFINE_EVENT(dev_bbp_reg_evt, bbp_write, + TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val), + TP_ARGS(dev, reg, val) +); + +DECLARE_EVENT_CLASS(dev_simple_evt, + TP_PROTO(struct mt7601u_dev *dev, u8 val), + TP_ARGS(dev, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, val) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->val = val; + ), + TP_printk( + DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val + ) +); + +DEFINE_EVENT(dev_simple_evt, temp_mode, + TP_PROTO(struct mt7601u_dev *dev, u8 val), + TP_ARGS(dev, val) +); + +DEFINE_EVENT(dev_simple_evt, read_temp, + TP_PROTO(struct mt7601u_dev *dev, u8 val), + TP_ARGS(dev, val) +); + +DEFINE_EVENT(dev_simple_evt, freq_cal_adjust, + TP_PROTO(struct mt7601u_dev *dev, u8 val), + TP_ARGS(dev, val) +); + +TRACE_EVENT(freq_cal_offset, + TP_PROTO(struct mt7601u_dev *dev, u8 phy_mode, s8 freq_off), + TP_ARGS(dev, phy_mode, freq_off), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, phy_mode) + __field(s8, freq_off) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->phy_mode = phy_mode; + __entry->freq_off = freq_off; + ), + TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx", + DEV_PR_ARG, __entry->phy_mode, __entry->freq_off) +); + +TRACE_EVENT(mt_rx, + TP_PROTO(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, u32 f), + TP_ARGS(dev, rxwi, f), + TP_STRUCT__entry( + DEV_ENTRY + __field_struct(struct mt7601u_rxwi, rxwi) + __field(u32, fce_info) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->rxwi = *rxwi; + __entry->fce_info = f; + ), + TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x frag_sn:%04hx rate:%04hx " + "uknw:%02hhx z:%02hhx%02hhx%02hhx snr:%02hhx " + "ant:%02hhx gain:%02hhx freq_o:%02hhx " + "r:%08x ea:%08x fce:%08x", DEV_PR_ARG, + le32_to_cpu(__entry->rxwi.rxinfo), + le32_to_cpu(__entry->rxwi.ctl), + le16_to_cpu(__entry->rxwi.frag_sn), + le16_to_cpu(__entry->rxwi.rate), + __entry->rxwi.unknown, + __entry->rxwi.zero[0], __entry->rxwi.zero[1], + __entry->rxwi.zero[2], + __entry->rxwi.snr, __entry->rxwi.ant, + __entry->rxwi.gain, __entry->rxwi.freq_off, + __entry->rxwi.resv2, __entry->rxwi.expert_ant, + __entry->fce_info) +); + +TRACE_EVENT(mt_tx, + TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb, + struct mt76_sta *sta, struct mt76_txwi *h), + TP_ARGS(dev, skb, sta, h), + TP_STRUCT__entry( + DEV_ENTRY + __field_struct(struct mt76_txwi, h) + __field(struct sk_buff *, skb) + __field(struct mt76_sta *, sta) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->h = *h; + __entry->skb = skb; + __entry->sta = sta; + ), + TP_printk(DEV_PR_FMT "skb:%p 
sta:%p flg:%04hx rate_ctl:%04hx " + "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG, + __entry->skb, __entry->sta, + le16_to_cpu(__entry->h.flags), + le16_to_cpu(__entry->h.rate_ctl), + __entry->h.ack_ctl, __entry->h.wcid, + le16_to_cpu(__entry->h.len_ctl)) +); + +TRACE_EVENT(mt_tx_dma_done, + TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb), + TP_ARGS(dev, skb), + TP_STRUCT__entry( + DEV_ENTRY + __field(struct sk_buff *, skb) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->skb = skb; + ), + TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb) +); + +TRACE_EVENT(mt_tx_status_cleaned, + TP_PROTO(struct mt7601u_dev *dev, int cleaned), + TP_ARGS(dev, cleaned), + TP_STRUCT__entry( + DEV_ENTRY + __field(int, cleaned) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->cleaned = cleaned; + ), + TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned) +); + +TRACE_EVENT(mt_tx_status, + TP_PROTO(struct mt7601u_dev *dev, u32 stat1, u32 stat2), + TP_ARGS(dev, stat1, stat2), + TP_STRUCT__entry( + DEV_ENTRY + __field(u32, stat1) __field(u32, stat2) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->stat1 = stat1; + __entry->stat2 = stat2; + ), + TP_printk(DEV_PR_FMT "%08x %08x", + DEV_PR_ARG, __entry->stat1, __entry->stat2) +); + +TRACE_EVENT(mt_rx_dma_aggr, + TP_PROTO(struct mt7601u_dev *dev, int cnt, bool paged), + TP_ARGS(dev, cnt, paged), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, cnt) + __field(bool, paged) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->cnt = cnt; + __entry->paged = paged; + ), + TP_printk(DEV_PR_FMT "cnt:%d paged:%d", + DEV_PR_ARG, __entry->cnt, __entry->paged) +); + +DEFINE_EVENT(dev_simple_evt, set_key, + TP_PROTO(struct mt7601u_dev *dev, u8 val), + TP_ARGS(dev, val) +); + +TRACE_EVENT(set_shared_key, + TP_PROTO(struct mt7601u_dev *dev, u8 vid, u8 key), + TP_ARGS(dev, vid, key), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, vid) + __field(u8, key) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->vid = vid; + __entry->key = key; + ), + TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx", + DEV_PR_ARG, __entry->vid, __entry->key) +); + +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +#include diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c new file mode 100644 index 000000000000..0be2080ceab3 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/tx.c @@ -0,0 +1,319 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "mt7601u.h" +#include "trace.h" + +enum mt76_txq_id { + MT_TXQ_VO = IEEE80211_AC_VO, + MT_TXQ_VI = IEEE80211_AC_VI, + MT_TXQ_BE = IEEE80211_AC_BE, + MT_TXQ_BK = IEEE80211_AC_BK, + MT_TXQ_PSD, + MT_TXQ_MCU, + __MT_TXQ_MAX +}; + +/* Hardware uses mirrored order of queues with Q0 having the highest priority */ +static u8 q2hwq(u8 q) +{ + return q ^ 0x3; +} + +/* Take mac80211 Q id from the skb and translate it to hardware Q id */ +static u8 skb2q(struct sk_buff *skb) +{ + int qid = skb_get_queue_mapping(skb); + + if (WARN_ON(qid >= MT_TXQ_PSD)) { + qid = MT_TXQ_BE; + skb_set_queue_mapping(skb, qid); + } + + return q2hwq(qid); +} + +/* Note: TX retry reporting is a bit broken. + * Retries are reported only once per AMPDU and often come a frame early + * i.e. they are reported in the last status preceding the AMPDU. Apart + * from the fact that it's hard to know the length of the AMPDU (which is + * required to know to how many consecutive frames retries should be + * applied), if status comes early on full FIFO it gets lost and retries + * of the whole AMPDU become invisible. + * As a work-around encode the desired rate in PKT_ID of TX descriptor + * and based on that guess the retries (every rate is tried once). + * Only downside here is that for MCS0 we have to rely solely on + * transmission failures as no retries can ever be reported. + * Not having to read EXT_FIFO has a nice effect of doubling the number + * of reports which can be fetched. + * Also the vendor driver never uses the EXT_FIFO register so it may be + * undertested. + */ +static u8 mt7601u_tx_pktid_enc(struct mt7601u_dev *dev, u8 rate, bool is_probe) +{ + u8 encoded = (rate + 1) + is_probe * 8; + + /* Because PKT_ID 0 disables status reporting only 15 values are + * available but 16 are needed (8 MCS * 2 for encoding is_probe) + * - we need to cram together two rates. MCS0 and MCS7 with is_probe + * share PKT_ID 9. 
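+	 * Worked example: rate 3 encodes to PKT_ID 4, or 12 with is_probe;
+	 * rate 7 with is_probe would encode to 16, so it is folded onto
+	 * PKT_ID 9, the same value as rate 0 with is_probe. The decoder
+	 * can still tell the two apart: a non-zero effective rate rules
+	 * out the MCS0 interpretation.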
+ */ + if (is_probe && rate == 7) + return encoded - 7; + + return encoded; +} + +static void +mt7601u_tx_pktid_dec(struct mt7601u_dev *dev, struct mt76_tx_status *stat) +{ + u8 req_rate = stat->pktid; + u8 eff_rate = stat->rate & 0x7; + + req_rate -= 1; + + if (req_rate > 7) { + stat->is_probe = true; + req_rate -= 8; + + /* Decide between MCS0 and MCS7 which share pktid 9 */ + if (!req_rate && eff_rate) + req_rate = 7; + } + + stat->retry = req_rate - eff_rate; +} + +static void mt7601u_tx_skb_remove_dma_overhead(struct sk_buff *skb, + struct ieee80211_tx_info *info) +{ + int pkt_len = (unsigned long)info->status.status_driver_data[0]; + + skb_pull(skb, sizeof(struct mt76_txwi) + 4); + if (ieee80211_get_hdrlen_from_skb(skb) % 4) + mt76_remove_hdr_pad(skb); + + skb_trim(skb, pkt_len); +} + +void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + mt7601u_tx_skb_remove_dma_overhead(skb, info); + + ieee80211_tx_info_clear_status(info); + info->status.rates[0].idx = -1; + info->flags |= IEEE80211_TX_STAT_ACK; + ieee80211_tx_status(dev->hw, skb); +} + +static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb) +{ + int hdr_len = ieee80211_get_hdrlen_from_skb(skb); + u32 need_head; + + need_head = sizeof(struct mt76_txwi) + 4; + if (hdr_len % 4) + need_head += 2; + + return skb_cow(skb, need_head); +} + +static struct mt76_txwi * +mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb, + struct ieee80211_sta *sta, struct mt76_wcid *wcid, + int pkt_len) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_rate *rate = &info->control.rates[0]; + struct mt76_txwi *txwi; + unsigned long flags; + bool is_probe; + u32 pkt_id; + u16 rate_ctl; + u8 nss; + + txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi)); + memset(txwi, 0, sizeof(*txwi)); + + if (!wcid->tx_rate_set) + ieee80211_get_tx_rates(info->control.vif, sta, skb, + info->control.rates, 1); + + spin_lock_irqsave(&dev->lock, flags); + if (rate->idx < 0 || !rate->count) + rate_ctl = wcid->tx_rate; + else + rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss); + spin_unlock_irqrestore(&dev->lock, flags); + txwi->rate_ctl = cpu_to_le16(rate_ctl); + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; + + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { + u8 ba_size = IEEE80211_MIN_AMPDU_BUF; + + ba_size <<= sta->ht_cap.ampdu_factor; + ba_size = min_t(int, 63, ba_size); + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + ba_size = 0; + txwi->ack_ctl |= MT76_SET(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); + + txwi->flags = cpu_to_le16(MT_TXWI_FLAGS_AMPDU | + MT76_SET(MT_TXWI_FLAGS_MPDU_DENSITY, + sta->ht_cap.ampdu_density)); + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + txwi->flags = 0; + } + + txwi->wcid = wcid->idx; + + is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); + pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe); + pkt_len |= MT76_SET(MT_TXWI_LEN_PKTID, pkt_id); + txwi->len_ctl = cpu_to_le16(pkt_len); + + return txwi; +} + +void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct mt7601u_dev *dev = hw->priv; + struct ieee80211_vif *vif = info->control.vif; + struct ieee80211_sta *sta = control->sta; + struct mt76_sta *msta = 
NULL; + struct mt76_wcid *wcid = dev->mon_wcid; + struct mt76_txwi *txwi; + int pkt_len = skb->len; + int hw_q = skb2q(skb); + + BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); + info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len; + + if (mt7601u_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) { + ieee80211_free_txskb(dev->hw, skb); + return; + } + + if (sta) { + msta = (struct mt76_sta *) sta->drv_priv; + wcid = &msta->wcid; + } else if (vif) { + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + + wcid = &mvif->group_wcid; + } + + txwi = mt7601u_push_txwi(dev, skb, sta, wcid, pkt_len); + + if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q)) + return; + + trace_mt_tx(dev, skb, msta, txwi); +} + +void mt7601u_tx_stat(struct work_struct *work) +{ + struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev, + stat_work.work); + struct mt76_tx_status stat; + unsigned long flags; + int cleaned = 0; + + while (!test_bit(MT7601U_STATE_REMOVED, &dev->state)) { + stat = mt7601u_mac_fetch_tx_status(dev); + if (!stat.valid) + break; + + mt7601u_tx_pktid_dec(dev, &stat); + mt76_send_tx_status(dev, &stat); + + cleaned++; + } + trace_mt_tx_status_cleaned(dev, cleaned); + + spin_lock_irqsave(&dev->tx_lock, flags); + if (cleaned) + queue_delayed_work(dev->stat_wq, &dev->stat_work, + msecs_to_jiffies(10)); + else if (test_and_clear_bit(MT7601U_STATE_MORE_STATS, &dev->state)) + queue_delayed_work(dev->stat_wq, &dev->stat_work, + msecs_to_jiffies(20)); + else + clear_bit(MT7601U_STATE_READING_STATS, &dev->state); + spin_unlock_irqrestore(&dev->tx_lock, flags); +} + +int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params) +{ + struct mt7601u_dev *dev = hw->priv; + u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue); + u32 val; + + /* TODO: should we do funny things with the parameters? + * See what mt7601u_set_default_edca() used to do in init.c. + */ + + if (params->cw_min) + cw_min = fls(params->cw_min); + if (params->cw_max) + cw_max = fls(params->cw_max); + + WARN_ON(params->txop > 0xff); + WARN_ON(params->aifs > 0xf); + WARN_ON(cw_min > 0xf); + WARN_ON(cw_max > 0xf); + + val = MT76_SET(MT_EDCA_CFG_AIFSN, params->aifs) | + MT76_SET(MT_EDCA_CFG_CWMIN, cw_min) | + MT76_SET(MT_EDCA_CFG_CWMAX, cw_max); + /* TODO: based on user-controlled EnableTxBurst var vendor drv sets + * a really long txop on AC0 (see connect.c:2009) but only on + * connect? When not connected should be 0. 
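+	 * The hard-coded 0x60 below presumably ends up in the TXOP field,
+	 * i.e. 96 units of 32us, roughly 3ms of burst time (assumption
+	 * based on the usual EDCA_CFG register layout).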
+ */ + if (!hw_q) + val |= 0x60; + else + val |= MT76_SET(MT_EDCA_CFG_TXOP, params->txop); + mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val); + + val = mt76_rr(dev, MT_WMM_TXOP(hw_q)); + val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q)); + val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q); + mt76_wr(dev, MT_WMM_TXOP(hw_q), val); + + val = mt76_rr(dev, MT_WMM_AIFSN); + val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q)); + val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q); + mt76_wr(dev, MT_WMM_AIFSN, val); + + val = mt76_rr(dev, MT_WMM_CWMIN); + val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q)); + val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q); + mt76_wr(dev, MT_WMM_CWMIN, val); + + val = mt76_rr(dev, MT_WMM_CWMAX); + val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q)); + val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q); + mt76_wr(dev, MT_WMM_CWMAX, val); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c new file mode 100644 index 000000000000..99e2b3997bfa --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/usb.c @@ -0,0 +1,360 @@ +/* + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include + +#include "mt7601u.h" +#include "usb.h" +#include "trace.h" + +static struct usb_device_id mt7601u_device_table[] = { + { USB_DEVICE(0x0b05, 0x17d3) }, + { USB_DEVICE(0x0e8d, 0x760a) }, + { USB_DEVICE(0x0e8d, 0x760b) }, + { USB_DEVICE(0x13d3, 0x3431) }, + { USB_DEVICE(0x13d3, 0x3434) }, + { USB_DEVICE(0x148f, 0x7601) }, + { USB_DEVICE(0x148f, 0x760a) }, + { USB_DEVICE(0x148f, 0x760b) }, + { USB_DEVICE(0x148f, 0x760c) }, + { USB_DEVICE(0x148f, 0x760d) }, + { USB_DEVICE(0x2001, 0x3d04) }, + { USB_DEVICE(0x2717, 0x4106) }, + { USB_DEVICE(0x2955, 0x0001) }, + { USB_DEVICE(0x2955, 0x1001) }, + { USB_DEVICE(0x2a5f, 0x1000) }, + { USB_DEVICE(0x7392, 0x7710) }, + { 0, } +}; + +bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len, + struct mt7601u_dma_buf *buf) +{ + struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); + + buf->len = len; + buf->urb = usb_alloc_urb(0, GFP_KERNEL); + buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma); + + return !buf->urb || !buf->buf; +} + +void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf) +{ + struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); + + usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma); + usb_free_urb(buf->urb); +} + +int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx, + struct mt7601u_dma_buf *buf, gfp_t gfp, + usb_complete_t complete_fn, void *context) +{ + struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); + unsigned pipe; + int ret; + + if (dir == USB_DIR_IN) + pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[ep_idx]); + else + pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep_idx]); + + usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len, + complete_fn, context); + buf->urb->transfer_dma = buf->dma; + buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + trace_mt_submit_urb(dev, buf->urb); + ret = usb_submit_urb(buf->urb, gfp); + if 
(ret) + dev_err(dev->dev, "Error: submit URB dir:%d ep:%d failed:%d\n", + dir, ep_idx, ret); + return ret; +} + +void mt7601u_complete_urb(struct urb *urb) +{ + struct completion *cmpl = urb->context; + + complete(cmpl); +} + +static int +__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, + const u8 direction, const u16 val, const u16 offset, + void *buf, const size_t buflen) +{ + int i, ret; + struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); + const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE; + const unsigned int pipe = (direction == USB_DIR_IN) ? + usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0); + + for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) { + ret = usb_control_msg(usb_dev, pipe, req, req_type, + val, offset, buf, buflen, + MT_VEND_REQ_TOUT_MS); + trace_mt_vend_req(dev, pipe, req, req_type, val, offset, + buf, buflen, ret); + + if (ret >= 0 || ret == -ENODEV) + return ret; + + msleep(5); + } + + dev_err(dev->dev, "Vendor request req:%02x off:%04x failed:%d\n", + req, offset, ret); + + return ret; +} + +int +mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, + const u8 direction, const u16 val, const u16 offset, + void *buf, const size_t buflen) +{ + int ret; + + mutex_lock(&dev->vendor_req_mutex); + + ret = __mt7601u_vendor_request(dev, req, direction, val, offset, + buf, buflen); + if (ret == -ENODEV) + set_bit(MT7601U_STATE_REMOVED, &dev->state); + + mutex_unlock(&dev->vendor_req_mutex); + + return ret; +} + +void mt7601u_vendor_reset(struct mt7601u_dev *dev) +{ + mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT, + MT_VEND_DEV_MODE_RESET, 0, NULL, 0); +} + +u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset) +{ + int ret; + __le32 reg; + u32 val; + + WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset); + + ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN, + 0, offset, ®, sizeof(reg)); + val = le32_to_cpu(reg); + if (ret > 0 && ret != sizeof(reg)) { + dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n", + ret, offset); + val = ~0; + } + + trace_reg_read(dev, offset, val); + return val; +} + +int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req, + const u16 offset, const u32 val) +{ + int ret; + + ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT, + val & 0xffff, offset, NULL, 0); + if (ret) + return ret; + return mt7601u_vendor_request(dev, req, USB_DIR_OUT, + val >> 16, offset + 2, NULL, 0); +} + +void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val) +{ + WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset); + + mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val); + trace_reg_write(dev, offset, val); +} + +u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val) +{ + val |= mt7601u_rr(dev, offset) & ~mask; + mt7601u_wr(dev, offset, val); + return val; +} + +u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val) +{ + u32 reg = mt7601u_rr(dev, offset); + + val |= reg & ~mask; + if (reg != val) + mt7601u_wr(dev, offset, val); + return val; +} + +void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset, + const void *data, int len) +{ + WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset); + WARN_ONCE(len & 3, "short write copy off:%08x", offset); + + mt7601u_burst_write_regs(dev, offset, data, len / 4); +} + +void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr) +{ + mt7601u_wr(dev, offset, get_unaligned_le32(addr)); + mt7601u_wr(dev, offset + 4, addr[4] | addr[5] << 8); +} + +static int 
mt7601u_assign_pipes(struct usb_interface *usb_intf, + struct mt7601u_dev *dev) +{ + struct usb_endpoint_descriptor *ep_desc; + struct usb_host_interface *intf_desc = usb_intf->cur_altsetting; + unsigned i, ep_i = 0, ep_o = 0; + + BUILD_BUG_ON(sizeof(dev->in_eps) < __MT_EP_IN_MAX); + BUILD_BUG_ON(sizeof(dev->out_eps) < __MT_EP_OUT_MAX); + + for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { + ep_desc = &intf_desc->endpoint[i].desc; + + if (usb_endpoint_is_bulk_in(ep_desc) && + ep_i++ < __MT_EP_IN_MAX) { + dev->in_eps[ep_i - 1] = usb_endpoint_num(ep_desc); + dev->in_max_packet = usb_endpoint_maxp(ep_desc); + /* Note: this is ignored by usb sub-system but vendor + * code does it. We can drop this at some point. + */ + dev->in_eps[ep_i - 1] |= USB_DIR_IN; + } else if (usb_endpoint_is_bulk_out(ep_desc) && + ep_o++ < __MT_EP_OUT_MAX) { + dev->out_eps[ep_o - 1] = usb_endpoint_num(ep_desc); + dev->out_max_packet = usb_endpoint_maxp(ep_desc); + } + } + + if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) { + dev_err(dev->dev, "Error: wrong pipe number in:%d out:%d\n", + ep_i, ep_o); + return -EINVAL; + } + + return 0; +} + +static int mt7601u_probe(struct usb_interface *usb_intf, + const struct usb_device_id *id) +{ + struct usb_device *usb_dev = interface_to_usbdev(usb_intf); + struct mt7601u_dev *dev; + u32 asic_rev, mac_rev; + int ret; + + dev = mt7601u_alloc_device(&usb_intf->dev); + if (!dev) + return -ENOMEM; + + usb_dev = usb_get_dev(usb_dev); + usb_reset_device(usb_dev); + + usb_set_intfdata(usb_intf, dev); + + ret = mt7601u_assign_pipes(usb_intf, dev); + if (ret) + goto err; + ret = mt7601u_wait_asic_ready(dev); + if (ret) + goto err; + + asic_rev = mt7601u_rr(dev, MT_ASIC_VERSION); + mac_rev = mt7601u_rr(dev, MT_MAC_CSR0); + dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n", + asic_rev, mac_rev); + + /* Note: vendor driver skips this check for MT7601U */ + if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) + dev_warn(dev->dev, "Warning: eFUSE not present\n"); + + ret = mt7601u_init_hardware(dev); + if (ret) + goto err; + ret = mt7601u_register_device(dev); + if (ret) + goto err_hw; + + set_bit(MT7601U_STATE_INITIALIZED, &dev->state); + + return 0; +err_hw: + mt7601u_cleanup(dev); +err: + usb_set_intfdata(usb_intf, NULL); + usb_put_dev(interface_to_usbdev(usb_intf)); + + destroy_workqueue(dev->stat_wq); + ieee80211_free_hw(dev->hw); + return ret; +} + +static void mt7601u_disconnect(struct usb_interface *usb_intf) +{ + struct mt7601u_dev *dev = usb_get_intfdata(usb_intf); + + ieee80211_unregister_hw(dev->hw); + mt7601u_cleanup(dev); + + usb_set_intfdata(usb_intf, NULL); + usb_put_dev(interface_to_usbdev(usb_intf)); + + destroy_workqueue(dev->stat_wq); + ieee80211_free_hw(dev->hw); +} + +static int mt7601u_suspend(struct usb_interface *usb_intf, pm_message_t state) +{ + struct mt7601u_dev *dev = usb_get_intfdata(usb_intf); + + mt7601u_cleanup(dev); + + return 0; +} + +static int mt7601u_resume(struct usb_interface *usb_intf) +{ + struct mt7601u_dev *dev = usb_get_intfdata(usb_intf); + + return mt7601u_init_hardware(dev); +} + +MODULE_DEVICE_TABLE(usb, mt7601u_device_table); +MODULE_FIRMWARE(MT7601U_FIRMWARE); +MODULE_LICENSE("GPL"); + +static struct usb_driver mt7601u_driver = { + .name = KBUILD_MODNAME, + .id_table = mt7601u_device_table, + .probe = mt7601u_probe, + .disconnect = mt7601u_disconnect, + .suspend = mt7601u_suspend, + .resume = mt7601u_resume, + .reset_resume = mt7601u_resume, + .soft_unbind = 1, + .disable_hub_initiated_lpm = 1, +}; 
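+
+/* Suspend tears the device down completely and resume re-runs the full
+ * hardware init, so .reset_resume can reuse the same callback: no device
+ * state is expected to survive. module_usb_driver() below expands to the
+ * standard module_init()/module_exit() pair for this usb_driver.
+ */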
+module_usb_driver(mt7601u_driver); diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.h b/drivers/net/wireless/mediatek/mt7601u/usb.h new file mode 100644 index 000000000000..49e188fa3798 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/usb.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT7601U_USB_H +#define __MT7601U_USB_H + +#include "mt7601u.h" + +#define MT7601U_FIRMWARE "mt7601u.bin" + +#define MT_VEND_REQ_MAX_RETRY 10 +#define MT_VEND_REQ_TOUT_MS 300 + +#define MT_VEND_DEV_MODE_RESET 1 + +enum mt_vendor_req { + MT_VEND_DEV_MODE = 1, + MT_VEND_WRITE = 2, + MT_VEND_MULTI_READ = 7, + MT_VEND_WRITE_FCE = 0x42, +}; + +enum mt_usb_ep_in { + MT_EP_IN_PKT_RX, + MT_EP_IN_CMD_RESP, + __MT_EP_IN_MAX, +}; + +enum mt_usb_ep_out { + MT_EP_OUT_INBAND_CMD, + MT_EP_OUT_AC_BK, + MT_EP_OUT_AC_BE, + MT_EP_OUT_AC_VI, + MT_EP_OUT_AC_VO, + MT_EP_OUT_HCCA, + __MT_EP_OUT_MAX, +}; + +static inline struct usb_device *mt7601u_to_usb_dev(struct mt7601u_dev *mt7601u) +{ + return interface_to_usbdev(to_usb_interface(mt7601u->dev)); +} + +static inline bool mt7601u_urb_has_error(struct urb *urb) +{ + return urb->status && + urb->status != -ENOENT && + urb->status != -ECONNRESET && + urb->status != -ESHUTDOWN; +} + +bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len, + struct mt7601u_dma_buf *buf); +void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf); +int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx, + struct mt7601u_dma_buf *buf, gfp_t gfp, + usb_complete_t complete_fn, void *context); +void mt7601u_complete_urb(struct urb *urb); + +int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, + const u8 direction, const u16 val, const u16 offset, + void *buf, const size_t buflen); +void mt7601u_vendor_reset(struct mt7601u_dev *dev); +int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req, + const u16 offset, const u32 val); + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/util.c b/drivers/net/wireless/mediatek/mt7601u/util.c new file mode 100644 index 000000000000..7c1787c1ddcd --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/util.c @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "mt7601u.h" + +void mt76_remove_hdr_pad(struct sk_buff *skb) +{ + int len = ieee80211_get_hdrlen_from_skb(skb); + + memmove(skb->data + 2, skb->data, len); + skb_pull(skb, 2); +} + +int mt76_insert_hdr_pad(struct sk_buff *skb) +{ + int len = ieee80211_get_hdrlen_from_skb(skb); + int ret; + + if (len % 4 == 0) + return 0; + + ret = skb_cow(skb, 2); + if (ret) + return ret; + + skb_push(skb, 2); + memmove(skb->data, skb->data + 2, len); + + skb->data[len] = 0; + skb->data[len + 1] = 0; + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt7601u/util.h b/drivers/net/wireless/mediatek/mt7601u/util.h new file mode 100644 index 000000000000..b89140bf1210 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/util.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2004 - 2009 Ivo van Doorn + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT76_UTIL_H +#define __MT76_UTIL_H + +/* + * Power of two check, this will check + * if the mask that has been given contains and contiguous set of bits. + * Note that we cannot use the is_power_of_2() function since this + * check must be done at compile-time. + */ +#define is_power_of_two(x) ( !((x) & ((x)-1)) ) +#define low_bit_mask(x) ( ((x)-1) & ~(x) ) +#define is_valid_mask(x) is_power_of_two(1LU + (x) + low_bit_mask(x)) + +/* + * Macros to find first set bit in a variable. + * These macros behave the same as the __ffs() functions but + * the most important difference that this is done during + * compile-time rather then run-time. + */ +#define compile_ffs2(__x) \ + __builtin_choose_expr(((__x) & 0x1), 0, 1) + +#define compile_ffs4(__x) \ + __builtin_choose_expr(((__x) & 0x3), \ + (compile_ffs2((__x))), \ + (compile_ffs2((__x) >> 2) + 2)) + +#define compile_ffs8(__x) \ + __builtin_choose_expr(((__x) & 0xf), \ + (compile_ffs4((__x))), \ + (compile_ffs4((__x) >> 4) + 4)) + +#define compile_ffs16(__x) \ + __builtin_choose_expr(((__x) & 0xff), \ + (compile_ffs8((__x))), \ + (compile_ffs8((__x) >> 8) + 8)) + +#define compile_ffs32(__x) \ + __builtin_choose_expr(((__x) & 0xffff), \ + (compile_ffs16((__x))), \ + (compile_ffs16((__x) >> 16) + 16)) + +/* + * This macro will check the requirements for the FIELD{8,16,32} macros + * The mask should be a constant non-zero contiguous set of bits which + * does not exceed the given typelimit. 
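+ * For example 0x0000ff00 is a valid mask (a single contiguous run of
+ * bits) while 0xf0f0 would trip the BUILD_BUG_ON() in FIELD_CHECK().
+ * With the former, compile_ffs32() evaluates to 8 at build time, so
+ * MT76_SET(0x0000ff00, 0x12) yields 0x1200 and
+ * MT76_GET(0x0000ff00, 0x1234) yields 0x12.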
+ */ +#define FIELD_CHECK(__mask) \ + BUILD_BUG_ON(!(__mask) || !is_valid_mask(__mask)) + +#define MT76_SET(_mask, _val) \ + ({ \ + FIELD_CHECK(_mask); \ + (((u32) (_val)) << compile_ffs32(_mask)) & _mask; \ + }) + +#define MT76_GET(_mask, _val) \ + ({ \ + FIELD_CHECK(_mask); \ + (u32) (((_val) & _mask) >> compile_ffs32(_mask)); \ + }) + +#endif diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/mwifiex/11h.c index 3ab87a855122..65cd461c88db 100644 --- a/drivers/net/wireless/mwifiex/11h.c +++ b/drivers/net/wireless/mwifiex/11h.c @@ -134,8 +134,8 @@ void mwifiex_dfs_cac_work_queue(struct work_struct *work) chandef = priv->dfs_chandef; if (priv->wdev.cac_started) { - dev_dbg(priv->adapter->dev, - "CAC timer finished; No radar detected\n"); + mwifiex_dbg(priv->adapter, MSG, + "CAC timer finished; No radar detected\n"); cfg80211_cac_event(priv->netdev, &chandef, NL80211_RADAR_CAC_FINISHED, GFP_KERNEL); @@ -161,9 +161,9 @@ int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv, cr_req->chan_desc.chan_width = radar_params->chandef->width; cr_req->msec_dwell_time = cpu_to_le32(radar_params->cac_time_ms); - dev_dbg(priv->adapter->dev, - "11h: issuing DFS Radar check for channel=%d\n", - radar_params->chandef->chan->hw_value); + mwifiex_dbg(priv->adapter, MSG, + "11h: issuing DFS Radar check for channel=%d\n", + radar_params->chandef->chan->hw_value); return 0; } @@ -174,8 +174,8 @@ int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv, void mwifiex_abort_cac(struct mwifiex_private *priv) { if (priv->wdev.cac_started) { - dev_dbg(priv->adapter->dev, - "Aborting delayed work for CAC.\n"); + mwifiex_dbg(priv->adapter, MSG, + "Aborting delayed work for CAC.\n"); cancel_delayed_work_sync(&priv->dfs_cac_work); cfg80211_cac_event(priv->netdev, &priv->dfs_chandef, NL80211_RADAR_CAC_ABORTED, GFP_KERNEL); @@ -199,7 +199,8 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv, sizeof(u32)); if (le32_to_cpu(rpt_event->result) != HostCmd_RESULT_OK) { - dev_err(priv->adapter->dev, "Error in channel report event\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Error in channel report event\n"); return -1; } @@ -212,8 +213,8 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv, switch (le16_to_cpu(rpt->header.type)) { case TLV_TYPE_CHANRPT_11H_BASIC: if (rpt->map.radar) { - dev_notice(priv->adapter->dev, - "RADAR Detected on channel %d!\n", + mwifiex_dbg(priv->adapter, MSG, + "RADAR Detected on channel %d!\n", priv->dfs_chandef.chan->hw_value); cancel_delayed_work_sync(&priv->dfs_cac_work); cfg80211_cac_event(priv->netdev, @@ -242,16 +243,17 @@ int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv, rdr_event = (void *)(skb->data + sizeof(u32)); if (le32_to_cpu(rdr_event->passed)) { - dev_notice(priv->adapter->dev, - "radar detected; indicating kernel\n"); + mwifiex_dbg(priv->adapter, MSG, + "radar detected; indicating kernel\n"); cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef, GFP_KERNEL); - dev_dbg(priv->adapter->dev, "regdomain: %d\n", - rdr_event->reg_domain); - dev_dbg(priv->adapter->dev, "radar detection type: %d\n", - rdr_event->det_type); + mwifiex_dbg(priv->adapter, MSG, "regdomain: %d\n", + rdr_event->reg_domain); + mwifiex_dbg(priv->adapter, MSG, "radar detection type: %d\n", + rdr_event->det_type); } else { - dev_dbg(priv->adapter->dev, "false radar detection event!\n"); + mwifiex_dbg(priv->adapter, ERROR, + "false radar detection event!\n"); } return 0; @@ -276,20 +278,20 @@ void 
mwifiex_dfs_chan_sw_work_queue(struct work_struct *work) bss_cfg = &priv->bss_cfg; if (!bss_cfg->beacon_period) { - dev_err(priv->adapter->dev, - "channel switch: AP already stopped\n"); + mwifiex_dbg(priv->adapter, ERROR, + "channel switch: AP already stopped\n"); return; } mwifiex_uap_set_channel(bss_cfg, priv->dfs_chandef); if (mwifiex_config_start_uap(priv, bss_cfg)) { - dev_dbg(priv->adapter->dev, - "Failed to start AP after channel switch\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to start AP after channel switch\n"); return; } - dev_notice(priv->adapter->dev, - "indicating channel switch completion to kernel\n"); + mwifiex_dbg(priv->adapter, MSG, + "indicating channel switch completion to kernel\n"); cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef); } diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c index 433bd6837c79..8422986cd7a9 100644 --- a/drivers/net/wireless/mwifiex/11n.c +++ b/drivers/net/wireless/mwifiex/11n.c @@ -42,7 +42,7 @@ int mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type, priv->wdev.wiphy->bands[radio_type]; if (WARN_ON_ONCE(!sband)) { - dev_err(priv->adapter->dev, "Invalid radio type!\n"); + mwifiex_dbg(priv->adapter, ERROR, "Invalid radio type!\n"); return -EINVAL; } @@ -184,7 +184,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv, tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr); if (tx_ba_tbl) { - dev_dbg(priv->adapter->dev, "info: BA stream complete\n"); + mwifiex_dbg(priv->adapter, EVENT, "info: BA stream complete\n"); tx_ba_tbl->ba_status = BA_SETUP_COMPLETE; if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) && priv->add_ba_param.tx_amsdu && @@ -197,7 +197,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv, ra_list->ba_status = BA_SETUP_COMPLETE; } } else { - dev_err(priv->adapter->dev, "BA stream not created\n"); + mwifiex_dbg(priv->adapter, ERROR, "BA stream not created\n"); } return 0; @@ -224,7 +224,8 @@ int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv, tx_buf->action = cpu_to_le16(action); switch (action) { case HostCmd_ACT_GEN_SET: - dev_dbg(priv->adapter->dev, "cmd: set tx_buf=%d\n", *buf_size); + mwifiex_dbg(priv->adapter, CMD, + "cmd: set tx_buf=%d\n", *buf_size); tx_buf->buff_size = cpu_to_le16(*buf_size); break; case HostCmd_ACT_GEN_GET: @@ -466,7 +467,8 @@ void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv, mwifiex_is_tx_ba_stream_ptr_valid(priv, tx_ba_tsr_tbl)) return; - dev_dbg(priv->adapter->dev, "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl); + mwifiex_dbg(priv->adapter, INFO, + "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl); list_del(&tx_ba_tsr_tbl->list); @@ -563,7 +565,7 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac) unsigned long flags; u16 block_ack_param_set; - dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid); + mwifiex_dbg(priv->adapter, CMD, "cmd: %s: tid %d\n", __func__, tid); if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && @@ -575,9 +577,9 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac) sta_ptr = mwifiex_get_sta_entry(priv, peer_mac); if (!sta_ptr) { spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); - dev_warn(priv->adapter->dev, - "BA setup with unknown TDLS peer %pM!\n", - peer_mac); + mwifiex_dbg(priv->adapter, ERROR, + "BA setup with unknown TDLS peer %pM!\n", + peer_mac); return -1; } if (sta_ptr->is_11ac_enabled) @@ -706,8 +708,8 @@ int 
mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv, spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags); list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) { rx_reo_tbl->tid = (u16) tx_ba_tsr_tbl->tid; - dev_dbg(priv->adapter->dev, "data: %s tid=%d\n", - __func__, rx_reo_tbl->tid); + mwifiex_dbg(priv->adapter, DATA, "data: %s tid=%d\n", + __func__, rx_reo_tbl->tid); memcpy(rx_reo_tbl->ra, tx_ba_tsr_tbl->ra, ETH_ALEN); rx_reo_tbl->amsdu = tx_ba_tsr_tbl->amsdu; rx_reo_tbl++; diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c index 6183e255e62a..f7c717253a66 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/mwifiex/11n_aggr.c @@ -187,7 +187,6 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size, GFP_ATOMIC | GFP_DMA); if (!skb_aggr) { - dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); return -1; @@ -297,13 +296,13 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); - dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); + mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n"); break; case -1: if (adapter->iface_type != MWIFIEX_PCIE) adapter->data_sent = false; - dev_err(adapter->dev, "%s: host_to_card failed: %#x\n", - __func__, ret); + mwifiex_dbg(adapter, ERROR, "%s: host_to_card failed: %#x\n", + __func__, ret); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb_aggr, 1, ret); return 0; diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c index f75f8acfaca0..39d7a957674c 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c @@ -51,8 +51,8 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv, rx_skb = __skb_dequeue(&list); ret = mwifiex_recv_packet(priv, rx_skb); if (ret == -1) - dev_err(priv->adapter->dev, - "Rx of A-MSDU failed"); + mwifiex_dbg(priv->adapter, ERROR, + "Rx of A-MSDU failed"); } return 0; } @@ -304,7 +304,7 @@ mwifiex_flush_data(unsigned long context) if (seq_num < 0) return; - dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", seq_num); + mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num); start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1); mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr, start_win); @@ -367,8 +367,9 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, } spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); - dev_dbg(priv->adapter->dev, "info: last_seq=%d start_win=%d\n", - last_seq, new_node->start_win); + mwifiex_dbg(priv->adapter, INFO, + "info: last_seq=%d start_win=%d\n", + last_seq, new_node->start_win); if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM && last_seq >= new_node->start_win) { @@ -382,8 +383,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, GFP_KERNEL); if (!new_node->rx_reorder_ptr) { kfree((u8 *) new_node); - dev_err(priv->adapter->dev, - "%s: failed to alloc reorder_ptr\n", __func__); + mwifiex_dbg(priv->adapter, ERROR, + "%s: failed to alloc reorder_ptr\n", __func__); return; } @@ -467,9 +468,9 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv, cmd_addba_req->peer_mac_addr); if 
(!sta_ptr) { spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); - dev_warn(priv->adapter->dev, - "BA setup with unknown TDLS peer %pM!\n", - cmd_addba_req->peer_mac_addr); + mwifiex_dbg(priv->adapter, ERROR, + "BA setup with unknown TDLS peer %pM!\n", + cmd_addba_req->peer_mac_addr); return -1; } if (sta_ptr->is_11ac_enabled) @@ -573,14 +574,14 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, } if (tbl->flags & RXREOR_FORCE_NO_DROP) { - dev_dbg(priv->adapter->dev, - "RXREOR_FORCE_NO_DROP when HS is activated\n"); + mwifiex_dbg(priv->adapter, INFO, + "RXREOR_FORCE_NO_DROP when HS is activated\n"); tbl->flags &= ~RXREOR_FORCE_NO_DROP; } else if (init_window_shift && seq_num < start_win && seq_num >= tbl->init_win) { - dev_dbg(priv->adapter->dev, - "Sender TID sequence number reset %d->%d for SSN %d\n", - start_win, seq_num, tbl->init_win); + mwifiex_dbg(priv->adapter, INFO, + "Sender TID sequence number reset %d->%d for SSN %d\n", + start_win, seq_num, tbl->init_win); tbl->start_win = start_win = seq_num; end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1); } else { @@ -668,23 +669,23 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac, else cleanup_rx_reorder_tbl = (initiator) ? false : true; - dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d initiator=%d\n", - peer_mac, tid, initiator); + mwifiex_dbg(priv->adapter, EVENT, "event: DELBA: %pM tid=%d initiator=%d\n", + peer_mac, tid, initiator); if (cleanup_rx_reorder_tbl) { tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, peer_mac); if (!tbl) { - dev_dbg(priv->adapter->dev, - "event: TID, TA not found in table\n"); + mwifiex_dbg(priv->adapter, EVENT, + "event: TID, TA not found in table\n"); return; } mwifiex_del_rx_reorder_entry(priv, tbl); } else { ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac); if (!ptx_tbl) { - dev_dbg(priv->adapter->dev, - "event: TID, RA not found in table\n"); + mwifiex_dbg(priv->adapter, EVENT, + "event: TID, RA not found in table\n"); return; } ra_list = mwifiex_wmm_get_ralist_node(priv, tid, peer_mac); @@ -721,8 +722,8 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, * the stream */ if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) { - dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n", - add_ba_rsp->peer_mac_addr, tid); + mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n", + add_ba_rsp->peer_mac_addr, tid); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, add_ba_rsp->peer_mac_addr); @@ -746,8 +747,8 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, tbl->amsdu = false; } - dev_dbg(priv->adapter->dev, - "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n", + mwifiex_dbg(priv->adapter, CMD, + "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n", add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn, win_size); return 0; diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig index aa01c9bc77f9..48edf387683e 100644 --- a/drivers/net/wireless/mwifiex/Kconfig +++ b/drivers/net/wireless/mwifiex/Kconfig @@ -12,6 +12,7 @@ config MWIFIEX_SDIO tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897" depends on MWIFIEX && MMC select FW_LOADER + select WANT_DEV_COREDUMP ---help--- This adds support for wireless adapters based on Marvell 8786/8787/8797/8887/8897 chipsets with SDIO interface. 
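The select WANT_DEV_COREDUMP added to both interface drivers hooks the new
device_dump support into the kernel's devcoredump framework. As a rough
sketch of that pattern (not mwifiex's actual dump path; the helper name and
segment handling here are illustrative assumptions), a driver captures a
memory segment and hands ownership of the buffer to the framework:

	#include <linux/device.h>
	#include <linux/devcoredump.h>
	#include <linux/string.h>
	#include <linux/vmalloc.h>

	/* Hypothetical helper: publish one captured memory segment via
	 * devcoredump. dev_coredumpv() takes ownership of the vmalloc'ed
	 * buffer and exposes it under /sys/class/devcoredump/ until
	 * userspace reads or discards it.
	 */
	static void example_dev_dump(struct device *dev, const void *seg,
				     size_t len)
	{
		void *buf = vmalloc(len);

		if (!buf)
			return;
		memcpy(buf, seg, len);
		dev_coredumpv(dev, buf, len, GFP_KERNEL);
	}

Userspace can then pull the dump with a plain read, e.g.
cat /sys/class/devcoredump/devcd*/data.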
@@ -23,6 +24,7 @@ config MWIFIEX_PCIE tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897" depends on MWIFIEX && PCI select FW_LOADER + select WANT_DEV_COREDUMP ---help--- This adds support for wireless adapters based on Marvell 8766/8897 chipsets with PCIe interface. diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README index 31928caeeed2..2f0f9b5609d0 100644 --- a/drivers/net/wireless/mwifiex/README +++ b/drivers/net/wireless/mwifiex/README @@ -230,9 +230,9 @@ getlog cat getlog -fw_dump - This command is used to dump firmware memory into files. - Separate file will be created for each memory segment. +device_dump + This command is used to dump driver information and firmware memory + segments. Usage: cat fw_dump diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index bf9020ff2d33..4eecedadefbf 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -104,11 +104,11 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev, const u8 *peer_mac = pairwise ? mac_addr : bc_mac; if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) { - wiphy_err(wiphy, "deleting the crypto keys\n"); + mwifiex_dbg(priv->adapter, ERROR, "deleting the crypto keys\n"); return -EFAULT; } - wiphy_dbg(wiphy, "info: crypto keys deleted\n"); + mwifiex_dbg(priv->adapter, INFO, "info: crypto keys deleted\n"); return 0; } @@ -163,7 +163,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev); if (!buf || !len) { - wiphy_err(wiphy, "invalid buffer and length\n"); + mwifiex_dbg(priv->adapter, ERROR, "invalid buffer and length\n"); return -EFAULT; } @@ -172,8 +172,8 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, ieee80211_is_probe_resp(mgmt->frame_control)) { /* Since we support offload probe resp, we need to skip probe * resp in AP or GO mode */ - wiphy_dbg(wiphy, - "info: skip to send probe resp in AP or GO mode\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: skip to send probe resp in AP or GO mode\n"); return 0; } @@ -183,7 +183,8 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, pkt_len + sizeof(pkt_len)); if (!skb) { - wiphy_err(wiphy, "allocate skb failed for management frame\n"); + mwifiex_dbg(priv->adapter, ERROR, + "allocate skb failed for management frame\n"); return -ENOMEM; } @@ -206,7 +207,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, mwifiex_queue_tx_pkt(priv, skb); - wiphy_dbg(wiphy, "info: management frame transmitted\n"); + mwifiex_dbg(priv->adapter, INFO, "info: management frame transmitted\n"); return 0; } @@ -231,7 +232,7 @@ mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy, mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG, HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask, false); - wiphy_dbg(wiphy, "info: mgmt frame registered\n"); + mwifiex_dbg(priv->adapter, INFO, "info: mgmt frame registered\n"); } } @@ -248,13 +249,14 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy, int ret; if (!chan || !cookie) { - wiphy_err(wiphy, "Invalid parameter for ROC\n"); + mwifiex_dbg(priv->adapter, ERROR, "Invalid parameter for ROC\n"); return -EINVAL; } if (priv->roc_cfg.cookie) { - wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llx\n", - priv->roc_cfg.cookie); + mwifiex_dbg(priv->adapter, INFO, + "info: ongoing ROC, cookie = 0x%llx\n", + priv->roc_cfg.cookie); return 
-EBUSY; } @@ -269,7 +271,8 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy, cfg80211_ready_on_channel(wdev, *cookie, chan, duration, GFP_ATOMIC); - wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie); + mwifiex_dbg(priv->adapter, INFO, + "info: ROC, cookie = 0x%llx\n", *cookie); } return ret; @@ -298,7 +301,8 @@ mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg)); - wiphy_dbg(wiphy, "info: cancel ROC, cookie = 0x%llx\n", cookie); + mwifiex_dbg(priv->adapter, INFO, + "info: cancel ROC, cookie = 0x%llx\n", cookie); } return ret; @@ -344,8 +348,8 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy, u32 ps_mode; if (timeout) - wiphy_dbg(wiphy, - "info: ignore timeout value for IEEE Power Save\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: ignore timeout value for IEEE Power Save\n"); ps_mode = enabled; @@ -370,7 +374,7 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev, priv->wep_key_curr_index = key_index; } else if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, NULL, 0)) { - wiphy_err(wiphy, "set default Tx key index\n"); + mwifiex_dbg(priv->adapter, ERROR, "set default Tx key index\n"); return -EFAULT; } @@ -407,7 +411,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev, if (mwifiex_set_encode(priv, params, params->key, params->key_len, key_index, peer_mac, 0)) { - wiphy_err(wiphy, "crypto keys added\n"); + mwifiex_dbg(priv->adapter, ERROR, "crypto keys added\n"); return -EFAULT; } @@ -442,7 +446,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy) band = mwifiex_band_to_radio_type(adapter->config_bands); if (!wiphy->bands[band]) { - wiphy_err(wiphy, "11D: setting domain info in FW\n"); + mwifiex_dbg(adapter, ERROR, + "11D: setting domain info in FW\n"); return -1; } @@ -493,7 +498,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy) if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO, HostCmd_ACT_GEN_SET, 0, NULL, false)) { - wiphy_err(wiphy, "11D: setting domain info in FW\n"); + mwifiex_dbg(adapter, INFO, + "11D: setting domain info in FW\n"); return -1; } @@ -516,9 +522,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy, struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); struct mwifiex_private *priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); - - wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for %c%c\n", - request->alpha2[0], request->alpha2[1]); + mwifiex_dbg(adapter, INFO, + "info: cfg80211 regulatory domain callback for %c%c\n", + request->alpha2[0], request->alpha2[1]); switch (request->initiator) { case NL80211_REGDOM_SET_BY_DRIVER: @@ -527,8 +533,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy, case NL80211_REGDOM_SET_BY_COUNTRY_IE: break; default: - wiphy_err(wiphy, "unknown regdom initiator: %d\n", - request->initiator); + mwifiex_dbg(adapter, ERROR, + "unknown regdom initiator: %d\n", + request->initiator); return; } @@ -597,8 +604,8 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) switch (priv->bss_role) { case MWIFIEX_BSS_ROLE_UAP: if (priv->bss_started) { - dev_err(adapter->dev, - "cannot change wiphy params when bss started"); + mwifiex_dbg(adapter, ERROR, + "cannot change wiphy params when bss started"); return -EINVAL; } @@ -622,15 +629,16 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) kfree(bss_cfg); if (ret) { - wiphy_err(wiphy, "Failed to set wiphy phy params\n"); + 
mwifiex_dbg(adapter, ERROR, + "Failed to set wiphy phy params\n"); return ret; } break; case MWIFIEX_BSS_ROLE_STA: if (priv->media_connected) { - dev_err(adapter->dev, - "cannot change wiphy params when connected"); + mwifiex_dbg(adapter, ERROR, + "cannot change wiphy params when connected"); return -EINVAL; } if (changed & WIPHY_PARAM_RTS_THRESHOLD) { @@ -724,8 +732,8 @@ static int mwifiex_deinit_priv_params(struct mwifiex_private *priv) if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG, HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask, false)) { - dev_warn(priv->adapter->dev, - "could not unregister mgmt frame rx\n"); + mwifiex_dbg(adapter, ERROR, + "could not unregister mgmt frame rx\n"); return -1; } @@ -789,9 +797,9 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, priv->bss_role = MWIFIEX_BSS_ROLE_UAP; break; default: - dev_err(priv->adapter->dev, - "%s: changing to %d not supported\n", - dev->name, type); + mwifiex_dbg(adapter, ERROR, + "%s: changing to %d not supported\n", + dev->name, type); return -EOPNOTSUPP; } @@ -824,12 +832,13 @@ mwifiex_change_vif_to_p2p(struct net_device *dev, if (adapter->curr_iface_comb.p2p_intf == adapter->iface_limit.p2p_intf) { - dev_err(adapter->dev, - "cannot create multiple P2P ifaces\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create multiple P2P ifaces\n"); return -1; } - dev_dbg(priv->adapter->dev, "%s: changing role to p2p\n", dev->name); + mwifiex_dbg(adapter, INFO, + "%s: changing role to p2p\n", dev->name); if (mwifiex_deinit_priv_params(priv)) return -1; @@ -846,9 +855,9 @@ mwifiex_change_vif_to_p2p(struct net_device *dev, return -EFAULT; break; default: - dev_err(priv->adapter->dev, - "%s: changing to %d not supported\n", - dev->name, type); + mwifiex_dbg(adapter, ERROR, + "%s: changing to %d not supported\n", + dev->name, type); return -EOPNOTSUPP; } @@ -897,17 +906,17 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev, curr_iftype != NL80211_IFTYPE_P2P_GO) && (adapter->curr_iface_comb.sta_intf == adapter->iface_limit.sta_intf)) { - dev_err(adapter->dev, - "cannot create multiple station/adhoc ifaces\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create multiple station/adhoc ifaces\n"); return -1; } if (type == NL80211_IFTYPE_STATION) - dev_notice(adapter->dev, - "%s: changing role to station\n", dev->name); + mwifiex_dbg(adapter, INFO, + "%s: changing role to station\n", dev->name); else - dev_notice(adapter->dev, - "%s: changing role to adhoc\n", dev->name); + mwifiex_dbg(adapter, INFO, + "%s: changing role to adhoc\n", dev->name); if (mwifiex_deinit_priv_params(priv)) return -1; @@ -954,12 +963,13 @@ mwifiex_change_vif_to_ap(struct net_device *dev, if (adapter->curr_iface_comb.uap_intf == adapter->iface_limit.uap_intf) { - dev_err(adapter->dev, - "cannot create multiple AP ifaces\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create multiple AP ifaces\n"); return -1; } - dev_notice(adapter->dev, "%s: changing role to AP\n", dev->name); + mwifiex_dbg(adapter, INFO, + "%s: changing role to AP\n", dev->name); if (mwifiex_deinit_priv_params(priv)) return -1; @@ -1020,12 +1030,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, return mwifiex_change_vif_to_ap(dev, curr_iftype, type, flags, params); case NL80211_IFTYPE_UNSPECIFIED: - wiphy_warn(wiphy, "%s: kept type as IBSS\n", dev->name); + mwifiex_dbg(priv->adapter, INFO, + "%s: kept type as IBSS\n", dev->name); case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */ return 0; default: - wiphy_err(wiphy, "%s: changing to %d not supported\n", - dev->name, type); 
+ mwifiex_dbg(priv->adapter, ERROR, + "%s: changing to %d not supported\n", + dev->name, type); return -EOPNOTSUPP; } break; @@ -1048,12 +1060,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, return mwifiex_change_vif_to_ap(dev, curr_iftype, type, flags, params); case NL80211_IFTYPE_UNSPECIFIED: - wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name); + mwifiex_dbg(priv->adapter, INFO, + "%s: kept type as STA\n", dev->name); case NL80211_IFTYPE_STATION: /* This shouldn't happen */ return 0; default: - wiphy_err(wiphy, "%s: changing to %d not supported\n", - dev->name, type); + mwifiex_dbg(priv->adapter, ERROR, + "%s: changing to %d not supported\n", + dev->name, type); return -EOPNOTSUPP; } break; @@ -1070,12 +1084,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, return mwifiex_change_vif_to_p2p(dev, curr_iftype, type, flags, params); case NL80211_IFTYPE_UNSPECIFIED: - wiphy_warn(wiphy, "%s: kept type as AP\n", dev->name); + mwifiex_dbg(priv->adapter, INFO, + "%s: kept type as AP\n", dev->name); case NL80211_IFTYPE_AP: /* This shouldn't happen */ return 0; default: - wiphy_err(wiphy, "%s: changing to %d not supported\n", - dev->name, type); + mwifiex_dbg(priv->adapter, ERROR, + "%s: changing to %d not supported\n", + dev->name, type); return -EOPNOTSUPP; } break; @@ -1100,19 +1116,22 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, return mwifiex_change_vif_to_ap(dev, curr_iftype, type, flags, params); case NL80211_IFTYPE_UNSPECIFIED: - wiphy_warn(wiphy, "%s: kept type as P2P\n", dev->name); + mwifiex_dbg(priv->adapter, INFO, + "%s: kept type as P2P\n", dev->name); case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: return 0; default: - wiphy_err(wiphy, "%s: changing to %d not supported\n", - dev->name, type); + mwifiex_dbg(priv->adapter, ERROR, + "%s: changing to %d not supported\n", + dev->name, type); return -EOPNOTSUPP; } break; default: - wiphy_err(wiphy, "%s: unknown iftype: %d\n", - dev->name, dev->ieee80211_ptr->iftype); + mwifiex_dbg(priv->adapter, ERROR, + "%s: unknown iftype: %d\n", + dev->name, dev->ieee80211_ptr->iftype); return -EOPNOTSUPP; } @@ -1206,12 +1225,14 @@ mwifiex_dump_station_info(struct mwifiex_private *priv, /* Get signal information from the firmware */ if (mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO, HostCmd_ACT_GEN_GET, 0, NULL, true)) { - dev_err(priv->adapter->dev, "failed to get signal information\n"); + mwifiex_dbg(priv->adapter, ERROR, + "failed to get signal information\n"); return -EFAULT; } if (mwifiex_drv_get_data_rate(priv, &rate)) { - dev_err(priv->adapter->dev, "getting data rate\n"); + mwifiex_dbg(priv->adapter, ERROR, + "getting data rate error\n"); return -EFAULT; } @@ -1295,7 +1316,7 @@ mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev, struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats; enum ieee80211_band band; - dev_dbg(priv->adapter->dev, "dump_survey idx=%d\n", idx); + mwifiex_dbg(priv->adapter, DUMP, "dump_survey idx=%d\n", idx); memset(survey, 0, sizeof(struct survey_info)); @@ -1472,8 +1493,8 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct mwifiex_adapter *adapter = priv->adapter; if (!priv->media_connected) { - dev_err(adapter->dev, - "Can not set Tx data rate in disconnected state\n"); + mwifiex_dbg(adapter, ERROR, + "Can not set Tx data rate in disconnected state\n"); return -EINVAL; } @@ -1556,17 +1577,20 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy, struct mwifiex_private *priv = 
mwifiex_netdev_get_priv(dev); if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) { - wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__); + mwifiex_dbg(priv->adapter, ERROR, + "%s: bss_type mismatched\n", __func__); return -EINVAL; } if (!priv->bss_started) { - wiphy_err(wiphy, "%s: bss not started\n", __func__); + mwifiex_dbg(priv->adapter, ERROR, + "%s: bss not started\n", __func__); return -EINVAL; } if (mwifiex_set_mgmt_ies(priv, data)) { - wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__); + mwifiex_dbg(priv->adapter, ERROR, + "%s: setting mgmt ies failed\n", __func__); return -EFAULT; } @@ -1594,7 +1618,8 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, if (!params->mac || is_broadcast_ether_addr(params->mac)) return 0; - wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac); + mwifiex_dbg(priv->adapter, INFO, "%s: mac address %pM\n", + __func__, params->mac); eth_zero_addr(deauth_mac); @@ -1687,14 +1712,16 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) mwifiex_abort_cac(priv); if (mwifiex_del_mgmt_ies(priv)) - wiphy_err(wiphy, "Failed to delete mgmt IEs!\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to delete mgmt IEs!\n"); priv->ap_11n_enabled = 0; memset(&priv->bss_cfg, 0, sizeof(priv->bss_cfg)); if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP, HostCmd_ACT_GEN_SET, 0, NULL, true)) { - wiphy_err(wiphy, "Failed to stop the BSS\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to stop the BSS\n"); return -1; } @@ -1756,7 +1783,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, if (mwifiex_set_secure_params(priv, bss_cfg, params)) { kfree(bss_cfg); - wiphy_err(wiphy, "Failed to parse secuirty parameters!\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to parse secuirty parameters!\n"); return -1; } @@ -1778,17 +1806,19 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, if (mwifiex_is_11h_active(priv) && !cfg80211_chandef_dfs_required(wiphy, ¶ms->chandef, priv->bss_mode)) { - dev_dbg(priv->adapter->dev, "Disable 11h extensions in FW\n"); + mwifiex_dbg(priv->adapter, INFO, + "Disable 11h extensions in FW\n"); if (mwifiex_11h_activate(priv, false)) { - dev_err(priv->adapter->dev, - "Failed to disable 11h extensions!!"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to disable 11h extensions!!"); return -1; } priv->state_11h.is_11h_active = true; } if (mwifiex_config_start_uap(priv, bss_cfg)) { - wiphy_err(wiphy, "Failed to start AP\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to start AP\n"); kfree(bss_cfg); return -1; } @@ -1816,8 +1846,9 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; - wiphy_dbg(wiphy, "info: successfully disconnected from %pM:" - " reason code %d\n", priv->cfg_bssid, reason_code); + mwifiex_dbg(priv->adapter, MSG, + "info: successfully disconnected from %pM:\t" + "reason code %d\n", priv->cfg_bssid, reason_code); eth_zero_addr(priv->cfg_bssid); priv->hs2_enabled = false; @@ -1899,13 +1930,13 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, req_ssid.ssid_len = ssid_len; if (ssid_len > IEEE80211_MAX_SSID_LEN) { - dev_err(priv->adapter->dev, "invalid SSID - aborting\n"); + mwifiex_dbg(priv->adapter, ERROR, "invalid SSID - aborting\n"); return -EINVAL; } memcpy(req_ssid.ssid, ssid, ssid_len); if (!req_ssid.ssid_len || req_ssid.ssid[0] < 0x20) { - dev_err(priv->adapter->dev, "invalid SSID - aborting\n"); + mwifiex_dbg(priv->adapter, ERROR, 
"invalid SSID - aborting\n"); return -EINVAL; } @@ -1959,9 +1990,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, if (sme->key) { if (mwifiex_is_alg_wep(priv->sec_info.encryption_mode)) { - dev_dbg(priv->adapter->dev, - "info: setting wep encryption" - " with key len %d\n", sme->key_len); + mwifiex_dbg(priv->adapter, INFO, + "info: setting wep encryption\t" + "with key len %d\n", sme->key_len); priv->wep_key_curr_index = sme->key_idx; ret = mwifiex_set_encode(priv, NULL, sme->key, sme->key_len, sme->key_idx, @@ -1978,7 +2009,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, if (is_scanning_required) { /* Do specific SSID scanning */ if (mwifiex_request_scan(priv, &req_ssid)) { - dev_err(priv->adapter->dev, "scan error\n"); + mwifiex_dbg(priv->adapter, ERROR, "scan error\n"); return -EFAULT; } } @@ -1997,15 +2028,15 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, if (!bss) { if (is_scanning_required) { - dev_warn(priv->adapter->dev, - "assoc: requested bss not found in scan results\n"); + mwifiex_dbg(priv->adapter, WARN, + "assoc: requested bss not found in scan results\n"); break; } is_scanning_required = 1; } else { - dev_dbg(priv->adapter->dev, - "info: trying to associate to '%s' bssid %pM\n", - (char *) req_ssid.ssid, bss->bssid); + mwifiex_dbg(priv->adapter, MSG, + "info: trying to associate to '%s' bssid %pM\n", + (char *)req_ssid.ssid, bss->bssid); memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN); break; } @@ -2041,26 +2072,29 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, int ret; if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) { - wiphy_err(wiphy, - "%s: reject infra assoc request in non-STA role\n", - dev->name); + mwifiex_dbg(adapter, ERROR, + "%s: reject infra assoc request in non-STA role\n", + dev->name); return -EINVAL; } if (priv->wdev.current_bss) { - wiphy_warn(wiphy, "%s: already connected\n", dev->name); + mwifiex_dbg(adapter, ERROR, + "%s: already connected\n", dev->name); return -EALREADY; } if (adapter->surprise_removed || adapter->is_cmd_timedout) { - wiphy_err(wiphy, - "%s: Ignore connection. 
Card removed or FW in bad state\n", - dev->name); + mwifiex_dbg(adapter, ERROR, + "%s: Ignore connection.\t" + "Card removed or FW in bad state\n", + dev->name); return -EFAULT; } - wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n", - (char *) sme->ssid, sme->bssid); + mwifiex_dbg(adapter, INFO, + "info: Trying to associate to %s and bssid %pM\n", + (char *)sme->ssid, sme->bssid); ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, priv->bss_mode, sme->channel, sme, 0); @@ -2068,17 +2102,17 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0, NULL, 0, WLAN_STATUS_SUCCESS, GFP_KERNEL); - dev_dbg(priv->adapter->dev, - "info: associated to bssid %pM successfully\n", - priv->cfg_bssid); + mwifiex_dbg(priv->adapter, MSG, + "info: associated to bssid %pM successfully\n", + priv->cfg_bssid); if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && priv->adapter->auto_tdls && priv->bss_type == MWIFIEX_BSS_TYPE_STA) mwifiex_setup_auto_tdls_timer(priv); } else { - dev_dbg(priv->adapter->dev, - "info: association to bssid %pM failed\n", - priv->cfg_bssid); + mwifiex_dbg(priv->adapter, ERROR, + "info: association to bssid %pM failed\n", + priv->cfg_bssid); eth_zero_addr(priv->cfg_bssid); if (ret > 0) @@ -2105,7 +2139,6 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, static int mwifiex_set_ibss_params(struct mwifiex_private *priv, struct cfg80211_ibss_params *params) { - struct wiphy *wiphy = priv->wdev.wiphy; struct mwifiex_adapter *adapter = priv->adapter; int index = 0, i; u8 config_bands = 0; @@ -2162,8 +2195,10 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv, priv->adhoc_channel = ieee80211_frequency_to_channel( params->chandef.chan->center_freq); - wiphy_dbg(wiphy, "info: set ibss band %d, chan %d, chan offset %d\n", - config_bands, priv->adhoc_channel, adapter->sec_chan_offset); + mwifiex_dbg(adapter, INFO, + "info: set ibss band %d, chan %d, chan offset %d\n", + config_bands, priv->adhoc_channel, + adapter->sec_chan_offset); return 0; } @@ -2182,13 +2217,15 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, int ret = 0; if (priv->bss_mode != NL80211_IFTYPE_ADHOC) { - wiphy_err(wiphy, "request to join ibss received " - "when station is not in ibss mode\n"); + mwifiex_dbg(priv->adapter, ERROR, + "request to join ibss received\t" + "when station is not in ibss mode\n"); goto done; } - wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n", - (char *) params->ssid, params->bssid); + mwifiex_dbg(priv->adapter, MSG, + "info: trying to join to %s and bssid %pM\n", + (char *)params->ssid, params->bssid); mwifiex_set_ibss_params(priv, params); @@ -2200,12 +2237,12 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, if (!ret) { cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, params->chandef.chan, GFP_KERNEL); - dev_dbg(priv->adapter->dev, - "info: joined/created adhoc network with bssid" - " %pM successfully\n", priv->cfg_bssid); + mwifiex_dbg(priv->adapter, MSG, + "info: joined/created adhoc network with bssid\t" + "%pM successfully\n", priv->cfg_bssid); } else { - dev_dbg(priv->adapter->dev, - "info: failed creating/joining adhoc network\n"); + mwifiex_dbg(priv->adapter, ERROR, + "info: failed creating/joining adhoc network\n"); } return ret; @@ -2222,8 +2259,8 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) { struct mwifiex_private *priv = 
mwifiex_netdev_get_priv(dev); - wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n", - priv->cfg_bssid); + mwifiex_dbg(priv->adapter, MSG, "info: disconnecting from essid %pM\n", + priv->cfg_bssid); if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; @@ -2250,13 +2287,15 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct ieee_types_header *ie; struct mwifiex_user_scan_cfg *user_scan_cfg; - wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name); + mwifiex_dbg(priv->adapter, CMD, + "info: received scan request on %s\n", dev->name); /* Block scan request if scan operation or scan cleanup when interface * is disabled is in process */ if (priv->scan_request || priv->scan_aborting) { - dev_err(priv->adapter->dev, "cmd: Scan already in process..\n"); + mwifiex_dbg(priv->adapter, WARN, + "cmd: Scan already in process..\n"); return -EBUSY; } @@ -2308,7 +2347,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, ret = mwifiex_scan_networks(priv, user_scan_cfg); kfree(user_scan_cfg); if (ret) { - dev_err(priv->adapter->dev, "scan failed: %d\n", ret); + mwifiex_dbg(priv->adapter, ERROR, + "scan failed: %d\n", ret); priv->scan_aborting = false; priv->scan_request = NULL; return ret; @@ -2454,15 +2494,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_ADHOC: if (adapter->curr_iface_comb.sta_intf == adapter->iface_limit.sta_intf) { - wiphy_err(wiphy, - "cannot create multiple sta/adhoc ifaces\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create multiple sta/adhoc ifaces\n"); return ERR_PTR(-EINVAL); } priv = mwifiex_get_unused_priv(adapter); if (!priv) { - wiphy_err(wiphy, - "could not get free private struct\n"); + mwifiex_dbg(adapter, ERROR, + "could not get free private struct\n"); return ERR_PTR(-EFAULT); } @@ -2484,15 +2524,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_AP: if (adapter->curr_iface_comb.uap_intf == adapter->iface_limit.uap_intf) { - wiphy_err(wiphy, - "cannot create multiple AP ifaces\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create multiple AP ifaces\n"); return ERR_PTR(-EINVAL); } priv = mwifiex_get_unused_priv(adapter); if (!priv) { - wiphy_err(wiphy, - "could not get free private struct\n"); + mwifiex_dbg(adapter, ERROR, + "could not get free private struct\n"); return ERR_PTR(-EFAULT); } @@ -2511,15 +2551,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_P2P_CLIENT: if (adapter->curr_iface_comb.p2p_intf == adapter->iface_limit.p2p_intf) { - wiphy_err(wiphy, - "cannot create multiple P2P ifaces\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create multiple P2P ifaces\n"); return ERR_PTR(-EINVAL); } priv = mwifiex_get_unused_priv(adapter); if (!priv) { - wiphy_err(wiphy, - "could not get free private struct\n"); + mwifiex_dbg(adapter, ERROR, + "could not get free private struct\n"); return ERR_PTR(-EFAULT); } @@ -2550,7 +2590,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, break; default: - wiphy_err(wiphy, "type not supported\n"); + mwifiex_dbg(adapter, ERROR, "type not supported\n"); return ERR_PTR(-EINVAL); } @@ -2558,7 +2598,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, name_assign_type, ether_setup, IEEE80211_NUM_ACS, 1); if (!dev) { - wiphy_err(wiphy, "no memory available for netdevice\n"); + mwifiex_dbg(adapter, ERROR, + "no memory available for netdevice\n"); memset(&priv->wdev, 0, sizeof(priv->wdev)); priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; priv->bss_mode = 
NL80211_IFTYPE_UNSPECIFIED; @@ -2599,7 +2640,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, /* Register network device */ if (register_netdevice(dev)) { - wiphy_err(wiphy, "cannot register virtual network device\n"); + mwifiex_dbg(adapter, ERROR, + "cannot register virtual network device\n"); free_netdev(dev); priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; priv->netdev = NULL; @@ -2613,7 +2655,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, WQ_MEM_RECLAIM | WQ_UNBOUND, 1, name); if (!priv->dfs_cac_workqueue) { - wiphy_err(wiphy, "cannot register virtual network device\n"); + mwifiex_dbg(adapter, ERROR, + "cannot allocate DFS CAC workqueue\n"); free_netdev(dev); priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; priv->netdev = NULL; @@ -2628,7 +2671,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1, name); if (!priv->dfs_chan_sw_workqueue) { - wiphy_err(wiphy, "cannot register virtual network device\n"); + mwifiex_dbg(adapter, ERROR, + "cannot allocate DFS channel switch workqueue\n"); free_netdev(dev); priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; priv->netdev = NULL; @@ -2642,7 +2686,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, sema_init(&priv->async_sem, 1); - dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name); + mwifiex_dbg(adapter, INFO, + "info: %s: Marvell 802.11 Adapter\n", dev->name); #ifdef CONFIG_DEBUG_FS mwifiex_dev_debugfs_init(priv); @@ -2661,7 +2706,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, adapter->curr_iface_comb.p2p_intf++; break; default: - wiphy_err(wiphy, "type not supported\n"); + mwifiex_dbg(adapter, ERROR, "type not supported\n"); return ERR_PTR(-EINVAL); } @@ -2721,7 +2766,8 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) adapter->curr_iface_comb.p2p_intf++; break; default: - dev_err(adapter->dev, "del_virtual_intf: type not supported\n"); + mwifiex_dbg(adapter, ERROR, + "del_virtual_intf: type not supported\n"); break; } @@ -2839,7 +2885,8 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv, if (!mwifiex_is_pattern_supported(&wowlan->patterns[i], byte_seq, MWIFIEX_MEF_MAX_BYTESEQ)) { - dev_err(priv->adapter->dev, "Pattern not supported\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Pattern not supported\n"); kfree(mef_entry); return -EOPNOTSUPP; } @@ -2954,21 +3001,22 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, mwifiex_cancel_all_pending_cmd(adapter); if (!wowlan) { - dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n"); + mwifiex_dbg(adapter, ERROR, + "None of the WOWLAN triggers enabled\n"); return 0; } priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); if (!priv->media_connected) { - dev_warn(adapter->dev, - "Can not configure WOWLAN in disconnected state\n"); + mwifiex_dbg(adapter, ERROR, + "Cannot configure WOWLAN in disconnected state\n"); return 0; } ret = mwifiex_set_mef_filter(priv, wowlan); if (ret) { - dev_err(adapter->dev, "Failed to set MEF filter\n"); + mwifiex_dbg(adapter, ERROR, "Failed to set MEF filter\n"); return ret; } @@ -2981,7 +3029,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD, &hs_cfg); if (ret) { - dev_err(adapter->dev, "Failed to set HS params\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to set HS params\n"); return ret; } } @@ -3041,7 +3090,8 @@ mwifiex_fill_coalesce_rule_info(struct 
mwifiex_private *priv, if (!mwifiex_is_pattern_supported(&crule->patterns[i], byte_seq, MWIFIEX_COALESCE_MAX_BYTESEQ)) { - dev_err(priv->adapter->dev, "Pattern not supported\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Pattern not supported\n"); return -EOPNOTSUPP; } @@ -3050,8 +3100,8 @@ mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv, pkt_type = mwifiex_get_coalesce_pkt_type(byte_seq); if (pkt_type && mrule->pkt_type) { - dev_err(priv->adapter->dev, - "Multiple packet types not allowed\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Multiple packet types not allowed\n"); return -EOPNOTSUPP; } else if (pkt_type) { mrule->pkt_type = pkt_type; @@ -3074,8 +3124,8 @@ mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv, } if (!mrule->pkt_type) { - dev_err(priv->adapter->dev, - "Packet type can not be determined\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Packet type can not be determined\n"); return -EOPNOTSUPP; } @@ -3093,8 +3143,8 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy, memset(&coalesce_cfg, 0, sizeof(coalesce_cfg)); if (!coalesce) { - dev_dbg(adapter->dev, - "Disable coalesce and reset all previous rules\n"); + mwifiex_dbg(adapter, WARN, + "Disable coalesce and reset all previous rules\n"); return mwifiex_send_cmd(priv, HostCmd_CMD_COALESCE_CFG, HostCmd_ACT_GEN_SET, 0, &coalesce_cfg, true); @@ -3105,8 +3155,8 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy, ret = mwifiex_fill_coalesce_rule_info(priv, &coalesce->rules[i], &coalesce_cfg.rule[i]); if (ret) { - dev_err(priv->adapter->dev, - "Recheck the patterns provided for rule %d\n", + mwifiex_dbg(adapter, ERROR, + "Recheck the patterns provided for rule %d\n", i + 1); return ret; } @@ -3138,9 +3188,9 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: - dev_dbg(priv->adapter->dev, - "Send TDLS Setup Request to %pM status_code=%d\n", peer, - status_code); + mwifiex_dbg(priv->adapter, MSG, + "Send TDLS Setup Request to %pM status_code=%d\n", + peer, status_code); mwifiex_add_auto_tdls_peer(priv, peer); ret = mwifiex_send_tdls_data_frame(priv, peer, action_code, dialog_token, status_code, @@ -3148,45 +3198,45 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, break; case WLAN_TDLS_SETUP_RESPONSE: mwifiex_add_auto_tdls_peer(priv, peer); - dev_dbg(priv->adapter->dev, - "Send TDLS Setup Response to %pM status_code=%d\n", - peer, status_code); + mwifiex_dbg(priv->adapter, MSG, + "Send TDLS Setup Response to %pM status_code=%d\n", + peer, status_code); ret = mwifiex_send_tdls_data_frame(priv, peer, action_code, dialog_token, status_code, extra_ies, extra_ies_len); break; case WLAN_TDLS_SETUP_CONFIRM: - dev_dbg(priv->adapter->dev, - "Send TDLS Confirm to %pM status_code=%d\n", peer, - status_code); + mwifiex_dbg(priv->adapter, MSG, + "Send TDLS Confirm to %pM status_code=%d\n", peer, + status_code); ret = mwifiex_send_tdls_data_frame(priv, peer, action_code, dialog_token, status_code, extra_ies, extra_ies_len); break; case WLAN_TDLS_TEARDOWN: - dev_dbg(priv->adapter->dev, "Send TDLS Tear down to %pM\n", - peer); + mwifiex_dbg(priv->adapter, MSG, + "Send TDLS Tear down to %pM\n", peer); ret = mwifiex_send_tdls_data_frame(priv, peer, action_code, dialog_token, status_code, extra_ies, extra_ies_len); break; case WLAN_TDLS_DISCOVERY_REQUEST: - dev_dbg(priv->adapter->dev, - "Send TDLS Discovery Request to %pM\n", peer); + mwifiex_dbg(priv->adapter, MSG, + "Send TDLS Discovery Request to %pM\n", 
peer); ret = mwifiex_send_tdls_data_frame(priv, peer, action_code, dialog_token, status_code, extra_ies, extra_ies_len); break; case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: - dev_dbg(priv->adapter->dev, - "Send TDLS Discovery Response to %pM\n", peer); + mwifiex_dbg(priv->adapter, MSG, + "Send TDLS Discovery Response to %pM\n", peer); ret = mwifiex_send_tdls_action_frame(priv, peer, action_code, dialog_token, status_code, extra_ies, extra_ies_len); break; default: - dev_warn(priv->adapter->dev, - "Unknown TDLS mgmt/action frame %pM\n", peer); + mwifiex_dbg(priv->adapter, ERROR, + "Unknown TDLS mgmt/action frame %pM\n", peer); ret = -EINVAL; break; } @@ -3208,8 +3258,8 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected)) return -ENOTSUPP; - dev_dbg(priv->adapter->dev, - "TDLS peer=%pM, oper=%d\n", peer, action); + mwifiex_dbg(priv->adapter, MSG, + "TDLS peer=%pM, oper=%d\n", peer, action); switch (action) { case NL80211_TDLS_ENABLE_LINK: @@ -3220,22 +3270,22 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, break; case NL80211_TDLS_TEARDOWN: /* shouldn't happen!*/ - dev_warn(priv->adapter->dev, - "tdls_oper: teardown from driver not supported\n"); + mwifiex_dbg(priv->adapter, ERROR, + "tdls_oper: teardown from driver not supported\n"); return -EINVAL; case NL80211_TDLS_SETUP: /* shouldn't happen!*/ - dev_warn(priv->adapter->dev, - "tdls_oper: setup from driver not supported\n"); + mwifiex_dbg(priv->adapter, ERROR, + "tdls_oper: setup from driver not supported\n"); return -EINVAL; case NL80211_TDLS_DISCOVERY_REQ: /* shouldn't happen!*/ - dev_warn(priv->adapter->dev, - "tdls_oper: discovery from driver not supported\n"); + mwifiex_dbg(priv->adapter, ERROR, + "tdls_oper: discovery from driver not supported\n"); return -EINVAL; default: - dev_err(priv->adapter->dev, - "tdls_oper: operation not supported\n"); + mwifiex_dbg(priv->adapter, ERROR, + "tdls_oper: operation not supported\n"); return -ENOTSUPP; } @@ -3268,8 +3318,8 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); if (priv->adapter->scan_processing) { - dev_err(priv->adapter->dev, - "radar detection: scan in process...\n"); + mwifiex_dbg(priv->adapter, ERROR, + "radar detection: scan in process...\n"); return -EBUSY; } @@ -3284,8 +3334,8 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, params->beacon_csa.tail, params->beacon_csa.tail_len); if (!chsw_ie) { - dev_err(priv->adapter->dev, - "Could not parse channel switch announcement IE\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Could not parse channel switch announcement IE\n"); return -EINVAL; } @@ -3297,10 +3347,12 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, } if (mwifiex_del_mgmt_ies(priv)) - wiphy_err(wiphy, "Failed to delete mgmt IEs!\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to delete mgmt IEs!\n"); if (mwifiex_set_mgmt_ies(priv, &params->beacon_csa)) { - wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__); + mwifiex_dbg(priv->adapter, ERROR, + "%s: setting mgmt ies failed\n", __func__); return -EFAULT; } @@ -3324,16 +3376,17 @@ mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy, struct mwifiex_radar_params radar_params; if (priv->adapter->scan_processing) { - dev_err(priv->adapter->dev, - "radar detection: scan already in process...\n"); + mwifiex_dbg(priv->adapter, ERROR, + "radar detection: 
scan already in process...\n"); return -EBUSY; } if (!mwifiex_is_11h_active(priv)) { - dev_dbg(priv->adapter->dev, "Enable 11h extensions in FW\n"); + mwifiex_dbg(priv->adapter, INFO, + "Enable 11h extensions in FW\n"); if (mwifiex_11h_activate(priv, true)) { - dev_err(priv->adapter->dev, - "Failed to activate 11h extensions!!"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to activate 11h extensions!!"); return -1; } priv->state_11h.is_11h_active = true; @@ -3492,7 +3545,8 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy = wiphy_new(&mwifiex_cfg80211_ops, sizeof(struct mwifiex_adapter *)); if (!wiphy) { - dev_err(adapter->dev, "%s: creating new wiphy\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: creating new wiphy\n", __func__); return -ENOMEM; } wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH; @@ -3563,20 +3617,22 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) ret = wiphy_register(wiphy); if (ret < 0) { - dev_err(adapter->dev, - "%s: wiphy_register failed: %d\n", __func__, ret); + mwifiex_dbg(adapter, ERROR, + "%s: wiphy_register failed: %d\n", __func__, ret); wiphy_free(wiphy); return ret; } if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) { - wiphy_info(wiphy, "driver hint alpha2: %2.2s\n", reg_alpha2); + mwifiex_dbg(adapter, INFO, + "driver hint alpha2: %2.2s\n", reg_alpha2); regulatory_hint(wiphy, reg_alpha2); } else { country_code = mwifiex_11d_code_2_region(adapter->region_code); if (country_code) - wiphy_info(wiphy, "ignoring F/W country code %2.2s\n", - country_code); + mwifiex_dbg(adapter, WARN, + "ignoring F/W country code %2.2s\n", + country_code); } mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB, diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c index e9df8826f124..3ddb8ec676ed 100644 --- a/drivers/net/wireless/mwifiex/cfp.c +++ b/drivers/net/wireless/mwifiex/cfp.c @@ -327,8 +327,9 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq) sband = priv->wdev.wiphy->bands[IEEE80211_BAND_5GHZ]; if (!sband) { - dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d\n", - __func__, band); + mwifiex_dbg(priv->adapter, ERROR, + "%s: cannot find cfp by band %d\n", + __func__, band); return cfp; } @@ -349,9 +350,10 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq) } } if (i == sband->n_channels) { - dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d" - " & channel=%d freq=%d\n", __func__, band, channel, - freq); + mwifiex_dbg(priv->adapter, ERROR, + "%s: cannot find cfp by band %d\t" + "& channel=%d freq=%d\n", + __func__, band, channel, freq); } else { if (!ch) return cfp; @@ -431,15 +433,17 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates) priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) { switch (adapter->config_bands) { case BAND_B: - dev_dbg(adapter->dev, "info: infra band=%d " - "supported_rates_b\n", adapter->config_bands); + mwifiex_dbg(adapter, INFO, "info: infra band=%d\t" + "supported_rates_b\n", + adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_b, sizeof(supported_rates_b)); break; case BAND_G: case BAND_G | BAND_GN: - dev_dbg(adapter->dev, "info: infra band=%d " - "supported_rates_g\n", adapter->config_bands); + mwifiex_dbg(adapter, INFO, "info: infra band=%d\t" + "supported_rates_g\n", + adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_g, sizeof(supported_rates_g)); break; @@ -449,15 +453,17 @@ u32 mwifiex_get_supported_rates(struct 
mwifiex_private *priv, u8 *rates) case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN: case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC: case BAND_B | BAND_G | BAND_GN: - dev_dbg(adapter->dev, "info: infra band=%d " - "supported_rates_bg\n", adapter->config_bands); + mwifiex_dbg(adapter, INFO, "info: infra band=%d\t" + "supported_rates_bg\n", + adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_bg, sizeof(supported_rates_bg)); break; case BAND_A: case BAND_A | BAND_G: - dev_dbg(adapter->dev, "info: infra band=%d " - "supported_rates_a\n", adapter->config_bands); + mwifiex_dbg(adapter, INFO, "info: infra band=%d\t" + "supported_rates_a\n", + adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_a, sizeof(supported_rates_a)); break; @@ -466,14 +472,16 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates) case BAND_A | BAND_AN | BAND_AAC: case BAND_A | BAND_G | BAND_AN | BAND_GN: case BAND_A | BAND_G | BAND_AN | BAND_GN | BAND_AAC: - dev_dbg(adapter->dev, "info: infra band=%d " - "supported_rates_a\n", adapter->config_bands); + mwifiex_dbg(adapter, INFO, "info: infra band=%d\t" + "supported_rates_a\n", + adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_a, sizeof(supported_rates_a)); break; case BAND_GN: - dev_dbg(adapter->dev, "info: infra band=%d " - "supported_rates_n\n", adapter->config_bands); + mwifiex_dbg(adapter, INFO, "info: infra band=%d\t" + "supported_rates_n\n", + adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_n, sizeof(supported_rates_n)); break; @@ -482,25 +490,25 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates) /* Ad-hoc mode */ switch (adapter->adhoc_start_band) { case BAND_B: - dev_dbg(adapter->dev, "info: adhoc B\n"); + mwifiex_dbg(adapter, INFO, "info: adhoc B\n"); k = mwifiex_copy_rates(rates, k, adhoc_rates_b, sizeof(adhoc_rates_b)); break; case BAND_G: case BAND_G | BAND_GN: - dev_dbg(adapter->dev, "info: adhoc G only\n"); + mwifiex_dbg(adapter, INFO, "info: adhoc G only\n"); k = mwifiex_copy_rates(rates, k, adhoc_rates_g, sizeof(adhoc_rates_g)); break; case BAND_B | BAND_G: case BAND_B | BAND_G | BAND_GN: - dev_dbg(adapter->dev, "info: adhoc BG\n"); + mwifiex_dbg(adapter, INFO, "info: adhoc BG\n"); k = mwifiex_copy_rates(rates, k, adhoc_rates_bg, sizeof(adhoc_rates_bg)); break; case BAND_A: case BAND_A | BAND_AN: - dev_dbg(adapter->dev, "info: adhoc A\n"); + mwifiex_dbg(adapter, INFO, "info: adhoc A\n"); k = mwifiex_copy_rates(rates, k, adhoc_rates_a, sizeof(adhoc_rates_a)); break; diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index c5a14ff7eb82..a1de83fd1dbe 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -62,7 +62,8 @@ mwifiex_get_cmd_node(struct mwifiex_adapter *adapter) spin_lock_irqsave(&adapter->cmd_free_q_lock, flags); if (list_empty(&adapter->cmd_free_q)) { - dev_err(adapter->dev, "GET_CMD_NODE: cmd node not available\n"); + mwifiex_dbg(adapter, ERROR, + "GET_CMD_NODE: cmd node not available\n"); spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags); return NULL; } @@ -116,7 +117,8 @@ static int mwifiex_cmd_host_cmd(struct mwifiex_private *priv, { /* Copy the HOST command to command buffer */ memcpy(cmd, pcmd_ptr->cmd, pcmd_ptr->len); - dev_dbg(priv->adapter->dev, "cmd: host cmd size = %d\n", pcmd_ptr->len); + mwifiex_dbg(priv->adapter, CMD, + "cmd: host cmd size = %d\n", pcmd_ptr->len); return 0; } @@ -147,8 +149,9 @@ 
static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, /* Sanity test */ if (host_cmd == NULL || host_cmd->size == 0) { - dev_err(adapter->dev, "DNLD_CMD: host_cmd is null" - " or cmd size is 0, not sending\n"); + mwifiex_dbg(adapter, ERROR, + "DNLD_CMD: host_cmd is null\t" + "or cmd size is 0, not sending\n"); if (cmd_node->wait_q_enabled) adapter->cmd_wait_q.status = -1; mwifiex_recycle_cmd_node(adapter, cmd_node); @@ -161,8 +164,8 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET && cmd_code != HostCmd_CMD_FUNC_SHUTDOWN && cmd_code != HostCmd_CMD_FUNC_INIT) { - dev_err(adapter->dev, - "DNLD_CMD: FW in reset state, ignore cmd %#x\n", + mwifiex_dbg(adapter, ERROR, + "DNLD_CMD: FW in reset state, ignore cmd %#x\n", cmd_code); if (cmd_node->wait_q_enabled) mwifiex_complete_cmd(adapter, cmd_node); @@ -197,10 +200,12 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, */ skb_put(cmd_node->cmd_skb, cmd_size - cmd_node->cmd_skb->len); - dev_dbg(adapter->dev, - "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", cmd_code, - le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size, - le16_to_cpu(host_cmd->seq_num)); + mwifiex_dbg(adapter, CMD, + "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", + cmd_code, + le16_to_cpu(*(__le16 *)((u8 *)host_cmd + S_DS_GEN)), + cmd_size, le16_to_cpu(host_cmd->seq_num)); + mwifiex_dbg_dump(adapter, CMD_D, "cmd buffer:", host_cmd, cmd_size); if (adapter->iface_type == MWIFIEX_USB) { tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD); @@ -221,7 +226,8 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, } if (ret == -1) { - dev_err(adapter->dev, "DNLD_CMD: host to card failed\n"); + mwifiex_dbg(adapter, ERROR, + "DNLD_CMD: host to card failed\n"); if (adapter->iface_type == MWIFIEX_USB) adapter->cmd_sent = false; if (cmd_node->wait_q_enabled) @@ -280,12 +286,14 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter) (adapter->seq_num, priv->bss_num, priv->bss_type))); - dev_dbg(adapter->dev, - "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", + mwifiex_dbg(adapter, CMD, + "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", le16_to_cpu(sleep_cfm_buf->command), le16_to_cpu(sleep_cfm_buf->action), le16_to_cpu(sleep_cfm_buf->size), le16_to_cpu(sleep_cfm_buf->seq_num)); + mwifiex_dbg_dump(adapter, CMD_D, "SLEEP_CFM buffer: ", sleep_cfm_buf, + le16_to_cpu(sleep_cfm_buf->size)); if (adapter->iface_type == MWIFIEX_USB) { sleep_cfm_tmp = @@ -311,7 +319,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter) } if (ret == -1) { - dev_err(adapter->dev, "SLEEP_CFM: failed\n"); + mwifiex_dbg(adapter, ERROR, "SLEEP_CFM: failed\n"); adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++; return -1; } @@ -362,8 +370,9 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter) for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) { cmd_array[i].skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER); if (!cmd_array[i].skb) { - dev_err(adapter->dev, "ALLOC_CMD_BUF: out of memory\n"); - return -1; + mwifiex_dbg(adapter, ERROR, + "unable to allocate command buffer\n"); + return -ENOMEM; } } @@ -386,7 +395,8 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) /* Need to check if cmd pool is allocated or not */ if (!adapter->cmd_pool) { - dev_dbg(adapter->dev, "info: FREE_CMD_BUF: cmd_pool is null\n"); + mwifiex_dbg(adapter, FATAL, + "info: FREE_CMD_BUF: cmd_pool is null\n"); return 0; } @@ -395,7 +405,8 @@ int 
mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) /* Release shared memory buffers */ for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) { if (cmd_array[i].skb) { - dev_dbg(adapter->dev, "cmd: free cmd buffer %d\n", i); + mwifiex_dbg(adapter, CMD, + "cmd: free cmd buffer %d\n", i); dev_kfree_skb_any(cmd_array[i].skb); } if (!cmd_array[i].resp_skb) @@ -409,7 +420,8 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) } /* Release struct cmd_ctrl_node */ if (adapter->cmd_pool) { - dev_dbg(adapter->dev, "cmd: free cmd pool\n"); + mwifiex_dbg(adapter, CMD, + "cmd: free cmd pool\n"); kfree(adapter->cmd_pool); adapter->cmd_pool = NULL; } @@ -459,7 +471,8 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter) rx_info->bss_type = priv->bss_type; } - dev_dbg(adapter->dev, "EVENT: cause: %#x\n", eventcause); + mwifiex_dbg(adapter, EVENT, "EVENT: cause: %#x\n", eventcause); + mwifiex_dbg_dump(adapter, EVT_D, "Event Buf:", skb->data, skb->len); if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) ret = mwifiex_process_uap_event(priv); @@ -498,28 +511,33 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, } if (adapter->is_suspended) { - dev_err(adapter->dev, "PREP_CMD: device in suspended state\n"); + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: device in suspended state\n"); return -1; } if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) { - dev_err(adapter->dev, "PREP_CMD: host entering sleep state\n"); + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: host entering sleep state\n"); return -1; } if (adapter->surprise_removed) { - dev_err(adapter->dev, "PREP_CMD: card is removed\n"); + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: card is removed\n"); return -1; } if (adapter->is_cmd_timedout) { - dev_err(adapter->dev, "PREP_CMD: FW is in bad state\n"); + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: FW is in bad state\n"); return -1; } if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) { if (cmd_no != HostCmd_CMD_FUNC_INIT) { - dev_err(adapter->dev, "PREP_CMD: FW in reset state\n"); + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: FW in reset state\n"); return -1; } } @@ -528,7 +546,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, cmd_node = mwifiex_get_cmd_node(adapter); if (!cmd_node) { - dev_err(adapter->dev, "PREP_CMD: no free cmd node\n"); + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: no free cmd node\n"); return -1; } @@ -536,7 +555,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf, sync); if (!cmd_node->cmd_skb) { - dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n"); + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: no free cmd buf\n"); return -1; } @@ -571,7 +591,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, /* Return error, since the command preparation failed */ if (ret) { - dev_err(adapter->dev, "PREP_CMD: cmd %#x preparation failed\n", + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: cmd %#x preparation failed\n", cmd_no); mwifiex_insert_cmd_to_free_q(adapter, cmd_node); return -1; @@ -626,7 +647,8 @@ void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter, mwifiex_insert_cmd_to_free_q(adapter, cmd_node); atomic_dec(&adapter->cmd_pending); - dev_dbg(adapter->dev, "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n", + mwifiex_dbg(adapter, CMD, + "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n", le16_to_cpu(host_cmd->command), atomic_read(&adapter->cmd_pending)); } @@ -648,7 +670,7 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, host_cmd = (struct 
host_cmd_ds_command *) (cmd_node->cmd_skb->data); if (!host_cmd) { - dev_err(adapter->dev, "QUEUE_CMD: host_cmd is NULL\n"); + mwifiex_dbg(adapter, ERROR, "QUEUE_CMD: host_cmd is NULL\n"); return; } @@ -673,7 +695,8 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags); atomic_inc(&adapter->cmd_pending); - dev_dbg(adapter->dev, "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n", + mwifiex_dbg(adapter, CMD, + "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n", command, atomic_read(&adapter->cmd_pending)); } @@ -699,7 +722,8 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter) /* Check if already in processing */ if (adapter->curr_cmd) { - dev_err(adapter->dev, "EXEC_NEXT_CMD: cmd in processing\n"); + mwifiex_dbg(adapter, FATAL, + "EXEC_NEXT_CMD: cmd in processing\n"); return -1; } @@ -721,8 +745,9 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter) priv = cmd_node->priv; if (adapter->ps_state != PS_STATE_AWAKE) { - dev_err(adapter->dev, "%s: cannot send cmd in sleep state," - " this should not happen\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: cannot send cmd in sleep state,\t" + "this should not happen\n", __func__); spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); return ret; } @@ -772,8 +797,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) { resp = (struct host_cmd_ds_command *) adapter->upld_buf; - dev_err(adapter->dev, "CMD_RESP: NULL curr_cmd, %#x\n", - le16_to_cpu(resp->command)); + mwifiex_dbg(adapter, ERROR, + "CMD_RESP: NULL curr_cmd, %#x\n", + le16_to_cpu(resp->command)); return -1; } @@ -781,8 +807,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data; if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) { - dev_err(adapter->dev, "CMD_RESP: %#x been canceled\n", - le16_to_cpu(resp->command)); + mwifiex_dbg(adapter, ERROR, + "CMD_RESP: %#x been canceled\n", + le16_to_cpu(resp->command)); mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->curr_cmd = NULL; @@ -794,7 +821,8 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) /* Copy original response back to response buffer */ struct mwifiex_ds_misc_cmd *hostcmd; uint16_t size = le16_to_cpu(resp->size); - dev_dbg(adapter->dev, "info: host cmd resp size = %d\n", size); + mwifiex_dbg(adapter, INFO, + "info: host cmd resp size = %d\n", size); size = min_t(u16, size, MWIFIEX_SIZE_OF_CMD_BUFFER); if (adapter->curr_cmd->data_buf) { hostcmd = adapter->curr_cmd->data_buf; @@ -822,13 +850,15 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) adapter->dbg.last_cmd_resp_id[adapter->dbg.last_cmd_resp_index] = orig_cmdresp_no; - dev_dbg(adapter->dev, - "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n", - orig_cmdresp_no, cmdresp_result, - le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num)); + mwifiex_dbg(adapter, CMD, + "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n", + orig_cmdresp_no, cmdresp_result, + le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num)); + mwifiex_dbg_dump(adapter, CMD_D, "CMD_RESP buffer:", resp, + le16_to_cpu(resp->size)); if (!(orig_cmdresp_no & HostCmd_RET_BIT)) { - dev_err(adapter->dev, "CMD_RESP: invalid cmd resp\n"); + mwifiex_dbg(adapter, ERROR, "CMD_RESP: invalid cmd resp\n"); if (adapter->curr_cmd->wait_q_enabled) adapter->cmd_wait_q.status = -1; @@ -852,8 +882,9 
@@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) /* Check init command response */ if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) { if (ret) { - dev_err(adapter->dev, "%s: cmd %#x failed during " - "initialization\n", __func__, cmdresp_no); + mwifiex_dbg(adapter, ERROR, + "%s: cmd %#x failed during\t" + "initialization\n", __func__, cmdresp_no); mwifiex_init_fw_complete(adapter); return -1; } else if (adapter->last_init_cmd == cmdresp_no) @@ -888,7 +919,8 @@ mwifiex_cmd_timeout_func(unsigned long function_context) adapter->is_cmd_timedout = 1; if (!adapter->curr_cmd) { - dev_dbg(adapter->dev, "cmd: empty curr_cmd\n"); + mwifiex_dbg(adapter, ERROR, + "cmd: empty curr_cmd\n"); return; } cmd_node = adapter->curr_cmd; @@ -897,47 +929,60 @@ mwifiex_cmd_timeout_func(unsigned long function_context) adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index]; adapter->dbg.timeout_cmd_act = adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index]; - dev_err(adapter->dev, - "%s: Timeout cmd id = %#x, act = %#x\n", __func__, - adapter->dbg.timeout_cmd_id, - adapter->dbg.timeout_cmd_act); - - dev_err(adapter->dev, "num_data_h2c_failure = %d\n", - adapter->dbg.num_tx_host_to_card_failure); - dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n", - adapter->dbg.num_cmd_host_to_card_failure); - - dev_err(adapter->dev, "is_cmd_timedout = %d\n", - adapter->is_cmd_timedout); - dev_err(adapter->dev, "num_tx_timeout = %d\n", - adapter->dbg.num_tx_timeout); - - dev_err(adapter->dev, "last_cmd_index = %d\n", - adapter->dbg.last_cmd_index); - dev_err(adapter->dev, "last_cmd_id: %*ph\n", - (int)sizeof(adapter->dbg.last_cmd_id), - adapter->dbg.last_cmd_id); - dev_err(adapter->dev, "last_cmd_act: %*ph\n", - (int)sizeof(adapter->dbg.last_cmd_act), - adapter->dbg.last_cmd_act); - - dev_err(adapter->dev, "last_cmd_resp_index = %d\n", - adapter->dbg.last_cmd_resp_index); - dev_err(adapter->dev, "last_cmd_resp_id: %*ph\n", - (int)sizeof(adapter->dbg.last_cmd_resp_id), - adapter->dbg.last_cmd_resp_id); - - dev_err(adapter->dev, "last_event_index = %d\n", - adapter->dbg.last_event_index); - dev_err(adapter->dev, "last_event: %*ph\n", - (int)sizeof(adapter->dbg.last_event), - adapter->dbg.last_event); - - dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n", - adapter->data_sent, adapter->cmd_sent); - - dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n", - adapter->ps_mode, adapter->ps_state); + mwifiex_dbg(adapter, MSG, + "%s: Timeout cmd id = %#x, act = %#x\n", __func__, + adapter->dbg.timeout_cmd_id, + adapter->dbg.timeout_cmd_act); + + mwifiex_dbg(adapter, MSG, + "num_data_h2c_failure = %d\n", + adapter->dbg.num_tx_host_to_card_failure); + mwifiex_dbg(adapter, MSG, + "num_cmd_h2c_failure = %d\n", + adapter->dbg.num_cmd_host_to_card_failure); + + mwifiex_dbg(adapter, MSG, + "is_cmd_timedout = %d\n", + adapter->is_cmd_timedout); + mwifiex_dbg(adapter, MSG, + "num_tx_timeout = %d\n", + adapter->dbg.num_tx_timeout); + + mwifiex_dbg(adapter, MSG, + "last_cmd_index = %d\n", + adapter->dbg.last_cmd_index); + mwifiex_dbg(adapter, MSG, + "last_cmd_id: %*ph\n", + (int)sizeof(adapter->dbg.last_cmd_id), + adapter->dbg.last_cmd_id); + mwifiex_dbg(adapter, MSG, + "last_cmd_act: %*ph\n", + (int)sizeof(adapter->dbg.last_cmd_act), + adapter->dbg.last_cmd_act); + + mwifiex_dbg(adapter, MSG, + "last_cmd_resp_index = %d\n", + adapter->dbg.last_cmd_resp_index); + mwifiex_dbg(adapter, MSG, + "last_cmd_resp_id: %*ph\n", + (int)sizeof(adapter->dbg.last_cmd_resp_id), + adapter->dbg.last_cmd_resp_id); + + mwifiex_dbg(adapter, MSG, + 
"last_event_index = %d\n", + adapter->dbg.last_event_index); + mwifiex_dbg(adapter, MSG, + "last_event: %*ph\n", + (int)sizeof(adapter->dbg.last_event), + adapter->dbg.last_event); + + mwifiex_dbg(adapter, MSG, + "data_sent=%d cmd_sent=%d\n", + adapter->data_sent, adapter->cmd_sent); + + mwifiex_dbg(adapter, MSG, + "ps_mode=%d ps_state=%d\n", + adapter->ps_mode, adapter->ps_state); if (cmd_node->wait_q_enabled) { adapter->cmd_wait_q.status = -ETIMEDOUT; @@ -948,8 +993,8 @@ mwifiex_cmd_timeout_func(unsigned long function_context) if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) mwifiex_init_fw_complete(adapter); - if (adapter->if_ops.fw_dump) - adapter->if_ops.fw_dump(adapter); + if (adapter->if_ops.device_dump) + adapter->if_ops.device_dump(adapter); if (adapter->if_ops.card_reset) adapter->if_ops.card_reset(adapter); @@ -1015,7 +1060,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter) if (!priv) continue; if (priv->scan_request) { - dev_dbg(adapter->dev, "info: aborting scan\n"); + mwifiex_dbg(adapter, WARN, "info: aborting scan\n"); cfg80211_scan_done(priv->scan_request, 1); priv->scan_request = NULL; } @@ -1075,7 +1120,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) if (!priv) continue; if (priv->scan_request) { - dev_dbg(adapter->dev, "info: aborting scan\n"); + mwifiex_dbg(adapter, WARN, "info: aborting scan\n"); cfg80211_scan_done(priv->scan_request, 1); priv->scan_request = NULL; } @@ -1100,11 +1145,11 @@ mwifiex_check_ps_cond(struct mwifiex_adapter *adapter) !adapter->curr_cmd && !IS_CARD_RX_RCVD(adapter)) mwifiex_dnld_sleep_confirm_cmd(adapter); else - dev_dbg(adapter->dev, - "cmd: Delay Sleep Confirm (%s%s%s)\n", - (adapter->cmd_sent) ? "D" : "", - (adapter->curr_cmd) ? "C" : "", - (IS_CARD_RX_RCVD(adapter)) ? "R" : ""); + mwifiex_dbg(adapter, CMD, + "cmd: Delay Sleep Confirm (%s%s%s)\n", + (adapter->cmd_sent) ? "D" : "", + (adapter->curr_cmd) ? "C" : "", + (IS_CARD_RX_RCVD(adapter)) ? 
"R" : ""); } /* @@ -1120,15 +1165,18 @@ mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated) priv->adapter->hs_activated = true; mwifiex_update_rxreor_flags(priv->adapter, RXREOR_FORCE_NO_DROP); - dev_dbg(priv->adapter->dev, "event: hs_activated\n"); + mwifiex_dbg(priv->adapter, EVENT, + "event: hs_activated\n"); priv->adapter->hs_activate_wait_q_woken = true; wake_up_interruptible( &priv->adapter->hs_activate_wait_q); } else { - dev_dbg(priv->adapter->dev, "event: HS not configured\n"); + mwifiex_dbg(priv->adapter, EVENT, + "event: HS not configured\n"); } } else { - dev_dbg(priv->adapter->dev, "event: hs_deactivated\n"); + mwifiex_dbg(priv->adapter, EVENT, + "event: hs_deactivated\n"); priv->adapter->hs_activated = false; } } @@ -1156,11 +1204,12 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv, mwifiex_hs_activated_event(priv, true); return 0; } else { - dev_dbg(adapter->dev, "cmd: CMD_RESP: HS_CFG cmd reply" - " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n", - resp->result, conditions, - phs_cfg->params.hs_config.gpio, - phs_cfg->params.hs_config.gap); + mwifiex_dbg(adapter, CMD, + "cmd: CMD_RESP: HS_CFG cmd reply\t" + " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n", + resp->result, conditions, + phs_cfg->params.hs_config.gpio, + phs_cfg->params.hs_config.gap); } if (conditions != HS_CFG_CANCEL) { adapter->is_hs_configured = true; @@ -1182,8 +1231,10 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv, void mwifiex_process_hs_config(struct mwifiex_adapter *adapter) { - dev_dbg(adapter->dev, "info: %s: auto cancelling host sleep" - " since there is interrupt from the firmware\n", __func__); + mwifiex_dbg(adapter, INFO, + "info: %s: auto cancelling host sleep\t" + "since there is interrupt from the firmware\n", + __func__); adapter->if_ops.wakeup(adapter); adapter->hs_activated = false; @@ -1212,13 +1263,14 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter, uint16_t seq_num = le16_to_cpu(cmd->seq_num); if (!upld_len) { - dev_err(adapter->dev, "%s: cmd size is 0\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: cmd size is 0\n", __func__); return; } - dev_dbg(adapter->dev, - "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n", - command, result, le16_to_cpu(cmd->size), seq_num); + mwifiex_dbg(adapter, CMD, + "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n", + command, result, le16_to_cpu(cmd->size), seq_num); /* Get BSS number and corresponding priv */ priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num), @@ -1232,15 +1284,16 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter, command &= HostCmd_CMD_ID_MASK; if (command != HostCmd_CMD_802_11_PS_MODE_ENH) { - dev_err(adapter->dev, - "%s: rcvd unexpected resp for cmd %#x, result = %x\n", - __func__, command, result); + mwifiex_dbg(adapter, ERROR, + "%s: rcvd unexpected resp for cmd %#x, result = %x\n", + __func__, command, result); return; } if (result) { - dev_err(adapter->dev, "%s: sleep confirm cmd failed\n", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s: sleep confirm cmd failed\n", + __func__); adapter->pm_wakeup_card_req = false; adapter->ps_state = PS_STATE_AWAKE; return; @@ -1305,7 +1358,8 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv, sizeof(struct mwifiex_ie_types_header)); cmd_size += sizeof(*ps_tlv); tlv += sizeof(*ps_tlv); - dev_dbg(adapter->dev, "cmd: PS Command: Enter PS\n"); + mwifiex_dbg(priv->adapter, CMD, + "cmd: PS Command: Enter PS\n"); ps_mode->null_pkt_interval = 
cpu_to_le16(adapter->null_pkt_interval); ps_mode->multiple_dtims = @@ -1335,8 +1389,8 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv, tlv += sizeof(*auto_ds_tlv); if (auto_ds) idletime = auto_ds->idle_time; - dev_dbg(priv->adapter->dev, - "cmd: PS Command: Enter Auto Deep Sleep\n"); + mwifiex_dbg(priv->adapter, CMD, + "cmd: PS Command: Enter Auto Deep Sleep\n"); auto_ds_tlv->deep_sleep_timeout = cpu_to_le16(idletime); } cmd->size = cpu_to_le16(cmd_size); @@ -1363,27 +1417,31 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv, uint16_t auto_ps_bitmap = le16_to_cpu(ps_mode->params.ps_bitmap); - dev_dbg(adapter->dev, - "info: %s: PS_MODE cmd reply result=%#x action=%#X\n", - __func__, resp->result, action); + mwifiex_dbg(adapter, INFO, + "info: %s: PS_MODE cmd reply result=%#x action=%#X\n", + __func__, resp->result, action); if (action == EN_AUTO_PS) { if (auto_ps_bitmap & BITMAP_AUTO_DS) { - dev_dbg(adapter->dev, "cmd: Enabled auto deep sleep\n"); + mwifiex_dbg(adapter, CMD, + "cmd: Enabled auto deep sleep\n"); priv->adapter->is_deep_sleep = true; } if (auto_ps_bitmap & BITMAP_STA_PS) { - dev_dbg(adapter->dev, "cmd: Enabled STA power save\n"); + mwifiex_dbg(adapter, CMD, + "cmd: Enabled STA power save\n"); if (adapter->sleep_period.period) - dev_dbg(adapter->dev, - "cmd: set to uapsd/pps mode\n"); + mwifiex_dbg(adapter, CMD, + "cmd: set to uapsd/pps mode\n"); } } else if (action == DIS_AUTO_PS) { if (ps_bitmap & BITMAP_AUTO_DS) { priv->adapter->is_deep_sleep = false; - dev_dbg(adapter->dev, "cmd: Disabled auto deep sleep\n"); + mwifiex_dbg(adapter, CMD, + "cmd: Disabled auto deep sleep\n"); } if (ps_bitmap & BITMAP_STA_PS) { - dev_dbg(adapter->dev, "cmd: Disabled STA power save\n"); + mwifiex_dbg(adapter, CMD, + "cmd: Disabled STA power save\n"); if (adapter->sleep_period.period) { adapter->delay_null_pkt = false; adapter->tx_lock_flag = false; @@ -1396,7 +1454,8 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv, else adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM; - dev_dbg(adapter->dev, "cmd: ps_bitmap=%#x\n", ps_bitmap); + mwifiex_dbg(adapter, CMD, + "cmd: ps_bitmap=%#x\n", ps_bitmap); if (pm_cfg) { /* This section is for get power save mode */ @@ -1533,29 +1592,29 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, api_rev->major_ver; adapter->key_api_minor_ver = api_rev->minor_ver; - dev_dbg(adapter->dev, - "key_api v%d.%d\n", - adapter->key_api_major_ver, - adapter->key_api_minor_ver); + mwifiex_dbg(adapter, INFO, + "key_api v%d.%d\n", + adapter->key_api_major_ver, + adapter->key_api_minor_ver); break; case FW_API_VER_ID: adapter->fw_api_ver = api_rev->major_ver; - dev_dbg(adapter->dev, - "Firmware api version %d\n", - adapter->fw_api_ver); + mwifiex_dbg(adapter, INFO, + "Firmware api version %d\n", + adapter->fw_api_ver); break; default: - dev_warn(adapter->dev, - "Unknown api_id: %d\n", - api_id); + mwifiex_dbg(adapter, FATAL, + "Unknown api_id: %d\n", + api_id); break; } break; default: - dev_warn(adapter->dev, - "Unknown GET_HW_SPEC TLV type: %#x\n", - le16_to_cpu(tlv->type)); + mwifiex_dbg(adapter, FATAL, + "Unknown GET_HW_SPEC TLV type: %#x\n", + le16_to_cpu(tlv->type)); break; } parsed_len += le16_to_cpu(tlv->len) + @@ -1565,14 +1624,16 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, } } - dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n", - adapter->fw_release_number); - dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n", - hw_spec->permanent_addr); - dev_dbg(adapter->dev, - "info: 
GET_HW_SPEC: hw_if_version=%#x version=%#x\n", - le16_to_cpu(hw_spec->hw_if_version), - le16_to_cpu(hw_spec->version)); + mwifiex_dbg(adapter, INFO, + "info: GET_HW_SPEC: fw_release_number- %#x\n", + adapter->fw_release_number); + mwifiex_dbg(adapter, INFO, + "info: GET_HW_SPEC: permanent addr: %pM\n", + hw_spec->permanent_addr); + mwifiex_dbg(adapter, INFO, + "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n", + le16_to_cpu(hw_spec->hw_if_version), + le16_to_cpu(hw_spec->version)); ether_addr_copy(priv->adapter->perm_addr, hw_spec->permanent_addr); adapter->region_code = le16_to_cpu(hw_spec->region_code); @@ -1585,8 +1646,8 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, /* If it's unidentified region code, use the default (USA) */ if (i >= MWIFIEX_MAX_REGION_CODE) { adapter->region_code = 0x10; - dev_dbg(adapter->dev, - "cmd: unknown region code, use default (USA)\n"); + mwifiex_dbg(adapter, WARN, + "cmd: unknown region code, use default (USA)\n"); } adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap); diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c index 1fb329dc6744..5a0636d43a1b 100644 --- a/drivers/net/wireless/mwifiex/debugfs.c +++ b/drivers/net/wireless/mwifiex/debugfs.c @@ -152,24 +152,24 @@ mwifiex_info_read(struct file *file, char __user *ubuf, } /* - * Proc firmware dump read handler. + * Proc device dump read handler. * - * This function is called when the 'fw_dump' file is opened for + * This function is called when the 'device_dump' file is opened for * reading. - * This function dumps firmware memory in different files - * (ex. DTCM, ITCM, SQRAM etc.) based on the the segments for + * This function dumps driver information and firmware memory segments + * (ex. DTCM, ITCM, SQRAM etc.) for * debugging. */ static ssize_t -mwifiex_fw_dump_read(struct file *file, char __user *ubuf, - size_t count, loff_t *ppos) +mwifiex_device_dump_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) { struct mwifiex_private *priv = file->private_data; - if (!priv->adapter->if_ops.fw_dump) + if (!priv->adapter->if_ops.device_dump) return -EIO; - priv->adapter->if_ops.fw_dump(priv->adapter); + priv->adapter->if_ops.device_dump(priv->adapter); return 0; } @@ -535,6 +535,144 @@ mwifiex_regrdwr_read(struct file *file, char __user *ubuf, return ret; } +/* Proc debug_mask file read handler. + * This function is called when the 'debug_mask' file is opened for reading. + * This function can be used to read the driver debugging mask value. + */ +static ssize_t +mwifiex_debug_mask_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct mwifiex_private *priv = + (struct mwifiex_private *)file->private_data; + unsigned long page = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)page; + size_t ret = 0; + int pos = 0; + + if (!buf) + return -ENOMEM; + + pos += snprintf(buf, PAGE_SIZE, "debug mask=0x%08x\n", + priv->adapter->debug_mask); + ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos); + + free_page(page); + return ret; +} + +/* Proc debug_mask file write handler. + * This function is called when the 'debug_mask' file is opened for writing. + * This function can be used to write the driver debugging mask value. 
+ */ +static ssize_t +mwifiex_debug_mask_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + int ret; + unsigned long debug_mask; + struct mwifiex_private *priv = (void *)file->private_data; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (void *)addr; + size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1)); + + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, ubuf, buf_size)) { + ret = -EFAULT; + goto done; + } + + if (kstrtoul(buf, 0, &debug_mask)) { + ret = -EINVAL; + goto done; + } + + priv->adapter->debug_mask = debug_mask; + ret = count; +done: + free_page(addr); + return ret; +} + +/* Proc memrw file write handler. + * This function is called when the 'memrw' file is opened for writing + * This function can be used to write to a memory location. + */ +static ssize_t +mwifiex_memrw_write(struct file *file, const char __user *ubuf, size_t count, + loff_t *ppos) +{ + int ret; + char cmd; + struct mwifiex_ds_mem_rw mem_rw; + u16 cmd_action; + struct mwifiex_private *priv = (void *)file->private_data; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (void *)addr; + size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1)); + + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, ubuf, buf_size)) { + ret = -EFAULT; + goto done; + } + + ret = sscanf(buf, "%c %x %x", &cmd, &mem_rw.addr, &mem_rw.value); + if (ret != 3) { + ret = -EINVAL; + goto done; + } + + if ((cmd == 'r') || (cmd == 'R')) { + cmd_action = HostCmd_ACT_GEN_GET; + mem_rw.value = 0; + } else if ((cmd == 'w') || (cmd == 'W')) { + cmd_action = HostCmd_ACT_GEN_SET; + } else { + ret = -EINVAL; + goto done; + } + + memcpy(&priv->mem_rw, &mem_rw, sizeof(mem_rw)); + if (mwifiex_send_cmd(priv, HostCmd_CMD_MEM_ACCESS, cmd_action, 0, + &mem_rw, true)) + ret = -1; + else + ret = count; + +done: + free_page(addr); + return ret; +} + +/* Proc memrw file read handler. + * This function is called when the 'memrw' file is opened for reading + * This function can be used to read from a memory location. + */ +static ssize_t +mwifiex_memrw_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct mwifiex_private *priv = (void *)file->private_data; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + int ret, pos = 0; + + if (!buf) + return -ENOMEM; + + pos += snprintf(buf, PAGE_SIZE, "0x%x 0x%x\n", priv->mem_rw.addr, + priv->mem_rw.value); + ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos); + + free_page(addr); + return ret; +} + static u32 saved_offset = -1, saved_bytes = -1; /* @@ -654,7 +792,8 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf, memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg)); if (arg_num > 3) { - dev_err(priv->adapter->dev, "Too many arguments\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Too many arguments\n"); ret = -EINVAL; goto done; } @@ -746,11 +885,13 @@ static const struct file_operations mwifiex_dfs_##name##_fops = { \ MWIFIEX_DFS_FILE_READ_OPS(info); MWIFIEX_DFS_FILE_READ_OPS(debug); MWIFIEX_DFS_FILE_READ_OPS(getlog); -MWIFIEX_DFS_FILE_READ_OPS(fw_dump); +MWIFIEX_DFS_FILE_READ_OPS(device_dump); MWIFIEX_DFS_FILE_OPS(regrdwr); MWIFIEX_DFS_FILE_OPS(rdeeprom); +MWIFIEX_DFS_FILE_OPS(memrw); MWIFIEX_DFS_FILE_OPS(hscfg); MWIFIEX_DFS_FILE_OPS(histogram); +MWIFIEX_DFS_FILE_OPS(debug_mask); /* * This function creates the debug FS directory structure and the files. 
@@ -772,9 +913,11 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv) MWIFIEX_DFS_ADD_FILE(getlog); MWIFIEX_DFS_ADD_FILE(regrdwr); MWIFIEX_DFS_ADD_FILE(rdeeprom); - MWIFIEX_DFS_ADD_FILE(fw_dump); + MWIFIEX_DFS_ADD_FILE(device_dump); + MWIFIEX_DFS_ADD_FILE(memrw); MWIFIEX_DFS_ADD_FILE(hscfg); MWIFIEX_DFS_ADD_FILE(histogram); + MWIFIEX_DFS_ADD_FILE(debug_mask); } /* diff --git a/drivers/net/wireless/mwifiex/ethtool.c b/drivers/net/wireless/mwifiex/ethtool.c index 65d8d6d4b6ba..58400c69ab26 100644 --- a/drivers/net/wireless/mwifiex/ethtool.c +++ b/drivers/net/wireless/mwifiex/ethtool.c @@ -64,104 +64,7 @@ static int mwifiex_ethtool_set_wol(struct net_device *dev, return 0; } -static int -mwifiex_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) -{ - struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - struct mwifiex_adapter *adapter = priv->adapter; - struct memory_type_mapping *entry; - - if (!adapter->if_ops.fw_dump) - return -ENOTSUPP; - - dump->flag = adapter->curr_mem_idx; - dump->version = 1; - if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) { - dump->len = adapter->drv_info_size; - } else if (adapter->curr_mem_idx != MWIFIEX_FW_DUMP_IDX) { - entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx]; - dump->len = entry->mem_size; - } else { - dump->len = 0; - } - - return 0; -} - -static int -mwifiex_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, - void *buffer) -{ - u8 *p = buffer; - struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - struct mwifiex_adapter *adapter = priv->adapter; - struct memory_type_mapping *entry; - - if (!adapter->if_ops.fw_dump) - return -ENOTSUPP; - - if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) { - if (!adapter->drv_info_dump) - return -EFAULT; - memcpy(p, adapter->drv_info_dump, adapter->drv_info_size); - return 0; - } - - if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) { - dev_err(adapter->dev, "firmware dump in progress!!\n"); - return -EBUSY; - } - - entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx]; - - if (!entry->mem_ptr) - return -EFAULT; - - memcpy(p, entry->mem_ptr, entry->mem_size); - - entry->mem_size = 0; - vfree(entry->mem_ptr); - entry->mem_ptr = NULL; - - return 0; -} - -static int mwifiex_set_dump(struct net_device *dev, struct ethtool_dump *val) -{ - struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - struct mwifiex_adapter *adapter = priv->adapter; - - if (!adapter->if_ops.fw_dump) - return -ENOTSUPP; - - if (val->flag == MWIFIEX_DRV_INFO_IDX) { - adapter->curr_mem_idx = MWIFIEX_DRV_INFO_IDX; - return 0; - } - - if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) { - dev_err(adapter->dev, "firmware dump in progress!!\n"); - return -EBUSY; - } - - if (val->flag == MWIFIEX_FW_DUMP_IDX) { - adapter->curr_mem_idx = val->flag; - adapter->if_ops.fw_dump(adapter); - return 0; - } - - if (val->flag < 0 || val->flag >= adapter->num_mem_types) - return -EINVAL; - - adapter->curr_mem_idx = val->flag; - - return 0; -} - const struct ethtool_ops mwifiex_ethtool_ops = { .get_wol = mwifiex_ethtool_get_wol, .set_wol = mwifiex_ethtool_set_wol, - .get_dump_flag = mwifiex_get_dump_flag, - .get_dump_data = mwifiex_get_dump_data, - .set_dump = mwifiex_set_dump, }; diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 59d8964dd0dc..c404390cb0fa 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -323,6 +323,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 
0x0075 #define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f #define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083 +#define HostCmd_CMD_MEM_ACCESS 0x0086 #define HostCmd_CMD_CFG_DATA 0x008f #define HostCmd_CMD_VERSION_EXT 0x0097 #define HostCmd_CMD_MEF_CFG 0x009a @@ -1576,6 +1577,13 @@ struct mwifiex_ie_types_extcap { u8 ext_capab[0]; } __packed; +struct host_cmd_ds_mem_access { + __le16 action; + __le16 reserved; + __le32 addr; + __le32 value; +}; + struct mwifiex_ie_types_qos_info { struct mwifiex_ie_types_header header; u8 qos_info; @@ -1958,6 +1966,7 @@ struct host_cmd_ds_command { struct host_cmd_ds_p2p_mode_cfg mode_cfg; struct host_cmd_ds_802_11_ibss_status ibss_coalescing; struct host_cmd_ds_mef_cfg mef_cfg; + struct host_cmd_ds_mem_access mem; struct host_cmd_ds_mac_reg_access mac_reg; struct host_cmd_ds_bbp_reg_access bbp_reg; struct host_cmd_ds_rf_reg_access rf_reg; diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index e12192f5cfad..df7fdc09d38c 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -56,7 +56,7 @@ static void wakeup_timer_fn(unsigned long data) { struct mwifiex_adapter *adapter = (struct mwifiex_adapter *)data; - dev_err(adapter->dev, "Firmware wakeup failed\n"); + mwifiex_dbg(adapter, ERROR, "Firmware wakeup failed\n"); adapter->hw_status = MWIFIEX_HW_STATUS_RESET; mwifiex_cancel_all_pending_cmd(adapter); @@ -172,8 +172,9 @@ static int mwifiex_allocate_adapter(struct mwifiex_adapter *adapter) /* Allocate command buffer */ ret = mwifiex_alloc_cmd_buffer(adapter); if (ret) { - dev_err(adapter->dev, "%s: failed to alloc cmd buffer\n", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s: failed to alloc cmd buffer\n", + __func__); return -1; } @@ -182,8 +183,9 @@ static int mwifiex_allocate_adapter(struct mwifiex_adapter *adapter) + INTF_HEADER_LEN); if (!adapter->sleep_cfm) { - dev_err(adapter->dev, "%s: failed to alloc sleep cfm" - " cmd buffer\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: failed to alloc sleep cfm\t" + " cmd buffer\n", __func__); return -1; } skb_reserve(adapter->sleep_cfm, INTF_HEADER_LEN); @@ -417,7 +419,7 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) mwifiex_free_lock_list(adapter); /* Free command buffer */ - dev_dbg(adapter->dev, "info: free cmd buffer\n"); + mwifiex_dbg(adapter, INFO, "info: free cmd buffer\n"); mwifiex_free_cmd_buffer(adapter); for (idx = 0; idx < adapter->num_mem_types; idx++) { @@ -433,6 +435,7 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) if (adapter->drv_info_dump) { vfree(adapter->drv_info_dump); + adapter->drv_info_dump = NULL; adapter->drv_info_size = 0; } @@ -595,10 +598,11 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv) for (i = 0; i < adapter->priv_num; ++i) { head = &adapter->bss_prio_tbl[i].bss_prio_head; lock = &adapter->bss_prio_tbl[i].bss_prio_lock; - dev_dbg(adapter->dev, "info: delete BSS priority table," - " bss_type = %d, bss_num = %d, i = %d," - " head = %p\n", - priv->bss_type, priv->bss_num, i, head); + mwifiex_dbg(adapter, INFO, + "info: delete BSS priority table,\t" + "bss_type = %d, bss_num = %d, i = %d,\t" + "head = %p\n", + priv->bss_type, priv->bss_num, i, head); { spin_lock_irqsave(lock, flags); @@ -609,9 +613,10 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv) list_for_each_entry_safe(bssprio_node, tmp_node, head, list) { if (bssprio_node->priv == priv) { - dev_dbg(adapter->dev, "info: Delete " - "node %p, next = %p\n", - bssprio_node, 
tmp_node); + mwifiex_dbg(adapter, INFO, + "info: Delete\t" + "node %p, next = %p\n", + bssprio_node, tmp_node); list_del(&bssprio_node->list); kfree(bssprio_node); } @@ -659,20 +664,23 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) adapter->hw_status = MWIFIEX_HW_STATUS_CLOSING; /* wait for mwifiex_process to complete */ if (adapter->mwifiex_processing) { - dev_warn(adapter->dev, "main process is still running\n"); + mwifiex_dbg(adapter, WARN, + "main process is still running\n"); return ret; } /* cancel current command */ if (adapter->curr_cmd) { - dev_warn(adapter->dev, "curr_cmd is still in processing\n"); + mwifiex_dbg(adapter, WARN, + "curr_cmd is still in processing\n"); del_timer_sync(&adapter->cmd_timer); mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); adapter->curr_cmd = NULL; } /* shut down mwifiex */ - dev_dbg(adapter->dev, "info: shutdown mwifiex...\n"); + mwifiex_dbg(adapter, MSG, + "info: shutdown mwifiex...\n"); /* Clean up Tx/Rx queues and delete BSS priority table */ for (i = 0; i < adapter->priv_num; i++) { @@ -741,8 +749,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, /* check if firmware is already running */ ret = adapter->if_ops.check_fw_status(adapter, poll_num); if (!ret) { - dev_notice(adapter->dev, - "WLAN FW already running! Skip FW dnld\n"); + mwifiex_dbg(adapter, MSG, + "WLAN FW already running! Skip FW dnld\n"); return 0; } @@ -750,8 +758,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, /* check if we are the winner for downloading FW */ if (!adapter->winner) { - dev_notice(adapter->dev, - "FW already running! Skip FW dnld\n"); + mwifiex_dbg(adapter, MSG, + "FW already running! Skip FW dnld\n"); goto poll_fw; } } @@ -760,7 +768,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, /* Download firmware with helper */ ret = adapter->if_ops.prog_fw(adapter, pmfw); if (ret) { - dev_err(adapter->dev, "prog_fw failed ret=%#x\n", ret); + mwifiex_dbg(adapter, ERROR, + "prog_fw failed ret=%#x\n", ret); return ret; } } @@ -769,7 +778,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, /* Check if the firmware is downloaded successfully or not */ ret = adapter->if_ops.check_fw_status(adapter, poll_num); if (ret) - dev_err(adapter->dev, "FW failed to be active in time\n"); + mwifiex_dbg(adapter, ERROR, + "FW failed to be active in time\n"); return ret; } diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h index d2b05c3a96da..6f11a25a6b49 100644 --- a/drivers/net/wireless/mwifiex/ioctl.h +++ b/drivers/net/wireless/mwifiex/ioctl.h @@ -189,6 +189,7 @@ struct tdls_peer_info { }; struct mwifiex_debug_info { + unsigned int debug_mask; u32 int_counter; u32 packets_out[MAX_NUM_TID]; u32 tx_buf_size; @@ -342,6 +343,11 @@ struct mwifiex_ds_read_eeprom { u8 value[MAX_EEPROM_DATA]; }; +struct mwifiex_ds_mem_rw { + u32 addr; + u32 value; +}; + #define IEEE_MAX_IE_SIZE 256 #define MWIFIEX_IE_HDR_SIZE (sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE) diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index f214a7cd1345..cce8e39aa45e 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -53,9 +53,9 @@ mwifiex_cmd_append_generic_ie(struct mwifiex_private *priv, u8 **buffer) * parameter buffer pointer. 
*/ if (priv->gen_ie_buf_len) { - dev_dbg(priv->adapter->dev, - "info: %s: append generic ie len %d to %p\n", - __func__, priv->gen_ie_buf_len, *buffer); + mwifiex_dbg(priv->adapter, INFO, + "info: %s: append generic ie len %d to %p\n", + __func__, priv->gen_ie_buf_len, *buffer); /* Wrap the generic IE buffer with a pass through TLV type */ ie_header.type = cpu_to_le16(TLV_TYPE_PASSTHROUGH); @@ -125,9 +125,9 @@ mwifiex_cmd_append_tsf_tlv(struct mwifiex_private *priv, u8 **buffer, tsf_val = cpu_to_le64(bss_desc->timestamp); - dev_dbg(priv->adapter->dev, - "info: %s: TSF offset calc: %016llx - %016llx\n", - __func__, bss_desc->timestamp, bss_desc->fw_tsf); + mwifiex_dbg(priv->adapter, INFO, + "info: %s: TSF offset calc: %016llx - %016llx\n", + __func__, bss_desc->timestamp, bss_desc->fw_tsf); memcpy(*buffer, &tsf_val, sizeof(tsf_val)); *buffer += sizeof(tsf_val); @@ -152,7 +152,7 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1, tmp = kmemdup(rate1, rate1_size, GFP_KERNEL); if (!tmp) { - dev_err(priv->adapter->dev, "failed to alloc tmp buf\n"); + mwifiex_dbg(priv->adapter, ERROR, "failed to alloc tmp buf\n"); return -ENOMEM; } @@ -169,8 +169,8 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1, } } - dev_dbg(priv->adapter->dev, "info: Tx data rate set to %#x\n", - priv->data_rate); + mwifiex_dbg(priv->adapter, INFO, "info: Tx data rate set to %#x\n", + priv->data_rate); if (!priv->is_data_rate_auto) { while (*ptr) { @@ -180,9 +180,10 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1, } ptr++; } - dev_err(priv->adapter->dev, "previously set fixed data rate %#x" - " is not compatible with the network\n", - priv->data_rate); + mwifiex_dbg(priv->adapter, ERROR, + "previously set fixed data rate %#x\t" + "is not compatible with the network\n", + priv->data_rate); ret = -1; goto done; @@ -214,8 +215,9 @@ mwifiex_setup_rates_from_bssdesc(struct mwifiex_private *priv, if (mwifiex_get_common_rates(priv, out_rates, MWIFIEX_SUPPORTED_RATES, card_rates, card_rates_size)) { *out_rates_size = 0; - dev_err(priv->adapter->dev, "%s: cannot get common rates\n", - __func__); + mwifiex_dbg(priv->adapter, ERROR, + "%s: cannot get common rates\n", + __func__); return -1; } @@ -246,8 +248,9 @@ mwifiex_cmd_append_wps_ie(struct mwifiex_private *priv, u8 **buffer) * parameter buffer pointer. */ if (priv->wps_ie_len) { - dev_dbg(priv->adapter->dev, "cmd: append wps ie %d to %p\n", - priv->wps_ie_len, *buffer); + mwifiex_dbg(priv->adapter, CMD, + "cmd: append wps ie %d to %p\n", + priv->wps_ie_len, *buffer); /* Wrap the generic IE buffer with a pass through TLV type */ ie_header.type = cpu_to_le16(TLV_TYPE_MGMT_IE); @@ -292,8 +295,9 @@ mwifiex_cmd_append_wapi_ie(struct mwifiex_private *priv, u8 **buffer) * parameter buffer pointer. 
*/ if (priv->wapi_ie_len) { - dev_dbg(priv->adapter->dev, "cmd: append wapi ie %d to %p\n", - priv->wapi_ie_len, *buffer); + mwifiex_dbg(priv->adapter, CMD, + "cmd: append wapi ie %d to %p\n", + priv->wapi_ie_len, *buffer); /* Wrap the generic IE buffer with a pass through TLV type */ ie_header.type = cpu_to_le16(TLV_TYPE_WAPI_IE); @@ -453,8 +457,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, rates_tlv->header.len = cpu_to_le16((u16) rates_size); memcpy(rates_tlv->rates, rates, rates_size); pos += sizeof(rates_tlv->header) + rates_size; - dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: rates size = %d\n", - rates_size); + mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_CMD: rates size = %d\n", + rates_size); /* Add the Authentication type to be used for Auth frames */ auth_tlv = (struct mwifiex_ie_types_auth_type *) pos; @@ -487,14 +491,14 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, sizeof(struct mwifiex_chan_scan_param_set)); chan_tlv->chan_scan_param[0].chan_number = (bss_desc->phy_param_set.ds_param_set.current_chan); - dev_dbg(priv->adapter->dev, "info: Assoc: TLV Chan = %d\n", - chan_tlv->chan_scan_param[0].chan_number); + mwifiex_dbg(priv->adapter, INFO, "info: Assoc: TLV Chan = %d\n", + chan_tlv->chan_scan_param[0].chan_number); chan_tlv->chan_scan_param[0].radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band); - dev_dbg(priv->adapter->dev, "info: Assoc: TLV Band = %d\n", - chan_tlv->chan_scan_param[0].radio_type); + mwifiex_dbg(priv->adapter, INFO, "info: Assoc: TLV Band = %d\n", + chan_tlv->chan_scan_param[0].radio_type); pos += sizeof(chan_tlv->header) + sizeof(struct mwifiex_chan_scan_param_set); } @@ -544,8 +548,9 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, tmp_cap &= ~WLAN_CAPABILITY_SHORT_SLOT_TIME; tmp_cap &= CAPINFO_MASK; - dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n", - tmp_cap, CAPINFO_MASK); + mwifiex_dbg(priv->adapter, INFO, + "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n", + tmp_cap, CAPINFO_MASK); assoc->cap_info_bitmap = cpu_to_le16(tmp_cap); return 0; @@ -645,9 +650,11 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, if (status_code) { priv->adapter->dbg.num_cmd_assoc_failure++; - dev_err(priv->adapter->dev, - "ASSOC_RESP: failed, status code=%d err=%#x a_id=%#x\n", - status_code, cap_info, le16_to_cpu(assoc_rsp->a_id)); + mwifiex_dbg(priv->adapter, ERROR, + "ASSOC_RESP: failed,\t" + "status code=%d err=%#x a_id=%#x\n", + status_code, cap_info, + le16_to_cpu(assoc_rsp->a_id)); if (cap_info == MWIFIEX_TIMEOUT_FOR_AP_RESP) { if (status_code == MWIFIEX_STATUS_CODE_AUTH_TIMEOUT) @@ -671,8 +678,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, /* Set the attempted BSSID Index to current */ bss_desc = priv->attempted_bss_desc; - dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: %s\n", - bss_desc->ssid.ssid); + mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: %s\n", + bss_desc->ssid.ssid); /* Make a copy of current BSSID descriptor */ memcpy(&priv->curr_bss_params.bss_descriptor, @@ -702,8 +709,9 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, = ((bss_desc->wmm_ie.qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 
1 : 0); - dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: curr_pkt_filter is %#x\n", - priv->curr_pkt_filter); + mwifiex_dbg(priv->adapter, INFO, + "info: ASSOC_RESP: curr_pkt_filter is %#x\n", + priv->curr_pkt_filter); if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled) priv->wpa_is_gtk_set = false; @@ -719,8 +727,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, } if (enable_data) - dev_dbg(priv->adapter->dev, - "info: post association, re-enabling data flow\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: post association, re-enabling data flow\n"); /* Reset SNR/NF/RSSI values */ priv->data_rssi_last = 0; @@ -738,7 +746,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, priv->adapter->dbg.num_cmd_assoc_success++; - dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: associated\n"); + mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: associated\n"); /* Add the ra_list here for infra mode as there will be only 1 ra always */ @@ -825,8 +833,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len); - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: SSID = %s\n", - adhoc_start->ssid); + mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n", + adhoc_start->ssid); memset(bss_desc->ssid.ssid, 0, IEEE80211_MAX_SSID_LEN); memcpy(bss_desc->ssid.ssid, req_ssid->ssid, req_ssid->ssid_len); @@ -858,12 +866,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, } if (!priv->adhoc_channel) { - dev_err(adapter->dev, "ADHOC_S_CMD: adhoc_channel cannot be 0\n"); + mwifiex_dbg(adapter, ERROR, + "ADHOC_S_CMD: adhoc_channel cannot be 0\n"); return -1; } - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: creating ADHOC on channel %d\n", - priv->adhoc_channel); + mwifiex_dbg(adapter, INFO, + "info: ADHOC_S_CMD: creating ADHOC on channel %d\n", + priv->adhoc_channel); priv->curr_bss_params.bss_descriptor.channel = priv->adhoc_channel; priv->curr_bss_params.band = adapter->adhoc_start_band; @@ -895,13 +905,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, /* Set up privacy in bss_desc */ if (priv->sec_info.encryption_mode) { /* Ad-Hoc capability privacy on */ - dev_dbg(adapter->dev, - "info: ADHOC_S_CMD: wep_status set privacy to WEP\n"); + mwifiex_dbg(adapter, INFO, + "info: ADHOC_S_CMD: wep_status set privacy to WEP\n"); bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP; tmp_cap |= WLAN_CAPABILITY_PRIVACY; } else { - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: wep_status NOT set," - " setting privacy to ACCEPT ALL\n"); + mwifiex_dbg(adapter, INFO, + "info: ADHOC_S_CMD: wep_status NOT set,\t" + "setting privacy to ACCEPT ALL\n"); bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL; } @@ -912,8 +923,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL, HostCmd_ACT_GEN_SET, 0, &priv->curr_pkt_filter, false)) { - dev_err(adapter->dev, - "ADHOC_S_CMD: G Protection config failed\n"); + mwifiex_dbg(adapter, ERROR, + "ADHOC_S_CMD: G Protection config failed\n"); return -1; } } @@ -928,10 +939,10 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, memcpy(&priv->curr_bss_params.data_rates, &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates); - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%4ph\n", - adhoc_start->data_rate); + mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: rates=%4ph\n", + adhoc_start->data_rate); - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is 
ready\n"); + mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n"); if (IS_SUPPORT_MULTI_BANDS(adapter)) { /* Append a channel TLV */ @@ -945,8 +956,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, chan_tlv->chan_scan_param[0].chan_number = (u8) priv->curr_bss_params.bss_descriptor.channel; - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Chan = %d\n", - chan_tlv->chan_scan_param[0].chan_number); + mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: TLV Chan = %d\n", + chan_tlv->chan_scan_param[0].chan_number); chan_tlv->chan_scan_param[0].radio_type = mwifiex_band_to_radio_type(priv->curr_bss_params.band); @@ -961,8 +972,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, chan_tlv->chan_scan_param[0].radio_type |= (IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4); } - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n", - chan_tlv->chan_scan_param[0].radio_type); + mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: TLV Band = %d\n", + chan_tlv->chan_scan_param[0].radio_type); pos += sizeof(chan_tlv->header) + sizeof(struct mwifiex_chan_scan_param_set); cmd_append_size += @@ -1084,8 +1095,8 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv, if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL, HostCmd_ACT_GEN_SET, 0, &curr_pkt_filter, false)) { - dev_err(priv->adapter->dev, - "ADHOC_J_CMD: G Protection config failed\n"); + mwifiex_dbg(priv->adapter, ERROR, + "ADHOC_J_CMD: G Protection config failed\n"); return -1; } } @@ -1116,14 +1127,15 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv, tmp_cap &= CAPINFO_MASK; - dev_dbg(priv->adapter->dev, - "info: ADHOC_J_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n", - tmp_cap, CAPINFO_MASK); + mwifiex_dbg(priv->adapter, INFO, + "info: ADHOC_J_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n", + tmp_cap, CAPINFO_MASK); /* Information on BSSID descriptor passed to FW */ - dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: BSSID=%pM, SSID='%s'\n", - adhoc_join->bss_descriptor.bssid, - adhoc_join->bss_descriptor.ssid); + mwifiex_dbg(priv->adapter, INFO, + "info: ADHOC_J_CMD: BSSID=%pM, SSID='%s'\n", + adhoc_join->bss_descriptor.bssid, + adhoc_join->bss_descriptor.ssid); for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_desc->supported_rates[i]; i++) @@ -1159,14 +1171,14 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv, sizeof(struct mwifiex_chan_scan_param_set)); chan_tlv->chan_scan_param[0].chan_number = (bss_desc->phy_param_set.ds_param_set.current_chan); - dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Chan=%d\n", - chan_tlv->chan_scan_param[0].chan_number); + mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_J_CMD: TLV Chan=%d\n", + chan_tlv->chan_scan_param[0].chan_number); chan_tlv->chan_scan_param[0].radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band); - dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Band=%d\n", - chan_tlv->chan_scan_param[0].radio_type); + mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_J_CMD: TLV Band=%d\n", + chan_tlv->chan_scan_param[0].radio_type); pos += sizeof(chan_tlv->header) + sizeof(struct mwifiex_chan_scan_param_set); cmd_append_size += sizeof(chan_tlv->header) + @@ -1220,7 +1232,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, /* Join result code 0 --> SUCCESS */ reason_code = le16_to_cpu(resp->result); if (reason_code) { - dev_err(priv->adapter->dev, "ADHOC_RESP: failed\n"); + mwifiex_dbg(priv->adapter, ERROR, "ADHOC_RESP: failed\n"); if (priv->media_connected) mwifiex_reset_connect_state(priv, reason_code); @@ -1235,8 +1247,8 
@@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, priv->media_connected = true; if (le16_to_cpu(resp->command) == HostCmd_CMD_802_11_AD_HOC_START) { - dev_dbg(priv->adapter->dev, "info: ADHOC_S_RESP %s\n", - bss_desc->ssid.ssid); + mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_S_RESP %s\n", + bss_desc->ssid.ssid); /* Update the created network descriptor with the new BSSID */ memcpy(bss_desc->mac_address, @@ -1248,8 +1260,9 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, * Now the join cmd should be successful. * If BSSID has changed use SSID to compare instead of BSSID */ - dev_dbg(priv->adapter->dev, "info: ADHOC_J_RESP %s\n", - bss_desc->ssid.ssid); + mwifiex_dbg(priv->adapter, INFO, + "info: ADHOC_J_RESP %s\n", + bss_desc->ssid.ssid); /* * Make a copy of current BSSID descriptor, only needed for @@ -1262,10 +1275,10 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, priv->adhoc_state = ADHOC_JOINED; } - dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: channel = %d\n", - priv->adhoc_channel); - dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: BSSID = %pM\n", - priv->curr_bss_params.bss_descriptor.mac_address); + mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_RESP: channel = %d\n", + priv->adhoc_channel); + mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_RESP: BSSID = %pM\n", + priv->curr_bss_params.bss_descriptor.mac_address); if (!netif_carrier_ok(priv->netdev)) netif_carrier_on(priv->netdev); @@ -1327,12 +1340,12 @@ int mwifiex_adhoc_start(struct mwifiex_private *priv, struct cfg80211_ssid *adhoc_ssid) { - dev_dbg(priv->adapter->dev, "info: Adhoc Channel = %d\n", - priv->adhoc_channel); - dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n", - priv->curr_bss_params.bss_descriptor.channel); - dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %d\n", - priv->curr_bss_params.band); + mwifiex_dbg(priv->adapter, INFO, "info: Adhoc Channel = %d\n", + priv->adhoc_channel); + mwifiex_dbg(priv->adapter, INFO, "info: curr_bss_params.channel = %d\n", + priv->curr_bss_params.bss_descriptor.channel); + mwifiex_dbg(priv->adapter, INFO, "info: curr_bss_params.band = %d\n", + priv->curr_bss_params.band); if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) && priv->adapter->config_bands & BAND_AAC) @@ -1353,14 +1366,16 @@ mwifiex_adhoc_start(struct mwifiex_private *priv, int mwifiex_adhoc_join(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc) { - dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid =%s\n", - priv->curr_bss_params.bss_descriptor.ssid.ssid); - dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid_len =%u\n", - priv->curr_bss_params.bss_descriptor.ssid.ssid_len); - dev_dbg(priv->adapter->dev, "info: adhoc join: ssid =%s\n", - bss_desc->ssid.ssid); - dev_dbg(priv->adapter->dev, "info: adhoc join: ssid_len =%u\n", - bss_desc->ssid.ssid_len); + mwifiex_dbg(priv->adapter, INFO, + "info: adhoc join: curr_bss ssid =%s\n", + priv->curr_bss_params.bss_descriptor.ssid.ssid); + mwifiex_dbg(priv->adapter, INFO, + "info: adhoc join: curr_bss ssid_len =%u\n", + priv->curr_bss_params.bss_descriptor.ssid.ssid_len); + mwifiex_dbg(priv->adapter, INFO, "info: adhoc join: ssid =%s\n", + bss_desc->ssid.ssid); + mwifiex_dbg(priv->adapter, INFO, "info: adhoc join: ssid_len =%u\n", + bss_desc->ssid.ssid_len); /* Check if the requested SSID is already joined */ if (priv->curr_bss_params.bss_descriptor.ssid.ssid_len && @@ -1368,8 +1383,9 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv, 
&priv->curr_bss_params.bss_descriptor.ssid) && (priv->curr_bss_params.bss_descriptor.bss_mode == NL80211_IFTYPE_ADHOC)) { - dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: new ad-hoc SSID" - " is the same as current; not attempting to re-join\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: ADHOC_J_CMD: new ad-hoc SSID\t" + "is the same as current; not attempting to re-join\n"); return -1; } @@ -1380,10 +1396,12 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv, else mwifiex_set_ba_params(priv); - dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n", - priv->curr_bss_params.bss_descriptor.channel); - dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n", - priv->curr_bss_params.band); + mwifiex_dbg(priv->adapter, INFO, + "info: curr_bss_params.channel = %d\n", + priv->curr_bss_params.bss_descriptor.channel); + mwifiex_dbg(priv->adapter, INFO, + "info: curr_bss_params.band = %c\n", + priv->curr_bss_params.band); return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_JOIN, HostCmd_ACT_GEN_SET, 0, bss_desc, true); @@ -1431,7 +1449,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac) ret = mwifiex_deauthenticate_infra(priv, mac); if (ret) cfg80211_disconnected(priv->netdev, 0, NULL, 0, - GFP_KERNEL); + true, GFP_KERNEL); break; case NL80211_IFTYPE_ADHOC: return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_STOP, diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 213aa986e87a..3ba4e0e04223 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -24,6 +24,10 @@ #define VERSION "1.0" +static unsigned int debug_mask = MWIFIEX_DEFAULT_DEBUG_MASK; +module_param(debug_mask, uint, 0); +MODULE_PARM_DESC(debug_mask, "bitmap for debug flags"); + const char driver_version[] = "mwifiex " VERSION " (%s) "; static char *cal_data_cfg; module_param(cal_data_cfg, charp, 0); @@ -63,6 +67,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, /* Save interface specific operations in adapter */ memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops)); + adapter->debug_mask = debug_mask; /* card specific initialization has been deferred until now .. 
*/ if (adapter->if_ops.init_if) @@ -89,7 +94,8 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, return 0; error: - dev_dbg(adapter->dev, "info: leave mwifiex_register with error\n"); + mwifiex_dbg(adapter, ERROR, + "info: leave mwifiex_register with error\n"); for (i = 0; i < adapter->priv_num; i++) kfree(adapter->priv[i]); @@ -454,8 +460,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) struct wireless_dev *wdev; if (!firmware) { - dev_err(adapter->dev, - "Failed to get firmware %s\n", adapter->fw_name); + mwifiex_dbg(adapter, ERROR, + "Failed to get firmware %s\n", adapter->fw_name); goto err_dnld_fw; } @@ -471,13 +477,13 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) if (ret == -1) goto err_dnld_fw; - dev_notice(adapter->dev, "WLAN FW is active\n"); + mwifiex_dbg(adapter, MSG, "WLAN FW is active\n"); if (cal_data_cfg) { if ((request_firmware(&adapter->cal_data, cal_data_cfg, adapter->dev)) < 0) - dev_err(adapter->dev, - "Cal data request_firmware() failed\n"); + mwifiex_dbg(adapter, ERROR, + "Cal data request_firmware() failed\n"); } /* enable host interrupt after fw dnld is successful */ @@ -502,12 +508,14 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) priv = adapter->priv[MWIFIEX_BSS_ROLE_STA]; if (mwifiex_register_cfg80211(adapter)) { - dev_err(adapter->dev, "cannot register with cfg80211\n"); + mwifiex_dbg(adapter, ERROR, + "cannot register with cfg80211\n"); goto err_init_fw; } if (mwifiex_init_channel_scan_gap(adapter)) { - dev_err(adapter->dev, "could not init channel stats table\n"); + mwifiex_dbg(adapter, ERROR, + "could not init channel stats table\n"); goto err_init_fw; } @@ -521,7 +529,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", NET_NAME_ENUM, NL80211_IFTYPE_STATION, NULL, NULL); if (IS_ERR(wdev)) { - dev_err(adapter->dev, "cannot create default STA interface\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create default STA interface\n"); rtnl_unlock(); goto err_add_intf; } @@ -530,7 +539,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d", NET_NAME_ENUM, NL80211_IFTYPE_AP, NULL, NULL); if (IS_ERR(wdev)) { - dev_err(adapter->dev, "cannot create AP interface\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create AP interface\n"); rtnl_unlock(); goto err_add_intf; } @@ -541,8 +551,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) NL80211_IFTYPE_P2P_CLIENT, NULL, NULL); if (IS_ERR(wdev)) { - dev_err(adapter->dev, - "cannot create p2p client interface\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create p2p client interface\n"); rtnl_unlock(); goto err_add_intf; } @@ -550,7 +560,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) rtnl_unlock(); mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1); - dev_notice(adapter->dev, "driver_version = %s\n", fmt); + mwifiex_dbg(adapter, MSG, "driver_version = %s\n", fmt); goto done; err_add_intf: @@ -560,7 +570,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) if (adapter->if_ops.disable_int) adapter->if_ops.disable_int(adapter); err_dnld_fw: - pr_debug("info: %s: unregister device\n", __func__); + mwifiex_dbg(adapter, ERROR, + "info: %s: unregister device\n", __func__); if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); @@ -601,8 
+612,8 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter) adapter->dev, GFP_KERNEL, adapter, mwifiex_fw_dpc); if (ret < 0) - dev_err(adapter->dev, - "request_firmware_nowait() returned error %d\n", ret); + mwifiex_dbg(adapter, ERROR, + "request_firmware_nowait error %d\n", ret); return ret; } @@ -628,7 +639,8 @@ mwifiex_close(struct net_device *dev) struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); if (priv->scan_request) { - dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n"); + mwifiex_dbg(priv->adapter, INFO, + "aborting scan on ndo_stop\n"); cfg80211_scan_done(priv->scan_request, 1); priv->scan_request = NULL; priv->scan_aborting = true; @@ -649,7 +661,8 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb) txq = netdev_get_tx_queue(priv->netdev, index); if (!netif_tx_queue_stopped(txq)) { netif_tx_stop_queue(txq); - dev_dbg(priv->adapter->dev, "stop queue: %d\n", index); + mwifiex_dbg(priv->adapter, DATA, + "stop queue: %d\n", index); } } @@ -714,8 +727,9 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) struct mwifiex_txinfo *tx_info; bool multicast; - dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n", - jiffies, priv->bss_type, priv->bss_num); + mwifiex_dbg(priv->adapter, DATA, + "data: %lu BSS(%d-%d): Data <= kernel\n", + jiffies, priv->bss_type, priv->bss_num); if (priv->adapter->surprise_removed) { kfree_skb(skb); @@ -723,28 +737,31 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } if (!skb->len || (skb->len > ETH_FRAME_LEN)) { - dev_err(priv->adapter->dev, "Tx: bad skb len %d\n", skb->len); + mwifiex_dbg(priv->adapter, ERROR, + "Tx: bad skb len %d\n", skb->len); kfree_skb(skb); priv->stats.tx_dropped++; return 0; } if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) { - dev_dbg(priv->adapter->dev, - "data: Tx: insufficient skb headroom %d\n", - skb_headroom(skb)); + mwifiex_dbg(priv->adapter, DATA, + "data: Tx: insufficient skb headroom %d\n", + skb_headroom(skb)); /* Insufficient skb headroom - allocate a new skb */ new_skb = skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN); if (unlikely(!new_skb)) { - dev_err(priv->adapter->dev, "Tx: cannot alloca new_skb\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Tx: cannot allocate new_skb\n"); kfree_skb(skb); priv->stats.tx_dropped++; return 0; } kfree_skb(skb); skb = new_skb; - dev_dbg(priv->adapter->dev, "info: new skb headroomd %d\n", - skb_headroom(skb)); + mwifiex_dbg(priv->adapter, INFO, + "info: new skb headroom %d\n", + skb_headroom(skb)); } tx_info = MWIFIEX_SKB_TXCB(skb); @@ -802,8 +819,8 @@ mwifiex_set_mac_address(struct net_device *dev, void *addr) if (!ret) memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN); else - dev_err(priv->adapter->dev, - "set mac address failed: ret=%d\n", ret); + mwifiex_dbg(priv->adapter, ERROR, + "set mac address failed: ret=%d\n", ret); memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN); @@ -841,20 +858,22 @@ mwifiex_tx_timeout(struct net_device *dev) priv->num_tx_timeout++; priv->tx_timeout_cnt++; - dev_err(priv->adapter->dev, - "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n", - jiffies, priv->tx_timeout_cnt, priv->bss_type, priv->bss_num); + mwifiex_dbg(priv->adapter, ERROR, + "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n", + jiffies, priv->tx_timeout_cnt, priv->bss_type, + priv->bss_num); mwifiex_set_trans_start(dev); if (priv->tx_timeout_cnt > TX_TIMEOUT_THRESHOLD && priv->adapter->if_ops.card_reset) { - dev_err(priv->adapter->dev, - "tx_timeout_cnt
exceeds threshold. Triggering card reset!\n"); + mwifiex_dbg(priv->adapter, ERROR, + "tx_timeout_cnt exceeds threshold.\t" + "Triggering card reset!\n"); priv->adapter->if_ops.card_reset(priv->adapter); } } -void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter) +void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) { void *p; char drv_version[64]; @@ -867,10 +886,11 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter) if (adapter->drv_info_dump) { vfree(adapter->drv_info_dump); + adapter->drv_info_dump = NULL; adapter->drv_info_size = 0; } - dev_info(adapter->dev, "=== DRIVER INFO DUMP START===\n"); + mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n"); adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX); @@ -938,12 +958,12 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter) } if (adapter->iface_type == MWIFIEX_SDIO) { - p += sprintf(p, "\n=== SDIO register DUMP===\n"); + p += sprintf(p, "\n=== SDIO register dump===\n"); if (adapter->if_ops.reg_dump) p += adapter->if_ops.reg_dump(adapter, p); } - p += sprintf(p, "\n=== MORE DEBUG INFORMATION\n"); + p += sprintf(p, "\n=== more debug information\n"); debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL); if (debug_info) { for (i = 0; i < adapter->priv_num; i++) { @@ -958,9 +978,99 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter) } adapter->drv_info_size = p - adapter->drv_info_dump; - dev_info(adapter->dev, "=== DRIVER INFO DUMP END===\n"); + mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n"); +} +EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump); + +void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter) +{ + u8 idx, *dump_data, *fw_dump_ptr; + u32 dump_len; + + dump_len = (strlen("========Start dump driverinfo========\n") + + adapter->drv_info_size + + strlen("\n========End dump========\n")); + + for (idx = 0; idx < adapter->num_mem_types; idx++) { + struct memory_type_mapping *entry = + &adapter->mem_type_mapping_tbl[idx]; + + if (entry->mem_ptr) { + dump_len += (strlen("========Start dump ") + + strlen(entry->mem_name) + + strlen("========\n") + + (entry->mem_size + 1) + + strlen("\n========End dump========\n")); + } + } + + dump_data = vzalloc(dump_len + 1); + if (!dump_data) + goto done; + + fw_dump_ptr = dump_data; + + /* Dump all the memory data into single file, a userspace script will + * be used to split all the memory data to multiple files + */ + mwifiex_dbg(adapter, MSG, + "== mwifiex dump information to /sys/class/devcoredump start"); + + strcpy(fw_dump_ptr, "========Start dump driverinfo========\n"); + fw_dump_ptr += strlen("========Start dump driverinfo========\n"); + memcpy(fw_dump_ptr, adapter->drv_info_dump, adapter->drv_info_size); + fw_dump_ptr += adapter->drv_info_size; + strcpy(fw_dump_ptr, "\n========End dump========\n"); + fw_dump_ptr += strlen("\n========End dump========\n"); + + for (idx = 0; idx < adapter->num_mem_types; idx++) { + struct memory_type_mapping *entry = + &adapter->mem_type_mapping_tbl[idx]; + + if (entry->mem_ptr) { + strcpy(fw_dump_ptr, "========Start dump "); + fw_dump_ptr += strlen("========Start dump "); + + strcpy(fw_dump_ptr, entry->mem_name); + fw_dump_ptr += strlen(entry->mem_name); + + strcpy(fw_dump_ptr, "========\n"); + fw_dump_ptr += strlen("========\n"); + + memcpy(fw_dump_ptr, entry->mem_ptr, entry->mem_size); + fw_dump_ptr += entry->mem_size; + + strcpy(fw_dump_ptr, "\n========End dump========\n"); + fw_dump_ptr += strlen("\n========End dump========\n"); + } + } + + /* device dump data will be freed in
device coredump release function + * after 5 min + */ + dev_coredumpv(adapter->dev, dump_data, dump_len, GFP_KERNEL); + mwifiex_dbg(adapter, MSG, + "== mwifiex dump information to /sys/class/devcoredump end"); + +done: + for (idx = 0; idx < adapter->num_mem_types; idx++) { + struct memory_type_mapping *entry = + &adapter->mem_type_mapping_tbl[idx]; + + if (entry->mem_ptr) { + vfree(entry->mem_ptr); + entry->mem_ptr = NULL; + } + entry->mem_size = 0; + } + + if (adapter->drv_info_dump) { + vfree(adapter->drv_info_dump); + adapter->drv_info_dump = NULL; + adapter->drv_info_size = 0; + } } -EXPORT_SYMBOL_GPL(mwifiex_dump_drv_info); +EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump); /* * CFG802.11 network device handler for statistics retrieval. @@ -1229,21 +1339,24 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem) } } - dev_dbg(adapter->dev, "cmd: calling mwifiex_shutdown_drv...\n"); + mwifiex_dbg(adapter, CMD, + "cmd: calling mwifiex_shutdown_drv...\n"); adapter->init_wait_q_woken = false; if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS) wait_event_interruptible(adapter->init_wait_q, adapter->init_wait_q_woken); - dev_dbg(adapter->dev, "cmd: mwifiex_shutdown_drv done\n"); + mwifiex_dbg(adapter, CMD, + "cmd: mwifiex_shutdown_drv done\n"); if (atomic_read(&adapter->rx_pending) || atomic_read(&adapter->tx_pending) || atomic_read(&adapter->cmd_pending)) { - dev_err(adapter->dev, "rx_pending=%d, tx_pending=%d, " - "cmd_pending=%d\n", - atomic_read(&adapter->rx_pending), - atomic_read(&adapter->tx_pending), - atomic_read(&adapter->cmd_pending)); + mwifiex_dbg(adapter, ERROR, + "rx_pending=%d, tx_pending=%d,\t" + "cmd_pending=%d\n", + atomic_read(&adapter->rx_pending), + atomic_read(&adapter->tx_pending), + atomic_read(&adapter->cmd_pending)); } for (i = 0; i < adapter->priv_num; i++) { @@ -1263,11 +1376,13 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem) wiphy_free(adapter->wiphy); /* Unregister device */ - dev_dbg(adapter->dev, "info: unregister device\n"); + mwifiex_dbg(adapter, INFO, + "info: unregister device\n"); if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); /* Free adapter structure */ - dev_dbg(adapter->dev, "info: free adapter\n"); + mwifiex_dbg(adapter, INFO, + "info: free adapter\n"); mwifiex_free_adapter(adapter); exit_remove: diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index fe1256044a6c..5a6c1c76b33b 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -36,6 +36,7 @@ #include #include #include +#include <linux/devcoredump.h> #include "decl.h" #include "ioctl.h" @@ -147,6 +148,54 @@ enum { /* Address alignment */ #define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1)) +/** + * enum mwifiex_debug_level - marvell wifi debug level + */ +enum MWIFIEX_DEBUG_LEVEL { + MWIFIEX_DBG_MSG = 0x00000001, + MWIFIEX_DBG_FATAL = 0x00000002, + MWIFIEX_DBG_ERROR = 0x00000004, + MWIFIEX_DBG_DATA = 0x00000008, + MWIFIEX_DBG_CMD = 0x00000010, + MWIFIEX_DBG_EVENT = 0x00000020, + MWIFIEX_DBG_INTR = 0x00000040, + MWIFIEX_DBG_IOCTL = 0x00000080, + + MWIFIEX_DBG_MPA_D = 0x00008000, + MWIFIEX_DBG_DAT_D = 0x00010000, + MWIFIEX_DBG_CMD_D = 0x00020000, + MWIFIEX_DBG_EVT_D = 0x00040000, + MWIFIEX_DBG_FW_D = 0x00080000, + MWIFIEX_DBG_IF_D = 0x00100000, + + MWIFIEX_DBG_ENTRY = 0x10000000, + MWIFIEX_DBG_WARN = 0x20000000, + MWIFIEX_DBG_INFO = 0x40000000, + MWIFIEX_DBG_DUMP = 0x80000000, + + MWIFIEX_DBG_ANY = 0xffffffff }; + +#define
MWIFIEX_DEFAULT_DEBUG_MASK (MWIFIEX_DBG_MSG | \ + MWIFIEX_DBG_FATAL | \ + MWIFIEX_DBG_ERROR) + +#define mwifiex_dbg(adapter, dbg_mask, fmt, args...) \ +do { \ + if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask) \ + if ((adapter)->dev) \ + dev_info((adapter)->dev, fmt, ## args); \ +} while (0) + +#define DEBUG_DUMP_DATA_MAX_LEN 128 +#define mwifiex_dbg_dump(adapter, dbg_mask, str, buf, len) \ +do { \ + if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask) \ + print_hex_dump(KERN_DEBUG, str, \ + DUMP_PREFIX_OFFSET, 16, 1, \ + buf, len, false); \ +} while (0) + struct mwifiex_dbg { u32 num_cmd_host_to_card_failure; u32 num_cmd_sleep_cfm_host_to_card_failure; @@ -451,7 +500,7 @@ enum rdwr_status { }; enum mwifiex_iface_work_flags { - MWIFIEX_IFACE_WORK_FW_DUMP, + MWIFIEX_IFACE_WORK_DEVICE_DUMP, MWIFIEX_IFACE_WORK_CARD_RESET, }; @@ -611,6 +660,7 @@ struct mwifiex_private { struct delayed_work dfs_chan_sw_work; struct cfg80211_beacon_data beacon_after; struct mwifiex_11h_intf_state state_11h; + struct mwifiex_ds_mem_rw mem_rw; }; @@ -740,8 +790,8 @@ struct mwifiex_if_ops { int (*init_fw_port) (struct mwifiex_adapter *); int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); void (*card_reset) (struct mwifiex_adapter *); - void (*fw_dump)(struct mwifiex_adapter *); int (*reg_dump)(struct mwifiex_adapter *, char *); + void (*device_dump)(struct mwifiex_adapter *); int (*clean_pcie_ring) (struct mwifiex_adapter *adapter); void (*iface_work)(struct work_struct *work); void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter); @@ -750,6 +800,7 @@ struct mwifiex_if_ops { struct mwifiex_adapter { u8 iface_type; + unsigned int debug_mask; struct mwifiex_iface_comb iface_limit; struct mwifiex_iface_comb curr_iface_comb; struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM]; @@ -900,7 +951,6 @@ struct mwifiex_adapter { u8 key_api_major_ver, key_api_minor_ver; struct memory_type_mapping *mem_type_mapping_tbl; u8 num_mem_types; - u8 curr_mem_idx; void *drv_info_dump; u32 drv_info_size; bool scan_chan_gap_enabled; @@ -1434,7 +1484,8 @@ void mwifiex_hist_data_add(struct mwifiex_private *priv, u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv, u8 rx_rate, u8 ht_info); -void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter); +void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter); +void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter); void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags); void mwifiex_queue_main_work(struct mwifiex_adapter *adapter); diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index bcc7751d883c..77b9055a2d14 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -57,7 +57,7 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, mapping.addr = pci_map_single(card->dev, skb->data, size, flags); if (pci_dma_mapping_error(card->dev, mapping.addr)) { - dev_err(adapter->dev, "failed to map pci memory!\n"); + mwifiex_dbg(adapter, ERROR, "failed to map pci memory!\n"); return -1; } mapping.len = size; @@ -89,8 +89,9 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter) if (card->sleep_cookie_vbase) { cookie_addr = (u32 *)card->sleep_cookie_vbase; - dev_dbg(adapter->dev, "info: ACCESS_HW: sleep cookie=0x%x\n", - *cookie_addr); + mwifiex_dbg(adapter, INFO, + "info: ACCESS_HW: sleep cookie=0x%x\n", + *cookie_addr); if (*cookie_addr == FW_AWAKE_COOKIE) return true; } @@ -164,7 +165,8 @@ static int mwifiex_pcie_resume(struct device 
*dev) adapter = card->adapter; if (!adapter->is_suspended) { - dev_warn(adapter->dev, "Device already resumed\n"); + mwifiex_dbg(adapter, WARN, + "Device already resumed\n"); return 0; } @@ -361,16 +363,16 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter, sleep_cookie = *(u32 *)buffer; if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) { - dev_dbg(adapter->dev, - "sleep cookie found at count %d\n", count); + mwifiex_dbg(adapter, INFO, + "sleep cookie found at count %d\n", count); break; } usleep_range(20, 30); } if (count >= max_delay_loop_cnt) - dev_dbg(adapter->dev, - "max count reached while accessing sleep cookie\n"); + mwifiex_dbg(adapter, INFO, + "max count reached while accessing sleep cookie\n"); } /* This function wakes up the card by reading fw_status register. */ @@ -380,20 +382,23 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; - dev_dbg(adapter->dev, "event: Wakeup device...\n"); + mwifiex_dbg(adapter, EVENT, + "event: Wakeup device...\n"); if (reg->sleep_cookie) mwifiex_pcie_dev_wakeup_delay(adapter); /* Reading fw_status register will wakeup device */ if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) { - dev_warn(adapter->dev, "Reading fw_status register failed\n"); + mwifiex_dbg(adapter, ERROR, + "Reading fw_status register failed\n"); return -1; } if (reg->sleep_cookie) { mwifiex_pcie_dev_wakeup_delay(adapter); - dev_dbg(adapter->dev, "PCIE wakeup: Setting PS_STATE_AWAKE\n"); + mwifiex_dbg(adapter, INFO, + "PCIE wakeup: Setting PS_STATE_AWAKE\n"); adapter->ps_state = PS_STATE_AWAKE; } @@ -407,7 +412,8 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) */ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) { - dev_dbg(adapter->dev, "cmd: Wakeup device completed\n"); + mwifiex_dbg(adapter, CMD, + "cmd: Wakeup device completed\n"); return 0; } @@ -423,7 +429,8 @@ static int mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter) if (mwifiex_pcie_ok_to_access_hw(adapter)) { if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, 0x00000000)) { - dev_warn(adapter->dev, "Disable host interrupt failed\n"); + mwifiex_dbg(adapter, ERROR, + "Disable host interrupt failed\n"); return -1; } } @@ -443,7 +450,8 @@ static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter) /* Simply write the mask to the register */ if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, HOST_INTR_MASK)) { - dev_warn(adapter->dev, "Enable host interrupt failed\n"); + mwifiex_dbg(adapter, ERROR, + "Enable host interrupt failed\n"); return -1; } } @@ -499,8 +507,8 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter) skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE, GFP_KERNEL | GFP_DMA); if (!skb) { - dev_err(adapter->dev, - "Unable to allocate skb for RX ring.\n"); + mwifiex_dbg(adapter, ERROR, + "Unable to allocate skb for RX ring.\n"); kfree(card->rxbd_ring_vbase); return -ENOMEM; } @@ -512,10 +520,10 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter) buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); - dev_dbg(adapter->dev, - "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n", - skb, skb->len, skb->data, (u32)buf_pa, - (u32)((u64)buf_pa >> 32)); + mwifiex_dbg(adapter, INFO, + "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n", + skb, skb->len, skb->data, (u32)buf_pa, + (u32)((u64)buf_pa >> 32)); card->rx_buf_list[i] = skb; if (reg->pfu_enabled) { @@ 
-556,8 +564,8 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter) /* Allocate skb here so that firmware can DMA data from it */ skb = dev_alloc_skb(MAX_EVENT_SIZE); if (!skb) { - dev_err(adapter->dev, - "Unable to allocate skb for EVENT buf.\n"); + mwifiex_dbg(adapter, ERROR, + "Unable to allocate skb for EVENT buf.\n"); kfree(card->evtbd_ring_vbase); return -ENOMEM; } @@ -569,10 +577,10 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter) buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); - dev_dbg(adapter->dev, - "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n", - skb, skb->len, skb->data, (u32)buf_pa, - (u32)((u64)buf_pa >> 32)); + mwifiex_dbg(adapter, EVENT, + "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n", + skb, skb->len, skb->data, (u32)buf_pa, + (u32)((u64)buf_pa >> 32)); card->evt_buf_list[i] = skb; card->evtbd_ring[i] = (void *)(card->evtbd_ring_vbase + @@ -715,21 +723,23 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter) card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) * MWIFIEX_MAX_TXRX_BD; - dev_dbg(adapter->dev, "info: txbd_ring: Allocating %d bytes\n", - card->txbd_ring_size); + mwifiex_dbg(adapter, INFO, + "info: txbd_ring: Allocating %d bytes\n", + card->txbd_ring_size); card->txbd_ring_vbase = pci_alloc_consistent(card->dev, card->txbd_ring_size, &card->txbd_ring_pbase); if (!card->txbd_ring_vbase) { - dev_err(adapter->dev, - "allocate consistent memory (%d bytes) failed!\n", - card->txbd_ring_size); + mwifiex_dbg(adapter, ERROR, + "allocate consistent memory (%d bytes) failed!\n", + card->txbd_ring_size); return -ENOMEM; } - dev_dbg(adapter->dev, - "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n", - card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase, - (u32)((u64)card->txbd_ring_pbase >> 32), card->txbd_ring_size); + mwifiex_dbg(adapter, DATA, + "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n", + card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase, + (u32)((u64)card->txbd_ring_pbase >> 32), + card->txbd_ring_size); return mwifiex_init_txq_ring(adapter); } @@ -777,23 +787,24 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter) card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) * MWIFIEX_MAX_TXRX_BD; - dev_dbg(adapter->dev, "info: rxbd_ring: Allocating %d bytes\n", - card->rxbd_ring_size); + mwifiex_dbg(adapter, INFO, + "info: rxbd_ring: Allocating %d bytes\n", + card->rxbd_ring_size); card->rxbd_ring_vbase = pci_alloc_consistent(card->dev, card->rxbd_ring_size, &card->rxbd_ring_pbase); if (!card->rxbd_ring_vbase) { - dev_err(adapter->dev, - "allocate consistent memory (%d bytes) failed!\n", - card->rxbd_ring_size); + mwifiex_dbg(adapter, ERROR, + "allocate consistent memory (%d bytes) failed!\n", + card->rxbd_ring_size); return -ENOMEM; } - dev_dbg(adapter->dev, - "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n", - card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase, - (u32)((u64)card->rxbd_ring_pbase >> 32), - card->rxbd_ring_size); + mwifiex_dbg(adapter, DATA, + "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n", + card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase, + (u32)((u64)card->rxbd_ring_pbase >> 32), + card->rxbd_ring_size); return mwifiex_init_rxq_ring(adapter); } @@ -840,23 +851,24 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter) card->evtbd_ring_size = sizeof(struct mwifiex_evt_buf_desc) * MWIFIEX_MAX_EVT_BD; - dev_dbg(adapter->dev, "info: evtbd_ring: Allocating %d bytes\n", + 
mwifiex_dbg(adapter, INFO, + "info: evtbd_ring: Allocating %d bytes\n", card->evtbd_ring_size); card->evtbd_ring_vbase = pci_alloc_consistent(card->dev, card->evtbd_ring_size, &card->evtbd_ring_pbase); if (!card->evtbd_ring_vbase) { - dev_err(adapter->dev, - "allocate consistent memory (%d bytes) failed!\n", - card->evtbd_ring_size); + mwifiex_dbg(adapter, ERROR, + "allocate consistent memory (%d bytes) failed!\n", + card->evtbd_ring_size); return -ENOMEM; } - dev_dbg(adapter->dev, - "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n", - card->evtbd_ring_vbase, (u32)card->evtbd_ring_pbase, - (u32)((u64)card->evtbd_ring_pbase >> 32), - card->evtbd_ring_size); + mwifiex_dbg(adapter, EVENT, + "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n", + card->evtbd_ring_vbase, (u32)card->evtbd_ring_pbase, + (u32)((u64)card->evtbd_ring_pbase >> 32), + card->evtbd_ring_size); return mwifiex_pcie_init_evt_ring(adapter); } @@ -895,8 +907,8 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter) /* Allocate memory for receiving command response data */ skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE); if (!skb) { - dev_err(adapter->dev, - "Unable to allocate skb for command response data.\n"); + mwifiex_dbg(adapter, ERROR, + "Unable to allocate skb for command response data.\n"); return -ENOMEM; } skb_put(skb, MWIFIEX_UPLD_SIZE); @@ -944,14 +956,16 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter) card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32), &card->sleep_cookie_pbase); if (!card->sleep_cookie_vbase) { - dev_err(adapter->dev, "pci_alloc_consistent failed!\n"); + mwifiex_dbg(adapter, ERROR, + "pci_alloc_consistent failed!\n"); return -ENOMEM; } /* Init val of Sleep Cookie */ *(u32 *)card->sleep_cookie_vbase = FW_AWAKE_COOKIE; - dev_dbg(adapter->dev, "alloc_scook: sleep cookie=0x%x\n", - *((u32 *)card->sleep_cookie_vbase)); + mwifiex_dbg(adapter, INFO, + "alloc_scook: sleep cookie=0x%x\n", + *((u32 *)card->sleep_cookie_vbase)); return 0; } @@ -993,8 +1007,8 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter) */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DNLD_RDY)) { - dev_err(adapter->dev, - "failed to assert dnld-rdy interrupt.\n"); + mwifiex_dbg(adapter, ERROR, + "failed to assert dnld-rdy interrupt.\n"); return -1; } } @@ -1018,13 +1032,14 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter) /* Read the TX ring read pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) { - dev_err(adapter->dev, - "SEND COMP: failed to read reg->tx_rdptr\n"); + mwifiex_dbg(adapter, ERROR, + "SEND COMP: failed to read reg->tx_rdptr\n"); return -1; } - dev_dbg(adapter->dev, "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n", - card->txbd_rdptr, rdptr); + mwifiex_dbg(adapter, DATA, + "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n", + card->txbd_rdptr, rdptr); num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr; /* free from previous txbd_rdptr to current txbd_rdptr */ @@ -1038,9 +1053,9 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter) skb = card->tx_buf_list[wrdoneidx]; if (skb) { - dev_dbg(adapter->dev, - "SEND COMP: Detach skb %p at txbd_rdidx=%d\n", - skb, wrdoneidx); + mwifiex_dbg(adapter, DATA, + "SEND COMP: Detach skb %p at txbd_rdidx=%d\n", + skb, wrdoneidx); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); @@ -1112,8 +1127,9 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, 
__le16 *tmp; if (!(skb->data && skb->len)) { - dev_err(adapter->dev, "%s(): invalid parameter <%p, %#x>\n", - __func__, skb->data, skb->len); + mwifiex_dbg(adapter, ERROR, + "%s(): invalid parameter <%p, %#x>\n", + __func__, skb->data, skb->len); return -1; } @@ -1121,7 +1137,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, mwifiex_pm_wakeup_card(adapter); num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr; - dev_dbg(adapter->dev, "info: SEND DATA: <Rd: %#x, Wr: %#x>\n", + mwifiex_dbg(adapter, DATA, + "info: SEND DATA: <Rd: %#x, Wr: %#x>\n", card->txbd_rdptr, card->txbd_wrptr); if (mwifiex_pcie_txbd_not_full(card)) { u8 *payload; @@ -1175,39 +1192,40 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, /* Write the TX ring write pointer in to reg->tx_wrptr */ if (mwifiex_write_reg(adapter, reg->tx_wrptr, card->txbd_wrptr | rx_val)) { - dev_err(adapter->dev, - "SEND DATA: failed to write reg->tx_wrptr\n"); + mwifiex_dbg(adapter, ERROR, + "SEND DATA: failed to write reg->tx_wrptr\n"); ret = -1; goto done_unmap; } if ((mwifiex_pcie_txbd_not_full(card)) && tx_param->next_pkt_len) { /* have more packets and TxBD still can hold more */ - dev_dbg(adapter->dev, - "SEND DATA: delay dnld-rdy interrupt.\n"); + mwifiex_dbg(adapter, DATA, + "SEND DATA: delay dnld-rdy interrupt.\n"); adapter->data_sent = false; } else { /* Send the TX ready interrupt */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DNLD_RDY)) { - dev_err(adapter->dev, - "SEND DATA: failed to assert dnld-rdy interrupt.\n"); + mwifiex_dbg(adapter, ERROR, + "SEND DATA: failed to assert dnld-rdy interrupt.\n"); ret = -1; goto done_unmap; } } - dev_dbg(adapter->dev, "info: SEND DATA: Updated <Rd: %#x, Wr: %#x> and sent packet to firmware successfully\n", - card->txbd_rdptr, card->txbd_wrptr); + mwifiex_dbg(adapter, DATA, + "info: SEND DATA: Updated <Rd: %#x, Wr: %#x> and sent packet to firmware successfully\n", + card->txbd_rdptr, card->txbd_wrptr); } else { - dev_dbg(adapter->dev, - "info: TX Ring full, can't send packets to fw\n"); + mwifiex_dbg(adapter, DATA, + "info: TX Ring full, can't send packets to fw\n"); adapter->data_sent = true; /* Send the TX ready interrupt */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DNLD_RDY)) - dev_err(adapter->dev, - "SEND DATA: failed to assert door-bell intr\n"); + mwifiex_dbg(adapter, ERROR, + "SEND DATA: failed to assert door-bell intr\n"); return -EBUSY; } @@ -1243,8 +1261,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) /* Read the RX ring Write pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) { - dev_err(adapter->dev, - "RECV DATA: failed to read reg->rx_wrptr\n"); + mwifiex_dbg(adapter, ERROR, + "RECV DATA: failed to read reg->rx_wrptr\n"); ret = -1; goto done; } @@ -1277,15 +1295,15 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) rx_len = le16_to_cpu(pkt_len); if (WARN_ON(rx_len <= INTF_HEADER_LEN || rx_len > MWIFIEX_RX_DATA_BUF_SIZE)) { - dev_err(adapter->dev, - "Invalid RX len %d, Rd=%#x, Wr=%#x\n", - rx_len, card->rxbd_rdptr, wrptr); + mwifiex_dbg(adapter, ERROR, + "Invalid RX len %d, Rd=%#x, Wr=%#x\n", + rx_len, card->rxbd_rdptr, wrptr); dev_kfree_skb_any(skb_data); } else { skb_put(skb_data, rx_len); - dev_dbg(adapter->dev, - "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n", - card->rxbd_rdptr, wrptr, rx_len); + mwifiex_dbg(adapter, DATA, + "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n", + card->rxbd_rdptr, wrptr, rx_len); skb_pull(skb_data, INTF_HEADER_LEN); if 
(adapter->rx_work_enabled) { skb_queue_tail(&adapter->rx_data_q, skb_data); @@ -1299,8 +1317,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE, GFP_KERNEL | GFP_DMA); if (!skb_tmp) { - dev_err(adapter->dev, - "Unable to allocate skb.\n"); + mwifiex_dbg(adapter, ERROR, + "Unable to allocate skb.\n"); return -ENOMEM; } @@ -1311,9 +1329,9 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp); - dev_dbg(adapter->dev, - "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n", - skb_tmp, rd_index); + mwifiex_dbg(adapter, INFO, + "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n", + skb_tmp, rd_index); card->rx_buf_list[rd_index] = skb_tmp; if (reg->pfu_enabled) { @@ -1336,28 +1354,29 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) reg->rx_rollover_ind) ^ reg->rx_rollover_ind); } - dev_dbg(adapter->dev, "info: RECV DATA: <Rd: %#x, Wr: %#x>\n", - card->rxbd_rdptr, wrptr); + mwifiex_dbg(adapter, DATA, + "info: RECV DATA: <Rd: %#x, Wr: %#x>\n", + card->rxbd_rdptr, wrptr); tx_val = card->txbd_wrptr & reg->tx_wrap_mask; /* Write the RX ring read pointer in to reg->rx_rdptr */ if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr | tx_val)) { - dev_err(adapter->dev, - "RECV DATA: failed to write reg->rx_rdptr\n"); + mwifiex_dbg(adapter, DATA, + "RECV DATA: failed to write reg->rx_rdptr\n"); ret = -1; goto done; } /* Read the RX ring Write pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) { - dev_err(adapter->dev, - "RECV DATA: failed to read reg->rx_wrptr\n"); + mwifiex_dbg(adapter, ERROR, + "RECV DATA: failed to read reg->rx_wrptr\n"); ret = -1; goto done; } - dev_dbg(adapter->dev, - "info: RECV DATA: Rcvd packet from fw successfully\n"); + mwifiex_dbg(adapter, DATA, + "info: RECV DATA: Rcvd packet from fw successfully\n"); card->rxbd_wrptr = wrptr; } @@ -1376,9 +1395,9 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (!(skb->data && skb->len)) { - dev_err(adapter->dev, - "Invalid parameter in %s <%p. 
len %d>\n", + __func__, skb->data, skb->len); return -1; } @@ -1391,9 +1410,9 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) * address scratch register */ if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa)) { - dev_err(adapter->dev, - "%s: failed to write download command to boot code.\n", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s: failed to write download command to boot code.\n", + __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); return -1; } @@ -1403,18 +1422,18 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) */ if (mwifiex_write_reg(adapter, reg->cmd_addr_hi, (u32)((u64)buf_pa >> 32))) { - dev_err(adapter->dev, - "%s: failed to write download command to boot code.\n", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s: failed to write download command to boot code.\n", + __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); return -1; } /* Write the command length to cmd_size scratch register */ if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) { - dev_err(adapter->dev, - "%s: failed to write command len to cmd_size scratch reg\n", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s: failed to write command len to cmd_size scratch reg\n", + __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); return -1; } @@ -1422,8 +1441,8 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) /* Ring the door bell */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DOOR_BELL)) { - dev_err(adapter->dev, - "%s: failed to assert door-bell intr\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: failed to assert door-bell intr\n", __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); return -1; } @@ -1443,8 +1462,8 @@ static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter) /* Write the RX ring read pointer in to reg->rx_rdptr */ if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr | tx_wrap)) { - dev_err(adapter->dev, - "RECV DATA: failed to write reg->rx_rdptr\n"); + mwifiex_dbg(adapter, ERROR, + "RECV DATA: failed to write reg->rx_rdptr\n"); return -1; } return 0; @@ -1462,15 +1481,16 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) u8 *payload = (u8 *)skb->data; if (!(skb->data && skb->len)) { - dev_err(adapter->dev, "Invalid parameter in %s <%p, %#x>\n", - __func__, skb->data, skb->len); + mwifiex_dbg(adapter, ERROR, + "Invalid parameter in %s <%p, %#x>\n", + __func__, skb->data, skb->len); return -1; } /* Make sure a command response buffer is available */ if (!card->cmdrsp_buf) { - dev_err(adapter->dev, - "No response buffer available, send command failed\n"); + mwifiex_dbg(adapter, ERROR, + "No response buffer available, send command failed\n"); return -EBUSY; } @@ -1503,8 +1523,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) address */ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, (u32)cmdrsp_buf_pa)) { - dev_err(adapter->dev, - "Failed to write download cmd to boot code.\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to write download cmd to boot code.\n"); ret = -1; goto done; } @@ -1512,8 +1532,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) address */ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, (u32)((u64)cmdrsp_buf_pa >> 32))) { - dev_err(adapter->dev, - "Failed to write download cmd to boot code.\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to write download cmd to 
boot code.\n"); ret = -1; goto done; } @@ -1523,16 +1543,16 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) /* Write the lower 32bits of the physical address to reg->cmd_addr_lo */ if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)cmd_buf_pa)) { - dev_err(adapter->dev, - "Failed to write download cmd to boot code.\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to write download cmd to boot code.\n"); ret = -1; goto done; } /* Write the upper 32bits of the physical address to reg->cmd_addr_hi */ if (mwifiex_write_reg(adapter, reg->cmd_addr_hi, (u32)((u64)cmd_buf_pa >> 32))) { - dev_err(adapter->dev, - "Failed to write download cmd to boot code.\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to write download cmd to boot code.\n"); ret = -1; goto done; } @@ -1540,8 +1560,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) /* Write the command length to reg->cmd_size */ if (mwifiex_write_reg(adapter, reg->cmd_size, card->cmd_buf->len)) { - dev_err(adapter->dev, - "Failed to write cmd len to reg->cmd_size\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to write cmd len to reg->cmd_size\n"); ret = -1; goto done; } @@ -1549,8 +1569,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) /* Ring the door bell */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DOOR_BELL)) { - dev_err(adapter->dev, - "Failed to assert door-bell intr\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to assert door-bell intr\n"); ret = -1; goto done; } @@ -1574,7 +1594,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) u16 rx_len; __le16 pkt_len; - dev_dbg(adapter->dev, "info: Rx CMD Response\n"); + mwifiex_dbg(adapter, CMD, + "info: Rx CMD Response\n"); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE); @@ -1598,8 +1619,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_SLEEP_CFM_DONE)) { - dev_warn(adapter->dev, - "Write register failed\n"); + mwifiex_dbg(adapter, ERROR, + "Write register failed\n"); return -1; } mwifiex_delay_for_sleep_cookie(adapter, @@ -1608,8 +1629,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) mwifiex_pcie_ok_to_access_hw(adapter)) usleep_range(50, 60); } else { - dev_err(adapter->dev, - "There is no command but got cmdrsp\n"); + mwifiex_dbg(adapter, ERROR, + "There is no command but got cmdrsp\n"); } memcpy(adapter->upld_buf, skb->data, min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); @@ -1628,15 +1649,15 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) will prevent firmware from writing to the same response buffer again. 
*/ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0)) { - dev_err(adapter->dev, - "cmd_done: failed to clear cmd_rsp_addr_lo\n"); + mwifiex_dbg(adapter, ERROR, + "cmd_done: failed to clear cmd_rsp_addr_lo\n"); return -1; } /* Write the upper 32bits of the cmdrsp buffer physical address */ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0)) { - dev_err(adapter->dev, - "cmd_done: failed to clear cmd_rsp_addr_hi\n"); + mwifiex_dbg(adapter, ERROR, + "cmd_done: failed to clear cmd_rsp_addr_hi\n"); return -1; } } @@ -1678,25 +1699,28 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) mwifiex_pm_wakeup_card(adapter); if (adapter->event_received) { - dev_dbg(adapter->dev, "info: Event being processed, " - "do not process this interrupt just yet\n"); + mwifiex_dbg(adapter, EVENT, + "info: Event being processed,\t" + "do not process this interrupt just yet\n"); return 0; } if (rdptr >= MWIFIEX_MAX_EVT_BD) { - dev_dbg(adapter->dev, "info: Invalid read pointer...\n"); + mwifiex_dbg(adapter, ERROR, + "info: Invalid read pointer...\n"); return -1; } /* Read the event ring write pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) { - dev_err(adapter->dev, - "EventReady: failed to read reg->evt_wrptr\n"); + mwifiex_dbg(adapter, ERROR, + "EventReady: failed to read reg->evt_wrptr\n"); return -1; } - dev_dbg(adapter->dev, "info: EventReady: Initial <Rd: 0x%x, Wr: 0x%x>\n", - card->evtbd_rdptr, wrptr); + mwifiex_dbg(adapter, EVENT, + "info: EventReady: Initial <Rd: 0x%x, Wr: 0x%x>\n", + card->evtbd_rdptr, wrptr); if (((wrptr & MWIFIEX_EVTBD_MASK) != (card->evtbd_rdptr & MWIFIEX_EVTBD_MASK)) || ((wrptr & reg->evt_rollover_ind) == @@ -1705,7 +1729,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) __le16 data_len = 0; u16 evt_len; - dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr); + mwifiex_dbg(adapter, INFO, + "info: Read Index: %d\n", rdptr); skb_cmd = card->evt_buf_list[rdptr]; mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE); @@ -1721,9 +1746,10 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) len is 2 bytes followed by type which is 2 bytes */ memcpy(&data_len, skb_cmd->data, sizeof(__le16)); evt_len = le16_to_cpu(data_len); - + skb_trim(skb_cmd, evt_len); skb_pull(skb_cmd, INTF_HEADER_LEN); - dev_dbg(adapter->dev, "info: Event length: %d\n", evt_len); + mwifiex_dbg(adapter, EVENT, + "info: Event length: %d\n", evt_len); if ((evt_len > 0) && (evt_len < MAX_EVENT_SIZE)) memcpy(adapter->event_body, skb_cmd->data + @@ -1740,8 +1766,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) } else { if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_EVENT_DONE)) { - dev_warn(adapter->dev, - "Write register failed\n"); + mwifiex_dbg(adapter, ERROR, + "Write register failed\n"); return -1; } } @@ -1766,15 +1792,16 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, return 0; if (rdptr >= MWIFIEX_MAX_EVT_BD) { - dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n", - rdptr); + mwifiex_dbg(adapter, ERROR, + "event_complete: Invalid rdptr 0x%x\n", + rdptr); return -EINVAL; } /* Read the event ring write pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) { - dev_err(adapter->dev, - "event_complete: failed to read reg->evt_wrptr\n"); + mwifiex_dbg(adapter, ERROR, + "event_complete: failed to read reg->evt_wrptr\n"); return -1; } @@ -1791,9 +1818,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter 
*adapter, desc->flags = 0; skb = NULL; } else { - dev_dbg(adapter->dev, - "info: ERROR: buf still valid at index %d, <%p, %p>\n", - rdptr, card->evt_buf_list[rdptr], skb); + mwifiex_dbg(adapter, ERROR, + "info: ERROR: buf still valid at index %d, <%p, %p>\n", + rdptr, card->evt_buf_list[rdptr], skb); } if ((++card->evtbd_rdptr & MWIFIEX_EVTBD_MASK) == MWIFIEX_MAX_EVT_BD) { @@ -1802,18 +1829,20 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, reg->evt_rollover_ind); } - dev_dbg(adapter->dev, "info: Updated <Rd: 0x%x, Wr: 0x%x>\n", - card->evtbd_rdptr, wrptr); + mwifiex_dbg(adapter, EVENT, + "info: Updated <Rd: 0x%x, Wr: 0x%x>\n", + card->evtbd_rdptr, wrptr); /* Write the event ring read pointer in to reg->evt_rdptr */ if (mwifiex_write_reg(adapter, reg->evt_rdptr, card->evtbd_rdptr)) { - dev_err(adapter->dev, - "event_complete: failed to read reg->evt_rdptr\n"); + mwifiex_dbg(adapter, ERROR, + "event_complete: failed to read reg->evt_rdptr\n"); return -1; } - dev_dbg(adapter->dev, "info: Check Events Again\n"); + mwifiex_dbg(adapter, EVENT, + "info: Check Events Again\n"); ret = mwifiex_pcie_process_event_ready(adapter); return ret; @@ -1840,17 +1869,18 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (!firmware || !firmware_len) { - dev_err(adapter->dev, - "No firmware image found! Terminating download\n"); + mwifiex_dbg(adapter, ERROR, + "No firmware image found! Terminating download\n"); return -1; } - dev_dbg(adapter->dev, "info: Downloading FW image (%d bytes)\n", - firmware_len); + mwifiex_dbg(adapter, INFO, + "info: Downloading FW image (%d bytes)\n", + firmware_len); if (mwifiex_pcie_disable_host_int(adapter)) { - dev_err(adapter->dev, - "%s: Disabling interrupts failed.\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: Disabling interrupts failed.\n", __func__); return -1; } @@ -1872,8 +1902,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, ret = mwifiex_read_reg(adapter, reg->cmd_size, &len); if (ret) { - dev_warn(adapter->dev, - "Failed reading len from boot code\n"); + mwifiex_dbg(adapter, FATAL, + "Failed reading len from boot code\n"); goto done; } if (len) @@ -1884,8 +1914,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, if (!len) { break; } else if (len > MWIFIEX_UPLD_SIZE) { - pr_err("FW download failure @ %d, invalid length %d\n", - offset, len); + mwifiex_dbg(adapter, ERROR, + "FW download failure @ %d, invalid length %d\n", + offset, len); ret = -1; goto done; } @@ -1895,14 +1926,16 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, if (len & BIT(0)) { block_retry_cnt++; if (block_retry_cnt > MAX_WRITE_IOMEM_RETRY) { - pr_err("FW download failure @ %d, over max " - "retry count\n", offset); + mwifiex_dbg(adapter, ERROR, + "FW download failure @ %d, over max\t" + "retry count\n", offset); ret = -1; goto done; } - dev_err(adapter->dev, "FW CRC error indicated by the " - "helper: len = 0x%04X, txlen = %d\n", - len, txlen); + mwifiex_dbg(adapter, ERROR, + "FW CRC error indicated by the\t" + "helper: len = 0x%04X, txlen = %d\n", + len, txlen); len &= ~BIT(0); /* Setting this to 0 to resend from same offset */ txlen = 0; @@ -1913,7 +1946,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, if (firmware_len - offset < txlen) txlen = firmware_len - offset; - dev_dbg(adapter->dev, "."); + mwifiex_dbg(adapter, INFO, "."); tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) / card->pcie.blksz_fw_dl; @@ -1927,8 +1960,8 @@ static 
int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, /* Send the boot command to device */ if (mwifiex_pcie_send_boot_cmd(adapter, skb)) { - dev_err(adapter->dev, - "Failed to send firmware download command\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to send firmware download command\n"); ret = -1; goto done; } @@ -1937,9 +1970,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, do { if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS, &ireg_intr)) { - dev_err(adapter->dev, "%s: Failed to read " - "interrupt status during fw dnld.\n", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s: Failed to read\t" + "interrupt status during fw dnld.\n", + __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); ret = -1; @@ -1953,8 +1987,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, offset += txlen; } while (true); - dev_notice(adapter->dev, - "info: FW download over, size %d bytes\n", offset); + mwifiex_dbg(adapter, MSG, + "info: FW download over, size %d bytes\n", offset); ret = 0; @@ -1980,15 +2014,17 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) /* Mask spurios interrupts */ if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS_MASK, HOST_INTR_MASK)) { - dev_warn(adapter->dev, "Write register failed\n"); + mwifiex_dbg(adapter, ERROR, + "Write register failed\n"); return -1; } - dev_dbg(adapter->dev, "Setting driver ready signature\n"); + mwifiex_dbg(adapter, INFO, + "Setting driver ready signature\n"); if (mwifiex_write_reg(adapter, reg->drv_rdy, FIRMWARE_READY_PCIE)) { - dev_err(adapter->dev, - "Failed to write driver ready signature\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to write driver ready signature\n"); return -1; } @@ -2015,12 +2051,13 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) &winner_status)) ret = -1; else if (!winner_status) { - dev_err(adapter->dev, "PCI-E is the winner\n"); + mwifiex_dbg(adapter, INFO, + "PCI-E is the winner\n"); adapter->winner = 1; } else { - dev_err(adapter->dev, - "PCI-E is not the winner <%#x,%d>, exit dnld\n", - ret, adapter->winner); + mwifiex_dbg(adapter, ERROR, + "PCI-E is not the winner <%#x,%d>, exit dnld\n", + ret, adapter->winner); } } @@ -2039,7 +2076,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) return; if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) { - dev_warn(adapter->dev, "Read register failed\n"); + mwifiex_dbg(adapter, ERROR, "Read register failed\n"); return; } @@ -2050,7 +2087,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) /* Clear the pending interrupts */ if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS, ~pcie_ireg)) { - dev_warn(adapter->dev, "Write register failed\n"); + mwifiex_dbg(adapter, ERROR, + "Write register failed\n"); return; } spin_lock_irqsave(&adapter->int_lock, flags); @@ -2133,21 +2171,24 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) while (pcie_ireg & HOST_INTR_MASK) { if (pcie_ireg & HOST_INTR_DNLD_DONE) { pcie_ireg &= ~HOST_INTR_DNLD_DONE; - dev_dbg(adapter->dev, "info: TX DNLD Done\n"); + mwifiex_dbg(adapter, INTR, + "info: TX DNLD Done\n"); ret = mwifiex_pcie_send_data_complete(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_UPLD_RDY) { pcie_ireg &= ~HOST_INTR_UPLD_RDY; - dev_dbg(adapter->dev, "info: Rx DATA\n"); + mwifiex_dbg(adapter, INTR, + "info: Rx DATA\n"); ret = mwifiex_pcie_process_recv_data(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_EVENT_RDY) { 
pcie_ireg &= ~HOST_INTR_EVENT_RDY; - dev_dbg(adapter->dev, "info: Rx EVENT\n"); + mwifiex_dbg(adapter, INTR, + "info: Rx EVENT\n"); ret = mwifiex_pcie_process_event_ready(adapter); if (ret) return ret; @@ -2156,8 +2197,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) if (pcie_ireg & HOST_INTR_CMD_DONE) { pcie_ireg &= ~HOST_INTR_CMD_DONE; if (adapter->cmd_sent) { - dev_dbg(adapter->dev, - "info: CMD sent Interrupt\n"); + mwifiex_dbg(adapter, INTR, + "info: CMD sent Interrupt\n"); adapter->cmd_sent = false; } /* Handle command response */ @@ -2169,8 +2210,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) if (mwifiex_pcie_ok_to_access_hw(adapter)) { if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) { - dev_warn(adapter->dev, - "Read register failed\n"); + mwifiex_dbg(adapter, ERROR, + "Read register failed\n"); return -1; } @@ -2178,16 +2219,17 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS, ~pcie_ireg)) { - dev_warn(adapter->dev, - "Write register failed\n"); + mwifiex_dbg(adapter, ERROR, + "Write register failed\n"); return -1; } } } } - dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n", - adapter->cmd_sent, adapter->data_sent); + mwifiex_dbg(adapter, INTR, + "info: cmd_sent=%d data_sent=%d\n", + adapter->cmd_sent, adapter->data_sent); if (adapter->ps_state != PS_STATE_SLEEP) mwifiex_pcie_enable_host_int(adapter); @@ -2209,7 +2251,8 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type, struct mwifiex_tx_param *tx_param) { if (!skb) { - dev_err(adapter->dev, "Passed NULL skb to %s\n", __func__); + mwifiex_dbg(adapter, ERROR, + "Passed NULL skb to %s\n", __func__); return -1; } @@ -2232,7 +2275,8 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY); if (ret) { - dev_err(adapter->dev, "PCIE write err\n"); + mwifiex_dbg(adapter, ERROR, + "PCIE write err\n"); return RDWR_STATUS_FAILURE; } @@ -2243,24 +2287,25 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) if (doneflag && ctrl_data == doneflag) return RDWR_STATUS_DONE; if (ctrl_data != FW_DUMP_HOST_READY) { - dev_info(adapter->dev, - "The ctrl reg was changed, re-try again!\n"); + mwifiex_dbg(adapter, WARN, + "The ctrl reg was changed, re-try again!\n"); ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY); if (ret) { - dev_err(adapter->dev, "PCIE write err\n"); + mwifiex_dbg(adapter, ERROR, + "PCIE write err\n"); return RDWR_STATUS_FAILURE; } } usleep_range(100, 200); } - dev_err(adapter->dev, "Fail to pull ctrl_data\n"); + mwifiex_dbg(adapter, ERROR, "Fail to pull ctrl_data\n"); return RDWR_STATUS_FAILURE; } /* This function dump firmware memory to file */ -static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) +static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *creg = card->pcie.reg; @@ -2269,7 +2314,6 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) enum rdwr_status stat; u32 memory_size; int ret; - static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL }; if (!card->pcie.can_dump_fw) return; @@ -2284,12 +2328,12 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) entry->mem_size = 0; } - dev_info(adapter->dev, "== mwifiex firmware dump start ==\n"); 
+ mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump start ==\n"); /* Read the number of the memories which will dump */ stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); if (stat == RDWR_STATUS_FAILURE) - goto done; + return; reg = creg->fw_dump_start; mwifiex_read_reg_byte(adapter, reg, &dump_num); @@ -2300,7 +2344,7 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); if (stat == RDWR_STATUS_FAILURE) - goto done; + return; memory_size = 0; reg = creg->fw_dump_start; @@ -2311,36 +2355,36 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) } if (memory_size == 0) { - dev_info(adapter->dev, "Firmware dump Finished!\n"); + mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n"); ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl, FW_DUMP_READ_DONE); if (ret) { - dev_err(adapter->dev, "PCIE write err\n"); - goto done; + mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); + return; } break; } - dev_info(adapter->dev, - "%s_SIZE=0x%x\n", entry->mem_name, memory_size); + mwifiex_dbg(adapter, DUMP, + "%s_SIZE=0x%x\n", entry->mem_name, memory_size); entry->mem_ptr = vmalloc(memory_size + 1); entry->mem_size = memory_size; if (!entry->mem_ptr) { - dev_err(adapter->dev, - "Vmalloc %s failed\n", entry->mem_name); - goto done; + mwifiex_dbg(adapter, ERROR, + "Vmalloc %s failed\n", entry->mem_name); + return; } dbg_ptr = entry->mem_ptr; end_ptr = dbg_ptr + memory_size; doneflag = entry->done_flag; - dev_info(adapter->dev, "Start %s output, please wait...\n", - entry->mem_name); + mwifiex_dbg(adapter, DUMP, "Start %s output, please wait...\n", + entry->mem_name); do { stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); if (RDWR_STATUS_FAILURE == stat) - goto done; + return; reg_start = creg->fw_dump_start; reg_end = creg->fw_dump_end; @@ -2349,46 +2393,49 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) if (dbg_ptr < end_ptr) { dbg_ptr++; } else { - dev_err(adapter->dev, - "Allocated buf not enough\n"); - goto done; + mwifiex_dbg(adapter, ERROR, + "Allocated buf not enough\n"); + return; } } if (stat != RDWR_STATUS_DONE) continue; - dev_info(adapter->dev, "%s done: size=0x%tx\n", - entry->mem_name, dbg_ptr - entry->mem_ptr); + mwifiex_dbg(adapter, DUMP, + "%s done: size=0x%tx\n", + entry->mem_name, dbg_ptr - entry->mem_ptr); break; } while (true); } - dev_info(adapter->dev, "== mwifiex firmware dump end ==\n"); - - kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env); + mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump end ==\n"); +} -done: - adapter->curr_mem_idx = 0; +static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter) +{ + mwifiex_drv_info_dump(adapter); + mwifiex_pcie_fw_dump(adapter); + mwifiex_upload_device_dump(adapter); } static unsigned long iface_work_flags; static struct mwifiex_adapter *save_adapter; static void mwifiex_pcie_work(struct work_struct *work) { - if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP, + if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags)) - mwifiex_pcie_fw_dump_work(save_adapter); + mwifiex_pcie_device_dump_work(save_adapter); } static DECLARE_WORK(pcie_work, mwifiex_pcie_work); /* This function dumps FW information */ -static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) +static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter) { save_adapter = adapter; - if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags)) + if 
(test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags)) return; - set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags); + set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags); schedule_work(&pcie_work); } @@ -2418,45 +2465,50 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter) pci_set_master(pdev); - dev_dbg(adapter->dev, "try set_consistent_dma_mask(32)\n"); + mwifiex_dbg(adapter, INFO, + "try set_consistent_dma_mask(32)\n"); ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { - dev_err(adapter->dev, "set_dma_mask(32) failed\n"); + mwifiex_dbg(adapter, ERROR, + "set_dma_mask(32) failed\n"); goto err_set_dma_mask; } ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { - dev_err(adapter->dev, "set_consistent_dma_mask(64) failed\n"); + mwifiex_dbg(adapter, ERROR, + "set_consistent_dma_mask(64) failed\n"); goto err_set_dma_mask; } ret = pci_request_region(pdev, 0, DRV_NAME); if (ret) { - dev_err(adapter->dev, "req_reg(0) error\n"); + mwifiex_dbg(adapter, ERROR, + "req_reg(0) error\n"); goto err_req_region0; } card->pci_mmap = pci_iomap(pdev, 0, 0); if (!card->pci_mmap) { - dev_err(adapter->dev, "iomap(0) error\n"); + mwifiex_dbg(adapter, ERROR, "iomap(0) error\n"); ret = -EIO; goto err_iomap0; } ret = pci_request_region(pdev, 2, DRV_NAME); if (ret) { - dev_err(adapter->dev, "req_reg(2) error\n"); + mwifiex_dbg(adapter, ERROR, "req_reg(2) error\n"); goto err_req_region2; } card->pci_mmap1 = pci_iomap(pdev, 2, 0); if (!card->pci_mmap1) { - dev_err(adapter->dev, "iomap(2) error\n"); + mwifiex_dbg(adapter, ERROR, + "iomap(2) error\n"); ret = -EIO; goto err_iomap2; } - dev_dbg(adapter->dev, - "PCI memory map Virt0: %p PCI memory map Virt2: %p\n", - card->pci_mmap, card->pci_mmap1); + mwifiex_dbg(adapter, INFO, + "PCI memory map Virt0: %p PCI memory map Virt2: %p\n", + card->pci_mmap, card->pci_mmap1); card->cmdrsp_buf = NULL; ret = mwifiex_pcie_create_txbd_ring(adapter); @@ -2521,10 +2573,11 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter) const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (user_rmmod) { - dev_dbg(adapter->dev, "Clearing driver ready signature\n"); + mwifiex_dbg(adapter, INFO, + "Clearing driver ready signature\n"); if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000)) - dev_err(adapter->dev, - "Failed to write driver not-ready signature\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to write driver not-ready signature\n"); } if (pdev) { @@ -2555,7 +2608,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED, "MRVL_PCIE", pdev); if (ret) { - pr_err("request_irq failed: ret=%d\n", ret); + mwifiex_dbg(adapter, ERROR, + "request_irq failed: ret=%d\n", ret); adapter->card = NULL; return -1; } @@ -2582,7 +2636,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) const struct mwifiex_pcie_card_reg *reg; if (card) { - dev_dbg(adapter->dev, "%s(): calling free_irq()\n", __func__); + mwifiex_dbg(adapter, INFO, + "%s(): calling free_irq()\n", __func__); free_irq(card->dev->irq, card->dev); reg = card->pcie.reg; @@ -2617,7 +2672,7 @@ static struct mwifiex_if_ops pcie_ops = { .cleanup_mpa_buf = NULL, .init_fw_port = mwifiex_pcie_init_fw_port, .clean_pcie_ring = mwifiex_clean_pcie_ring_buf, - .fw_dump = mwifiex_pcie_fw_dump, + .device_dump = mwifiex_pcie_device_dump, }; /* diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 0ffdb7c5afd2..baf9715ddc10 100644 --- 
a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -241,20 +241,21 @@ mwifiex_is_bss_wpa(struct mwifiex_private *priv, * LinkSys WRT54G && bss_desc->privacy */ ) { - dev_dbg(priv->adapter->dev, "info: %s: WPA:" - " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s " - "EncMode=%#x privacy=%#x\n", __func__, - (bss_desc->bcn_wpa_ie) ? - (*(bss_desc->bcn_wpa_ie)). - vend_hdr.element_id : 0, - (bss_desc->bcn_rsn_ie) ? - (*(bss_desc->bcn_rsn_ie)). - ieee_hdr.element_id : 0, - (priv->sec_info.wep_enabled) ? "e" : "d", - (priv->sec_info.wpa_enabled) ? "e" : "d", - (priv->sec_info.wpa2_enabled) ? "e" : "d", - priv->sec_info.encryption_mode, - bss_desc->privacy); + mwifiex_dbg(priv->adapter, INFO, + "info: %s: WPA:\t" + "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t" + "EncMode=%#x privacy=%#x\n", __func__, + (bss_desc->bcn_wpa_ie) ? + (*bss_desc->bcn_wpa_ie). + vend_hdr.element_id : 0, + (bss_desc->bcn_rsn_ie) ? + (*bss_desc->bcn_rsn_ie). + ieee_hdr.element_id : 0, + (priv->sec_info.wep_enabled) ? "e" : "d", + (priv->sec_info.wpa_enabled) ? "e" : "d", + (priv->sec_info.wpa2_enabled) ? "e" : "d", + priv->sec_info.encryption_mode, + bss_desc->privacy); return true; } return false; @@ -277,20 +278,21 @@ mwifiex_is_bss_wpa2(struct mwifiex_private *priv, * Privacy bit may NOT be set in some APs like * LinkSys WRT54G && bss_desc->privacy */ - dev_dbg(priv->adapter->dev, "info: %s: WPA2: " - " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s " - "EncMode=%#x privacy=%#x\n", __func__, - (bss_desc->bcn_wpa_ie) ? - (*(bss_desc->bcn_wpa_ie)). - vend_hdr.element_id : 0, - (bss_desc->bcn_rsn_ie) ? - (*(bss_desc->bcn_rsn_ie)). - ieee_hdr.element_id : 0, - (priv->sec_info.wep_enabled) ? "e" : "d", - (priv->sec_info.wpa_enabled) ? "e" : "d", - (priv->sec_info.wpa2_enabled) ? "e" : "d", - priv->sec_info.encryption_mode, - bss_desc->privacy); + mwifiex_dbg(priv->adapter, INFO, + "info: %s: WPA2:\t" + "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t" + "EncMode=%#x privacy=%#x\n", __func__, + (bss_desc->bcn_wpa_ie) ? + (*bss_desc->bcn_wpa_ie). + vend_hdr.element_id : 0, + (bss_desc->bcn_rsn_ie) ? + (*bss_desc->bcn_rsn_ie). + ieee_hdr.element_id : 0, + (priv->sec_info.wep_enabled) ? "e" : "d", + (priv->sec_info.wpa_enabled) ? "e" : "d", + (priv->sec_info.wpa2_enabled) ? "e" : "d", + priv->sec_info.encryption_mode, + bss_desc->privacy); return true; } return false; @@ -333,18 +335,19 @@ mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv, ((!bss_desc->bcn_rsn_ie) || ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) && priv->sec_info.encryption_mode && bss_desc->privacy) { - dev_dbg(priv->adapter->dev, "info: %s: dynamic " - "WEP: wpa_ie=%#x wpa2_ie=%#x " - "EncMode=%#x privacy=%#x\n", - __func__, - (bss_desc->bcn_wpa_ie) ? - (*(bss_desc->bcn_wpa_ie)). - vend_hdr.element_id : 0, - (bss_desc->bcn_rsn_ie) ? - (*(bss_desc->bcn_rsn_ie)). - ieee_hdr.element_id : 0, - priv->sec_info.encryption_mode, - bss_desc->privacy); + mwifiex_dbg(priv->adapter, INFO, + "info: %s: dynamic\t" + "WEP: wpa_ie=%#x wpa2_ie=%#x\t" + "EncMode=%#x privacy=%#x\n", + __func__, + (bss_desc->bcn_wpa_ie) ? + (*bss_desc->bcn_wpa_ie). + vend_hdr.element_id : 0, + (bss_desc->bcn_rsn_ie) ? + (*bss_desc->bcn_rsn_ie). 
+ ieee_hdr.element_id : 0, + priv->sec_info.encryption_mode, + bss_desc->privacy); return true; } return false; @@ -383,19 +386,20 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, return 0; if (priv->wps.session_enable) { - dev_dbg(adapter->dev, - "info: return success directly in WPS period\n"); + mwifiex_dbg(adapter, IOCTL, + "info: return success directly in WPS period\n"); return 0; } if (bss_desc->chan_sw_ie_present) { - dev_err(adapter->dev, - "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n"); + mwifiex_dbg(adapter, INFO, + "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n"); return -1; } if (mwifiex_is_bss_wapi(priv, bss_desc)) { - dev_dbg(adapter->dev, "info: return success for WAPI AP\n"); + mwifiex_dbg(adapter, INFO, + "info: return success for WAPI AP\n"); return 0; } @@ -405,7 +409,8 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, return 0; } else if (mwifiex_is_bss_static_wep(priv, bss_desc)) { /* Static WEP enabled */ - dev_dbg(adapter->dev, "info: Disable 11n in WEP mode.\n"); + mwifiex_dbg(adapter, INFO, + "info: Disable 11n in WEP mode.\n"); bss_desc->disable_11n = true; return 0; } else if (mwifiex_is_bss_wpa(priv, bss_desc)) { @@ -418,9 +423,9 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, if (mwifiex_is_wpa_oui_present (bss_desc, CIPHER_SUITE_TKIP)) { - dev_dbg(adapter->dev, - "info: Disable 11n if AES " - "is not supported by AP\n"); + mwifiex_dbg(adapter, INFO, + "info: Disable 11n if AES\t" + "is not supported by AP\n"); bss_desc->disable_11n = true; } else { return -1; @@ -437,9 +442,9 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, if (mwifiex_is_rsn_oui_present (bss_desc, CIPHER_SUITE_TKIP)) { - dev_dbg(adapter->dev, - "info: Disable 11n if AES " - "is not supported by AP\n"); + mwifiex_dbg(adapter, INFO, + "info: Disable 11n if AES\t" + "is not supported by AP\n"); bss_desc->disable_11n = true; } else { return -1; @@ -455,17 +460,18 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, } /* Security doesn't match */ - dev_dbg(adapter->dev, - "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s " - "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n", __func__, - (bss_desc->bcn_wpa_ie) ? - (*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id : 0, - (bss_desc->bcn_rsn_ie) ? - (*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id : 0, - (priv->sec_info.wep_enabled) ? "e" : "d", - (priv->sec_info.wpa_enabled) ? "e" : "d", - (priv->sec_info.wpa2_enabled) ? "e" : "d", - priv->sec_info.encryption_mode, bss_desc->privacy); + mwifiex_dbg(adapter, ERROR, + "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s\t" + "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n", + __func__, + (bss_desc->bcn_wpa_ie) ? + (*bss_desc->bcn_wpa_ie).vend_hdr.element_id : 0, + (bss_desc->bcn_rsn_ie) ? + (*bss_desc->bcn_rsn_ie).ieee_hdr.element_id : 0, + (priv->sec_info.wep_enabled) ? "e" : "d", + (priv->sec_info.wpa_enabled) ? "e" : "d", + (priv->sec_info.wpa2_enabled) ? 
"e" : "d", + priv->sec_info.encryption_mode, bss_desc->privacy); return -1; } @@ -560,7 +566,8 @@ mwifiex_append_rate_tlv(struct mwifiex_private *priv, else rates_size = mwifiex_get_supported_rates(priv, rates); - dev_dbg(priv->adapter->dev, "info: SCAN_CMD: Rates size = %d\n", + mwifiex_dbg(priv->adapter, CMD, + "info: SCAN_CMD: Rates size = %d\n", rates_size); rates_tlv = (struct mwifiex_ie_types_rates_param_set *)tlv_pos; rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES); @@ -600,9 +607,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, u8 radio_type; if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) { - dev_dbg(priv->adapter->dev, - "info: Scan: Null detect: %p, %p, %p\n", - scan_cfg_out, chan_tlv_out, scan_chan_list); + mwifiex_dbg(priv->adapter, ERROR, + "info: Scan: Null detect: %p, %p, %p\n", + scan_cfg_out, chan_tlv_out, scan_chan_list); return -1; } @@ -645,16 +652,16 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, } radio_type = tmp_chan_list->radio_type; - dev_dbg(priv->adapter->dev, - "info: Scan: Chan(%3d), Radio(%d)," - " Mode(%d, %d), Dur(%d)\n", - tmp_chan_list->chan_number, - tmp_chan_list->radio_type, - tmp_chan_list->chan_scan_mode_bitmap - & MWIFIEX_PASSIVE_SCAN, - (tmp_chan_list->chan_scan_mode_bitmap - & MWIFIEX_DISABLE_CHAN_FILT) >> 1, - le16_to_cpu(tmp_chan_list->max_scan_time)); + mwifiex_dbg(priv->adapter, INFO, + "info: Scan: Chan(%3d), Radio(%d),\t" + "Mode(%d, %d), Dur(%d)\n", + tmp_chan_list->chan_number, + tmp_chan_list->radio_type, + tmp_chan_list->chan_scan_mode_bitmap + & MWIFIEX_PASSIVE_SCAN, + (tmp_chan_list->chan_scan_mode_bitmap + & MWIFIEX_DISABLE_CHAN_FILT) >> 1, + le16_to_cpu(tmp_chan_list->max_scan_time)); /* Copy the current channel TLV to the command being prepared */ @@ -718,9 +725,11 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, /* The total scan time should be less than scan command timeout value */ if (total_scan_time > MWIFIEX_MAX_TOTAL_SCAN_TIME) { - dev_err(priv->adapter->dev, "total scan time %dms" - " is over limit (%dms), scan skipped\n", - total_scan_time, MWIFIEX_MAX_TOTAL_SCAN_TIME); + mwifiex_dbg(priv->adapter, ERROR, + "total scan time %dms\t" + "is over limit (%dms), scan skipped\n", + total_scan_time, + MWIFIEX_MAX_TOTAL_SCAN_TIME); ret = -1; break; } @@ -905,9 +914,10 @@ mwifiex_config_scan(struct mwifiex_private *priv, tlv_pos += (sizeof(wildcard_ssid_tlv->header) + le16_to_cpu(wildcard_ssid_tlv->header.len)); - dev_dbg(adapter->dev, "info: scan: ssid[%d]: %s, %d\n", - i, wildcard_ssid_tlv->ssid, - wildcard_ssid_tlv->max_ssid_length); + mwifiex_dbg(adapter, INFO, + "info: scan: ssid[%d]: %s, %d\n", + i, wildcard_ssid_tlv->ssid, + wildcard_ssid_tlv->max_ssid_length); /* Empty wildcard ssid with a maxlen will match many or potentially all SSIDs (maxlen == 32), therefore do @@ -928,8 +938,9 @@ mwifiex_config_scan(struct mwifiex_private *priv, *filtered_scan = true; if (user_scan_in->scan_chan_gap) { - dev_dbg(adapter->dev, "info: scan: channel gap = %d\n", - user_scan_in->scan_chan_gap); + mwifiex_dbg(adapter, INFO, + "info: scan: channel gap = %d\n", + user_scan_in->scan_chan_gap); *max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN; @@ -961,8 +972,9 @@ mwifiex_config_scan(struct mwifiex_private *priv, add tlv */ if (num_probes) { - dev_dbg(adapter->dev, "info: scan: num_probes = %d\n", - num_probes); + mwifiex_dbg(adapter, INFO, + "info: scan: num_probes = %d\n", + num_probes); num_probes_tlv = (struct mwifiex_ie_types_num_probes *) tlv_pos; num_probes_tlv->header.type = 
cpu_to_le16(TLV_TYPE_NUMPROBES); @@ -1003,7 +1015,8 @@ mwifiex_config_scan(struct mwifiex_private *priv, if (user_scan_in && user_scan_in->chan_list[0].chan_number) { - dev_dbg(adapter->dev, "info: Scan: Using supplied channel list\n"); + mwifiex_dbg(adapter, INFO, + "info: Scan: Using supplied channel list\n"); for (chan_idx = 0; chan_idx < MWIFIEX_USER_SCAN_CHAN_MAX && @@ -1056,13 +1069,13 @@ mwifiex_config_scan(struct mwifiex_private *priv, (user_scan_in->chan_list[0].chan_number == priv->curr_bss_params.bss_descriptor.channel)) { *scan_current_only = true; - dev_dbg(adapter->dev, - "info: Scan: Scanning current channel only\n"); + mwifiex_dbg(adapter, INFO, + "info: Scan: Scanning current channel only\n"); } chan_num = chan_idx; } else { - dev_dbg(adapter->dev, - "info: Scan: Creating full region channel list\n"); + mwifiex_dbg(adapter, INFO, + "info: Scan: Creating full region channel list\n"); chan_num = mwifiex_scan_create_channel_list(priv, user_scan_in, scan_chan_list, *filtered_scan); @@ -1094,8 +1107,9 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter, tlv_buf_left = tlv_buf_size; *tlv_data = NULL; - dev_dbg(adapter->dev, "info: SCAN_RESP: tlv_buf_size = %d\n", - tlv_buf_size); + mwifiex_dbg(adapter, INFO, + "info: SCAN_RESP: tlv_buf_size = %d\n", + tlv_buf_size); while (tlv_buf_left >= sizeof(struct mwifiex_ie_types_header)) { @@ -1103,26 +1117,31 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter, tlv_len = le16_to_cpu(current_tlv->header.len); if (sizeof(tlv->header) + tlv_len > tlv_buf_left) { - dev_err(adapter->dev, "SCAN_RESP: TLV buffer corrupt\n"); + mwifiex_dbg(adapter, ERROR, + "SCAN_RESP: TLV buffer corrupt\n"); break; } if (req_tlv_type == tlv_type) { switch (tlv_type) { case TLV_TYPE_TSFTIMESTAMP: - dev_dbg(adapter->dev, "info: SCAN_RESP: TSF " - "timestamp TLV, len = %d\n", tlv_len); + mwifiex_dbg(adapter, INFO, + "info: SCAN_RESP: TSF\t" + "timestamp TLV, len = %d\n", + tlv_len); *tlv_data = current_tlv; break; case TLV_TYPE_CHANNELBANDLIST: - dev_dbg(adapter->dev, "info: SCAN_RESP: channel" - " band list TLV, len = %d\n", tlv_len); + mwifiex_dbg(adapter, INFO, + "info: SCAN_RESP: channel\t" + "band list TLV, len = %d\n", + tlv_len); *tlv_data = current_tlv; break; default: - dev_err(adapter->dev, - "SCAN_RESP: unhandled TLV = %d\n", - tlv_type); + mwifiex_dbg(adapter, ERROR, + "SCAN_RESP: unhandled TLV = %d\n", + tlv_type); /* Give up, this seems corrupted */ return; } @@ -1177,8 +1196,9 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, total_ie_len = element_len + sizeof(struct ieee_types_header); if (bytes_left < total_ie_len) { - dev_err(adapter->dev, "err: InterpretIE: in processing" - " IE, bytes left < IE length\n"); + mwifiex_dbg(adapter, ERROR, + "err: InterpretIE: in processing\t" + "IE, bytes left < IE length\n"); return -1; } switch (element_id) { @@ -1186,9 +1206,9 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, bss_entry->ssid.ssid_len = element_len; memcpy(bss_entry->ssid.ssid, (current_ptr + 2), element_len); - dev_dbg(adapter->dev, - "info: InterpretIE: ssid: %-32s\n", - bss_entry->ssid.ssid); + mwifiex_dbg(adapter, INFO, + "info: InterpretIE: ssid: %-32s\n", + bss_entry->ssid.ssid); break; case WLAN_EID_SUPP_RATES: @@ -1419,19 +1439,20 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, unsigned long flags; if (adapter->scan_processing) { - dev_err(adapter->dev, "cmd: Scan already in process...\n"); + mwifiex_dbg(adapter, WARN, + "cmd: Scan already in 
process...\n"); return -EBUSY; } if (priv->scan_block) { - dev_err(adapter->dev, - "cmd: Scan is blocked during association...\n"); + mwifiex_dbg(adapter, WARN, + "cmd: Scan is blocked during association...\n"); return -EBUSY; } if (adapter->surprise_removed || adapter->is_cmd_timedout) { - dev_err(adapter->dev, - "Ignore scan. Card removed or firmware in bad state\n"); + mwifiex_dbg(adapter, ERROR, + "Ignore scan. Card removed or firmware in bad state\n"); return -EFAULT; } @@ -1478,7 +1499,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, /* Perform internal scan synchronously */ if (!priv->scan_request) { - dev_dbg(adapter->dev, "wait internal scan\n"); + mwifiex_dbg(adapter, INFO, + "wait internal scan\n"); mwifiex_wait_queue_complete(adapter, cmd_node); } } else { @@ -1553,8 +1575,8 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv, ret = mwifiex_is_network_compatible(priv, bss_desc, priv->bss_mode); if (ret) - dev_err(priv->adapter->dev, - "Incompatible network settings\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Incompatible network settings\n"); break; default: ret = 0; @@ -1656,7 +1678,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, */ if (curr_bcn_bytes < ETH_ALEN + sizeof(u8) + sizeof(struct mwifiex_fixed_bcn_param)) { - dev_err(adapter->dev, "InterpretIE: not enough bytes left\n"); + mwifiex_dbg(adapter, ERROR, + "InterpretIE: not enough bytes left\n"); return -EFAULT; } @@ -1669,7 +1692,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, rssi = (-rssi) * 100; /* Convert dBm to mBm */ current_ptr += sizeof(u8); curr_bcn_bytes -= sizeof(u8); - dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi); + mwifiex_dbg(adapter, INFO, + "info: InterpretIE: RSSI=%d\n", rssi); } else { rssi = rssi_val; } @@ -1682,14 +1706,16 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, beacon_period = le16_to_cpu(bcn_param->beacon_period); cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap); - dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n", - cap_info_bitmap); + mwifiex_dbg(adapter, INFO, + "info: InterpretIE: capabilities=0x%X\n", + cap_info_bitmap); /* Rest of the current buffer are IE's */ ie_buf = current_ptr; ie_len = curr_bcn_bytes; - dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n", - curr_bcn_bytes); + mwifiex_dbg(adapter, INFO, + "info: InterpretIE: IELength for this AP = %d\n", + curr_bcn_bytes); while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) { u8 element_id, element_len; @@ -1698,8 +1724,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, element_len = *(current_ptr + 1); if (curr_bcn_bytes < element_len + sizeof(struct ieee_types_header)) { - dev_err(adapter->dev, - "%s: bytes left < IE length\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: bytes left < IE length\n", __func__); return -EFAULT; } if (element_id == WLAN_EID_DS_PARAMS) { @@ -1719,8 +1745,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, /* Skip entry if on csa closed channel */ if (channel == priv->csa_chan) { - dev_dbg(adapter->dev, - "Dropping entry on csa closed channel\n"); + mwifiex_dbg(adapter, WARN, + "Dropping entry on csa closed channel\n"); return 0; } @@ -1751,7 +1777,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, cfg80211_put_bss(priv->wdev.wiphy, bss); } } else { - dev_dbg(adapter->dev, "missing BSS channel IE\n"); + 
mwifiex_dbg(adapter, WARN, "missing BSS channel IE\n"); } return 0; @@ -1765,7 +1791,8 @@ static void mwifiex_complete_scan(struct mwifiex_private *priv) if (adapter->curr_cmd->wait_q_enabled) { adapter->cmd_wait_q.status = 0; if (!priv->scan_request) { - dev_dbg(adapter->dev, "complete internal scan\n"); + mwifiex_dbg(adapter, INFO, + "complete internal scan\n"); mwifiex_complete_cmd(adapter, adapter->curr_cmd); } } @@ -1788,12 +1815,14 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) mwifiex_complete_scan(priv); if (priv->scan_request) { - dev_dbg(adapter->dev, "info: notifying scan done\n"); + mwifiex_dbg(adapter, INFO, + "info: notifying scan done\n"); cfg80211_scan_done(priv->scan_request, 0); priv->scan_request = NULL; } else { priv->scan_aborting = false; - dev_dbg(adapter->dev, "info: scan already aborted\n"); + mwifiex_dbg(adapter, INFO, + "info: scan already aborted\n"); } } else if ((priv->scan_aborting && !priv->scan_request) || priv->scan_block) { @@ -1809,12 +1838,14 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); if (priv->scan_request) { - dev_dbg(adapter->dev, "info: aborting scan\n"); + mwifiex_dbg(adapter, INFO, + "info: aborting scan\n"); cfg80211_scan_done(priv->scan_request, 1); priv->scan_request = NULL; } else { priv->scan_aborting = false; - dev_dbg(adapter->dev, "info: scan already aborted\n"); + mwifiex_dbg(adapter, INFO, + "info: scan already aborted\n"); } } else { /* Get scan command from scan_pending_q and put to @@ -1877,8 +1908,9 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, if (scan_rsp->number_of_sets > MWIFIEX_MAX_AP) { - dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n", - scan_rsp->number_of_sets); + mwifiex_dbg(adapter, ERROR, + "SCAN_RESP: too many AP returned (%d)\n", + scan_rsp->number_of_sets); ret = -1; goto check_next_scan; } @@ -1887,14 +1919,15 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, mwifiex_11h_get_csa_closed_channel(priv); bytes_left = le16_to_cpu(scan_rsp->bss_descript_size); - dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n", - bytes_left); + mwifiex_dbg(adapter, INFO, + "info: SCAN_RESP: bss_descript_size %d\n", + bytes_left); scan_resp_size = le16_to_cpu(resp->size); - dev_dbg(adapter->dev, - "info: SCAN_RESP: returned %d APs before parsing\n", - scan_rsp->number_of_sets); + mwifiex_dbg(adapter, INFO, + "info: SCAN_RESP: returned %d APs before parsing\n", + scan_rsp->number_of_sets); bss_info = scan_rsp->bss_desc_and_tlv_buffer; @@ -2007,13 +2040,13 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv, le16_to_cpu(fw_chan_stats->cca_scan_dur); chan_stats.cca_busy_dur = le16_to_cpu(fw_chan_stats->cca_busy_dur); - dev_dbg(adapter->dev, - "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n", - chan_stats.chan_num, - chan_stats.noise, - chan_stats.total_bss, - chan_stats.cca_scan_dur, - chan_stats.cca_busy_dur); + mwifiex_dbg(adapter, INFO, + "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n", + chan_stats.chan_num, + chan_stats.noise, + chan_stats.total_bss, + chan_stats.cca_scan_dur, + chan_stats.cca_busy_dur); memcpy(&adapter->chan_stats[adapter->survey_idx++], &chan_stats, sizeof(struct mwifiex_chan_stats)); fw_chan_stats++; @@ -2035,7 +2068,7 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv, unsigned long cmd_flags, scan_flags; bool complete_scan = false; - dev_dbg(priv->adapter->dev, 
"info: EXT scan returns successfully\n"); + mwifiex_dbg(adapter, INFO, "info: EXT scan returns successfully\n"); ext_scan_resp = &resp->params.ext_scan; @@ -2048,8 +2081,8 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv, len = le16_to_cpu(tlv->len); if (buf_left < (sizeof(struct mwifiex_ie_types_header) + len)) { - dev_err(adapter->dev, - "error processing scan response TLVs"); + mwifiex_dbg(adapter, ERROR, + "error processing scan response TLVs"); break; } @@ -2075,8 +2108,8 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv, cmd_ptr = (void *)cmd_node->cmd_skb->data; if (le16_to_cpu(cmd_ptr->command) == HostCmd_CMD_802_11_SCAN_EXT) { - dev_dbg(priv->adapter->dev, - "Scan pending in command pending list"); + mwifiex_dbg(adapter, INFO, + "Scan pending in command pending list"); complete_scan = false; break; } @@ -2114,17 +2147,20 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv, u16 scan_resp_size = le16_to_cpu(event_scan->buf_size); if (num_of_set > MWIFIEX_MAX_AP) { - dev_err(adapter->dev, - "EXT_SCAN: Invalid number of AP returned (%d)!!\n", - num_of_set); + mwifiex_dbg(adapter, ERROR, + "EXT_SCAN: Invalid number of AP returned (%d)!!\n", + num_of_set); ret = -1; goto check_next_scan; } bytes_left = scan_resp_size; - dev_dbg(adapter->dev, - "EXT_SCAN: size %d, returned %d APs...", - scan_resp_size, num_of_set); + mwifiex_dbg(adapter, INFO, + "EXT_SCAN: size %d, returned %d APs...", + scan_resp_size, num_of_set); + mwifiex_dbg_dump(adapter, CMD_D, "EXT_SCAN buffer:", buf, + scan_resp_size + + sizeof(struct mwifiex_event_scan_result)); tlv = (struct mwifiex_ie_types_data *)scan_resp; @@ -2132,7 +2168,8 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv, type = le16_to_cpu(tlv->header.type); len = le16_to_cpu(tlv->header.len); if (bytes_left < sizeof(struct mwifiex_ie_types_header) + len) { - dev_err(adapter->dev, "EXT_SCAN: Error bytes left < TLV length\n"); + mwifiex_dbg(adapter, ERROR, + "EXT_SCAN: Error bytes left < TLV length\n"); break; } scan_rsp_tlv = NULL; @@ -2158,8 +2195,9 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv, len = le16_to_cpu(tlv->header.len); if (bytes_left_for_tlv < sizeof(struct mwifiex_ie_types_header) + len) { - dev_err(adapter->dev, - "EXT_SCAN: Error in processing TLV, bytes left < TLV length\n"); + mwifiex_dbg(adapter, ERROR, + "EXT_SCAN: Error in processing TLV,\t" + "bytes left < TLV length\n"); scan_rsp_tlv = NULL; bytes_left_for_tlv = 0; continue; @@ -2199,8 +2237,8 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv, if (scan_info_tlv) { rssi = (s32)(s16)(le16_to_cpu(scan_info_tlv->rssi)); rssi *= 100; /* Convert dBm to mBm */ - dev_dbg(adapter->dev, - "info: InterpretIE: RSSI=%d\n", rssi); + mwifiex_dbg(adapter, INFO, + "info: InterpretIE: RSSI=%d\n", rssi); fw_tsf = le64_to_cpu(scan_info_tlv->tsf); radio_type = &scan_info_tlv->radio_type; } else { @@ -2271,13 +2309,14 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv, struct mwifiex_user_scan_cfg *scan_cfg; if (adapter->scan_processing) { - dev_err(adapter->dev, "cmd: Scan already in process...\n"); + mwifiex_dbg(adapter, WARN, + "cmd: Scan already in process...\n"); return -EBUSY; } if (priv->scan_block) { - dev_err(adapter->dev, - "cmd: Scan is blocked during association...\n"); + mwifiex_dbg(adapter, WARN, + "cmd: Scan is blocked during association...\n"); return -EBUSY; } @@ -2309,8 +2348,9 @@ int mwifiex_request_scan(struct mwifiex_private *priv, int ret; if 
(down_interruptible(&priv->async_sem)) { - dev_err(priv->adapter->dev, "%s: acquire semaphore\n", - __func__); + mwifiex_dbg(priv->adapter, ERROR, + "%s: acquire semaphore fail\n", + __func__); return -1; } @@ -2400,8 +2440,9 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv) memcpy(priv->curr_bcn_buf, curr_bss->beacon_buf, curr_bss->beacon_buf_size); - dev_dbg(priv->adapter->dev, "info: current beacon saved %d\n", - priv->curr_bcn_size); + mwifiex_dbg(priv->adapter, INFO, + "info: current beacon saved %d\n", + priv->curr_bcn_size); curr_bss->beacon_buf = priv->curr_bcn_buf; diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index d10320f89bc1..a0b121f3460c 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -166,7 +166,8 @@ static int mwifiex_sdio_resume(struct device *dev) adapter = card->adapter; if (!adapter->is_suspended) { - dev_warn(adapter->dev, "device already resumed\n"); + mwifiex_dbg(adapter, WARN, + "device already resumed\n"); return 0; } @@ -191,8 +192,6 @@ mwifiex_sdio_remove(struct sdio_func *func) struct mwifiex_adapter *adapter; struct mwifiex_private *priv; - pr_debug("info: SDIO func num=%d\n", func->num); - card = sdio_get_drvdata(func); if (!card) return; @@ -201,6 +200,8 @@ mwifiex_sdio_remove(struct sdio_func *func) if (!adapter || !adapter->priv_num) return; + mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num); + if (user_rmmod) { if (adapter->is_suspended) mwifiex_sdio_resume(adapter->dev); @@ -257,12 +258,14 @@ static int mwifiex_sdio_suspend(struct device *dev) /* Enable the Host Sleep */ if (!mwifiex_enable_hs(adapter)) { - dev_err(adapter->dev, "cmd: failed to suspend\n"); + mwifiex_dbg(adapter, ERROR, + "cmd: failed to suspend\n"); adapter->hs_enabling = false; return -EFAULT; } - dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n"); + mwifiex_dbg(adapter, INFO, + "cmd: suspend with MMC_PM_KEEP_POWER\n"); ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); /* Indicate device suspended */ @@ -386,8 +389,8 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK); if (adapter->is_suspended) { - dev_err(adapter->dev, - "%s: not allowed while suspended\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: not allowed while suspended\n", __func__); return -1; } @@ -434,7 +437,8 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer, */ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) { - dev_dbg(adapter->dev, "event: wakeup device...\n"); + mwifiex_dbg(adapter, EVENT, + "event: wakeup device...\n"); return mwifiex_write_reg(adapter, CONFIGURATION_REG, HOST_POWER_UP); } @@ -446,7 +450,8 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) */ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) { - dev_dbg(adapter->dev, "cmd: wakeup device completed\n"); + mwifiex_dbg(adapter, EVENT, + "cmd: wakeup device completed\n"); return mwifiex_write_reg(adapter, CONFIGURATION_REG, 0); } @@ -524,7 +529,8 @@ static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter) else return -1; cont: - pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport); + mwifiex_dbg(adapter, INFO, + "info: SDIO FUNC1 IO port: %#x\n", adapter->ioport); /* Set Host interrupt reset to read to clear */ if (!mwifiex_read_reg(adapter, card->reg->host_int_rsr_reg, ®)) @@ -556,10 +562,12 @@ static int mwifiex_write_data_to_card(struct mwifiex_adapter 
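The down_interruptible() path above serializes asynchronous scan requests so that only one is in flight; a pending signal aborts the wait instead of blocking forever. A sketch of the same pattern, assuming an invented scan_sem:

#include <linux/semaphore.h>
#include <linux/errno.h>

static struct semaphore scan_sem;

static void scan_lock_init(void)
{
	sema_init(&scan_sem, 1);	/* binary semaphore, initially free */
}

static int scan_enter(void)
{
	if (down_interruptible(&scan_sem))
		return -EINTR;		/* interrupted while waiting */
	return 0;
}

static void scan_exit(void)
{
	up(&scan_sem);
}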
*adapter, ret = mwifiex_write_data_sync(adapter, payload, pkt_len, port); if (ret) { i++; - dev_err(adapter->dev, "host_to_card, write iomem" - " (%d) failed: %d\n", i, ret); + mwifiex_dbg(adapter, ERROR, + "host_to_card, write iomem\t" + "(%d) failed: %d\n", i, ret); if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04)) - dev_err(adapter->dev, "write CFG reg failed\n"); + mwifiex_dbg(adapter, ERROR, + "write CFG reg failed\n"); ret = -1; if (i > MAX_WRITE_IOMEM_RETRY) @@ -584,7 +592,8 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port) const struct mwifiex_sdio_card_reg *reg = card->reg; u32 rd_bitmap = card->mp_rd_bitmap; - dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%08x\n", rd_bitmap); + mwifiex_dbg(adapter, DATA, + "data: mp_rd_bitmap=0x%08x\n", rd_bitmap); if (card->supports_sdio_new_mode) { if (!(rd_bitmap & reg->data_port_mask)) @@ -598,8 +607,9 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port) (card->mp_rd_bitmap & CTRL_PORT_MASK)) { card->mp_rd_bitmap &= (u32) (~CTRL_PORT_MASK); *port = CTRL_PORT; - dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%08x\n", - *port, card->mp_rd_bitmap); + mwifiex_dbg(adapter, DATA, + "data: port=%d mp_rd_bitmap=0x%08x\n", + *port, card->mp_rd_bitmap); return 0; } @@ -613,9 +623,9 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port) if (++card->curr_rd_port == card->max_ports) card->curr_rd_port = reg->start_rd_port; - dev_dbg(adapter->dev, - "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n", - *port, rd_bitmap, card->mp_rd_bitmap); + mwifiex_dbg(adapter, DATA, + "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n", + *port, rd_bitmap, card->mp_rd_bitmap); return 0; } @@ -633,7 +643,8 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port) const struct mwifiex_sdio_card_reg *reg = card->reg; u32 wr_bitmap = card->mp_wr_bitmap; - dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%08x\n", wr_bitmap); + mwifiex_dbg(adapter, DATA, + "data: mp_wr_bitmap=0x%08x\n", wr_bitmap); if (!(wr_bitmap & card->mp_data_port_mask)) { adapter->data_sent = true; @@ -651,15 +662,16 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port) } if ((card->has_control_mask) && (*port == CTRL_PORT)) { - dev_err(adapter->dev, - "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n", - *port, card->curr_wr_port, wr_bitmap, - card->mp_wr_bitmap); + mwifiex_dbg(adapter, ERROR, + "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n", + *port, card->curr_wr_port, wr_bitmap, + card->mp_wr_bitmap); return -1; } - dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n", - *port, wr_bitmap, card->mp_wr_bitmap); + mwifiex_dbg(adapter, DATA, + "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n", + *port, wr_bitmap, card->mp_wr_bitmap); return 0; } @@ -683,7 +695,8 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits) usleep_range(10, 20); } - dev_err(adapter->dev, "poll card status failed, tries = %d\n", tries); + mwifiex_dbg(adapter, ERROR, + "poll card status failed, tries = %d\n", tries); return -1; } @@ -738,7 +751,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) if (mwifiex_read_data_sync(adapter, card->mp_regs, card->reg->max_mp_regs, REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) { - dev_err(adapter->dev, "read mp_regs failed\n"); + mwifiex_dbg(adapter, ERROR, "read mp_regs failed\n"); return; } @@ -751,7 +764,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) 
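The rd/wr port helpers above walk a hardware-published bitmap round-robin: claim the bit for the current port, hand the port back to the caller, and advance with wraparound. A simplified model (struct layout and names are invented for illustration; ports are assumed to fit in one 32-bit word):

struct port_state {
	unsigned int rd_bitmap;		/* one bit per ready data port */
	unsigned int curr_port;		/* next port to service */
	unsigned int start_port;	/* first data port after the ctrl port */
	unsigned int max_ports;
};

static int claim_next_rd_port(struct port_state *ps, unsigned int *port)
{
	if (!(ps->rd_bitmap & (1u << ps->curr_port)))
		return -1;			/* nothing pending */

	ps->rd_bitmap &= ~(1u << ps->curr_port);
	*port = ps->curr_port;

	if (++ps->curr_port == ps->max_ports)	/* wrap around */
		ps->curr_port = ps->start_port;
	return 0;
}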
* UP_LD_CMD_PORT_HOST_INT_STATUS * Clear the interrupt status register */ - dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg); + mwifiex_dbg(adapter, INTR, + "int: sdio_ireg = %#x\n", sdio_ireg); spin_lock_irqsave(&adapter->int_lock, flags); adapter->int_status |= sdio_ireg; spin_unlock_irqrestore(&adapter->int_lock, flags); @@ -802,7 +816,8 @@ static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter) /* Request the SDIO IRQ */ ret = sdio_claim_irq(func, mwifiex_sdio_interrupt); if (ret) { - dev_err(adapter->dev, "claim irq failed: ret=%d\n", ret); + mwifiex_dbg(adapter, ERROR, + "claim irq failed: ret=%d\n", ret); goto out; } @@ -810,7 +825,8 @@ static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter) ret = mwifiex_write_reg_locked(func, card->reg->host_int_mask_reg, card->reg->host_int_enable); if (ret) { - dev_err(adapter->dev, "enable host interrupt failed\n"); + mwifiex_dbg(adapter, ERROR, + "enable host interrupt failed\n"); sdio_release_irq(func); } @@ -830,22 +846,25 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter, u32 nb; if (!buffer) { - dev_err(adapter->dev, "%s: buffer is NULL\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: buffer is NULL\n", __func__); return -1; } ret = mwifiex_read_data_sync(adapter, buffer, npayload, ioport, 1); if (ret) { - dev_err(adapter->dev, "%s: read iomem failed: %d\n", __func__, + mwifiex_dbg(adapter, ERROR, + "%s: read iomem failed: %d\n", __func__, ret); return -1; } nb = le16_to_cpu(*(__le16 *) (buffer)); if (nb > npayload) { - dev_err(adapter->dev, "%s: invalid packet, nb=%d npayload=%d\n", - __func__, nb, npayload); + mwifiex_dbg(adapter, ERROR, + "%s: invalid packet, nb=%d npayload=%d\n", + __func__, nb, npayload); return -1; } @@ -877,13 +896,14 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, u32 i = 0; if (!firmware_len) { - dev_err(adapter->dev, - "firmware image not found! Terminating download\n"); + mwifiex_dbg(adapter, ERROR, + "firmware image not found! Terminating download\n"); return -1; } - dev_dbg(adapter->dev, "info: downloading FW image (%d bytes)\n", - firmware_len); + mwifiex_dbg(adapter, INFO, + "info: downloading FW image (%d bytes)\n", + firmware_len); /* Assume that the allocated buffer is 8-byte aligned */ fwbuf = kzalloc(MWIFIEX_UPLD_SIZE, GFP_KERNEL); @@ -897,8 +917,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, ret = mwifiex_sdio_poll_card_status(adapter, CARD_IO_READY | DN_LD_CARD_RDY); if (ret) { - dev_err(adapter->dev, "FW download with helper:" - " poll status timeout @ %d\n", offset); + mwifiex_dbg(adapter, ERROR, + "FW download with helper:\t" + "poll status timeout @ %d\n", offset); goto done; } @@ -910,19 +931,19 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, ret = mwifiex_read_reg(adapter, reg->base_0_reg, &base0); if (ret) { - dev_err(adapter->dev, - "dev BASE0 register read failed: " - "base0=%#04X(%d). Terminating dnld\n", - base0, base0); + mwifiex_dbg(adapter, ERROR, + "dev BASE0 register read failed:\t" + "base0=%#04X(%d). Terminating dnld\n", + base0, base0); goto done; } ret = mwifiex_read_reg(adapter, reg->base_1_reg, &base1); if (ret) { - dev_err(adapter->dev, - "dev BASE1 register read failed: " - "base1=%#04X(%d). Terminating dnld\n", - base1, base1); + mwifiex_dbg(adapter, ERROR, + "dev BASE1 register read failed:\t" + "base1=%#04X(%d). 
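mwifiex_sdio_card_to_host() trusts the first little-endian 16-bit word of the transfer as the packet length and rejects anything larger than the receive buffer. The check reduces to the following (a minimal sketch of the validation, not the driver's exact code):

#include <stdint.h>

static int check_rx_len(const uint8_t *buf, uint16_t bufsize, uint16_t *nb)
{
	*nb = (uint16_t)(buf[0] | (buf[1] << 8));	/* le16_to_cpu */
	return (*nb > bufsize) ? -1 : 0;		/* invalid packet */
}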
Terminating dnld\n", + base1, base1); goto done; } len = (u16) (((base1 & 0xff) << 8) | (base0 & 0xff)); @@ -936,9 +957,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, if (!len) { break; } else if (len > MWIFIEX_UPLD_SIZE) { - dev_err(adapter->dev, - "FW dnld failed @ %d, invalid length %d\n", - offset, len); + mwifiex_dbg(adapter, ERROR, + "FW dnld failed @ %d, invalid length %d\n", + offset, len); ret = -1; goto done; } @@ -948,14 +969,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, if (len & BIT(0)) { i++; if (i > MAX_WRITE_IOMEM_RETRY) { - dev_err(adapter->dev, - "FW dnld failed @ %d, over max retry\n", - offset); + mwifiex_dbg(adapter, ERROR, + "FW dnld failed @ %d, over max retry\n", + offset); ret = -1; goto done; } - dev_err(adapter->dev, "CRC indicated by the helper:" - " len = 0x%04X, txlen = %d\n", len, txlen); + mwifiex_dbg(adapter, ERROR, + "CRC indicated by the helper:\t" + "len = 0x%04X, txlen = %d\n", len, txlen); len &= ~BIT(0); /* Setting this to 0 to resend from same offset */ txlen = 0; @@ -978,11 +1000,12 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, MWIFIEX_SDIO_BLOCK_SIZE, adapter->ioport); if (ret) { - dev_err(adapter->dev, - "FW download, write iomem (%d) failed @ %d\n", - i, offset); + mwifiex_dbg(adapter, ERROR, + "FW download, write iomem (%d) failed @ %d\n", + i, offset); if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04)) - dev_err(adapter->dev, "write CFG reg failed\n"); + mwifiex_dbg(adapter, ERROR, + "write CFG reg failed\n"); ret = -1; goto done; @@ -991,8 +1014,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, offset += txlen; } while (true); - dev_notice(adapter->dev, - "info: FW download over, size %d bytes\n", offset); + mwifiex_dbg(adapter, MSG, + "info: FW download over, size %d bytes\n", offset); ret = 0; done: @@ -1066,18 +1089,20 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter, blk_num = *(data + BLOCK_NUMBER_OFFSET); blk_size = adapter->sdio_rx_block_size * blk_num; if (blk_size > total_pkt_len) { - dev_err(adapter->dev, "%s: error in pkt,\t" - "blk_num=%d, blk_size=%d, total_pkt_len=%d\n", - __func__, blk_num, blk_size, total_pkt_len); + mwifiex_dbg(adapter, ERROR, + "%s: error in blk_size,\t" + "blk_num=%d, blk_size=%d, total_pkt_len=%d\n", + __func__, blk_num, blk_size, total_pkt_len); break; } pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET)); pkt_type = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET + 2)); if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) { - dev_err(adapter->dev, "%s: error in pkt,\t" - "pkt_len=%d, blk_size=%d\n", - __func__, pkt_len, blk_size); + mwifiex_dbg(adapter, ERROR, + "%s: error in pkt_len,\t" + "pkt_len=%d, blk_size=%d\n", + __func__, pkt_len, blk_size); break; } skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len, @@ -1116,7 +1141,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, switch (upld_typ) { case MWIFIEX_TYPE_AGGR_DATA: - dev_dbg(adapter->dev, "info: --- Rx: Aggr Data packet ---\n"); + mwifiex_dbg(adapter, INFO, + "info: --- Rx: Aggr Data packet ---\n"); rx_info = MWIFIEX_SKB_RXCB(skb); rx_info->buf_type = MWIFIEX_TYPE_AGGR_DATA; if (adapter->rx_work_enabled) { @@ -1130,7 +1156,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, break; case MWIFIEX_TYPE_DATA: - dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n"); + mwifiex_dbg(adapter, DATA, + "info: --- Rx: Data packet ---\n"); if (adapter->rx_work_enabled) { 
skb_queue_tail(&adapter->rx_data_q, skb); adapter->data_received = true; @@ -1141,7 +1168,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, break; case MWIFIEX_TYPE_CMD: - dev_dbg(adapter->dev, "info: --- Rx: Cmd Response ---\n"); + mwifiex_dbg(adapter, CMD, + "info: --- Rx: Cmd Response ---\n"); /* take care of curr_cmd = NULL case */ if (!adapter->curr_cmd) { cmd_buf = adapter->upld_buf; @@ -1163,7 +1191,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, break; case MWIFIEX_TYPE_EVENT: - dev_dbg(adapter->dev, "info: --- Rx: Event ---\n"); + mwifiex_dbg(adapter, EVENT, + "info: --- Rx: Event ---\n"); adapter->event_cause = le32_to_cpu(*(__le32 *) skb->data); if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE)) @@ -1178,7 +1207,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, break; default: - dev_err(adapter->dev, "unknown upload type %#x\n", upld_typ); + mwifiex_dbg(adapter, ERROR, + "unknown upload type %#x\n", upld_typ); dev_kfree_skb_any(skb); break; } @@ -1210,16 +1240,18 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, if ((card->has_control_mask) && (port == CTRL_PORT)) { /* Read the command Resp without aggr */ - dev_dbg(adapter->dev, "info: %s: no aggregation for cmd " - "response\n", __func__); + mwifiex_dbg(adapter, CMD, + "info: %s: no aggregation for cmd\t" + "response\n", __func__); f_do_rx_cur = 1; goto rx_curr_single; } if (!card->mpa_rx.enabled) { - dev_dbg(adapter->dev, "info: %s: rx aggregation disabled\n", - __func__); + mwifiex_dbg(adapter, WARN, + "info: %s: rx aggregation disabled\n", + __func__); f_do_rx_cur = 1; goto rx_curr_single; @@ -1230,7 +1262,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, (card->has_control_mask && (card->mp_rd_bitmap & (~((u32) CTRL_PORT_MASK))))) { /* Some more data RX pending */ - dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__); + mwifiex_dbg(adapter, INFO, + "info: %s: not last packet\n", __func__); if (MP_RX_AGGR_IN_PROGRESS(card)) { if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len)) { @@ -1247,7 +1280,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, } else { /* No more data RX pending */ - dev_dbg(adapter->dev, "info: %s: last packet\n", __func__); + mwifiex_dbg(adapter, INFO, + "info: %s: last packet\n", __func__); if (MP_RX_AGGR_IN_PROGRESS(card)) { f_do_rx_aggr = 1; @@ -1262,14 +1296,16 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, } if (f_aggr_cur) { - dev_dbg(adapter->dev, "info: current packet aggregation\n"); + mwifiex_dbg(adapter, INFO, + "info: current packet aggregation\n"); /* Curr pkt can be aggregated */ mp_rx_aggr_setup(card, rx_len, port); if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) || mp_rx_aggr_port_limit_reached(card)) { - dev_dbg(adapter->dev, "info: %s: aggregated packet " - "limit reached\n", __func__); + mwifiex_dbg(adapter, INFO, + "info: %s: aggregated packet\t" + "limit reached\n", __func__); /* No more pkts allowed in Aggr buf, rx it */ f_do_rx_aggr = 1; } @@ -1277,8 +1313,9 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, if (f_do_rx_aggr) { /* do aggr RX now */ - dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n", - card->mpa_rx.pkt_cnt); + mwifiex_dbg(adapter, DATA, + "info: do_rx_aggr: num of packets: %d\n", + card->mpa_rx.pkt_cnt); if (card->supports_sdio_new_mode) { int i; @@ -1318,8 +1355,9 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct 
mwifiex_adapter *adapter, GFP_KERNEL | GFP_DMA); if (!skb_deaggr) { - dev_err(adapter->dev, "skb allocation failure drop pkt len=%d type=%d\n", - pkt_len, pkt_type); + mwifiex_dbg(adapter, ERROR, "skb allocation failure\t" + "drop pkt len=%d type=%d\n", + pkt_len, pkt_type); curr_ptr += len_arr[pind]; continue; } @@ -1339,12 +1377,12 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, mwifiex_decode_rx_packet(adapter, skb_deaggr, pkt_type); } else { - dev_err(adapter->dev, " drop wrong aggr pkt:\t" - "sdio_single_port_rx_aggr=%d\t" - "type=%d len=%d max_len=%d\n", - adapter->sdio_rx_aggr_enable, - pkt_type, pkt_len, - len_arr[pind]); + mwifiex_dbg(adapter, ERROR, + "drop wrong aggr pkt:\t" + "sdio_single_port_rx_aggr=%d\t" + "type=%d len=%d max_len=%d\n", + adapter->sdio_rx_aggr_enable, + pkt_type, pkt_len, len_arr[pind]); dev_kfree_skb_any(skb_deaggr); } curr_ptr += len_arr[pind]; @@ -1354,13 +1392,14 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, rx_curr_single: if (f_do_rx_cur) { - dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n", - port, rx_len); + mwifiex_dbg(adapter, INFO, "info: RX: port: %d, rx_len: %d\n", + port, rx_len); skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA); if (!skb) { - dev_err(adapter->dev, "single skb allocated fail,\t" - "drop pkt port=%d len=%d\n", port, rx_len); + mwifiex_dbg(adapter, ERROR, + "single skb allocated fail,\t" + "drop pkt port=%d len=%d\n", port, rx_len); if (mwifiex_sdio_card_to_host(adapter, &pkt_type, card->mpa_rx.buf, rx_len, adapter->ioport + port)) @@ -1376,9 +1415,9 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, goto error; if (!adapter->sdio_rx_aggr_enable && pkt_type == MWIFIEX_TYPE_AGGR_DATA) { - dev_err(adapter->dev, "drop wrong pkt type %d\t" - "current SDIO RX Aggr not enabled\n", - pkt_type); + mwifiex_dbg(adapter, ERROR, "drop wrong pkt type %d\t" + "current SDIO RX Aggr not enabled\n", + pkt_type); dev_kfree_skb_any(skb); return 0; } @@ -1386,7 +1425,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, mwifiex_decode_rx_packet(adapter, skb, pkt_type); } if (f_post_aggr_cur) { - dev_dbg(adapter->dev, "info: current packet aggregation\n"); + mwifiex_dbg(adapter, INFO, + "info: current packet aggregation\n"); /* Curr pkt can be aggregated */ mp_rx_aggr_setup(card, rx_len, port); } @@ -1458,7 +1498,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) MWIFIEX_RX_DATA_BUF_SIZE) return -1; rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE); - dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len); + mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", rx_len); skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA); if (!skb) @@ -1469,17 +1509,17 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data, skb->len, adapter->ioport | CMD_PORT_SLCT)) { - dev_err(adapter->dev, - "%s: failed to card_to_host", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: failed to card_to_host", __func__); dev_kfree_skb_any(skb); goto term_cmd; } if ((pkt_type != MWIFIEX_TYPE_CMD) && (pkt_type != MWIFIEX_TYPE_EVENT)) - dev_err(adapter->dev, - "%s:Received wrong packet on cmd port", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s:Received wrong packet on cmd port", + __func__); mwifiex_decode_rx_packet(adapter, skb, pkt_type); } @@ -1495,12 +1535,13 @@ static int mwifiex_process_int_status(struct 
mwifiex_adapter *adapter) } card->mp_wr_bitmap = bitmap; - dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%x\n", - card->mp_wr_bitmap); + mwifiex_dbg(adapter, INTR, + "int: DNLD: wr_bitmap=0x%x\n", + card->mp_wr_bitmap); if (adapter->data_sent && (card->mp_wr_bitmap & card->mp_data_port_mask)) { - dev_dbg(adapter->dev, - "info: <--- Tx DONE Interrupt --->\n"); + mwifiex_dbg(adapter, INTR, + "info: <--- Tx DONE Interrupt --->\n"); adapter->data_sent = false; } } @@ -1517,8 +1558,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) adapter->cmd_sent = false; } - dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n", - adapter->cmd_sent, adapter->data_sent); + mwifiex_dbg(adapter, INTR, "info: cmd_sent=%d data_sent=%d\n", + adapter->cmd_sent, adapter->data_sent); if (sdio_ireg & UP_LD_HOST_INT_STATUS) { bitmap = (u32) card->mp_regs[reg->rd_bitmap_l]; bitmap |= ((u32) card->mp_regs[reg->rd_bitmap_u]) << 8; @@ -1529,40 +1570,45 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) ((u32) card->mp_regs[reg->rd_bitmap_1u]) << 24; } card->mp_rd_bitmap = bitmap; - dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%x\n", - card->mp_rd_bitmap); + mwifiex_dbg(adapter, INTR, + "int: UPLD: rd_bitmap=0x%x\n", + card->mp_rd_bitmap); while (true) { ret = mwifiex_get_rd_port(adapter, &port); if (ret) { - dev_dbg(adapter->dev, - "info: no more rd_port available\n"); + mwifiex_dbg(adapter, INFO, + "info: no more rd_port available\n"); break; } len_reg_l = reg->rd_len_p0_l + (port << 1); len_reg_u = reg->rd_len_p0_u + (port << 1); rx_len = ((u16) card->mp_regs[len_reg_u]) << 8; rx_len |= (u16) card->mp_regs[len_reg_l]; - dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n", - port, rx_len); + mwifiex_dbg(adapter, INFO, + "info: RX: port=%d rx_len=%u\n", + port, rx_len); rx_blocks = (rx_len + MWIFIEX_SDIO_BLOCK_SIZE - 1) / MWIFIEX_SDIO_BLOCK_SIZE; if (rx_len <= INTF_HEADER_LEN || (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) > card->mpa_rx.buf_size) { - dev_err(adapter->dev, "invalid rx_len=%d\n", - rx_len); + mwifiex_dbg(adapter, ERROR, + "invalid rx_len=%d\n", + rx_len); return -1; } rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE); - dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len); + mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", + rx_len); if (mwifiex_sdio_card_to_host_mp_aggr(adapter, rx_len, port)) { - dev_err(adapter->dev, "card_to_host_mpa failed:" - " int status=%#x\n", sdio_ireg); + mwifiex_dbg(adapter, ERROR, + "card_to_host_mpa failed: int status=%#x\n", + sdio_ireg); goto term_cmd; } } @@ -1573,19 +1619,23 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) term_cmd: /* terminate cmd */ if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr)) - dev_err(adapter->dev, "read CFG reg failed\n"); + mwifiex_dbg(adapter, ERROR, "read CFG reg failed\n"); else - dev_dbg(adapter->dev, "info: CFG reg val = %d\n", cr); + mwifiex_dbg(adapter, INFO, + "info: CFG reg val = %d\n", cr); if (mwifiex_write_reg(adapter, CONFIGURATION_REG, (cr | 0x04))) - dev_err(adapter->dev, "write CFG reg failed\n"); + mwifiex_dbg(adapter, ERROR, + "write CFG reg failed\n"); else - dev_dbg(adapter->dev, "info: write success\n"); + mwifiex_dbg(adapter, INFO, "info: write success\n"); if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr)) - dev_err(adapter->dev, "read CFG reg failed\n"); + mwifiex_dbg(adapter, ERROR, + "read CFG reg failed\n"); else - dev_dbg(adapter->dev, "info: CFG reg val =%x\n", cr); + mwifiex_dbg(adapter, INFO, + "info: CFG reg val =%x\n", cr); 
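A recurring detail in the interrupt path above: every RX length is rounded up to a whole number of SDIO blocks before the transfer, and the packet is rejected if the rounded size exceeds the aggregation buffer. The rounding is just:

#define SDIO_BLOCK_SIZE	256u	/* stand-in for MWIFIEX_SDIO_BLOCK_SIZE */

static unsigned int round_to_blocks(unsigned int rx_len)
{
	unsigned int blocks =
		(rx_len + SDIO_BLOCK_SIZE - 1) / SDIO_BLOCK_SIZE;

	return blocks * SDIO_BLOCK_SIZE;
}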
return -1; } @@ -1619,8 +1669,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, if (!card->mpa_tx.enabled || (card->has_control_mask && (port == CTRL_PORT)) || (card->supports_sdio_new_mode && (port == CMD_PORT_SLCT))) { - dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n", - __func__); + mwifiex_dbg(adapter, WARN, + "info: %s: tx aggregation disabled\n", + __func__); f_send_cur_buf = 1; goto tx_curr_single; @@ -1628,8 +1679,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, if (next_pkt_len) { /* More pkt in TX queue */ - dev_dbg(adapter->dev, "info: %s: more packets in queue.\n", - __func__); + mwifiex_dbg(adapter, INFO, + "info: %s: more packets in queue.\n", + __func__); if (MP_TX_AGGR_IN_PROGRESS(card)) { if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) { @@ -1659,8 +1711,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, } } else { /* Last pkt in TX queue */ - dev_dbg(adapter->dev, "info: %s: Last packet in Tx Queue.\n", - __func__); + mwifiex_dbg(adapter, INFO, + "info: %s: Last packet in Tx Queue.\n", + __func__); if (MP_TX_AGGR_IN_PROGRESS(card)) { /* some packs in Aggr buf already */ @@ -1677,8 +1730,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, } if (f_precopy_cur_buf) { - dev_dbg(adapter->dev, "data: %s: precopy current buffer\n", - __func__); + mwifiex_dbg(adapter, DATA, + "data: %s: precopy current buffer\n", + __func__); MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port); if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) || @@ -1688,9 +1742,10 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, } if (f_send_aggr_buf) { - dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n", - __func__, - card->mpa_tx.start_port, card->mpa_tx.ports); + mwifiex_dbg(adapter, DATA, + "data: %s: send aggr buffer: %d %d\n", + __func__, card->mpa_tx.start_port, + card->mpa_tx.ports); if (card->supports_sdio_new_mode) { u32 port_count; int i; @@ -1719,15 +1774,17 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, tx_curr_single: if (f_send_cur_buf) { - dev_dbg(adapter->dev, "data: %s: send current buffer %d\n", - __func__, port); + mwifiex_dbg(adapter, DATA, + "data: %s: send current buffer %d\n", + __func__, port); ret = mwifiex_write_data_to_card(adapter, payload, pkt_len, adapter->ioport + port); } if (f_postcopy_cur_buf) { - dev_dbg(adapter->dev, "data: %s: postcopy current buffer\n", - __func__); + mwifiex_dbg(adapter, DATA, + "data: %s: postcopy current buffer\n", + __func__); MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port); } @@ -1771,8 +1828,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter, if (type == MWIFIEX_TYPE_DATA) { ret = mwifiex_get_wr_port_data(adapter, &port); if (ret) { - dev_err(adapter->dev, "%s: no wr_port available\n", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s: no wr_port available\n", + __func__); return ret; } } else { @@ -1781,8 +1839,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter, if (pkt_len <= INTF_HEADER_LEN || pkt_len > MWIFIEX_UPLD_SIZE) - dev_err(adapter->dev, "%s: payload=%p, nb=%d\n", - __func__, payload, pkt_len); + mwifiex_dbg(adapter, ERROR, + "%s: payload=%p, nb=%d\n", + __func__, payload, pkt_len); if (card->supports_sdio_new_mode) port = CMD_PORT_SLCT; @@ -1896,7 +1955,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE); sdio_release_host(func); if (ret) { - 
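The TX side mirrors the RX aggregation decisions: depending on whether more packets are queued and whether the staging buffer still has room, a packet is staged into the aggregation buffer, the buffer is flushed before staging, or the packet is sent on its own. A reduced decision model (struct, fields and thresholds invented; the driver's precopy/postcopy split is collapsed into one "stage" action):

struct tx_aggr {
	unsigned int staged;	/* bytes already in the aggr buffer */
	unsigned int room;	/* bytes left in the aggr buffer */
	unsigned int pkt_cnt, pkt_limit;
};

enum tx_action { TX_STAGE, TX_FLUSH_THEN_STAGE, TX_SEND_SINGLE };

static enum tx_action classify(struct tx_aggr *a, unsigned int pkt_len,
			       int more_pkts_queued)
{
	if (!more_pkts_queued && !a->staged)
		return TX_SEND_SINGLE;		/* last packet, nothing staged */
	if (pkt_len > a->room || a->pkt_cnt >= a->pkt_limit)
		return TX_FLUSH_THEN_STAGE;	/* no room: flush first */
	return TX_STAGE;			/* aggregate for later */
}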
pr_err("cannot set SDIO block size\n"); + mwifiex_dbg(adapter, ERROR, + "cannot set SDIO block size\n"); return ret; } @@ -1977,7 +2037,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) card->mp_tx_agg_buf_size, card->mp_rx_agg_buf_size); if (ret) { - dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n"); + mwifiex_dbg(adapter, ERROR, + "failed to alloc sdio mp-a buffers\n"); kfree(card->mp_regs); return -1; } @@ -2041,8 +2102,9 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port) card->curr_wr_port = reg->start_wr_port; - dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n", - port, card->mp_data_port_mask); + mwifiex_dbg(adapter, CMD, + "cmd: mp_end_port %d, data port mask 0x%x\n", + port, card->mp_data_port_mask); } static struct mwifiex_adapter *save_adapter; @@ -2059,7 +2121,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) * We run it in a totally independent workqueue. */ - pr_err("Resetting card...\n"); + mwifiex_dbg(adapter, WARN, "Resetting card...\n"); mmc_remove_host(target); /* 200ms delay is based on experiment with sdhci controller */ mdelay(200); @@ -2079,14 +2141,14 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter, sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl, &ret); if (ret) { - dev_err(adapter->dev, "SDIO Write ERR\n"); + mwifiex_dbg(adapter, ERROR, "SDIO Write ERR\n"); return RDWR_STATUS_FAILURE; } for (tries = 0; tries < MAX_POLL_TRIES; tries++) { ctrl_data = sdio_readb(card->func, card->reg->fw_dump_ctrl, &ret); if (ret) { - dev_err(adapter->dev, "SDIO read err\n"); + mwifiex_dbg(adapter, ERROR, "SDIO read err\n"); return RDWR_STATUS_FAILURE; } if (ctrl_data == FW_DUMP_DONE) @@ -2094,19 +2156,20 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter, if (doneflag && ctrl_data == doneflag) return RDWR_STATUS_DONE; if (ctrl_data != FW_DUMP_HOST_READY) { - dev_info(adapter->dev, - "The ctrl reg was changed, re-try again!\n"); + mwifiex_dbg(adapter, WARN, + "The ctrl reg was changed, re-try again!\n"); sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl, &ret); if (ret) { - dev_err(adapter->dev, "SDIO write err\n"); + mwifiex_dbg(adapter, ERROR, "SDIO write err\n"); return RDWR_STATUS_FAILURE; } } usleep_range(100, 200); } if (ctrl_data == FW_DUMP_HOST_READY) { - dev_err(adapter->dev, "Fail to pull ctrl_data\n"); + mwifiex_dbg(adapter, ERROR, + "Fail to pull ctrl_data\n"); return RDWR_STATUS_FAILURE; } @@ -2114,7 +2177,7 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter, } /* This function dump firmware memory to file */ -static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) +static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; int ret = 0; @@ -2122,9 +2185,6 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0; enum rdwr_status stat; u32 memory_size; - static char *env[] = { "DRIVER=mwifiex_sdio", "EVENT=fw_dump", NULL }; - - mwifiex_dump_drv_info(adapter); if (!card->can_dump_fw) return; @@ -2142,7 +2202,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) mwifiex_pm_wakeup_card(adapter); sdio_claim_host(card->func); - dev_info(adapter->dev, "== mwifiex firmware dump start ==\n"); + mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n"); stat = mwifiex_sdio_rdwr_firmware(adapter, 
doneflag); if (stat == RDWR_STATUS_FAILURE) @@ -2152,7 +2212,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) /* Read the number of the memories which will dump */ dump_num = sdio_readb(card->func, reg, &ret); if (ret) { - dev_err(adapter->dev, "SDIO read memory length err\n"); + mwifiex_dbg(adapter, ERROR, "SDIO read memory length err\n"); goto done; } @@ -2169,7 +2229,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) for (i = 0; i < 4; i++) { read_reg = sdio_readb(card->func, reg, &ret); if (ret) { - dev_err(adapter->dev, "SDIO read err\n"); + mwifiex_dbg(adapter, ERROR, "SDIO read err\n"); goto done; } memory_size |= (read_reg << i*8); @@ -2177,25 +2237,33 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) } if (memory_size == 0) { - dev_info(adapter->dev, "Firmware dump Finished!\n"); + mwifiex_dbg(adapter, DUMP, "Firmware dump Finished!\n"); + ret = mwifiex_write_reg(adapter, + card->reg->fw_dump_ctrl, + FW_DUMP_READ_DONE); + if (ret) { + mwifiex_dbg(adapter, ERROR, "SDIO write err\n"); + return; + } break; } - dev_info(adapter->dev, - "%s_SIZE=0x%x\n", entry->mem_name, memory_size); + mwifiex_dbg(adapter, DUMP, + "%s_SIZE=0x%x\n", entry->mem_name, memory_size); entry->mem_ptr = vmalloc(memory_size + 1); entry->mem_size = memory_size; if (!entry->mem_ptr) { - dev_err(adapter->dev, "Vmalloc %s failed\n", - entry->mem_name); + mwifiex_dbg(adapter, ERROR, "Vmalloc %s failed\n", + entry->mem_name); goto done; } dbg_ptr = entry->mem_ptr; end_ptr = dbg_ptr + memory_size; doneflag = entry->done_flag; - dev_info(adapter->dev, "Start %s output, please wait...\n", - entry->mem_name); + mwifiex_dbg(adapter, DUMP, + "Start %s output, please wait...\n", + entry->mem_name); do { stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag); @@ -2207,39 +2275,43 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) for (reg = reg_start; reg <= reg_end; reg++) { *dbg_ptr = sdio_readb(card->func, reg, &ret); if (ret) { - dev_err(adapter->dev, - "SDIO read err\n"); + mwifiex_dbg(adapter, ERROR, + "SDIO read err\n"); goto done; } if (dbg_ptr < end_ptr) dbg_ptr++; else - dev_err(adapter->dev, - "Allocated buf not enough\n"); + mwifiex_dbg(adapter, ERROR, + "Allocated buf not enough\n"); } if (stat != RDWR_STATUS_DONE) continue; - dev_info(adapter->dev, "%s done: size=0x%tx\n", - entry->mem_name, dbg_ptr - entry->mem_ptr); + mwifiex_dbg(adapter, DUMP, "%s done: size=0x%tx\n", + entry->mem_name, dbg_ptr - entry->mem_ptr); break; } while (1); } - dev_info(adapter->dev, "== mwifiex firmware dump end ==\n"); - - kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env); + mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n"); done: sdio_release_host(card->func); - adapter->curr_mem_idx = 0; +} + +static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter) +{ + mwifiex_drv_info_dump(adapter); + mwifiex_sdio_fw_dump(adapter); + mwifiex_upload_device_dump(adapter); } static void mwifiex_sdio_work(struct work_struct *work) { - if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP, + if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags)) - mwifiex_sdio_fw_dump_work(save_adapter); + mwifiex_sdio_device_dump_work(save_adapter); if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags)) mwifiex_sdio_card_reset_work(save_adapter); @@ -2259,13 +2331,13 @@ static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter) } /* This function dumps FW information */ -static 
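Each dump segment's size is reassembled from four byte-wide register reads, least significant byte first, exactly as in the memory_size loop above:

#include <stdint.h>

static uint32_t read_size32(const uint8_t regs[4])
{
	uint32_t size = 0;
	int i;

	for (i = 0; i < 4; i++)
		size |= (uint32_t)regs[i] << (i * 8);
	return size;
}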
void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter) +static void mwifiex_sdio_device_dump(struct mwifiex_adapter *adapter) { save_adapter = adapter; - if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags)) + if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags)) return; - set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags); + set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags); schedule_work(&sdio_work); } @@ -2285,7 +2357,7 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf) if (!p) return 0; - dev_info(adapter->dev, "SDIO register DUMP START\n"); + mwifiex_dbg(adapter, MSG, "SDIO register dump start\n"); mwifiex_pm_wakeup_card(adapter); @@ -2351,13 +2423,13 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf) reg++; } - dev_info(adapter->dev, "%s\n", buf); + mwifiex_dbg(adapter, MSG, "%s\n", buf); p += sprintf(p, "%s\n", buf); } sdio_release_host(cardp->func); - dev_info(adapter->dev, "SDIO register DUMP END\n"); + mwifiex_dbg(adapter, MSG, "SDIO register dump end\n"); return p - drv_buf; } @@ -2382,8 +2454,8 @@ static struct mwifiex_if_ops sdio_ops = { .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete, .event_complete = mwifiex_sdio_event_complete, .card_reset = mwifiex_sdio_card_reset, - .fw_dump = mwifiex_sdio_fw_dump, .reg_dump = mwifiex_sdio_reg_dump, + .device_dump = mwifiex_sdio_device_dump, .deaggr_pkt = mwifiex_deaggr_sdio_pkt, }; diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 49422f2a5380..037adcd1f484 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -77,8 +77,8 @@ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv, struct host_cmd_ds_mac_control *mac_ctrl = &cmd->params.mac_ctrl; if (cmd_action != HostCmd_ACT_GEN_SET) { - dev_err(priv->adapter->dev, - "mac_control: only support set cmd\n"); + mwifiex_dbg(priv->adapter, ERROR, + "mac_control: only support set cmd\n"); return -1; } @@ -112,7 +112,8 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv, { struct host_cmd_ds_802_11_snmp_mib *snmp_mib = &cmd->params.smib; - dev_dbg(priv->adapter->dev, "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid); + mwifiex_dbg(priv->adapter, CMD, + "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid); cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SNMP_MIB); cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_snmp_mib) - 1 + S_DS_GEN); @@ -129,11 +130,11 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv, le16_add_cpu(&cmd->size, sizeof(u16)); } - dev_dbg(priv->adapter->dev, - "cmd: SNMP_CMD: Action=0x%x, OID=0x%x, OIDSize=0x%x," - " Value=0x%x\n", - cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size), - le16_to_cpu(*(__le16 *) snmp_mib->value)); + mwifiex_dbg(priv->adapter, CMD, + "cmd: SNMP_CMD: Action=0x%x, OID=0x%x,\t" + "OIDSize=0x%x, Value=0x%x\n", + cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size), + le16_to_cpu(*(__le16 *)snmp_mib->value)); return 0; } @@ -356,9 +357,9 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv, (hscfg_param->conditions != cpu_to_le32(HS_CFG_CANCEL)) && ((adapter->arp_filter_size > 0) && (adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) { - dev_dbg(adapter->dev, - "cmd: Attach %d bytes ArpFilter to HSCfg cmd\n", - adapter->arp_filter_size); + mwifiex_dbg(adapter, CMD, + "cmd: Attach %d bytes ArpFilter to HSCfg cmd\n", + adapter->arp_filter_size); memcpy(((u8 *) hs_cfg) + sizeof(struct host_cmd_ds_802_11_hs_cfg_enh), 
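The rename from fw_dump to device_dump also highlights the driver's deferred-work idiom: the trigger sets a flag bit and schedules shared work, and the worker consumes flags with test_and_clear_bit(), so duplicate triggers coalesce into one dump. A self-contained sketch of that idiom (flag name and work body are placeholders):

#include <linux/workqueue.h>
#include <linux/bitops.h>

#define WORK_DEVICE_DUMP	0

static unsigned long iface_flags;

static void dump_worker(struct work_struct *work)
{
	if (!test_and_clear_bit(WORK_DEVICE_DUMP, &iface_flags))
		return;
	/* drv info dump, fw dump, then upload would run here */
}

static DECLARE_WORK(dump_work, dump_worker);

static void request_device_dump(void)
{
	if (test_bit(WORK_DEVICE_DUMP, &iface_flags))
		return;			/* a dump is already pending */
	set_bit(WORK_DEVICE_DUMP, &iface_flags);
	schedule_work(&dump_work);
}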
adapter->arp_filter, adapter->arp_filter_size); @@ -378,11 +379,11 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv, hs_cfg->params.hs_config.conditions = hscfg_param->conditions; hs_cfg->params.hs_config.gpio = hscfg_param->gpio; hs_cfg->params.hs_config.gap = hscfg_param->gap; - dev_dbg(adapter->dev, - "cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n", - hs_cfg->params.hs_config.conditions, - hs_cfg->params.hs_config.gpio, - hs_cfg->params.hs_config.gap); + mwifiex_dbg(adapter, CMD, + "cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n", + hs_cfg->params.hs_config.conditions, + hs_cfg->params.hs_config.gpio, + hs_cfg->params.hs_config.gap); } return 0; @@ -462,7 +463,7 @@ static int mwifiex_cmd_802_11_deauthenticate(struct mwifiex_private *priv, /* Set AP MAC address */ memcpy(deauth->mac_addr, mac, ETH_ALEN); - dev_dbg(priv->adapter->dev, "cmd: Deauth: %pM\n", deauth->mac_addr); + mwifiex_dbg(priv->adapter, CMD, "cmd: Deauth: %pM\n", deauth->mac_addr); deauth->reason_code = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING); @@ -540,9 +541,9 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv, } else if (!priv->wep_key[i].key_length) { continue; } else { - dev_err(priv->adapter->dev, - "key%d Length = %d is incorrect\n", - (i + 1), priv->wep_key[i].key_length); + mwifiex_dbg(priv->adapter, ERROR, + "key%d Length = %d is incorrect\n", + (i + 1), priv->wep_key[i].key_length); return -1; } } @@ -562,7 +563,8 @@ static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv, u16 size, len = KEY_PARAMS_FIXED_LEN; if (enc_key->is_igtk_key) { - dev_dbg(adapter->dev, "%s: Set CMAC AES Key\n", __func__); + mwifiex_dbg(adapter, INFO, + "%s: Set CMAC AES Key\n", __func__); if (enc_key->is_rx_seq_valid) memcpy(km->key_param_set.key_params.cmac_aes.ipn, enc_key->pn, enc_key->pn_len); @@ -575,7 +577,8 @@ static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv, enc_key->key_material, enc_key->key_len); len += sizeof(struct mwifiex_cmac_aes_param); } else { - dev_dbg(adapter->dev, "%s: Set AES Key\n", __func__); + mwifiex_dbg(adapter, INFO, + "%s: Set AES Key\n", __func__); if (enc_key->is_rx_seq_valid) memcpy(km->key_param_set.key_params.aes.pn, enc_key->pn, enc_key->pn_len); @@ -619,7 +622,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv, km->action = cpu_to_le16(cmd_action); if (cmd_action == HostCmd_ACT_GEN_GET) { - dev_dbg(adapter->dev, "%s: Get key\n", __func__); + mwifiex_dbg(adapter, INFO, "%s: Get key\n", __func__); km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK; km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2); @@ -646,7 +649,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv, sizeof(struct mwifiex_ie_type_key_param_set_v2)); if (enc_key->key_disable) { - dev_dbg(adapter->dev, "%s: Remove key\n", __func__); + mwifiex_dbg(adapter, INFO, "%s: Remove key\n", __func__); km->action = cpu_to_le16(HostCmd_ACT_GEN_REMOVE); km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2); km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN); @@ -667,7 +670,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv, memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN); if (enc_key->key_len <= WLAN_KEY_LEN_WEP104) { - dev_dbg(adapter->dev, "%s: Set WEP Key\n", __func__); + mwifiex_dbg(adapter, INFO, "%s: Set WEP Key\n", __func__); len += sizeof(struct mwifiex_wep_param); km->key_param_set.len = cpu_to_le16(len); km->key_param_set.key_type = KEY_TYPE_ID_WEP; @@ -710,7 +713,7 @@ 
mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv, key_info |= KEY_UNICAST | KEY_TX_KEY | KEY_RX_KEY; if (enc_key->is_wapi_key) { - dev_dbg(adapter->dev, "%s: Set WAPI Key\n", __func__); + mwifiex_dbg(adapter, INFO, "%s: Set WAPI Key\n", __func__); km->key_param_set.key_type = KEY_TYPE_ID_WAPI; memcpy(km->key_param_set.key_params.wapi.pn, enc_key->pn, PN_LEN); @@ -750,7 +753,8 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv, return mwifiex_set_aes_key_v2(priv, cmd, enc_key, km); if (enc_key->key_len == WLAN_KEY_LEN_TKIP) { - dev_dbg(adapter->dev, "%s: Set TKIP Key\n", __func__); + mwifiex_dbg(adapter, INFO, + "%s: Set TKIP Key\n", __func__); if (enc_key->is_rx_seq_valid) memcpy(km->key_param_set.key_params.tkip.pn, enc_key->pn, enc_key->pn_len); @@ -814,7 +818,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv, memset(&key_material->key_param_set, 0, sizeof(struct mwifiex_ie_type_key_param_set)); if (enc_key->is_wapi_key) { - dev_dbg(priv->adapter->dev, "info: Set WAPI Key\n"); + mwifiex_dbg(priv->adapter, INFO, "info: Set WAPI Key\n"); key_material->key_param_set.key_type_id = cpu_to_le16(KEY_TYPE_ID_WAPI); if (cmd_oid == KEY_INFO_ENABLED) @@ -860,7 +864,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv, } if (enc_key->key_len == WLAN_KEY_LEN_CCMP) { if (enc_key->is_igtk_key) { - dev_dbg(priv->adapter->dev, "cmd: CMAC_AES\n"); + mwifiex_dbg(priv->adapter, CMD, "cmd: CMAC_AES\n"); key_material->key_param_set.key_type_id = cpu_to_le16(KEY_TYPE_ID_AES_CMAC); if (cmd_oid == KEY_INFO_ENABLED) @@ -873,7 +877,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv, key_material->key_param_set.key_info |= cpu_to_le16(KEY_IGTK); } else { - dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n"); + mwifiex_dbg(priv->adapter, CMD, "cmd: WPA_AES\n"); key_material->key_param_set.key_type_id = cpu_to_le16(KEY_TYPE_ID_AES); if (cmd_oid == KEY_INFO_ENABLED) @@ -892,7 +896,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv, cpu_to_le16(KEY_MCAST); } } else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) { - dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n"); + mwifiex_dbg(priv->adapter, CMD, "cmd: WPA_TKIP\n"); key_material->key_param_set.key_type_id = cpu_to_le16(KEY_TYPE_ID_TKIP); key_material->key_param_set.key_info = @@ -999,7 +1003,8 @@ static int mwifiex_cmd_802_11d_domain_info(struct mwifiex_private *priv, &domain_info->domain; u8 no_of_triplet = adapter->domain_reg.no_of_triplet; - dev_dbg(adapter->dev, "info: 11D: no_of_triplet=0x%x\n", no_of_triplet); + mwifiex_dbg(adapter, INFO, + "info: 11D: no_of_triplet=0x%x\n", no_of_triplet); cmd->command = cpu_to_le16(HostCmd_CMD_802_11D_DOMAIN_INFO); domain_info->action = cpu_to_le16(cmd_action); @@ -1071,6 +1076,26 @@ static int mwifiex_cmd_ibss_coalescing_status(struct host_cmd_ds_command *cmd, return 0; } +/* This function prepares command buffer to get/set memory location value. 
+ */ +static int +mwifiex_cmd_mem_access(struct host_cmd_ds_command *cmd, u16 cmd_action, + void *pdata_buf) +{ + struct mwifiex_ds_mem_rw *mem_rw = (void *)pdata_buf; + struct host_cmd_ds_mem_access *mem_access = (void *)&cmd->params.mem; + + cmd->command = cpu_to_le16(HostCmd_CMD_MEM_ACCESS); + cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_mem_access) + + S_DS_GEN); + + mem_access->action = cpu_to_le16(cmd_action); + mem_access->addr = cpu_to_le32(mem_rw->addr); + mem_access->value = cpu_to_le32(mem_rw->value); + + return 0; +} + /* * This function prepares command to set/get register value. * @@ -1215,8 +1240,9 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv, (u32)(card->sleep_cookie_pbase); host_spec->sleep_cookie_addr_hi = (u32)(((u64)(card->sleep_cookie_pbase)) >> 32); - dev_dbg(priv->adapter->dev, "sleep_cook_lo phy addr: 0x%x\n", - host_spec->sleep_cookie_addr_lo); + mwifiex_dbg(priv->adapter, INFO, + "sleep_cook_lo phy addr: 0x%x\n", + host_spec->sleep_cookie_addr_lo); } return 0; @@ -1243,7 +1269,8 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv, S_DS_GEN); subsc_evt->action = cpu_to_le16(subsc_evt_cfg->action); - dev_dbg(priv->adapter->dev, "cmd: action: %d\n", subsc_evt_cfg->action); + mwifiex_dbg(priv->adapter, CMD, + "cmd: action: %d\n", subsc_evt_cfg->action); /*For query requests, no configuration TLV structures are to be added.*/ if (subsc_evt_cfg->action == HostCmd_ACT_GEN_GET) @@ -1252,14 +1279,15 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv, subsc_evt->events = cpu_to_le16(subsc_evt_cfg->events); event_bitmap = subsc_evt_cfg->events; - dev_dbg(priv->adapter->dev, "cmd: event bitmap : %16x\n", - event_bitmap); + mwifiex_dbg(priv->adapter, CMD, "cmd: event bitmap : %16x\n", + event_bitmap); if (((subsc_evt_cfg->action == HostCmd_ACT_BITWISE_CLR) || (subsc_evt_cfg->action == HostCmd_ACT_BITWISE_SET)) && (event_bitmap == 0)) { - dev_dbg(priv->adapter->dev, "Error: No event specified " - "for bitwise action type\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Error: No event specified\t" + "for bitwise action type\n"); return -EINVAL; } @@ -1284,10 +1312,11 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv, rssi_tlv->abs_value = subsc_evt_cfg->bcn_l_rssi_cfg.abs_value; rssi_tlv->evt_freq = subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq; - dev_dbg(priv->adapter->dev, "Cfg Beacon Low Rssi event, " - "RSSI:-%d dBm, Freq:%d\n", - subsc_evt_cfg->bcn_l_rssi_cfg.abs_value, - subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq); + mwifiex_dbg(priv->adapter, EVENT, + "Cfg Beacon Low Rssi event,\t" + "RSSI:-%d dBm, Freq:%d\n", + subsc_evt_cfg->bcn_l_rssi_cfg.abs_value, + subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq); pos += sizeof(struct mwifiex_ie_types_rssi_threshold); le16_add_cpu(&cmd->size, @@ -1304,10 +1333,11 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv, rssi_tlv->abs_value = subsc_evt_cfg->bcn_h_rssi_cfg.abs_value; rssi_tlv->evt_freq = subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq; - dev_dbg(priv->adapter->dev, "Cfg Beacon High Rssi event, " - "RSSI:-%d dBm, Freq:%d\n", - subsc_evt_cfg->bcn_h_rssi_cfg.abs_value, - subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq); + mwifiex_dbg(priv->adapter, EVENT, + "Cfg Beacon High Rssi event,\t" + "RSSI:-%d dBm, Freq:%d\n", + subsc_evt_cfg->bcn_h_rssi_cfg.abs_value, + subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq); pos += sizeof(struct mwifiex_ie_types_rssi_threshold); le16_add_cpu(&cmd->size, @@ -1463,12 +1493,14 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv, data, len); if (ret) return ret; - 
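The new HostCmd_CMD_MEM_ACCESS plumbing marshals an action/address/value triple in little-endian order, as cmd response handling later reads the same fields back. A userspace approximation of the payload layout (the field order and the reserved field are assumptions for illustration, not the driver's authoritative struct):

#include <stdint.h>
#include <endian.h>

struct mem_access_cmd {
	uint16_t action;	/* 0 = get, 1 = set (HostCmd_ACT_GEN_*) */
	uint16_t reserved;
	uint32_t addr;		/* firmware memory address */
	uint32_t value;		/* value to write; ignored on a read */
} __attribute__((packed));

static void fill_mem_read(struct mem_access_cmd *cmd, uint32_t addr)
{
	cmd->action = htole16(0);	/* HostCmd_ACT_GEN_GET */
	cmd->reserved = 0;
	cmd->addr = htole32(addr);
	cmd->value = 0;
}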
dev_dbg(adapter->dev, - "download cfg_data from device tree: %s\n", prop->name); + mwifiex_dbg(adapter, INFO, + "download cfg_data from device tree: %s\n", + prop->name); } else if (adapter->cal_data->data && adapter->cal_data->size > 0) { len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data, adapter->cal_data->size, data); - dev_dbg(adapter->dev, "download cfg_data from config file\n"); + mwifiex_dbg(adapter, INFO, + "download cfg_data from config file\n"); } else { return -1; } @@ -1583,9 +1615,9 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CONFIG); if (!params) { - dev_err(priv->adapter->dev, - "TDLS config params not available for %pM\n", - oper->peer_mac); + mwifiex_dbg(priv->adapter, ERROR, + "TDLS config params not available for %pM\n", + oper->peer_mac); return -ENODATA; } @@ -1663,7 +1695,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, break; default: - dev_err(priv->adapter->dev, "Unknown TDLS operation\n"); + mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS operation\n"); return -ENOTSUPP; } @@ -1870,8 +1902,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, ret = mwifiex_cmd_11n_cfg(priv, cmd_ptr, cmd_action, data_buf); break; case HostCmd_CMD_WMM_GET_STATUS: - dev_dbg(priv->adapter->dev, - "cmd: WMM: WMM_GET_STATUS cmd sent\n"); + mwifiex_dbg(priv->adapter, CMD, + "cmd: WMM: WMM_GET_STATUS cmd sent\n"); cmd_ptr->command = cpu_to_le16(HostCmd_CMD_WMM_GET_STATUS); cmd_ptr->size = cpu_to_le16(sizeof(struct host_cmd_ds_wmm_get_status) + @@ -1885,6 +1917,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_802_11_SCAN_EXT: ret = mwifiex_cmd_802_11_scan_ext(priv, cmd_ptr, data_buf); break; + case HostCmd_CMD_MEM_ACCESS: + ret = mwifiex_cmd_mem_access(cmd_ptr, cmd_action, data_buf); + break; case HostCmd_CMD_MAC_REG_ACCESS: case HostCmd_CMD_BBP_REG_ACCESS: case HostCmd_CMD_RF_REG_ACCESS: @@ -1932,8 +1967,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, data_buf); break; default: - dev_err(priv->adapter->dev, - "PREP_CMD: unknown cmd- %#x\n", cmd_no); + mwifiex_dbg(priv->adapter, ERROR, + "PREP_CMD: unknown cmd- %#x\n", cmd_no); ret = -1; break; } @@ -2024,8 +2059,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) &sdio_sp_rx_aggr_enable, true); if (ret) { - dev_err(priv->adapter->dev, - "error while enabling SP aggregation..disable it"); + mwifiex_dbg(priv->adapter, ERROR, + "error while enabling SP aggregation..disable it"); adapter->sdio_rx_aggr_enable = false; } } @@ -2108,8 +2143,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) HostCmd_ACT_GEN_SET, DOT11D_I, &state_11d, true); if (ret) - dev_err(priv->adapter->dev, - "11D: failed to enable 11D\n"); + mwifiex_dbg(priv->adapter, ERROR, + "11D: failed to enable 11D\n"); } /* Send cmd to FW to configure 11n specific configuration diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 88dc6b672ef4..aa5b9a310340 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -49,8 +49,9 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv, struct host_cmd_ds_802_11_ps_mode_enh *pm; unsigned long flags; - dev_err(adapter->dev, "CMD_RESP: cmd %#x error, result=%#x\n", - resp->command, resp->result); + mwifiex_dbg(adapter, ERROR, + "CMD_RESP: cmd %#x error, result=%#x\n", + resp->command, 
resp->result); if (adapter->curr_cmd->wait_q_enabled) adapter->cmd_wait_q.status = -1; @@ -58,9 +59,9 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv, switch (le16_to_cpu(resp->command)) { case HostCmd_CMD_802_11_PS_MODE_ENH: pm = &resp->params.psmode_enh; - dev_err(adapter->dev, - "PS_MODE_ENH cmd failed: result=0x%x action=0x%X\n", - resp->result, le16_to_cpu(pm->action)); + mwifiex_dbg(adapter, ERROR, + "PS_MODE_ENH cmd failed: result=0x%x action=0x%X\n", + resp->result, le16_to_cpu(pm->action)); /* We do not re-try enter-ps command in ad-hoc mode. */ if (le16_to_cpu(pm->action) == EN_AUTO_PS && (le16_to_cpu(pm->params.ps_bitmap) & BITMAP_STA_PS) && @@ -91,7 +92,8 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv, break; case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG: - dev_err(priv->adapter->dev, "SDIO RX single-port aggregation Not support\n"); + mwifiex_dbg(adapter, MSG, + "SDIO RX single-port aggregation Not support\n"); break; default: @@ -187,29 +189,34 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv, u16 query_type = le16_to_cpu(smib->query_type); u32 ul_temp; - dev_dbg(priv->adapter->dev, "info: SNMP_RESP: oid value = %#x," - " query_type = %#x, buf size = %#x\n", - oid, query_type, le16_to_cpu(smib->buf_size)); + mwifiex_dbg(priv->adapter, INFO, + "info: SNMP_RESP: oid value = %#x,\t" + "query_type = %#x, buf size = %#x\n", + oid, query_type, le16_to_cpu(smib->buf_size)); if (query_type == HostCmd_ACT_GEN_GET) { ul_temp = le16_to_cpu(*((__le16 *) (smib->value))); if (data_buf) *data_buf = ul_temp; switch (oid) { case FRAG_THRESH_I: - dev_dbg(priv->adapter->dev, - "info: SNMP_RESP: FragThsd =%u\n", ul_temp); + mwifiex_dbg(priv->adapter, INFO, + "info: SNMP_RESP: FragThsd =%u\n", + ul_temp); break; case RTS_THRESH_I: - dev_dbg(priv->adapter->dev, - "info: SNMP_RESP: RTSThsd =%u\n", ul_temp); + mwifiex_dbg(priv->adapter, INFO, + "info: SNMP_RESP: RTSThsd =%u\n", + ul_temp); break; case SHORT_RETRY_LIM_I: - dev_dbg(priv->adapter->dev, - "info: SNMP_RESP: TxRetryCount=%u\n", ul_temp); + mwifiex_dbg(priv->adapter, INFO, + "info: SNMP_RESP: TxRetryCount=%u\n", + ul_temp); break; case DTIM_PERIOD_I: - dev_dbg(priv->adapter->dev, - "info: SNMP_RESP: DTIM period=%u\n", ul_temp); + mwifiex_dbg(priv->adapter, INFO, + "info: SNMP_RESP: DTIM period=%u\n", + ul_temp); default: break; } @@ -426,14 +433,15 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv, priv->tx_power_level = (u16) pg->power_min; break; default: - dev_err(adapter->dev, "CMD_RESP: unknown cmd action %d\n", - action); + mwifiex_dbg(adapter, ERROR, + "CMD_RESP: unknown cmd action %d\n", + action); return 0; } - dev_dbg(adapter->dev, - "info: Current TxPower Level = %d, Max Power=%d, Min Power=%d\n", - priv->tx_power_level, priv->max_tx_power_level, - priv->min_tx_power_level); + mwifiex_dbg(adapter, INFO, + "info: Current TxPower Level = %d, Max Power=%d, Min Power=%d\n", + priv->tx_power_level, priv->max_tx_power_level, + priv->min_tx_power_level); return 0; } @@ -454,10 +462,10 @@ static int mwifiex_ret_rf_tx_power(struct mwifiex_private *priv, priv->min_tx_power_level = txp->min_power; } - dev_dbg(priv->adapter->dev, - "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n", - priv->tx_power_level, priv->max_tx_power_level, - priv->min_tx_power_level); + mwifiex_dbg(priv->adapter, INFO, + "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n", + priv->tx_power_level, priv->max_tx_power_level, + priv->min_tx_power_level); return 0; } @@ -473,18 +481,18 @@ static int 
mwifiex_ret_rf_antenna(struct mwifiex_private *priv, struct mwifiex_adapter *adapter = priv->adapter; if (adapter->hw_dev_mcs_support == HT_STREAM_2X2) - dev_dbg(adapter->dev, - "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x" - " Rx action = 0x%x, Rx Mode = 0x%04x\n", - le16_to_cpu(ant_mimo->action_tx), - le16_to_cpu(ant_mimo->tx_ant_mode), - le16_to_cpu(ant_mimo->action_rx), - le16_to_cpu(ant_mimo->rx_ant_mode)); + mwifiex_dbg(adapter, INFO, + "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x\t" + "Rx action = 0x%x, Rx Mode = 0x%04x\n", + le16_to_cpu(ant_mimo->action_tx), + le16_to_cpu(ant_mimo->tx_ant_mode), + le16_to_cpu(ant_mimo->action_rx), + le16_to_cpu(ant_mimo->rx_ant_mode)); else - dev_dbg(adapter->dev, - "RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n", - le16_to_cpu(ant_siso->action), - le16_to_cpu(ant_siso->ant_mode)); + mwifiex_dbg(adapter, INFO, + "RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n", + le16_to_cpu(ant_siso->action), + le16_to_cpu(ant_siso->ant_mode)); return 0; } @@ -502,8 +510,8 @@ static int mwifiex_ret_802_11_mac_address(struct mwifiex_private *priv, memcpy(priv->curr_addr, cmd_mac_addr->mac_addr, ETH_ALEN); - dev_dbg(priv->adapter->dev, - "info: set mac address: %pM\n", priv->curr_addr); + mwifiex_dbg(priv->adapter, INFO, + "info: set mac address: %pM\n", priv->curr_addr); return 0; } @@ -587,7 +595,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) { if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) { - dev_dbg(priv->adapter->dev, "info: key: GTK is set\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: key: GTK is set\n"); priv->wpa_is_gtk_set = true; priv->scan_block = false; } @@ -617,7 +626,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, key_v2 = &resp->params.key_material_v2; if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) { if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) { - dev_dbg(priv->adapter->dev, "info: key: GTK is set\n"); + mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n"); priv->wpa_is_gtk_set = true; priv->scan_block = false; } @@ -663,14 +672,14 @@ static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv, - IEEE80211_COUNTRY_STRING_LEN) / sizeof(struct ieee80211_country_ie_triplet)); - dev_dbg(priv->adapter->dev, - "info: 11D Domain Info Resp: no_of_triplet=%d\n", - no_of_triplet); + mwifiex_dbg(priv->adapter, INFO, + "info: 11D Domain Info Resp: no_of_triplet=%d\n", + no_of_triplet); if (no_of_triplet > MWIFIEX_MAX_TRIPLET_802_11D) { - dev_warn(priv->adapter->dev, - "11D: invalid number of triplets %d returned\n", - no_of_triplet); + mwifiex_dbg(priv->adapter, FATAL, + "11D: invalid number of triplets %d returned\n", + no_of_triplet); return -1; } @@ -680,8 +689,8 @@ static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv, case HostCmd_ACT_GEN_GET: break; default: - dev_err(priv->adapter->dev, - "11D: invalid action:%d\n", domain_info->action); + mwifiex_dbg(priv->adapter, ERROR, + "11D: invalid action:%d\n", domain_info->action); return -1; } @@ -741,6 +750,19 @@ mwifiex_ret_p2p_mode_cfg(struct mwifiex_private *priv, return 0; } +/* This function handles the command response of mem_access command + */ +static int +mwifiex_ret_mem_access(struct mwifiex_private *priv, + struct host_cmd_ds_command *resp, void *pioctl_buf) +{ + struct host_cmd_ds_mem_access *mem = (void *)&resp->params.mem; + + priv->mem_rw.addr = le32_to_cpu(mem->addr); + priv->mem_rw.value = 
le32_to_cpu(mem->value); + + return 0; +} /* * This function handles the command response of register access. * @@ -830,12 +852,12 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv, if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET) return 0; - dev_dbg(priv->adapter->dev, - "info: new BSSID %pM\n", ibss_coal_resp->bssid); + mwifiex_dbg(priv->adapter, INFO, + "info: new BSSID %pM\n", ibss_coal_resp->bssid); /* If rsp has NULL BSSID, Just return..... No Action */ if (is_zero_ether_addr(ibss_coal_resp->bssid)) { - dev_warn(priv->adapter->dev, "new BSSID is NULL\n"); + mwifiex_dbg(priv->adapter, FATAL, "new BSSID is NULL\n"); return 0; } @@ -871,48 +893,48 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv, case ACT_TDLS_DELETE: if (reason) { if (!node || reason == TDLS_ERR_LINK_NONEXISTENT) - dev_dbg(priv->adapter->dev, - "TDLS link delete for %pM failed: reason %d\n", - cmd_tdls_oper->peer_mac, reason); + mwifiex_dbg(priv->adapter, ERROR, + "TDLS link delete for %pM failed: reason %d\n", + cmd_tdls_oper->peer_mac, reason); else - dev_err(priv->adapter->dev, - "TDLS link delete for %pM failed: reason %d\n", - cmd_tdls_oper->peer_mac, reason); + mwifiex_dbg(priv->adapter, ERROR, + "TDLS link delete for %pM failed: reason %d\n", + cmd_tdls_oper->peer_mac, reason); } else { - dev_dbg(priv->adapter->dev, - "TDLS link delete for %pM successful\n", - cmd_tdls_oper->peer_mac); + mwifiex_dbg(priv->adapter, MSG, + "TDLS link delete for %pM successful\n", + cmd_tdls_oper->peer_mac); } break; case ACT_TDLS_CREATE: if (reason) { - dev_err(priv->adapter->dev, - "TDLS link creation for %pM failed: reason %d", - cmd_tdls_oper->peer_mac, reason); + mwifiex_dbg(priv->adapter, ERROR, + "TDLS link creation for %pM failed: reason %d", + cmd_tdls_oper->peer_mac, reason); if (node && reason != TDLS_ERR_LINK_EXISTS) node->tdls_status = TDLS_SETUP_FAILURE; } else { - dev_dbg(priv->adapter->dev, - "TDLS link creation for %pM successful", - cmd_tdls_oper->peer_mac); + mwifiex_dbg(priv->adapter, MSG, + "TDLS link creation for %pM successful", + cmd_tdls_oper->peer_mac); } break; case ACT_TDLS_CONFIG: if (reason) { - dev_err(priv->adapter->dev, - "TDLS link config for %pM failed, reason %d\n", - cmd_tdls_oper->peer_mac, reason); + mwifiex_dbg(priv->adapter, ERROR, + "TDLS link config for %pM failed, reason %d\n", + cmd_tdls_oper->peer_mac, reason); if (node) node->tdls_status = TDLS_SETUP_FAILURE; } else { - dev_dbg(priv->adapter->dev, - "TDLS link config for %pM successful\n", - cmd_tdls_oper->peer_mac); + mwifiex_dbg(priv->adapter, MSG, + "TDLS link config for %pM successful\n", + cmd_tdls_oper->peer_mac); } break; default: - dev_err(priv->adapter->dev, - "Unknown TDLS command action response %d", action); + mwifiex_dbg(priv->adapter, ERROR, + "Unknown TDLS command action response %d", action); return -1; } @@ -929,8 +951,9 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv, /* For every subscribe event command (Get/Set/Clear), FW reports the * current set of subscribed events*/ - dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n", - le16_to_cpu(cmd_sub_event->events)); + mwifiex_dbg(priv->adapter, EVENT, + "Bitmap of currently subscribed events: %16x\n", + le16_to_cpu(cmd_sub_event->events)); return 0; } @@ -940,7 +963,7 @@ static int mwifiex_ret_cfg_data(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { if (resp->result != HostCmd_RESULT_OK) { - dev_err(priv->adapter->dev, "Cal data cmd resp failed\n"); + 
mwifiex_dbg(priv->adapter, ERROR, "Cal data cmd resp failed\n"); return -1; } @@ -1008,8 +1031,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, break; case HostCmd_CMD_802_11_BG_SCAN_QUERY: ret = mwifiex_ret_802_11_scan(priv, resp); - dev_dbg(adapter->dev, - "info: CMD_RESP: BG_SCAN result is ready!\n"); + mwifiex_dbg(adapter, CMD, + "info: CMD_RESP: BG_SCAN result is ready!\n"); break; case HostCmd_CMD_TXPWR_CFG: ret = mwifiex_ret_tx_power_cfg(priv, resp); @@ -1088,8 +1111,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, / MWIFIEX_SDIO_BLOCK_SIZE) * MWIFIEX_SDIO_BLOCK_SIZE; adapter->curr_tx_buf_size = adapter->tx_buf_size; - dev_dbg(adapter->dev, "cmd: curr_tx_buf_size=%d\n", - adapter->curr_tx_buf_size); + mwifiex_dbg(adapter, CMD, "cmd: curr_tx_buf_size=%d\n", + adapter->curr_tx_buf_size); if (adapter->if_ops.update_mp_end_port) adapter->if_ops.update_mp_end_port(adapter, @@ -1103,6 +1126,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, case HostCmd_CMD_802_11_IBSS_COALESCING_STATUS: ret = mwifiex_ret_ibss_coalescing_status(priv, resp); break; + case HostCmd_CMD_MEM_ACCESS: + ret = mwifiex_ret_mem_access(priv, resp, data_buf); + break; case HostCmd_CMD_MAC_REG_ACCESS: case HostCmd_CMD_BBP_REG_ACCESS: case HostCmd_CMD_RF_REG_ACCESS: @@ -1146,8 +1172,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp); break; default: - dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n", - resp->command); + mwifiex_dbg(adapter, ERROR, + "CMD_RESP: unknown cmd response %#x\n", + resp->command); break; } diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index 0dc7a1d3993d..95203780010a 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c @@ -48,7 +48,8 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) if (!priv->media_connected) return; - dev_dbg(adapter->dev, "info: handles disconnect event\n"); + mwifiex_dbg(adapter, INFO, + "info: handles disconnect event\n"); priv->media_connected = false; @@ -104,12 +105,14 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) * it could be used for re-assoc */ - dev_dbg(adapter->dev, "info: previous SSID=%s, SSID len=%u\n", - priv->prev_ssid.ssid, priv->prev_ssid.ssid_len); + mwifiex_dbg(adapter, INFO, + "info: previous SSID=%s, SSID len=%u\n", + priv->prev_ssid.ssid, priv->prev_ssid.ssid_len); - dev_dbg(adapter->dev, "info: current SSID=%s, SSID len=%u\n", - priv->curr_bss_params.bss_descriptor.ssid.ssid, - priv->curr_bss_params.bss_descriptor.ssid.ssid_len); + mwifiex_dbg(adapter, INFO, + "info: current SSID=%s, SSID len=%u\n", + priv->curr_bss_params.bss_descriptor.ssid.ssid, + priv->curr_bss_params.bss_descriptor.ssid.ssid_len); memcpy(&priv->prev_ssid, &priv->curr_bss_params.bss_descriptor.ssid, @@ -127,13 +130,13 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) if (adapter->is_cmd_timedout && adapter->curr_cmd) return; priv->media_connected = false; - dev_dbg(adapter->dev, - "info: successfully disconnected from %pM: reason code %d\n", - priv->cfg_bssid, reason_code); + mwifiex_dbg(adapter, MSG, + "info: successfully disconnected from %pM: reason code %d\n", + priv->cfg_bssid, reason_code); if (priv->bss_mode == NL80211_IFTYPE_STATION || priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) { 
cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, - GFP_KERNEL); + false, GFP_KERNEL); } eth_zero_addr(priv->cfg_bssid); @@ -154,13 +157,13 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv, /* reserved 2 bytes are not mandatory in tdls event */ if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) - sizeof(u16) - sizeof(adapter->event_cause))) { - dev_err(adapter->dev, "Invalid event length!\n"); + mwifiex_dbg(adapter, ERROR, "Invalid event length!\n"); return -1; } sta_ptr = mwifiex_get_sta_entry(priv, tdls_evt->peer_mac); if (!sta_ptr) { - dev_err(adapter->dev, "cannot get sta entry!\n"); + mwifiex_dbg(adapter, ERROR, "cannot get sta entry!\n"); return -1; } @@ -239,21 +242,21 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) switch (eventcause) { case EVENT_DUMMY_HOST_WAKEUP_SIGNAL: - dev_err(adapter->dev, - "invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL, ignore it\n"); + mwifiex_dbg(adapter, ERROR, + "invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL, ignore it\n"); break; case EVENT_LINK_SENSED: - dev_dbg(adapter->dev, "event: LINK_SENSED\n"); + mwifiex_dbg(adapter, EVENT, "event: LINK_SENSED\n"); if (!netif_carrier_ok(priv->netdev)) netif_carrier_on(priv->netdev); mwifiex_wake_up_net_dev_queue(priv->netdev, adapter); break; case EVENT_DEAUTHENTICATED: - dev_dbg(adapter->dev, "event: Deauthenticated\n"); + mwifiex_dbg(adapter, EVENT, "event: Deauthenticated\n"); if (priv->wps.session_enable) { - dev_dbg(adapter->dev, - "info: receive deauth event in wps session\n"); + mwifiex_dbg(adapter, INFO, + "info: receive deauth event in wps session\n"); break; } adapter->dbg.num_event_deauth++; @@ -265,10 +268,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_DISASSOCIATED: - dev_dbg(adapter->dev, "event: Disassociated\n"); + mwifiex_dbg(adapter, EVENT, "event: Disassociated\n"); if (priv->wps.session_enable) { - dev_dbg(adapter->dev, - "info: receive disassoc event in wps session\n"); + mwifiex_dbg(adapter, INFO, + "info: receive disassoc event in wps session\n"); break; } adapter->dbg.num_event_disassoc++; @@ -280,7 +283,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_LINK_LOST: - dev_dbg(adapter->dev, "event: Link lost\n"); + mwifiex_dbg(adapter, EVENT, "event: Link lost\n"); adapter->dbg.num_event_link_lost++; if (priv->media_connected) { reason_code = @@ -290,7 +293,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_PS_SLEEP: - dev_dbg(adapter->dev, "info: EVENT: SLEEP\n"); + mwifiex_dbg(adapter, EVENT, "info: EVENT: SLEEP\n"); adapter->ps_state = PS_STATE_PRE_SLEEP; @@ -298,12 +301,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_PS_AWAKE: - dev_dbg(adapter->dev, "info: EVENT: AWAKE\n"); + mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n"); if (!adapter->pps_uapsd_mode && priv->media_connected && adapter->sleep_period.period) { adapter->pps_uapsd_mode = true; - dev_dbg(adapter->dev, - "event: PPS/UAPSD mode activated\n"); + mwifiex_dbg(adapter, EVENT, + "event: PPS/UAPSD mode activated\n"); } adapter->tx_lock_flag = false; if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) { @@ -333,26 +336,26 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_DEEP_SLEEP_AWAKE: adapter->if_ops.wakeup_complete(adapter); - dev_dbg(adapter->dev, "event: DS_AWAKE\n"); + mwifiex_dbg(adapter, EVENT, "event: DS_AWAKE\n"); if (adapter->is_deep_sleep) adapter->is_deep_sleep = false; break; case EVENT_HS_ACT_REQ: - 
dev_dbg(adapter->dev, "event: HS_ACT_REQ\n"); + mwifiex_dbg(adapter, EVENT, "event: HS_ACT_REQ\n"); ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_HS_CFG_ENH, 0, 0, NULL, false); break; case EVENT_MIC_ERR_UNICAST: - dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n"); + mwifiex_dbg(adapter, EVENT, "event: UNICAST MIC ERROR\n"); cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid, NL80211_KEYTYPE_PAIRWISE, -1, NULL, GFP_KERNEL); break; case EVENT_MIC_ERR_MULTICAST: - dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n"); + mwifiex_dbg(adapter, EVENT, "event: MULTICAST MIC ERROR\n"); cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid, NL80211_KEYTYPE_GROUP, -1, NULL, GFP_KERNEL); @@ -362,7 +365,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_ADHOC_BCN_LOST: - dev_dbg(adapter->dev, "event: ADHOC_BCN_LOST\n"); + mwifiex_dbg(adapter, EVENT, "event: ADHOC_BCN_LOST\n"); priv->adhoc_is_link_sensed = false; mwifiex_clean_txrx(priv); mwifiex_stop_net_dev_queue(priv->netdev, adapter); @@ -371,17 +374,17 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_BG_SCAN_REPORT: - dev_dbg(adapter->dev, "event: BGS_REPORT\n"); + mwifiex_dbg(adapter, EVENT, "event: BGS_REPORT\n"); ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_QUERY, HostCmd_ACT_GEN_GET, 0, NULL, false); break; case EVENT_PORT_RELEASE: - dev_dbg(adapter->dev, "event: PORT RELEASE\n"); + mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n"); break; case EVENT_EXT_SCAN_REPORT: - dev_dbg(adapter->dev, "event: EXT_SCAN Report\n"); + mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n"); if (adapter->ext_scan) ret = mwifiex_handle_event_ext_scan_report(priv, adapter->event_skb->data); @@ -389,7 +392,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_WMM_STATUS_CHANGE: - dev_dbg(adapter->dev, "event: WMM status changed\n"); + mwifiex_dbg(adapter, EVENT, "event: WMM status changed\n"); ret = mwifiex_send_cmd(priv, HostCmd_CMD_WMM_GET_STATUS, 0, 0, NULL, false); break; @@ -401,13 +404,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO, HostCmd_ACT_GEN_GET, 0, NULL, false); priv->subsc_evt_rssi_state = RSSI_LOW_RECVD; - dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n"); + mwifiex_dbg(adapter, EVENT, "event: Beacon RSSI_LOW\n"); break; case EVENT_SNR_LOW: - dev_dbg(adapter->dev, "event: Beacon SNR_LOW\n"); + mwifiex_dbg(adapter, EVENT, "event: Beacon SNR_LOW\n"); break; case EVENT_MAX_FAIL: - dev_dbg(adapter->dev, "event: MAX_FAIL\n"); + mwifiex_dbg(adapter, EVENT, "event: MAX_FAIL\n"); break; case EVENT_RSSI_HIGH: cfg80211_cqm_rssi_notify(priv->netdev, @@ -416,47 +419,47 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO, HostCmd_ACT_GEN_GET, 0, NULL, false); priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD; - dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n"); + mwifiex_dbg(adapter, EVENT, "event: Beacon RSSI_HIGH\n"); break; case EVENT_SNR_HIGH: - dev_dbg(adapter->dev, "event: Beacon SNR_HIGH\n"); + mwifiex_dbg(adapter, EVENT, "event: Beacon SNR_HIGH\n"); break; case EVENT_DATA_RSSI_LOW: - dev_dbg(adapter->dev, "event: Data RSSI_LOW\n"); + mwifiex_dbg(adapter, EVENT, "event: Data RSSI_LOW\n"); break; case EVENT_DATA_SNR_LOW: - dev_dbg(adapter->dev, "event: Data SNR_LOW\n"); + mwifiex_dbg(adapter, EVENT, "event: Data SNR_LOW\n"); break; case EVENT_DATA_RSSI_HIGH: - dev_dbg(adapter->dev, "event: Data RSSI_HIGH\n"); + 
mwifiex_dbg(adapter, EVENT, "event: Data RSSI_HIGH\n"); break; case EVENT_DATA_SNR_HIGH: - dev_dbg(adapter->dev, "event: Data SNR_HIGH\n"); + mwifiex_dbg(adapter, EVENT, "event: Data SNR_HIGH\n"); break; case EVENT_LINK_QUALITY: - dev_dbg(adapter->dev, "event: Link Quality\n"); + mwifiex_dbg(adapter, EVENT, "event: Link Quality\n"); break; case EVENT_PRE_BEACON_LOST: - dev_dbg(adapter->dev, "event: Pre-Beacon Lost\n"); + mwifiex_dbg(adapter, EVENT, "event: Pre-Beacon Lost\n"); break; case EVENT_IBSS_COALESCED: - dev_dbg(adapter->dev, "event: IBSS_COALESCED\n"); + mwifiex_dbg(adapter, EVENT, "event: IBSS_COALESCED\n"); ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS, HostCmd_ACT_GEN_GET, 0, NULL, false); break; case EVENT_ADDBA: - dev_dbg(adapter->dev, "event: ADDBA Request\n"); + mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n"); mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP, HostCmd_ACT_GEN_SET, 0, adapter->event_body, false); break; case EVENT_DELBA: - dev_dbg(adapter->dev, "event: DELBA Request\n"); + mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n"); mwifiex_11n_delete_ba_stream(priv, adapter->event_body); break; case EVENT_BA_STREAM_TIEMOUT: - dev_dbg(adapter->dev, "event: BA Stream timeout\n"); + mwifiex_dbg(adapter, EVENT, "event: BA Stream timeout\n"); mwifiex_11n_ba_stream_timeout(priv, (struct host_cmd_ds_11n_batimeout *) @@ -464,28 +467,31 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_AMSDU_AGGR_CTRL: ctrl = le16_to_cpu(*(__le16 *)adapter->event_body); - dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl); + mwifiex_dbg(adapter, EVENT, + "event: AMSDU_AGGR_CTRL %d\n", ctrl); adapter->tx_buf_size = min_t(u16, adapter->curr_tx_buf_size, ctrl); - dev_dbg(adapter->dev, "event: tx_buf_size %d\n", - adapter->tx_buf_size); + mwifiex_dbg(adapter, EVENT, "event: tx_buf_size %d\n", + adapter->tx_buf_size); break; case EVENT_WEP_ICV_ERR: - dev_dbg(adapter->dev, "event: WEP ICV error\n"); + mwifiex_dbg(adapter, EVENT, "event: WEP ICV error\n"); break; case EVENT_BW_CHANGE: - dev_dbg(adapter->dev, "event: BW Change\n"); + mwifiex_dbg(adapter, EVENT, "event: BW Change\n"); break; case EVENT_HOSTWAKE_STAIE: - dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause); + mwifiex_dbg(adapter, EVENT, + "event: HOSTWAKE_STAIE %d\n", eventcause); break; case EVENT_REMAIN_ON_CHAN_EXPIRED: - dev_dbg(adapter->dev, "event: Remain on channel expired\n"); + mwifiex_dbg(adapter, EVENT, + "event: Remain on channel expired\n"); cfg80211_remain_on_channel_expired(&priv->wdev, priv->roc_cfg.cookie, &priv->roc_cfg.chan, @@ -496,7 +502,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_CHANNEL_SWITCH_ANN: - dev_dbg(adapter->dev, "event: Channel Switch Announcement\n"); + mwifiex_dbg(adapter, EVENT, "event: Channel Switch Announcement\n"); priv->csa_expire_time = jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME); priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel; @@ -511,23 +517,23 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_TX_STATUS_REPORT: - dev_dbg(adapter->dev, "event: TX_STATUS Report\n"); + mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n"); mwifiex_parse_tx_status_event(priv, adapter->event_body); break; case EVENT_CHANNEL_REPORT_RDY: - dev_dbg(adapter->dev, "event: Channel Report\n"); + mwifiex_dbg(adapter, EVENT, "event: Channel Report\n"); ret = mwifiex_11h_handle_chanrpt_ready(priv, adapter->event_skb); break; case EVENT_RADAR_DETECTED: - 
dev_dbg(adapter->dev, "event: Radar detected\n"); + mwifiex_dbg(adapter, EVENT, "event: Radar detected\n"); ret = mwifiex_11h_handle_radar_detected(priv, adapter->event_skb); break; default: - dev_dbg(adapter->dev, "event: unknown event id: %#x\n", - eventcause); + mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n", + eventcause); break; } diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index a0bc26c5eac0..d8b7d9c20450 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -66,7 +66,8 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter, if (status <= 0) { if (status == 0) status = -ETIMEDOUT; - dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status); + mwifiex_dbg(adapter, ERROR, + "cmd_wait_q terminated: %d\n", status); mwifiex_cancel_all_pending_cmd(adapter); return status; } @@ -93,7 +94,8 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, old_pkt_filter = priv->curr_pkt_filter; if (mcast_list->mode == MWIFIEX_PROMISC_MODE) { - dev_dbg(priv->adapter->dev, "info: Enable Promiscuous mode\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: Enable Promiscuous mode\n"); priv->curr_pkt_filter |= HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE; @@ -101,16 +103,16 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, /* Multicast */ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) { - dev_dbg(priv->adapter->dev, - "info: Enabling All Multicast!\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: Enabling All Multicast!\n"); priv->curr_pkt_filter |= HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE; } else { priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE; - dev_dbg(priv->adapter->dev, - "info: Set multicast list=%d\n", - mcast_list->num_multicast_addr); + mwifiex_dbg(priv->adapter, INFO, + "info: Set multicast list=%d\n", + mcast_list->num_multicast_addr); /* Send multicast addresses to firmware */ ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_MULTICAST_ADR, @@ -118,9 +120,9 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, mcast_list, false); } } - dev_dbg(priv->adapter->dev, - "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n", - old_pkt_filter, priv->curr_pkt_filter); + mwifiex_dbg(priv->adapter, INFO, + "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n", + old_pkt_filter, priv->curr_pkt_filter); if (old_pkt_filter != priv->curr_pkt_filter) { ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL, HostCmd_ACT_GEN_SET, @@ -153,7 +155,8 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, rcu_read_unlock(); if (!beacon_ie) { - dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n"); + mwifiex_dbg(priv->adapter, ERROR, + " failed to alloc beacon_ie\n"); return -ENOMEM; } @@ -167,7 +170,8 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, bss_desc->bss_band = bss_priv->band; bss_desc->fw_tsf = bss_priv->fw_tsf; if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) { - dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: InterpretIE: AP WEP enabled\n"); bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP; } else { bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL; @@ -221,8 +225,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv, if (!strncmp(priv->adapter->country_code, &country_ie[2], 2)) 
{ rcu_read_unlock(); - wiphy_dbg(priv->wdev.wiphy, - "11D: skip setting domain info in FW\n"); + mwifiex_dbg(priv->adapter, INFO, + "11D: skip setting domain info in FW\n"); return 0; } memcpy(priv->adapter->country_code, &country_ie[2], 2); @@ -243,8 +247,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv, if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO, HostCmd_ACT_GEN_SET, 0, NULL, false)) { - wiphy_err(priv->adapter->wiphy, - "11D: setting domain info in FW\n"); + mwifiex_dbg(priv->adapter, ERROR, + "11D: setting domain info in FW failed\n"); return -1; } @@ -306,14 +310,15 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, if (mwifiex_11h_get_csa_closed_channel(priv) == (u8)bss_desc->channel) { - dev_err(adapter->dev, - "Attempt to reconnect on csa closed chan(%d)\n", - bss_desc->channel); + mwifiex_dbg(adapter, ERROR, + "Attempt to reconnect on csa closed chan(%d)\n", + bss_desc->channel); goto done; } - dev_dbg(adapter->dev, "info: SSID found in scan list ... " - "associating...\n"); + mwifiex_dbg(adapter, INFO, + "info: SSID found in scan list ...\t" + "associating...\n"); mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (netif_carrier_ok(priv->netdev)) @@ -355,15 +360,17 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, netif_carrier_off(priv->netdev); if (!ret) { - dev_dbg(adapter->dev, "info: network found in scan" - " list. Joining...\n"); + mwifiex_dbg(adapter, INFO, + "info: network found in scan\t" + "list. Joining...\n"); ret = mwifiex_adhoc_join(priv, bss_desc); if (bss) cfg80211_put_bss(priv->adapter->wiphy, bss); } else { - dev_dbg(adapter->dev, "info: Network not found in " - "the list, creating adhoc with ssid = %s\n", - req_ssid->ssid); + mwifiex_dbg(adapter, INFO, + "info: Network not found in\t" + "the list, creating adhoc with ssid = %s\n", + req_ssid->ssid); ret = mwifiex_adhoc_start(priv, req_ssid); } } @@ -398,8 +405,9 @@ int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action, switch (action) { case HostCmd_ACT_GEN_SET: if (adapter->pps_uapsd_mode) { - dev_dbg(adapter->dev, "info: Host Sleep IOCTL" - " is blocked in UAPSD/PPS mode\n"); + mwifiex_dbg(adapter, INFO, + "info: Host Sleep IOCTL\t" + "is blocked in UAPSD/PPS mode\n"); status = -1; break; } @@ -496,7 +504,8 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter) } if (adapter->hs_activated) { - dev_dbg(adapter->dev, "cmd: HS Already activated\n"); + mwifiex_dbg(adapter, CMD, + "cmd: HS Already activated\n"); return true; } @@ -512,14 +521,16 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter) MWIFIEX_BSS_ROLE_STA), HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD, &hscfg)) { - dev_err(adapter->dev, "IOCTL request HS enable failed\n"); + mwifiex_dbg(adapter, ERROR, + "IOCTL request HS enable failed\n"); return false; } if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q, adapter->hs_activate_wait_q_woken, (10 * HZ)) <= 0) { - dev_err(adapter->dev, "hs_activate_wait_q terminated\n"); + mwifiex_dbg(adapter, ERROR, + "hs_activate_wait_q terminated\n"); return false; } @@ -639,10 +650,11 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, dbm = (u16) power_cfg->power_level; if ((dbm < priv->min_tx_power_level) || (dbm > priv->max_tx_power_level)) { - dev_err(priv->adapter->dev, "txpower value %d dBm" - " is out of range (%d dBm-%d dBm)\n", - dbm, priv->min_tx_power_level, - priv->max_tx_power_level); + mwifiex_dbg(priv->adapter, ERROR, + "txpower value %d dBm\t" + "is out of range (%d
dBm-%d dBm)\n", + dbm, priv->min_tx_power_level, + priv->max_tx_power_level); return -1; } } @@ -741,14 +753,15 @@ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv, { if (ie_len) { if (ie_len > sizeof(priv->wpa_ie)) { - dev_err(priv->adapter->dev, - "failed to copy WPA IE, too big\n"); + mwifiex_dbg(priv->adapter, ERROR, + "failed to copy WPA IE, too big\n"); return -1; } memcpy(priv->wpa_ie, ie_data_ptr, ie_len); priv->wpa_ie_len = (u8) ie_len; - dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n", - priv->wpa_ie_len, priv->wpa_ie[0]); + mwifiex_dbg(priv->adapter, CMD, + "cmd: Set Wpa_ie_len=%d IE=%#x\n", + priv->wpa_ie_len, priv->wpa_ie[0]); if (priv->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC) { priv->sec_info.wpa_enabled = true; @@ -761,8 +774,9 @@ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv, } else { memset(priv->wpa_ie, 0, sizeof(priv->wpa_ie)); priv->wpa_ie_len = 0; - dev_dbg(priv->adapter->dev, "info: reset wpa_ie_len=%d IE=%#x\n", - priv->wpa_ie_len, priv->wpa_ie[0]); + mwifiex_dbg(priv->adapter, INFO, + "info: reset wpa_ie_len=%d IE=%#x\n", + priv->wpa_ie_len, priv->wpa_ie[0]); priv->sec_info.wpa_enabled = false; priv->sec_info.wpa2_enabled = false; } @@ -782,23 +796,24 @@ static int mwifiex_set_wapi_ie(struct mwifiex_private *priv, { if (ie_len) { if (ie_len > sizeof(priv->wapi_ie)) { - dev_dbg(priv->adapter->dev, - "info: failed to copy WAPI IE, too big\n"); + mwifiex_dbg(priv->adapter, ERROR, + "info: failed to copy WAPI IE, too big\n"); return -1; } memcpy(priv->wapi_ie, ie_data_ptr, ie_len); priv->wapi_ie_len = ie_len; - dev_dbg(priv->adapter->dev, "cmd: Set wapi_ie_len=%d IE=%#x\n", - priv->wapi_ie_len, priv->wapi_ie[0]); + mwifiex_dbg(priv->adapter, CMD, + "cmd: Set wapi_ie_len=%d IE=%#x\n", + priv->wapi_ie_len, priv->wapi_ie[0]); if (priv->wapi_ie[0] == WLAN_EID_BSS_AC_ACCESS_DELAY) priv->sec_info.wapi_enabled = true; } else { memset(priv->wapi_ie, 0, sizeof(priv->wapi_ie)); priv->wapi_ie_len = ie_len; - dev_dbg(priv->adapter->dev, - "info: Reset wapi_ie_len=%d IE=%#x\n", - priv->wapi_ie_len, priv->wapi_ie[0]); + mwifiex_dbg(priv->adapter, INFO, + "info: Reset wapi_ie_len=%d IE=%#x\n", + priv->wapi_ie_len, priv->wapi_ie[0]); priv->sec_info.wapi_enabled = false; } return 0; @@ -816,8 +831,8 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv, { if (ie_len) { if (ie_len > MWIFIEX_MAX_VSIE_LEN) { - dev_dbg(priv->adapter->dev, - "info: failed to copy WPS IE, too big\n"); + mwifiex_dbg(priv->adapter, ERROR, + "info: failed to copy WPS IE, too big\n"); return -1; } @@ -827,13 +842,14 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv, memcpy(priv->wps_ie, ie_data_ptr, ie_len); priv->wps_ie_len = ie_len; - dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n", - priv->wps_ie_len, priv->wps_ie[0]); + mwifiex_dbg(priv->adapter, CMD, + "cmd: Set wps_ie_len=%d IE=%#x\n", + priv->wps_ie_len, priv->wps_ie[0]); } else { kfree(priv->wps_ie); priv->wps_ie_len = ie_len; - dev_dbg(priv->adapter->dev, - "info: Reset wps_ie_len=%d\n", priv->wps_ie_len); + mwifiex_dbg(priv->adapter, INFO, + "info: Reset wps_ie_len=%d\n", priv->wps_ie_len); } return 0; } @@ -877,8 +893,8 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv, /* Copy the required key as the current key */ wep_key = &priv->wep_key[index]; if (!wep_key->key_length) { - dev_err(adapter->dev, - "key not set, so cannot enable it\n"); + mwifiex_dbg(adapter, ERROR, + "key not set, so cannot enable it\n"); return -1; } @@ -955,7 +971,8 @@ static 
int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv, /* Current driver only supports key length of up to 32 bytes */ if (encrypt_key->key_len > WLAN_MAX_KEY_LEN) { - dev_err(priv->adapter->dev, "key length too long\n"); + mwifiex_dbg(priv->adapter, ERROR, + "key length too long\n"); return -1; } @@ -1042,7 +1059,7 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version, snprintf(version, max_len, driver_version, fw_ver); - dev_dbg(adapter->dev, "info: MWIFIEX VERSION: %s\n", version); + mwifiex_dbg(adapter, MSG, "info: MWIFIEX VERSION: %s\n", version); return 0; } @@ -1130,7 +1147,8 @@ mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action, } if (mwifiex_send_cmd(priv, HostCmd_CMD_REMAIN_ON_CHAN, action, 0, &roc_cfg, true)) { - dev_err(priv->adapter->dev, "failed to remain on channel\n"); + mwifiex_dbg(priv->adapter, ERROR, + "failed to remain on channel\n"); return -1; } @@ -1315,8 +1333,8 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr, if ((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) && (!memcmp(pvendor_ie->oui, wps_oui, sizeof(wps_oui)))) { priv->wps.session_enable = true; - dev_dbg(priv->adapter->dev, - "info: WPS Session Enabled.\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: WPS Session Enabled.\n"); ret = mwifiex_set_wps_ie(priv, ie_data_ptr, ie_len); } @@ -1363,7 +1381,8 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv, memset(adapter->arp_filter, 0, sizeof(adapter->arp_filter)); if (gen_ie->len > ARP_FILTER_MAX_BUF_SIZE) { adapter->arp_filter_size = 0; - dev_err(adapter->dev, "invalid ARP filter size\n"); + mwifiex_dbg(adapter, ERROR, + "invalid ARP filter size\n"); return -1; } else { memcpy(adapter->arp_filter, gen_ie->ie_data, @@ -1372,7 +1391,7 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv, } break; default: - dev_err(adapter->dev, "invalid IE type\n"); + mwifiex_dbg(adapter, ERROR, "invalid IE type\n"); return -1; } return 0; diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c index b8729c9394e9..d4d4cb1ce95b 100644 --- a/drivers/net/wireless/mwifiex/sta_rx.c +++ b/drivers/net/wireless/mwifiex/sta_rx.c @@ -141,7 +141,7 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, if (priv->hs2_enabled && mwifiex_discard_gratuitous_arp(priv, skb)) { - dev_dbg(priv->adapter->dev, "Bypassed Gratuitous ARP\n"); + mwifiex_dbg(priv->adapter, INFO, "Bypassed Gratuitous ARP\n"); dev_kfree_skb_any(skb); return 0; } @@ -166,7 +166,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, ret = mwifiex_recv_packet(priv, skb); if (ret == -1) - dev_err(priv->adapter->dev, "recv packet failed\n"); + mwifiex_dbg(priv->adapter, ERROR, + "recv packet failed\n"); return ret; } @@ -203,9 +204,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv, rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset; if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) { - dev_err(adapter->dev, - "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n", - skb->len, rx_pkt_offset, rx_pkt_length); + mwifiex_dbg(adapter, ERROR, + "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n", + skb->len, rx_pkt_offset, rx_pkt_length); priv->stats.rx_dropped++; dev_kfree_skb_any(skb); return ret; @@ -214,7 +215,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv, if (rx_pkt_type == PKT_TYPE_MGMT) { ret = mwifiex_process_mgmt_packet(priv, skb); if (ret) - dev_err(adapter->dev, "Rx of mgmt packet failed"); + 
mwifiex_dbg(adapter, ERROR, "Rx of mgmt packet failed"); dev_kfree_skb_any(skb); return ret; } diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c index 5ce2d9a4f919..355ac5904fac 100644 --- a/drivers/net/wireless/mwifiex/sta_tx.c +++ b/drivers/net/wireless/mwifiex/sta_tx.c @@ -53,7 +53,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv, INTF_HEADER_LEN; if (!skb->len) { - dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len); + mwifiex_dbg(adapter, ERROR, + "Tx: bad packet length: %d\n", skb->len); tx_info->status_code = -1; return skb->data; } @@ -184,21 +185,24 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags) switch (ret) { case -EBUSY: dev_kfree_skb_any(skb); - dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n", - __func__, ret); + mwifiex_dbg(adapter, ERROR, + "%s: host_to_card failed: ret=%d\n", + __func__, ret); adapter->dbg.num_tx_host_to_card_failure++; break; case -1: adapter->data_sent = false; dev_kfree_skb_any(skb); - dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n", - __func__, ret); + mwifiex_dbg(adapter, ERROR, + "%s: host_to_card failed: ret=%d\n", + __func__, ret); adapter->dbg.num_tx_host_to_card_failure++; break; case 0: dev_kfree_skb_any(skb); - dev_dbg(adapter->dev, "data: %s: host_to_card succeeded\n", - __func__); + mwifiex_dbg(adapter, DATA, + "data: %s: host_to_card succeeded\n", + __func__); adapter->tx_lock_flag = true; break; case -EINPROGRESS: diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c index 275a476f0dc7..2faa1bc42abe 100644 --- a/drivers/net/wireless/mwifiex/tdls.c +++ b/drivers/net/wireless/mwifiex/tdls.c @@ -37,7 +37,7 @@ static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u32 tid; u8 tid_down; - dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac); + mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac); spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) { @@ -94,7 +94,7 @@ static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv, unsigned long flags; int i; - dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac); + mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac); spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); for (i = 0; i < MAX_NUM_TID; i++) { @@ -132,8 +132,8 @@ mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv, supp_rates_size = min_t(u16, rates_size, MWIFIEX_TDLS_SUPPORTED_RATES); if (skb_tailroom(skb) < rates_size + 4) { - dev_err(priv->adapter->dev, - "Insuffient space while adding rates\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Insuffient space while adding rates\n"); return -ENOMEM; } @@ -199,8 +199,8 @@ mwifiex_tdls_add_ht_oper(struct mwifiex_private *priv, const u8 *mac, sta_ptr = mwifiex_get_sta_entry(priv, mac); if (unlikely(!sta_ptr)) { - dev_warn(priv->adapter->dev, - "TDLS peer station not found in list\n"); + mwifiex_dbg(priv->adapter, ERROR, + "TDLS peer station not found in list\n"); return -1; } @@ -247,15 +247,16 @@ static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv, sta_ptr = mwifiex_get_sta_entry(priv, mac); if (unlikely(!sta_ptr)) { - dev_warn(adapter->dev, "TDLS peer station not found in list\n"); + mwifiex_dbg(adapter, ERROR, + "TDLS peer station not found in list\n"); return -1; } if (!mwifiex_is_bss_in_11ac_mode(priv)) { if (sta_ptr->tdls_cap.extcap.ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) { - dev_dbg(adapter->dev, - "TDLS peer doesn't 
support wider bandwidth\n"); + mwifiex_dbg(adapter, WARN, + "TDLS peer doesn't support wider bandwidth\n"); return 0; } } else { @@ -554,7 +555,7 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv, tf->u.discover_req.dialog_token = dialog_token; break; default: - dev_err(priv->adapter->dev, "Unknown TDLS frame type.\n"); + mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n"); return -EINVAL; } @@ -608,8 +609,8 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer, skb = dev_alloc_skb(skb_len); if (!skb) { - dev_err(priv->adapter->dev, - "allocate skb failed for management frame\n"); + mwifiex_dbg(priv->adapter, ERROR, + "allocate skb failed for management frame\n"); return -ENOMEM; } skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN); @@ -742,7 +743,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, mwifiex_tdls_add_qos_capab(skb); break; default: - dev_err(priv->adapter->dev, "Unknown TDLS action frame type\n"); + mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS action frame type\n"); return -EINVAL; } @@ -781,8 +782,8 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer, skb = dev_alloc_skb(skb_len); if (!skb) { - dev_err(priv->adapter->dev, - "allocate skb failed for management frame\n"); + mwifiex_dbg(priv->adapter, ERROR, + "allocate skb failed for management frame\n"); return -ENOMEM; } @@ -848,8 +849,8 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, peer = buf + ETH_ALEN; action = *(buf + sizeof(struct ethhdr) + 2); - dev_dbg(priv->adapter->dev, - "rx:tdls action: peer=%pM, action=%d\n", peer, action); + mwifiex_dbg(priv->adapter, DATA, + "rx:tdls action: peer=%pM, action=%d\n", peer, action); switch (action) { case WLAN_TDLS_SETUP_REQUEST: @@ -880,7 +881,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN; break; default: - dev_dbg(priv->adapter->dev, "Unknown TDLS frame type.\n"); + mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n"); return; } @@ -967,8 +968,8 @@ mwifiex_tdls_process_config_link(struct mwifiex_private *priv, const u8 *peer) sta_ptr = mwifiex_get_sta_entry(priv, peer); if (!sta_ptr || sta_ptr->tdls_status == TDLS_SETUP_FAILURE) { - dev_err(priv->adapter->dev, - "link absent for peer %pM; cannot config\n", peer); + mwifiex_dbg(priv->adapter, ERROR, + "link absent for peer %pM; cannot config\n", peer); return -EINVAL; } @@ -988,8 +989,8 @@ mwifiex_tdls_process_create_link(struct mwifiex_private *priv, const u8 *peer) sta_ptr = mwifiex_get_sta_entry(priv, peer); if (sta_ptr && sta_ptr->tdls_status == TDLS_SETUP_INPROGRESS) { - dev_dbg(priv->adapter->dev, - "Setup already in progress for peer %pM\n", peer); + mwifiex_dbg(priv->adapter, WARN, + "Setup already in progress for peer %pM\n", peer); return 0; } @@ -1046,8 +1047,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer) sta_ptr = mwifiex_get_sta_entry(priv, peer); if (sta_ptr && (sta_ptr->tdls_status != TDLS_SETUP_FAILURE)) { - dev_dbg(priv->adapter->dev, - "tdls: enable link %pM success\n", peer); + mwifiex_dbg(priv->adapter, MSG, + "tdls: enable link %pM success\n", peer); sta_ptr->tdls_status = TDLS_SETUP_COMPLETE; @@ -1076,8 +1077,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer) mwifiex_auto_tdls_update_peer_status(priv, peer, TDLS_SETUP_COMPLETE); } else { - dev_dbg(priv->adapter->dev, - "tdls: enable link %pM failed\n", peer); 
+ mwifiex_dbg(priv->adapter, ERROR, + "tdls: enable link %pM failed\n", peer); if (sta_ptr) { mwifiex_11n_cleanup_reorder_tbl(priv); spin_lock_irqsave(&priv->wmm.ra_list_spinlock, @@ -1180,9 +1181,9 @@ void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv) tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK; if (mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER, HostCmd_ACT_GEN_SET, 0, &tdls_oper, false)) - dev_warn(priv->adapter->dev, - "Disable link failed for TDLS peer %pM", - sta_ptr->mac_addr); + mwifiex_dbg(priv->adapter, ERROR, + "Disable link failed for TDLS peer %pM", + sta_ptr->mac_addr); } mwifiex_del_all_sta_list(priv); @@ -1204,9 +1205,9 @@ int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb) (peer->failure_count < MWIFIEX_TDLS_MAX_FAIL_COUNT)) { peer->tdls_status = TDLS_SETUP_INPROGRESS; - dev_dbg(priv->adapter->dev, - "setup TDLS link, peer=%pM rssi=%d\n", - peer->mac_addr, peer->rssi); + mwifiex_dbg(priv->adapter, INFO, + "setup TDLS link, peer=%pM rssi=%d\n", + peer->mac_addr, peer->rssi); cfg80211_tdls_oper_request(priv->netdev, peer->mac_addr, @@ -1272,8 +1273,8 @@ void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac) tdls_peer->rssi_jiffies = jiffies; INIT_LIST_HEAD(&tdls_peer->list); list_add_tail(&tdls_peer->list, &priv->auto_tdls_list); - dev_dbg(priv->adapter->dev, "Add auto TDLS peer= %pM to list\n", - mac); + mwifiex_dbg(priv->adapter, INFO, + "Add auto TDLS peer= %pM to list\n", mac); } spin_unlock_irqrestore(&priv->auto_tdls_lock, flags); @@ -1341,8 +1342,8 @@ void mwifiex_check_auto_tdls(unsigned long context) return; if (!priv->auto_tdls_timer_active) { - dev_dbg(priv->adapter->dev, - "auto TDLS timer inactive; return"); + mwifiex_dbg(priv->adapter, INFO, + "auto TDLS timer inactive; return"); return; } @@ -1368,9 +1369,9 @@ void mwifiex_check_auto_tdls(unsigned long context) !tdls_peer->rssi) && tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) { tdls_peer->tdls_status = TDLS_LINK_TEARDOWN; - dev_dbg(priv->adapter->dev, - "teardown TDLS link,peer=%pM rssi=%d\n", - tdls_peer->mac_addr, -tdls_peer->rssi); + mwifiex_dbg(priv->adapter, MSG, + "teardown TDLS link, peer=%pM rssi=%d\n", + tdls_peer->mac_addr, -tdls_peer->rssi); tdls_peer->do_discover = true; priv->check_tdls_tx = true; cfg80211_tdls_oper_request(priv->netdev, @@ -1384,9 +1385,10 @@ void mwifiex_check_auto_tdls(unsigned long context) MWIFIEX_TDLS_MAX_FAIL_COUNT) { priv->check_tdls_tx = true; tdls_peer->do_setup = true; - dev_dbg(priv->adapter->dev, - "check TDLS with peer=%pM rssi=%d\n", - tdls_peer->mac_addr, -tdls_peer->rssi); + mwifiex_dbg(priv->adapter, INFO, + "check TDLS with peer=%pM\t" + "rssi=%d\n", tdls_peer->mac_addr, + tdls_peer->rssi); } } spin_unlock_irqrestore(&priv->auto_tdls_lock, flags); diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c index a245f444aeec..28dcc84a34d2 100644 --- a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/mwifiex/txrx.c @@ -50,11 +50,15 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter, priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); if (!priv) { - dev_err(adapter->dev, "data: priv not found.
Drop RX packet\n"); dev_kfree_skb_any(skb); return -1; } + mwifiex_dbg_dump(adapter, DAT_D, "rx pkt:", skb->data, + min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN)); + memset(rx_info, 0, sizeof(*rx_info)); rx_info->bss_num = priv->bss_num; rx_info->bss_type = priv->bss_type; @@ -112,10 +116,12 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, skb, tx_param); } } + mwifiex_dbg_dump(adapter, DAT_D, "tx pkt:", skb->data, + min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN)); switch (ret) { case -ENOSR: - dev_dbg(adapter->dev, "data: -ENOSR is returned\n"); + mwifiex_dbg(adapter, ERROR, "data: -ENOSR is returned\n"); break; case -EBUSY: if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && @@ -124,13 +130,14 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, if (local_tx_pd) local_tx_pd->flags = 0; } - dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); + mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n"); break; case -1: if (adapter->iface_type != MWIFIEX_PCIE) adapter->data_sent = false; - dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n", - ret); + mwifiex_dbg(adapter, ERROR, + "mwifiex_write_data_async failed: 0x%X\n", + ret); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb, 0, ret); break; @@ -162,7 +169,8 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter, priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num, tx_info->bss_type); if (!priv) { - dev_err(adapter->dev, "data: priv not found. Drop TX packet\n"); + mwifiex_dbg(adapter, ERROR, + "data: priv not found. Drop TX packet\n"); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb, 0, 0); return ret; @@ -187,7 +195,7 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter, } switch (ret) { case -ENOSR: - dev_err(adapter->dev, "data: -ENOSR is returned\n"); + mwifiex_dbg(adapter, ERROR, "data: -ENOSR is returned\n"); break; case -EBUSY: if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && @@ -202,13 +210,13 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter, atomic_add(tx_info->aggr_num, &adapter->tx_queued); else atomic_inc(&adapter->tx_queued); - dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); + mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n"); break; case -1: if (adapter->iface_type != MWIFIEX_PCIE) adapter->data_sent = false; - dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n", - ret); + mwifiex_dbg(adapter, ERROR, + "mwifiex_write_data_async failed: 0x%X\n", ret); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb, 0, ret); break; @@ -319,7 +327,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter, txq = netdev_get_tx_queue(priv->netdev, index); if (netif_tx_queue_stopped(txq)) { netif_tx_wake_queue(txq); - dev_dbg(adapter->dev, "wake queue: %d\n", index); + mwifiex_dbg(adapter, DATA, "wake queue: %d\n", index); } } done: diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c index 3d0281190b9d..a4ae28353b6d 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/mwifiex/uap_cmd.c @@ -184,8 +184,8 @@ mwifiex_set_ht_params(struct mwifiex_private *priv, bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff; break; default: - dev_warn(priv->adapter->dev, - "Unsupported RX-STBC, default to 2x2\n"); + mwifiex_dbg(priv->adapter, WARN, + "Unsupported RX-STBC, default to 2x2\n"); bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff; 
bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff; break; @@ -767,8 +767,8 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no, return -1; break; default: - dev_err(priv->adapter->dev, - "PREP_CMD: unknown cmd %#x\n", cmd_no); + mwifiex_dbg(priv->adapter, ERROR, + "PREP_CMD: unknown cmd %#x\n", cmd_no); return -1; } @@ -806,24 +806,28 @@ int mwifiex_config_start_uap(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg) { if (mwifiex_del_mgmt_ies(priv)) - dev_err(priv->adapter->dev, "Failed to delete mgmt IEs!\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to delete mgmt IEs!\n"); if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP, HostCmd_ACT_GEN_SET, 0, NULL, true)) { - dev_err(priv->adapter->dev, "Failed to stop the BSS\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to stop the BSS\n"); return -1; } if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG, HostCmd_ACT_GEN_SET, UAP_BSS_PARAMS_I, bss_cfg, false)) { - dev_err(priv->adapter->dev, "Failed to set the SSID\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to set the SSID\n"); return -1; } if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START, HostCmd_ACT_GEN_SET, 0, NULL, false)) { - dev_err(priv->adapter->dev, "Failed to start the BSS\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to start the BSS\n"); return -1; } diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c index f4794cdc36d2..06ce3fe660f1 100644 --- a/drivers/net/wireless/mwifiex/uap_event.c +++ b/drivers/net/wireless/mwifiex/uap_event.c @@ -80,8 +80,8 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) node = mwifiex_add_sta_entry(priv, event->sta_addr); if (!node) { - dev_warn(adapter->dev, - "could not create station entry!\n"); + mwifiex_dbg(adapter, ERROR, + "could not create station entry!\n"); return -1; } @@ -128,7 +128,8 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) mwifiex_wake_up_net_dev_queue(priv->netdev, adapter); break; case EVENT_UAP_BSS_START: - dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause); + mwifiex_dbg(adapter, EVENT, + "AP EVENT: event id: %#x\n", eventcause); memcpy(priv->netdev->dev_addr, adapter->event_body + 2, ETH_ALEN); if (priv->hist_data) @@ -136,50 +137,53 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) break; case EVENT_UAP_MIC_COUNTERMEASURES: /* For future development */ - dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause); + mwifiex_dbg(adapter, EVENT, + "AP EVENT: event id: %#x\n", eventcause); break; case EVENT_AMSDU_AGGR_CTRL: ctrl = le16_to_cpu(*(__le16 *)adapter->event_body); - dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl); + mwifiex_dbg(adapter, EVENT, + "event: AMSDU_AGGR_CTRL %d\n", ctrl); if (priv->media_connected) { adapter->tx_buf_size = min_t(u16, adapter->curr_tx_buf_size, ctrl); - dev_dbg(adapter->dev, "event: tx_buf_size %d\n", - adapter->tx_buf_size); + mwifiex_dbg(adapter, EVENT, + "event: tx_buf_size %d\n", + adapter->tx_buf_size); } break; case EVENT_ADDBA: - dev_dbg(adapter->dev, "event: ADDBA Request\n"); + mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n"); if (priv->media_connected) mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP, HostCmd_ACT_GEN_SET, 0, adapter->event_body, false); break; case EVENT_DELBA: - dev_dbg(adapter->dev, "event: DELBA Request\n"); + mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n"); if (priv->media_connected) mwifiex_11n_delete_ba_stream(priv, adapter->event_body); break; case EVENT_BA_STREAM_TIEMOUT: - 
dev_dbg(adapter->dev, "event: BA Stream timeout\n"); + mwifiex_dbg(adapter, EVENT, "event: BA Stream timeout\n"); if (priv->media_connected) { ba_timeout = (void *)adapter->event_body; mwifiex_11n_ba_stream_timeout(priv, ba_timeout); } break; case EVENT_EXT_SCAN_REPORT: - dev_dbg(adapter->dev, "event: EXT_SCAN Report\n"); + mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n"); if (adapter->ext_scan) return mwifiex_handle_event_ext_scan_report(priv, adapter->event_skb->data); break; case EVENT_TX_STATUS_REPORT: - dev_dbg(adapter->dev, "event: TX_STATUS Report\n"); + mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n"); mwifiex_parse_tx_status_event(priv, adapter->event_body); break; case EVENT_PS_SLEEP: - dev_dbg(adapter->dev, "info: EVENT: SLEEP\n"); + mwifiex_dbg(adapter, EVENT, "info: EVENT: SLEEP\n"); adapter->ps_state = PS_STATE_PRE_SLEEP; @@ -187,12 +191,12 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) break; case EVENT_PS_AWAKE: - dev_dbg(adapter->dev, "info: EVENT: AWAKE\n"); + mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n"); if (!adapter->pps_uapsd_mode && priv->media_connected && adapter->sleep_period.period) { adapter->pps_uapsd_mode = true; - dev_dbg(adapter->dev, - "event: PPS/UAPSD mode activated\n"); + mwifiex_dbg(adapter, EVENT, + "event: PPS/UAPSD mode activated\n"); } adapter->tx_lock_flag = false; if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) { @@ -218,16 +222,16 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) break; case EVENT_CHANNEL_REPORT_RDY: - dev_dbg(adapter->dev, "event: Channel Report\n"); + mwifiex_dbg(adapter, EVENT, "event: Channel Report\n"); mwifiex_11h_handle_chanrpt_ready(priv, adapter->event_skb); break; case EVENT_RADAR_DETECTED: - dev_dbg(adapter->dev, "event: Radar detected\n"); + mwifiex_dbg(adapter, EVENT, "event: Radar detected\n"); mwifiex_11h_handle_radar_detected(priv, adapter->event_skb); break; default: - dev_dbg(adapter->dev, "event: unknown event id: %#x\n", - eventcause); + mwifiex_dbg(adapter, EVENT, + "event: unknown event id: %#x\n", eventcause); break; } diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c index 38ac4d74c486..61c52fdf945d 100644 --- a/drivers/net/wireless/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/mwifiex/uap_txrx.c @@ -103,8 +103,8 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, if ((atomic_read(&adapter->pending_bridged_pkts) >= MWIFIEX_BRIDGED_PKTS_THR_HIGH)) { - dev_err(priv->adapter->dev, - "Tx: Bridge packet limit reached. Drop packet!\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Tx: Bridge packet limit reached. 
Drop packet!\n"); kfree_skb(skb); mwifiex_uap_cleanup_tx_queues(priv); return; @@ -153,15 +153,15 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, skb_pull(skb, hdr_chop); if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) { - dev_dbg(priv->adapter->dev, - "data: Tx: insufficient skb headroom %d\n", - skb_headroom(skb)); + mwifiex_dbg(priv->adapter, ERROR, + "data: Tx: insufficient skb headroom %d\n", + skb_headroom(skb)); /* Insufficient skb headroom - allocate a new skb */ new_skb = skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN); if (unlikely(!new_skb)) { - dev_err(priv->adapter->dev, - "Tx: cannot allocate new_skb\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Tx: cannot allocate new_skb\n"); kfree_skb(skb); priv->stats.tx_dropped++; return; @@ -169,8 +169,9 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, kfree_skb(skb); skb = new_skb; - dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n", - skb_headroom(skb)); + mwifiex_dbg(priv->adapter, INFO, + "info: new skb headroom %d\n", + skb_headroom(skb)); } tx_info = MWIFIEX_SKB_TXCB(skb); @@ -225,7 +226,8 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv, /* don't do packet forwarding in disconnected state */ if (!priv->media_connected) { - dev_err(adapter->dev, "drop packet in disconnected state.\n"); + mwifiex_dbg(adapter, ERROR, + "drop packet in disconnected state.\n"); dev_kfree_skb_any(skb); return 0; } @@ -275,10 +277,10 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) + le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) { - dev_err(adapter->dev, - "wrong rx packet: len=%d, offset=%d, length=%d\n", - skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset), - le16_to_cpu(uap_rx_pd->rx_pkt_length)); + mwifiex_dbg(adapter, ERROR, + "wrong rx packet: len=%d, offset=%d, length=%d\n", + skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset), + le16_to_cpu(uap_rx_pd->rx_pkt_length)); priv->stats.rx_dropped++; dev_kfree_skb_any(skb); return 0; @@ -287,7 +289,8 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, if (rx_pkt_type == PKT_TYPE_MGMT) { ret = mwifiex_process_mgmt_packet(priv, skb); if (ret) - dev_err(adapter->dev, "Rx of mgmt packet failed"); + mwifiex_dbg(adapter, ERROR, + "Rx of mgmt packet failed"); dev_kfree_skb_any(skb); return ret; } @@ -354,7 +357,8 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv, INTF_HEADER_LEN; if (!skb->len) { - dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len); + mwifiex_dbg(adapter, ERROR, + "Tx: bad packet length: %d\n", skb->len); tx_info->status_code = -1; return skb->data; } diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index fd8027f200a0..aada93425f80 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -60,7 +60,6 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size); static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, struct sk_buff *skb, u8 ep) { - struct device *dev = adapter->dev; u32 recv_type; __le32 tmp; int ret; @@ -69,13 +68,15 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, mwifiex_process_hs_config(adapter); if (skb->len < INTF_HEADER_LEN) { - dev_err(dev, "%s: invalid skb->len\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: invalid skb->len\n", __func__); return -1; } switch (ep) { case MWIFIEX_USB_EP_CMD_EVENT: - dev_dbg(dev, "%s: EP_CMD_EVENT\n", __func__); + mwifiex_dbg(adapter, 
EVENT, + "%s: EP_CMD_EVENT\n", __func__); skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN); recv_type = le32_to_cpu(tmp); skb_pull(skb, INTF_HEADER_LEN); @@ -83,11 +84,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, switch (recv_type) { case MWIFIEX_USB_TYPE_CMD: if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) { - dev_err(dev, "CMD: skb->len too large\n"); + mwifiex_dbg(adapter, ERROR, + "CMD: skb->len too large\n"); ret = -1; goto exit_restore_skb; } else if (!adapter->curr_cmd) { - dev_dbg(dev, "CMD: no curr_cmd\n"); + mwifiex_dbg(adapter, WARN, "CMD: no curr_cmd\n"); if (adapter->ps_state == PS_STATE_SLEEP_CFM) { mwifiex_process_sleep_confirm_resp( adapter, skb->data, @@ -104,16 +106,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, break; case MWIFIEX_USB_TYPE_EVENT: if (skb->len < sizeof(u32)) { - dev_err(dev, "EVENT: skb->len too small\n"); + mwifiex_dbg(adapter, ERROR, + "EVENT: skb->len too small\n"); ret = -1; goto exit_restore_skb; } skb_copy_from_linear_data(skb, &tmp, sizeof(u32)); adapter->event_cause = le32_to_cpu(tmp); - dev_dbg(dev, "event_cause %#x\n", adapter->event_cause); + mwifiex_dbg(adapter, EVENT, + "event_cause %#x\n", adapter->event_cause); if (skb->len > MAX_EVENT_SIZE) { - dev_err(dev, "EVENT: event body too large\n"); + mwifiex_dbg(adapter, ERROR, + "EVENT: event body too large\n"); ret = -1; goto exit_restore_skb; } @@ -125,14 +130,16 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, adapter->event_skb = skb; break; default: - dev_err(dev, "unknown recv_type %#x\n", recv_type); + mwifiex_dbg(adapter, ERROR, + "unknown recv_type %#x\n", recv_type); return -1; } break; case MWIFIEX_USB_EP_DATA: - dev_dbg(dev, "%s: EP_DATA\n", __func__); + mwifiex_dbg(adapter, DATA, "%s: EP_DATA\n", __func__); if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) { - dev_err(dev, "DATA: skb->len too large\n"); + mwifiex_dbg(adapter, ERROR, + "DATA: skb->len too large\n"); return -1; } @@ -141,7 +148,8 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, atomic_inc(&adapter->rx_pending); break; default: - dev_err(dev, "%s: unknown endport %#x\n", __func__, ep); + mwifiex_dbg(adapter, ERROR, + "%s: unknown endport %#x\n", __func__, ep); return -1; } @@ -176,8 +184,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb) if (recv_length) { if (urb->status || (adapter->surprise_removed)) { - dev_err(adapter->dev, - "URB status is failed: %d\n", urb->status); + mwifiex_dbg(adapter, ERROR, + "URB status is failed: %d\n", urb->status); /* Do not free skb in case of command ep */ if (card->rx_cmd_ep != context->ep) dev_kfree_skb_any(skb); @@ -190,8 +198,9 @@ static void mwifiex_usb_rx_complete(struct urb *urb) status = mwifiex_usb_recv(adapter, skb, context->ep); - dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n", - recv_length, status); + mwifiex_dbg(adapter, INFO, + "info: recv_length=%d, status=%d\n", + recv_length, status); if (status == -EINPROGRESS) { mwifiex_queue_main_work(adapter); @@ -203,8 +212,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb) return; } else { if (status == -1) - dev_err(adapter->dev, - "received data processing failed!\n"); + mwifiex_dbg(adapter, ERROR, + "received data processing failed!\n"); /* Do not free skb in case of command ep */ if (card->rx_cmd_ep != context->ep) @@ -212,8 +221,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb) } } else if (urb->status) { if (!adapter->is_suspended) { - dev_warn(adapter->dev, - "Card is removed: %d\n", urb->status); + mwifiex_dbg(adapter, FATAL, 
+ "Card is removed: %d\n", urb->status); adapter->surprise_removed = true; } dev_kfree_skb_any(skb); @@ -249,14 +258,17 @@ static void mwifiex_usb_tx_complete(struct urb *urb) struct mwifiex_adapter *adapter = context->adapter; struct usb_card_rec *card = adapter->card; - dev_dbg(adapter->dev, "%s: status: %d\n", __func__, urb->status); + mwifiex_dbg(adapter, INFO, + "%s: status: %d\n", __func__, urb->status); if (context->ep == card->tx_cmd_ep) { - dev_dbg(adapter->dev, "%s: CMD\n", __func__); + mwifiex_dbg(adapter, CMD, + "%s: CMD\n", __func__); atomic_dec(&card->tx_cmd_urb_pending); adapter->cmd_sent = false; } else { - dev_dbg(adapter->dev, "%s: DATA\n", __func__); + mwifiex_dbg(adapter, DATA, + "%s: DATA\n", __func__); atomic_dec(&card->tx_data_urb_pending); mwifiex_write_data_complete(adapter, context->skb, 0, urb->status ? -1 : 0); @@ -275,8 +287,8 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size) if (card->rx_cmd_ep != ctx->ep) { ctx->skb = dev_alloc_skb(size); if (!ctx->skb) { - dev_err(adapter->dev, - "%s: dev_alloc_skb failed\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: dev_alloc_skb failed\n", __func__); return -ENOMEM; } } @@ -291,7 +303,7 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size) atomic_inc(&card->rx_data_urb_pending); if (usb_submit_urb(ctx->urb, GFP_ATOMIC)) { - dev_err(adapter->dev, "usb_submit_urb failed\n"); + mwifiex_dbg(adapter, ERROR, "usb_submit_urb failed\n"); dev_kfree_skb_any(ctx->skb); ctx->skb = NULL; @@ -468,7 +480,8 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message) adapter = card->adapter; if (unlikely(adapter->is_suspended)) - dev_warn(adapter->dev, "Device already suspended\n"); + mwifiex_dbg(adapter, WARN, + "Device already suspended\n"); mwifiex_enable_hs(adapter); @@ -519,7 +532,8 @@ static int mwifiex_usb_resume(struct usb_interface *intf) adapter = card->adapter; if (unlikely(!adapter->is_suspended)) { - dev_warn(adapter->dev, "Device already resumed\n"); + mwifiex_dbg(adapter, WARN, + "Device already resumed\n"); return 0; } @@ -578,7 +592,8 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) mwifiex_usb_free(card); - dev_dbg(adapter->dev, "%s: removing card\n", __func__); + mwifiex_dbg(adapter, FATAL, + "%s: removing card\n", __func__); mwifiex_remove_card(adapter, &add_remove_card_sem); usb_set_intfdata(intf, NULL); @@ -608,7 +623,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter) card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL); if (!card->tx_cmd.urb) { - dev_err(adapter->dev, "tx_cmd.urb allocation failed\n"); + mwifiex_dbg(adapter, ERROR, + "tx_cmd.urb allocation failed\n"); return -ENOMEM; } @@ -620,8 +636,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter) card->tx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL); if (!card->tx_data_list[i].urb) { - dev_err(adapter->dev, - "tx_data_list[] urb allocation failed\n"); + mwifiex_dbg(adapter, ERROR, + "tx_data_list[] urb allocation failed\n"); return -ENOMEM; } } @@ -639,15 +655,13 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter) card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL); if (!card->rx_cmd.urb) { - dev_err(adapter->dev, "rx_cmd.urb allocation failed\n"); + mwifiex_dbg(adapter, ERROR, "rx_cmd.urb allocation failed\n"); return -ENOMEM; } card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE); - if (!card->rx_cmd.skb) { - dev_err(adapter->dev, "rx_cmd.skb allocation failed\n"); + if (!card->rx_cmd.skb) return -ENOMEM; - } if 
(mwifiex_usb_submit_rx_urb(&card->rx_cmd, MWIFIEX_RX_CMD_BUF_SIZE)) return -1; @@ -658,8 +672,8 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter) card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL); if (!card->rx_data_list[i].urb) { - dev_err(adapter->dev, - "rx_data_list[] urb allocation failed\n"); + mwifiex_dbg(adapter, ERROR, + "rx_data_list[] urb allocation failed\n"); return -1; } if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i], @@ -683,7 +697,8 @@ static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf, ret = usb_bulk_msg(card->udev, usb_sndbulkpipe(card->udev, ep), pbuf, *len, &actual_length, timeout); if (ret) { - dev_err(adapter->dev, "usb_bulk_msg for tx failed: %d\n", ret); + mwifiex_dbg(adapter, ERROR, + "usb_bulk_msg for tx failed: %d\n", ret); return ret; } @@ -702,7 +717,8 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf, ret = usb_bulk_msg(card->udev, usb_rcvbulkpipe(card->udev, ep), pbuf, *len, &actual_length, timeout); if (ret) { - dev_err(adapter->dev, "usb_bulk_msg for rx failed: %d\n", ret); + mwifiex_dbg(adapter, ERROR, + "usb_bulk_msg for rx failed: %d\n", ret); return ret; } @@ -722,13 +738,13 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep, struct urb *tx_urb; if (adapter->is_suspended) { - dev_err(adapter->dev, - "%s: not allowed while suspended\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: not allowed while suspended\n", __func__); return -1; } if (adapter->surprise_removed) { - dev_err(adapter->dev, "%s: device removed\n", __func__); + mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__); return -1; } @@ -737,7 +753,7 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep, return -EBUSY; } - dev_dbg(adapter->dev, "%s: ep=%d\n", __func__, ep); + mwifiex_dbg(adapter, INFO, "%s: ep=%d\n", __func__, ep); if (ep == card->tx_cmd_ep) { context = &card->tx_cmd; @@ -764,7 +780,8 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep, atomic_inc(&card->tx_data_urb_pending); if (usb_submit_urb(tx_urb, GFP_ATOMIC)) { - dev_err(adapter->dev, "%s: usb_submit_urb failed\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: usb_submit_urb failed\n", __func__); if (ep == card->tx_cmd_ep) { atomic_dec(&card->tx_cmd_urb_pending); } else { @@ -843,8 +860,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, u8 check_winner = 1; if (!firmware) { - dev_err(adapter->dev, - "No firmware image found! Terminating download\n"); + mwifiex_dbg(adapter, ERROR, + "No firmware image found! 
Terminating download\n"); ret = -1; goto fw_exit; } @@ -889,8 +906,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, MWIFIEX_USB_EP_CMD_EVENT, MWIFIEX_USB_TIMEOUT); if (ret) { - dev_err(adapter->dev, - "write_data_sync: failed: %d\n", ret); + mwifiex_dbg(adapter, ERROR, + "write_data_sync: failed: %d\n", + ret); continue; } @@ -902,8 +920,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, MWIFIEX_USB_EP_CMD_EVENT, MWIFIEX_USB_TIMEOUT); if (ret) { - dev_err(adapter->dev, - "read_data_sync: failed: %d\n", ret); + mwifiex_dbg(adapter, ERROR, + "read_data_sync: failed: %d\n", + ret); continue; } @@ -913,17 +932,17 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, /* check 1st firmware block resp for highest bit set */ if (check_winner) { if (le32_to_cpu(sync_fw.cmd) & 0x80000000) { - dev_warn(adapter->dev, - "USB is not the winner %#x\n", - sync_fw.cmd); + mwifiex_dbg(adapter, WARN, + "USB is not the winner %#x\n", + sync_fw.cmd); /* returning success */ ret = 0; goto cleanup; } - dev_dbg(adapter->dev, - "USB is the winner, start to download FW\n"); + mwifiex_dbg(adapter, MSG, + "start to download FW...\n"); check_winner = 0; break; @@ -931,9 +950,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, /* check the firmware block response for CRC errors */ if (sync_fw.cmd) { - dev_err(adapter->dev, - "FW received block with CRC %#x\n", - sync_fw.cmd); + mwifiex_dbg(adapter, ERROR, + "FW received block with CRC %#x\n", + sync_fw.cmd); ret = -1; continue; } @@ -945,8 +964,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, } while ((dnld_cmd != FW_HAS_LAST_BLOCK) && retries); cleanup: - dev_notice(adapter->dev, - "info: FW download over, size %d bytes\n", tlen); + mwifiex_dbg(adapter, MSG, + "info: FW download over, size %d bytes\n", tlen); kfree(recv_buff); kfree(fwdata); diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c index 9482d955c384..370323a47ecb 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/mwifiex/util.c @@ -26,6 +26,8 @@ #include "11n.h" static struct mwifiex_debug_data items[] = { + {"debug_mask", item_size(debug_mask), + item_addr(debug_mask), 1}, {"int_counter", item_size(int_counter), item_addr(int_counter), 1}, {"wmm_ac_vo", item_size(packets_out[WMM_AC_VO]), @@ -158,7 +160,8 @@ int mwifiex_init_shutdown_fw(struct mwifiex_private *priv, } else if (func_init_shutdown == MWIFIEX_FUNC_SHUTDOWN) { cmd = HostCmd_CMD_FUNC_SHUTDOWN; } else { - dev_err(priv->adapter->dev, "unsupported parameter\n"); + mwifiex_dbg(priv->adapter, ERROR, + "unsupported parameter\n"); return -1; } @@ -178,6 +181,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv, struct mwifiex_adapter *adapter = priv->adapter; if (info) { + info->debug_mask = adapter->debug_mask; memcpy(info->packets_out, priv->wmm.packets_out, sizeof(priv->wmm.packets_out)); @@ -336,9 +340,9 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len, action_code = *(payload + sizeof(struct ieee80211_hdr) + 1); if (category == WLAN_CATEGORY_PUBLIC && action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) { - dev_dbg(priv->adapter->dev, - "TDLS discovery response %pM nf=%d, snr=%d\n", - ieee_hdr->addr2, rx_pd->nf, rx_pd->snr); + mwifiex_dbg(priv->adapter, INFO, + "TDLS discovery response %pM nf=%d, snr=%d\n", + ieee_hdr->addr2, rx_pd->nf, rx_pd->snr); mwifiex_auto_tdls_update_peer_signal(priv, ieee_hdr->addr2, rx_pd->snr, @@ -346,8 +350,8 @@ 
mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len, } break; default: - dev_dbg(priv->adapter->dev, - "unknown mgmt frame subytpe %#x\n", stype); + mwifiex_dbg(priv->adapter, INFO, + "unknown mgmt frame subtype %#x\n", stype); } return 0; @@ -369,8 +373,8 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, if (!priv->mgmt_frame_mask || priv->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) { - dev_dbg(priv->adapter->dev, - "do not receive mgmt frames on uninitialized intf"); + mwifiex_dbg(priv->adapter, ERROR, + "do not receive mgmt frames on uninitialized intf"); return -1; } @@ -464,13 +468,14 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb) int mwifiex_complete_cmd(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node) { - dev_dbg(adapter->dev, "cmd completed: status=%d\n", - adapter->cmd_wait_q.status); + mwifiex_dbg(adapter, CMD, + "cmd completed: status=%d\n", + adapter->cmd_wait_q.status); *(cmd_node->condition) = true; if (adapter->cmd_wait_q.status == -ETIMEDOUT) - dev_err(adapter->dev, "cmd timeout\n"); + mwifiex_dbg(adapter, ERROR, "cmd timeout\n"); else wake_up_interruptible(&adapter->cmd_wait_q.wait); diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 8be9d1310cde..a8ea21c3340c 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -107,7 +107,7 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra) ra_list->total_pkt_count = 0; - dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list); + mwifiex_dbg(adapter, INFO, "info: allocated ra_list %p\n", ra_list); return ra_list; } @@ -150,7 +150,8 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra) for (i = 0; i < MAX_NUM_TID; ++i) { ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra); - dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list); + mwifiex_dbg(adapter, INFO, + "info: created ra_list %p\n", ra_list); if (!ra_list) break; @@ -178,8 +179,8 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra) spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); } - dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n", - ra_list, ra_list->is_11n_enabled); + mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n", + ra_list, ra_list->is_11n_enabled); if (ra_list->is_11n_enabled) { ra_list->ba_pkt_count = 0; @@ -241,11 +242,12 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv, return; } - dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, " - "qos_info Parameter Set Count=%d, Reserved=%#x\n", - wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap & - IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK, - wmm_ie->reserved); + mwifiex_dbg(priv->adapter, INFO, + "info: WMM Parameter IE: version=%d,\t" + "qos_info Parameter Set Count=%d, Reserved=%#x\n", + wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap & + IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK, + wmm_ie->reserved); for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) { u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap; @@ -257,10 +259,10 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv, priv->wmm.queue_priority[ac_idx] = ac_idx; tmp[ac_idx] = avg_back_off; - dev_dbg(priv->adapter->dev, - "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n", - (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1, - cw_min, avg_back_off); + mwifiex_dbg(priv->adapter, INFO, + "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n", + 
(1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1, + cw_min, avg_back_off); mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]); } @@ -333,8 +335,8 @@ mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv) { int ac_val; - dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities:" - "BK(0), BE(1), VI(2), VO(3)\n"); + mwifiex_dbg(priv->adapter, INFO, "info: WMM: AC Priorities:\t" + "BK(0), BE(1), VI(2), VO(3)\n"); if (!priv->wmm_enabled) { /* WMM is not enabled, default priorities */ @@ -346,9 +348,10 @@ mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv) priv->wmm.ac_down_graded_vals[ac_val] = mwifiex_wmm_eval_downgrade_ac(priv, (enum mwifiex_wmm_ac_e) ac_val); - dev_dbg(priv->adapter->dev, - "info: WMM: AC PRIO %d maps to %d\n", - ac_val, priv->wmm.ac_down_graded_vals[ac_val]); + mwifiex_dbg(priv->adapter, INFO, + "info: WMM: AC PRIO %d maps to %d\n", + ac_val, + priv->wmm.ac_down_graded_vals[ac_val]); } } } @@ -521,8 +524,8 @@ static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv) int i; for (i = 0; i < MAX_NUM_TID; ++i) { - dev_dbg(priv->adapter->dev, - "info: ra_list: freeing buf for tid %d\n", i); + mwifiex_dbg(priv->adapter, INFO, + "info: ra_list: freeing buf for tid %d\n", i); list_for_each_entry_safe(ra_list, tmp_node, &priv->wmm.tid_tbl_ptr[i].ra_list, list) { @@ -694,14 +697,15 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA && ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) { if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS) - dev_dbg(adapter->dev, - "TDLS setup packet for %pM. Don't block\n", ra); + mwifiex_dbg(adapter, DATA, + "TDLS setup packet for %pM.\t" + "Don't block\n", ra); else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN)) tdls_status = mwifiex_get_tdls_link_status(priv, ra); } if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) { - dev_dbg(adapter->dev, "data: drop packet in disconnect\n"); + mwifiex_dbg(adapter, DATA, "data: drop packet in disconnect\n"); mwifiex_write_data_complete(adapter, skb, 0, -1); return; } @@ -782,6 +786,7 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, { u8 *curr = (u8 *) &resp->params.get_wmm_status; uint16_t resp_len = le16_to_cpu(resp->size), tlv_len; + int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK; bool valid = true; struct mwifiex_ie_types_data *tlv_hdr; @@ -789,8 +794,9 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, struct ieee_types_wmm_parameter *wmm_param_ie = NULL; struct mwifiex_wmm_ac_status *ac_status; - dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n", - resp_len); + mwifiex_dbg(priv->adapter, INFO, + "info: WMM: WMM_GET_STATUS cmdresp received: %d\n", + resp_len); while ((resp_len >= sizeof(tlv_hdr->header)) && valid) { tlv_hdr = (struct mwifiex_ie_types_data *) curr; @@ -804,12 +810,12 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, tlv_wmm_qstatus = (struct mwifiex_ie_types_wmm_queue_status *) tlv_hdr; - dev_dbg(priv->adapter->dev, - "info: CMD_RESP: WMM_GET_STATUS:" - " QSTATUS TLV: %d, %d, %d\n", - tlv_wmm_qstatus->queue_index, - tlv_wmm_qstatus->flow_required, - tlv_wmm_qstatus->disabled); + mwifiex_dbg(priv->adapter, CMD, + "info: CMD_RESP: WMM_GET_STATUS:\t" + "QSTATUS TLV: %d, %d, %d\n", + tlv_wmm_qstatus->queue_index, + tlv_wmm_qstatus->flow_required, + tlv_wmm_qstatus->disabled); ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus-> queue_index]; @@ -832,11 +838,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, 
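/*
 * Note on the hunk below: this is where the "int mask" local added near the
 * top of mwifiex_ret_wmm_get_status() is used -- "qos_info_bitmap & mask"
 * replaces the spelled-out IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
 * presumably so the reflowed mwifiex_dbg() call still fits within the
 * kernel's line-length limit.
 */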
wmm_param_ie->vend_hdr.element_id = WLAN_EID_VENDOR_SPECIFIC; - dev_dbg(priv->adapter->dev, - "info: CMD_RESP: WMM_GET_STATUS:" - " WMM Parameter Set Count: %d\n", - wmm_param_ie->qos_info_bitmap & - IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK); + mwifiex_dbg(priv->adapter, CMD, + "info: CMD_RESP: WMM_GET_STATUS:\t" + "WMM Parameter Set Count: %d\n", + wmm_param_ie->qos_info_bitmap & mask); memcpy((u8 *) &priv->curr_bss_params.bss_descriptor. wmm_ie, wmm_param_ie, @@ -884,9 +889,9 @@ mwifiex_wmm_process_association_req(struct mwifiex_private *priv, if (!wmm_ie) return 0; - dev_dbg(priv->adapter->dev, - "info: WMM: process assoc req: bss->wmm_ie=%#x\n", - wmm_ie->vend_hdr.element_id); + mwifiex_dbg(priv->adapter, INFO, + "info: WMM: process assoc req: bss->wmm_ie=%#x\n", + wmm_ie->vend_hdr.element_id); if ((priv->wmm_required || (ht_cap && (priv->adapter->config_bands & BAND_GN || @@ -936,8 +941,8 @@ mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv, */ ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1); - dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms," - " %d ms sent to FW\n", queue_delay, ret_val); + mwifiex_dbg(priv->adapter, DATA, "data: WMM: Pkt Delay: %d ms,\t" + "%d ms sent to FW\n", queue_delay, ret_val); return ret_val; } @@ -1091,14 +1096,15 @@ mwifiex_send_single_packet(struct mwifiex_private *priv, if (skb_queue_empty(&ptr->skb_head)) { spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); - dev_dbg(adapter->dev, "data: nothing to send\n"); + mwifiex_dbg(adapter, DATA, "data: nothing to send\n"); return; } skb = skb_dequeue(&ptr->skb_head); tx_info = MWIFIEX_SKB_TXCB(skb); - dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb); + mwifiex_dbg(adapter, DATA, + "data: dequeuing the packet %p %p\n", ptr, skb); ptr->total_pkt_count--; @@ -1214,7 +1220,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv, switch (ret) { case -EBUSY: - dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); + mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n"); spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) { @@ -1233,7 +1239,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv, case -1: if (adapter->iface_type != MWIFIEX_PCIE) adapter->data_sent = false; - dev_err(adapter->dev, "host_to_card failed: %#x\n", ret); + mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb, 0, ret); break; @@ -1272,7 +1278,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter) tid = mwifiex_get_tid(ptr); - dev_dbg(adapter->dev, "data: tid=%d\n", tid); + mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid); spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) { diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 477f86354dc5..0881ba8535f4 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -143,7 +143,7 @@ static int psm; static char *essid; /* Default to encapsulation unless translation requested */ -static bool translate = 1; +static bool translate = true; static int country = USA; diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index d72ff8e7125d..71a825c750cf 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -356,9 +356,9 @@ struct ndis_80211_pmkid { #define 
CAP_MODE_80211G 4 #define CAP_MODE_MASK 7 -#define WORK_LINK_UP (1<<0) -#define WORK_LINK_DOWN (1<<1) -#define WORK_SET_MULTICAST_LIST (1<<2) +#define WORK_LINK_UP 0 +#define WORK_LINK_DOWN 1 +#define WORK_SET_MULTICAST_LIST 2 #define RNDIS_WLAN_ALG_NONE 0 #define RNDIS_WLAN_ALG_WEP (1<<0) @@ -2861,7 +2861,7 @@ static void rndis_wlan_do_link_down_work(struct usbnet *usbdev) deauthenticate(usbdev); - cfg80211_disconnected(usbdev->net, 0, NULL, 0, GFP_KERNEL); + cfg80211_disconnected(usbdev->net, 0, NULL, 0, true, GFP_KERNEL); } netif_carrier_off(usbdev->net); diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig index 5cf509d346e8..73067cac289c 100644 --- a/drivers/net/wireless/rtlwifi/Kconfig +++ b/drivers/net/wireless/rtlwifi/Kconfig @@ -100,7 +100,7 @@ config RTL8821AE select RTLWIFI_PCI select RTLBTCOEXIST ---help--- - This is the driver for Realtek RTL8i821AE/RTL8812AE 802.11av PCIe + This is the driver for Realtek RTL8821AE/RTL8812AE 802.11ac PCIe wireless network adapters. If you choose to build it as a module, it will be called rtl8821ae diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c index cefe26991421..f2b9d11adc9e 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -1286,8 +1286,11 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, 0x12, 0xe1, 0x90); break; case 3: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, - 0x3, 0xf1, 0x90); + /* This call breaks BT when wireless is active - + * comment it out for now until a better fix is found: + * btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, + * 0x3, 0xf1, 0x90); + */ break; case 4: btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10, diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c index 1893d01b9e78..a62bf0a65c32 100644 --- a/drivers/net/wireless/rtlwifi/regd.c +++ b/drivers/net/wireless/rtlwifi/regd.c @@ -40,6 +40,7 @@ static struct country_code_to_enum_rd allCountries[] = { {COUNTRY_CODE_GLOBAL_DOMAIN, "JP"}, {COUNTRY_CODE_WORLD_WIDE_13, "EC"}, {COUNTRY_CODE_TELEC_NETGEAR, "EC"}, + {COUNTRY_CODE_WORLD_WIDE_13_5G_ALL, "US"}, }; /* @@ -124,6 +125,17 @@ static const struct ieee80211_regdomain rtl_regdom_14_60_64 = { } }; +static const struct ieee80211_regdomain rtl_regdom_12_13_5g_all = { + .n_reg_rules = 4, + .alpha2 = "99", + .reg_rules = { + RTL819x_2GHZ_CH01_11, + RTL819x_2GHZ_CH12_13, + RTL819x_5GHZ_5150_5350, + RTL819x_5GHZ_5470_5850, + } +}; + static const struct ieee80211_regdomain rtl_regdom_14 = { .n_reg_rules = 3, .alpha2 = "99", @@ -348,6 +360,8 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select( return &rtl_regdom_14_60_64; case COUNTRY_CODE_GLOBAL_DOMAIN: return &rtl_regdom_14; + case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL: + return &rtl_regdom_12_13_5g_all; default: return &rtl_regdom_no_midband; } @@ -384,6 +398,25 @@ static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode) return NULL; } +static u8 channel_plan_to_country_code(u8 channelplan) +{ + switch (channelplan) { + case 0x20: + case 0x21: + return COUNTRY_CODE_WORLD_WIDE_13; + case 0x22: + return COUNTRY_CODE_IC; + case 0x32: + return COUNTRY_CODE_TELEC_NETGEAR; + case 0x41: + return COUNTRY_CODE_GLOBAL_DOMAIN; + case 0x7f: + return COUNTRY_CODE_WORLD_WIDE_13_5G_ALL; + default: + return COUNTRY_CODE_MAX; /*Error*/ + } +} + int rtl_regd_init(struct 
ieee80211_hw *hw, void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request)) @@ -396,11 +429,12 @@ int rtl_regd_init(struct ieee80211_hw *hw, return -EINVAL; /* init country_code from efuse channel plan */ - rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan; + rtlpriv->regd.country_code = + channel_plan_to_country_code(rtlpriv->efuse.channel_plan); - RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE, - "rtl: EEPROM regdomain: 0x%0x\n", - rtlpriv->regd.country_code); + RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG, + "rtl: EEPROM regdomain: 0x%0x country code: %d\n", + rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code); if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) { RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG, diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/rtlwifi/regd.h index 3bbbaaa68530..f7f15bce35dd 100644 --- a/drivers/net/wireless/rtlwifi/regd.h +++ b/drivers/net/wireless/rtlwifi/regd.h @@ -49,6 +49,7 @@ enum country_code_type_t { COUNTRY_CODE_GLOBAL_DOMAIN = 10, COUNTRY_CODE_WORLD_WIDE_13 = 11, COUNTRY_CODE_TELEC_NETGEAR = 12, + COUNTRY_CODE_WORLD_WIDE_13_5G_ALL = 13, /*add new channel plan above this line */ COUNTRY_CODE_MAX diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c index 86ce5b1930e6..8ee83b093c0d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c @@ -1354,27 +1354,11 @@ void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci) } } -static void rtl88ee_clear_interrupt(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 tmp; - - tmp = rtl_read_dword(rtlpriv, REG_HISR); - rtl_write_dword(rtlpriv, REG_HISR, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HISRE); - rtl_write_dword(rtlpriv, REG_HISRE, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HSISR); - rtl_write_dword(rtlpriv, REG_HSISR, tmp); -} - void rtl88ee_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - rtl88ee_clear_interrupt(hw);/*clear it here first*/ rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, @@ -1919,8 +1903,8 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw) "dev_addr: %pM\n", rtlefuse->dev_addr); /*channel plan */ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN]; - /* set channel paln to world wide 13 */ - rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13; + /* set channel plan from efuse */ + rtlefuse->channel_plan = rtlefuse->eeprom_channelplan; /*tx power*/ _rtl88ee_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag, diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c index ef28c8ea1e84..02013df968a0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c @@ -23,7 +23,7 @@ * *****************************************************************************/ -#include "pwrseqcmd.h" +#include "../pwrseqcmd.h" #include "pwrseq.h" /* drivers should parse below arrays and do the corresponding actions */ diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h index 79103347d967..f2d9c6116e5c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h @@ -26,7 +26,7 @@ #ifndef __RTL8723E_PWRSEQ_H__ #define __RTL8723E_PWRSEQ_H__ -#include
"pwrseqcmd.h" +#include "../pwrseqcmd.h" /* Check document WM-20110607-Paul-RTL8188EE_Power_Architecture-R02.vsd * There are 6 HW Power States: * 0: POFF--Power Off diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index d310d55d800e..189859617db8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -889,7 +889,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw) rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version)); rtl92c_init_beacon_parameters(hw, rtlhal->version); rtl92c_init_ampdu_aggregation(hw); - rtl92c_init_beacon_max_error(hw, true); + rtl92c_init_beacon_max_error(hw); return err; } @@ -1323,7 +1323,6 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw, enum led_ctl_mode ledaction = LED_CTL_NO_LINK; bt_msr &= 0xfc; - rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF); if (type == NL80211_IFTYPE_UNSPECIFIED || type == NL80211_IFTYPE_STATION) { _rtl92cu_stop_tx_beacon(hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c index adb810794eef..f3db6bc8596a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -613,7 +613,7 @@ void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw) rtl_write_word(rtlpriv, 0x4CA, 0x0708); } -void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode) +void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h index bf53652e4edd..58548e8f2c41 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h @@ -66,7 +66,7 @@ void rtl92c_init_edca_param(struct ieee80211_hw *hw, void rtl92c_init_edca(struct ieee80211_hw *hw); void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw); -void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode); +void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw); void rtl92c_init_rdg_setting(struct ieee80211_hw *hw); void rtl92c_init_retry_function(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c index da0a6125f314..5f14308e8eb3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c @@ -1584,28 +1584,11 @@ void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci) } } -static void rtl92ee_clear_interrupt(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 tmp; - - tmp = rtl_read_dword(rtlpriv, REG_HISR); - rtl_write_dword(rtlpriv, REG_HISR, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HISRE); - rtl_write_dword(rtlpriv, REG_HISRE, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HSISR); - rtl_write_dword(rtlpriv, REG_HSISR, tmp); -} - void rtl92ee_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - rtl92ee_clear_interrupt(hw);/*clear it here first*/ - rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); rtlpci->irq_enabled = true; @@ -2194,8 +2177,8 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw) "dev_addr: %pM\n", rtlefuse->dev_addr); /*channel plan */ rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; 
- /* set channel paln to world wide 13 */ - rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13; + /* set channel plan from efuse */ + rtlefuse->channel_plan = rtlefuse->eeprom_channelplan; /*tx power*/ _rtl92ee_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag, hwinfo); diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c index 67bb47d77b68..a4b7eac6856f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c @@ -1258,18 +1258,6 @@ void rtl8723e_set_qos(struct ieee80211_hw *hw, int aci) } } -static void rtl8723e_clear_interrupt(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 tmp; - - tmp = rtl_read_dword(rtlpriv, REG_HISR); - rtl_write_dword(rtlpriv, REG_HISR, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HISRE); - rtl_write_dword(rtlpriv, REG_HISRE, tmp); -} - void rtl8723e_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1284,7 +1272,6 @@ void rtl8723e_disable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - rtl8723e_clear_interrupt(hw);/*clear it here first*/ rtl_write_dword(rtlpriv, 0x3a8, IMR8190_DISABLED); rtl_write_dword(rtlpriv, 0x3ac, IMR8190_DISABLED); rtlpci->irq_enabled = false; diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c index b681af3c7a35..c983d2fe147f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c @@ -1634,28 +1634,11 @@ void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci) } } -static void rtl8723be_clear_interrupt(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 tmp; - - tmp = rtl_read_dword(rtlpriv, REG_HISR); - rtl_write_dword(rtlpriv, REG_HISR, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HISRE); - rtl_write_dword(rtlpriv, REG_HISRE, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HSISR); - rtl_write_dword(rtlpriv, REG_HSISR, tmp); -} - void rtl8723be_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - rtl8723be_clear_interrupt(hw);/*clear it here first*/ - rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); rtlpci->irq_enabled = true; @@ -2139,8 +2122,8 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); - /* set channel plan to world wide 13 */ - rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13; + /* set channel plan from efuse */ + rtlefuse->channel_plan = rtlefuse->eeprom_channelplan; if (rtlhal->oem_id == RT_CID_DEFAULT) { /* Does this one have a Toshiba SMID from group 1? 
*/ diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c index 8704eee9f3a4..3236d44b459d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c @@ -2253,31 +2253,11 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci) } } -static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 tmp; - tmp = rtl_read_dword(rtlpriv, REG_HISR); - /*printk("clear interrupt first:\n"); - printk("0x%x = 0x%08x\n",REG_HISR, tmp);*/ - rtl_write_dword(rtlpriv, REG_HISR, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HISRE); - /*printk("0x%x = 0x%08x\n",REG_HISRE, tmp);*/ - rtl_write_dword(rtlpriv, REG_HISRE, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HSISR); - /*printk("0x%x = 0x%08x\n",REG_HSISR, tmp);*/ - rtl_write_dword(rtlpriv, REG_HSISR, tmp); -} - void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - rtl8821ae_clear_interrupt(hw);/*clear it here first*/ - rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); rtlpci->irq_enabled = true; @@ -3232,8 +3212,8 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_ if (rtlefuse->eeprom_channelplan == 0xff) rtlefuse->eeprom_channelplan = 0x7F; - /* set channel paln to world wide 13 */ - /* rtlefuse->channel_plan = (u8)rtlefuse->eeprom_channelplan; */ + /* set channel plan from efuse */ + rtlefuse->channel_plan = rtlefuse->eeprom_channelplan; /*parse xtal*/ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE]; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 792ada66d983..f1b2c1721917 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1250,7 +1250,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, netdev_err(queue->vif->dev, "txreq.offset: %x, size: %u, end: %lu\n", txreq.offset, txreq.size, - (txreq.offset&~PAGE_MASK) + txreq.size); + (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size); xenvif_fatal_tx_err(queue->vif); break; } diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 3d8dbf5f2d39..968787abf78d 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -34,6 +34,8 @@ struct backend_info { enum xenbus_state frontend_state; struct xenbus_watch hotplug_status_watch; u8 have_hotplug_status_watch:1; + + const char *hotplug_script; }; static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); @@ -238,6 +240,7 @@ static int netback_remove(struct xenbus_device *dev) xenvif_free(be->vif); be->vif = NULL; } + kfree(be->hotplug_script); kfree(be); dev_set_drvdata(&dev->dev, NULL); return 0; @@ -255,6 +258,7 @@ static int netback_probe(struct xenbus_device *dev, struct xenbus_transaction xbt; int err; int sg; + const char *script; struct backend_info *be = kzalloc(sizeof(struct backend_info), GFP_KERNEL); if (!be) { @@ -347,6 +351,15 @@ static int netback_probe(struct xenbus_device *dev, if (err) pr_debug("Error writing multi-queue-max-queues\n"); + script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL); + if (IS_ERR(script)) { + err = PTR_ERR(script); + xenbus_dev_fatal(dev, err, "reading script"); + goto fail; + } + + be->hotplug_script = script; + err = xenbus_switch_state(dev, XenbusStateInitWait); if 
(err) goto fail; @@ -379,22 +392,14 @@ static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *env) { struct backend_info *be = dev_get_drvdata(&xdev->dev); - char *val; - val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL); - if (IS_ERR(val)) { - int err = PTR_ERR(val); - xenbus_dev_fatal(xdev, err, "reading script"); - return err; - } else { - if (add_uevent_var(env, "script=%s", val)) { - kfree(val); - return -ENOMEM; - } - kfree(val); - } + if (!be) + return 0; - if (!be || !be->vif) + if (add_uevent_var(env, "script=%s", be->hotplug_script)) + return -ENOMEM; + + if (!be->vif) return 0; return add_uevent_var(env, "vif=%s", be->vif->dev->name); @@ -793,6 +798,7 @@ static void connect(struct backend_info *be) goto err; } + queue->credit_bytes = credit_bytes; queue->remaining_credit = credit_bytes; queue->credit_usec = credit_usec; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 3f45afd4382e..c89ca26e254d 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1560,9 +1560,8 @@ static int xennet_init_queue(struct netfront_queue *queue) spin_lock_init(&queue->tx_lock); spin_lock_init(&queue->rx_lock); - init_timer(&queue->rx_refill_timer); - queue->rx_refill_timer.data = (unsigned long)queue; - queue->rx_refill_timer.function = rx_refill_timeout; + setup_timer(&queue->rx_refill_timer, rx_refill_timeout, + (unsigned long)queue); snprintf(queue->name, sizeof(queue->name), "%s-q%u", queue->info->netdev->name, queue->id); @@ -1698,6 +1697,7 @@ static void xennet_destroy_queues(struct netfront_info *info) if (netif_running(info->netdev)) napi_disable(&queue->napi); + del_timer_sync(&queue->rx_refill_timer); netif_napi_del(&queue->napi); } @@ -2102,9 +2102,6 @@ static const struct attribute_group xennet_dev_group = { static int xennet_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); - unsigned int num_queues = info->netdev->real_num_tx_queues; - struct netfront_queue *queue = NULL; - unsigned int i = 0; dev_dbg(&dev->dev, "%s\n", dev->nodename); @@ -2112,16 +2109,7 @@ static int xennet_remove(struct xenbus_device *dev) unregister_netdev(info->netdev); - for (i = 0; i < num_queues; ++i) { - queue = &info->queues[i]; - del_timer_sync(&queue->rx_refill_timer); - } - - if (num_queues) { - kfree(info->queues); - info->queues = NULL; - } - + xennet_destroy_queues(info); xennet_free_netdev(info->netdev); return 0; diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c index cd29b1038c5e..15f9b7c9e4d3 100644 --- a/drivers/ntb/ntb_hw.c +++ b/drivers/ntb/ntb_hw.c @@ -1660,6 +1660,7 @@ static int ntb_atom_detect(struct ntb_device *ndev) u32 ppd; ndev->hw_type = BWD_HW; + ndev->limits.max_mw = BWD_MAX_MW; rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &ppd); if (rc) @@ -1778,7 +1779,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_warn(&pdev->dev, "Cannot remap BAR %d\n", MW_TO_BAR(i)); rc = -EIO; - goto err3; + goto err4; } } diff --git a/drivers/of/base.c b/drivers/of/base.c index 99764db0875a..f0650265febf 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -189,7 +189,7 @@ int __of_attach_node_sysfs(struct device_node *np) return 0; } -static int __init of_init(void) +void __init of_core_init(void) { struct device_node *np; @@ -198,7 +198,8 @@ static int __init of_init(void) of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj); if (!of_kset) { mutex_unlock(&of_mutex); - return -ENOMEM; + pr_err("devicetree: failed to 
register existing nodes\n"); + return; } for_each_of_allnodes(np) __of_attach_node_sysfs(np); @@ -207,10 +208,7 @@ static int __init of_init(void) /* Symlink in /proc as required by userspace ABI */ if (of_root) proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base"); - - return 0; } -core_initcall(of_init); static struct property *__of_find_property(const struct device_node *np, const char *name, int *lenp) diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index 3351ef408125..53826b84e0ec 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -225,7 +225,7 @@ void __of_attach_node(struct device_node *np) phandle = __of_get_property(np, "phandle", &sz); if (!phandle) phandle = __of_get_property(np, "linux,phandle", &sz); - if (IS_ENABLED(PPC_PSERIES) && !phandle) + if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle) phandle = __of_get_property(np, "ibm,phandle", &sz); np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 4fd0cacf7ca0..508cc56130e3 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -428,16 +428,19 @@ static void __assign_resources_sorted(struct list_head *head, * consistent. */ if (add_align > dev_res->res->start) { + resource_size_t r_size = resource_size(dev_res->res); + dev_res->res->start = add_align; - dev_res->res->end = add_align + - resource_size(dev_res->res); + dev_res->res->end = add_align + r_size - 1; list_for_each_entry(dev_res2, head, list) { align = pci_resource_alignment(dev_res2->dev, dev_res2->res); - if (add_align > align) + if (add_align > align) { list_move_tail(&dev_res->list, &dev_res2->list); + break; + } } } diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index a53bd5b52df9..fc9b9f0ea91e 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -38,7 +38,9 @@ config ARMADA375_USBCLUSTER_PHY config PHY_DM816X_USB tristate "TI dm816x USB PHY driver" depends on ARCH_OMAP2PLUS + depends on USB_SUPPORT select GENERIC_PHY + select USB_PHY help Enable this for dm816x USB to work. @@ -97,8 +99,9 @@ config OMAP_CONTROL_PHY config OMAP_USB2 tristate "OMAP USB2 PHY Driver" depends on ARCH_OMAP2PLUS - depends on USB_PHY + depends on USB_SUPPORT select GENERIC_PHY + select USB_PHY select OMAP_CONTROL_PHY depends on OMAP_OCP2SCP help @@ -122,8 +125,9 @@ config TI_PIPE3 config TWL4030_USB tristate "TWL4030 USB Transceiver Driver" depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS - depends on USB_PHY + depends on USB_SUPPORT select GENERIC_PHY + select USB_PHY help Enable this to support the USB OTG transceiver on TWL4030 family chips (including the TWL5030 and TPS659x0 devices). @@ -304,7 +308,7 @@ config PHY_STIH41X_USB config PHY_QCOM_UFS tristate "Qualcomm UFS PHY driver" - depends on OF && ARCH_MSM + depends on OF && ARCH_QCOM select GENERIC_PHY help Support for UFS PHY on QCOM chipsets. 
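A note on the drivers/pci/setup-bus.c hunk above: it fixes two related bugs at once. A struct resource describes an inclusive range [start, end], so a window of r_size bytes placed at add_align must end at add_align + r_size - 1, and the size has to be snapshotted before ->start is rewritten, otherwise resource_size() is evaluated on a half-updated resource. Below is a minimal userspace sketch of the arithmetic, with a hypothetical struct res / res_size() standing in for the kernel's struct resource / resource_size():

#include <assert.h>
#include <stdint.h>

/* stand-in for the kernel's struct resource: [start, end], both inclusive */
struct res { uint64_t start, end; };

/* mirrors resource_size(): an inclusive range holds end - start + 1 bytes */
static uint64_t res_size(const struct res *r)
{
	return r->end - r->start + 1;
}

int main(void)
{
	struct res r = { .start = 0x1000, .end = 0x1fff }; /* 4 KiB window */
	uint64_t r_size = res_size(&r); /* snapshot BEFORE moving ->start */
	uint64_t add_align = 0x2000;

	r.start = add_align;
	r.end = add_align + r_size - 1; /* 0x2fff; without the "- 1" the
					 * window silently grows by a byte */

	assert(res_size(&r) == r_size); /* size preserved by the fix */
	return 0;
}

The same hunk also adds the previously missing break after list_move_tail(), so the list walk stops once the resource has been requeued at the right position instead of continuing over an entry that was just moved.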
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 3791838f4bd4..63bc12d7a73e 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -530,7 +530,7 @@ struct phy *phy_optional_get(struct device *dev, const char *string) { struct phy *phy = phy_get(dev, string); - if (PTR_ERR(phy) == -ENODEV) + if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV)) phy = NULL; return phy; @@ -584,7 +584,7 @@ struct phy *devm_phy_optional_get(struct device *dev, const char *string) { struct phy *phy = devm_phy_get(dev, string); - if (PTR_ERR(phy) == -ENODEV) + if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV)) phy = NULL; return phy; diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c index 183ef4368101..c1a468686bdc 100644 --- a/drivers/phy/phy-omap-usb2.c +++ b/drivers/phy/phy-omap-usb2.c @@ -275,6 +275,7 @@ static int omap_usb2_probe(struct platform_device *pdev) phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k"); if (IS_ERR(phy->wkupclk)) { dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n"); + pm_runtime_disable(phy->dev); return PTR_ERR(phy->wkupclk); } else { dev_warn(&pdev->dev, diff --git a/drivers/phy/phy-rcar-gen2.c b/drivers/phy/phy-rcar-gen2.c index 778276aba3aa..97d45f47d1ad 100644 --- a/drivers/phy/phy-rcar-gen2.c +++ b/drivers/phy/phy-rcar-gen2.c @@ -23,7 +23,7 @@ #define USBHS_LPSTS 0x02 #define USBHS_UGCTRL 0x80 #define USBHS_UGCTRL2 0x84 -#define USBHS_UGSTS 0x88 /* The manuals have 0x90 */ +#define USBHS_UGSTS 0x88 /* From technical update */ /* Low Power Status register (LPSTS) */ #define USBHS_LPSTS_SUSPM 0x4000 @@ -41,7 +41,7 @@ #define USBHS_UGCTRL2_USB0SEL_HS_USB 0x00000030 /* USB General status register (UGSTS) */ -#define USBHS_UGSTS_LOCK 0x00000300 /* The manuals have 0x3 */ +#define USBHS_UGSTS_LOCK 0x00000100 /* From technical update */ #define PHYS_PER_CHANNEL 2 diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c index 4ad5c1a996e3..e406e3d8c1c7 100644 --- a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c @@ -643,7 +643,9 @@ static const struct cygnus_gpio_pin_range cygnus_gpio_pintable[] = { CYGNUS_PINRANGE(87, 104, 12), CYGNUS_PINRANGE(99, 102, 2), CYGNUS_PINRANGE(101, 90, 4), - CYGNUS_PINRANGE(105, 116, 10), + CYGNUS_PINRANGE(105, 116, 6), + CYGNUS_PINRANGE(111, 100, 2), + CYGNUS_PINRANGE(113, 122, 4), CYGNUS_PINRANGE(123, 11, 1), CYGNUS_PINRANGE(124, 38, 4), CYGNUS_PINRANGE(128, 43, 1), diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 82f691eeeec4..732ff757a95f 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1292,6 +1292,49 @@ static void chv_gpio_irq_unmask(struct irq_data *d) chv_gpio_irq_mask_unmask(d, false); } +static unsigned chv_gpio_irq_startup(struct irq_data *d) +{ + /* + * Check if the interrupt has been requested with 0 as triggering + * type. In that case it is assumed that the current values + * programmed to the hardware are used (e.g BIOS configured + * defaults). + * + * In that case ->irq_set_type() will never be called so we need to + * read back the values from hardware now, set correct flow handler + * and update mappings before the interrupt is being used. 
+ */ + if (irqd_get_trigger_type(d) == IRQ_TYPE_NONE) { + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc); + unsigned offset = irqd_to_hwirq(d); + int pin = chv_gpio_offset_to_pin(pctrl, offset); + irq_flow_handler_t handler; + unsigned long flags; + u32 intsel, value; + + intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); + intsel &= CHV_PADCTRL0_INTSEL_MASK; + intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; + + value = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1)); + if (value & CHV_PADCTRL1_INTWAKECFG_LEVEL) + handler = handle_level_irq; + else + handler = handle_edge_irq; + + spin_lock_irqsave(&pctrl->lock, flags); + if (!pctrl->intr_lines[intsel]) { + __irq_set_handler_locked(d->irq, handler); + pctrl->intr_lines[intsel] = offset; + } + spin_unlock_irqrestore(&pctrl->lock, flags); + } + + chv_gpio_irq_unmask(d); + return 0; +} + static int chv_gpio_irq_type(struct irq_data *d, unsigned type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -1357,6 +1400,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type) static struct irq_chip chv_gpio_irqchip = { .name = "chv-gpio", + .irq_startup = chv_gpio_irq_startup, .irq_ack = chv_gpio_irq_ack, .irq_mask = chv_gpio_irq_mask, .irq_unmask = chv_gpio_irq_unmask, diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index edcd140e0899..a70a5fe79d44 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -569,7 +569,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc) domain->chip.direction_output = meson_gpio_direction_output; domain->chip.get = meson_gpio_get; domain->chip.set = meson_gpio_set; - domain->chip.base = -1; + domain->chip.base = domain->data->pin_base; domain->chip.ngpio = domain->data->num_pins; domain->chip.can_sleep = false; domain->chip.of_node = domain->of_node; diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c index 2f7ea6229880..9677807db364 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8b.c +++ b/drivers/pinctrl/meson/pinctrl-meson8b.c @@ -876,13 +876,13 @@ static struct meson_domain_data meson8b_domain_data[] = { .banks = meson8b_banks, .num_banks = ARRAY_SIZE(meson8b_banks), .pin_base = 0, - .num_pins = 83, + .num_pins = 130, }, { .name = "ao-bank", .banks = meson8b_ao_banks, .num_banks = ARRAY_SIZE(meson8b_ao_banks), - .pin_base = 83, + .pin_base = 130, .num_pins = 16, }, }; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 9bb9ad6d4a1b..28f328136f0d 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -2897,7 +2897,7 @@ static ssize_t hotkey_wakeup_reason_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason); } -static DEVICE_ATTR_RO(hotkey_wakeup_reason); +static DEVICE_ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL); static void hotkey_wakeup_reason_notify_change(void) { @@ -2913,7 +2913,8 @@ static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack); } -static DEVICE_ATTR_RO(hotkey_wakeup_hotunplug_complete); +static DEVICE_ATTR(wakeup_hotunplug_complete, S_IRUGO, + hotkey_wakeup_hotunplug_complete_show, NULL); static void hotkey_wakeup_hotunplug_complete_notify_change(void) { @@ -2978,8 +2979,8 @@ static struct attribute *hotkey_attributes[] __initdata = { &dev_attr_hotkey_enable.attr, 
&dev_attr_hotkey_bios_enabled.attr, &dev_attr_hotkey_bios_mask.attr, - &dev_attr_hotkey_wakeup_reason.attr, - &dev_attr_hotkey_wakeup_hotunplug_complete.attr, + &dev_attr_wakeup_reason.attr, + &dev_attr_wakeup_hotunplug_complete.attr, &dev_attr_hotkey_mask.attr, &dev_attr_hotkey_all_mask.attr, &dev_attr_hotkey_recommended_mask.attr, @@ -4393,12 +4394,13 @@ static ssize_t wan_enable_store(struct device *dev, attr, buf, count); } -static DEVICE_ATTR_RW(wan_enable); +static DEVICE_ATTR(wwan_enable, S_IWUSR | S_IRUGO, + wan_enable_show, wan_enable_store); /* --------------------------------------------------------------------- */ static struct attribute *wan_attributes[] = { - &dev_attr_wan_enable.attr, + &dev_attr_wwan_enable.attr, NULL }; @@ -8138,7 +8140,8 @@ static ssize_t fan_pwm1_enable_store(struct device *dev, return count; } -static DEVICE_ATTR_RW(fan_pwm1_enable); +static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, + fan_pwm1_enable_show, fan_pwm1_enable_store); /* sysfs fan pwm1 ------------------------------------------------------ */ static ssize_t fan_pwm1_show(struct device *dev, @@ -8198,7 +8201,7 @@ static ssize_t fan_pwm1_store(struct device *dev, return (rc) ? rc : count; } -static DEVICE_ATTR_RW(fan_pwm1); +static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, fan_pwm1_show, fan_pwm1_store); /* sysfs fan fan1_input ------------------------------------------------ */ static ssize_t fan_fan1_input_show(struct device *dev, @@ -8215,7 +8218,7 @@ static ssize_t fan_fan1_input_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%u\n", speed); } -static DEVICE_ATTR_RO(fan_fan1_input); +static DEVICE_ATTR(fan1_input, S_IRUGO, fan_fan1_input_show, NULL); /* sysfs fan fan2_input ------------------------------------------------ */ static ssize_t fan_fan2_input_show(struct device *dev, @@ -8232,7 +8235,7 @@ static ssize_t fan_fan2_input_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%u\n", speed); } -static DEVICE_ATTR_RO(fan_fan2_input); +static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL); /* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */ static ssize_t fan_fan_watchdog_show(struct device_driver *drv, @@ -8265,8 +8268,8 @@ static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO, /* --------------------------------------------------------------------- */ static struct attribute *fan_attributes[] = { - &dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr, - &dev_attr_fan_fan1_input.attr, + &dev_attr_pwm1_enable.attr, &dev_attr_pwm1.attr, + &dev_attr_fan1_input.attr, NULL, /* for fan2_input */ NULL }; @@ -8400,7 +8403,7 @@ static int __init fan_init(struct ibm_init_struct *iibm) if (tp_features.second_fan) { /* attach second fan tachometer */ fan_attributes[ARRAY_SIZE(fan_attributes)-2] = - &dev_attr_fan_fan2_input.attr; + &dev_attr_fan2_input.attr; } rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, &fan_attr_group); @@ -8848,7 +8851,7 @@ static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME); } -static DEVICE_ATTR_RO(thinkpad_acpi_pdev_name); +static DEVICE_ATTR(name, S_IRUGO, thinkpad_acpi_pdev_name_show, NULL); /* --------------------------------------------------------------------- */ @@ -9390,8 +9393,7 @@ static void thinkpad_acpi_module_exit(void) hwmon_device_unregister(tpacpi_hwmon); if (tp_features.sensors_pdev_attrs_registered) - device_remove_file(&tpacpi_sensors_pdev->dev, - &dev_attr_thinkpad_acpi_pdev_name); + device_remove_file(&tpacpi_sensors_pdev->dev, 
&dev_attr_name); if (tpacpi_sensors_pdev) platform_device_unregister(tpacpi_sensors_pdev); if (tpacpi_pdev) @@ -9512,8 +9514,7 @@ static int __init thinkpad_acpi_module_init(void) thinkpad_acpi_module_exit(); return ret; } - ret = device_create_file(&tpacpi_sensors_pdev->dev, - &dev_attr_thinkpad_acpi_pdev_name); + ret = device_create_file(&tpacpi_sensors_pdev->dev, &dev_attr_name); if (ret) { pr_err("unable to create sysfs hwmon device attributes\n"); thinkpad_acpi_module_exit(); diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c index 8a4df7a1f2ee..e628d4c2f2ae 100644 --- a/drivers/regulator/da9052-regulator.c +++ b/drivers/regulator/da9052-regulator.c @@ -394,6 +394,7 @@ static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id, static int da9052_regulator_probe(struct platform_device *pdev) { + const struct mfd_cell *cell = mfd_get_cell(pdev); struct regulator_config config = { }; struct da9052_regulator *regulator; struct da9052 *da9052; @@ -409,7 +410,7 @@ static int da9052_regulator_probe(struct platform_device *pdev) regulator->da9052 = da9052; regulator->info = find_regulator_info(regulator->da9052->chip_id, - pdev->id); + cell->id); if (regulator->info == NULL) { dev_err(&pdev->dev, "invalid regulator ID specified\n"); return -EINVAL; @@ -419,7 +420,7 @@ static int da9052_regulator_probe(struct platform_device *pdev) config.driver_data = regulator; config.regmap = da9052->regmap; if (pdata && pdata->regulators) { - config.init_data = pdata->regulators[pdev->id]; + config.init_data = pdata->regulators[cell->id]; } else { #ifdef CONFIG_OF struct device_node *nproot = da9052->dev->of_node; diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h index 81e83a65a193..32070099c333 100644 --- a/drivers/scsi/be2iscsi/be.h +++ b/drivers/scsi/be2iscsi/be.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,9 +8,9 @@ * Public License is included in this distribution in the file called COPYING. * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 1028760b8a22..447cf7ce606e 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,9 +8,9 @@ * Public License is included in this distribution in the file called COPYING. * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 98897434bcb4..f11d325fe696 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,9 +8,9 @@ * Public License is included in this distribution in the file called COPYING. 
* * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index b7391a3f9f0b..2f0700796842 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,12 +7,12 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h index e0b3b2d1f27a..0c84e1c0763a 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.h +++ b/drivers/scsi/be2iscsi/be_iscsi.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,12 +7,12 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 923a2b5a2439..1f74760ce86c 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,12 +7,12 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. 
* - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ @@ -50,7 +50,7 @@ static unsigned int enable_msix = 1; MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); MODULE_VERSION(BUILD_STR); -MODULE_AUTHOR("Emulex Corporation"); +MODULE_AUTHOR("Avago Technologies"); MODULE_LICENSE("GPL"); module_param(be_iopoll_budget, int, 0); module_param(enable_msix, int, 0); @@ -552,7 +552,7 @@ MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); static struct scsi_host_template beiscsi_sht = { .module = THIS_MODULE, - .name = "Emulex 10Gbe open-iscsi Initiator Driver", + .name = "Avago Technologies 10Gbe open-iscsi Initiator Driver", .proc_name = DRV_NAME, .queuecommand = iscsi_queuecommand, .change_queue_depth = scsi_change_queue_depth, diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 7ee0ffc38514..e70ea26bbc2b 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,12 +7,12 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ @@ -37,7 +37,7 @@ #define DRV_NAME "be2iscsi" #define BUILD_STR "10.4.114.0" -#define BE_NAME "Emulex OneConnect" \ +#define BE_NAME "Avago Technologies OneConnect" \ "Open-iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 681d4e8f003a..c2c4d6975fb7 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,12 +7,12 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index bd81446936fc..9356b9a86b66 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,12 +7,12 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. 
* - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index cb73cf9e9ba5..c140f99772ca 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1129,25 +1129,6 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) phba->lpfc_release_scsi_buf(phba, psb); } -/** - * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB - * @data: A pointer to the immediate command data portion of the IOCB. - * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. - * - * The routine copies the entire FCP command from @fcp_cmnd to @data while - * byte swapping the data to big endian format for transmission on the wire. - **/ -static void -lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) -{ - int i, j; - - for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); - i += sizeof(uint32_t), j++) { - ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); - } -} - /** * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec * @phba: The Hba for which this call is being executed. @@ -1283,7 +1264,6 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * we need to set word 4 of IOCB here */ iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); - lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); return 0; } @@ -4146,6 +4126,24 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_release_scsi_buf(phba, lpfc_cmd); } +/** + * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB + * @data: A pointer to the immediate command data portion of the IOCB. + * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. + * + * The routine copies the entire FCP command from @fcp_cmnd to @data while + * byte swapping the data to big endian format for transmission on the wire. + **/ +static void +lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) +{ + int i, j; + for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); + i += sizeof(uint32_t), j++) { + ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); + } +} + /** * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit * @vport: The virtual port for which this call is being executed. 
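The relocated helper above byte-swaps the FCP command into the IOCB's immediate-data area one 32-bit word at a time. As a minimal standalone sketch of that word-wise big-endian copy, assuming a userspace build where htonl() stands in for the kernel's cpu_to_be32() and the payload length is a hypothetical example rather than the real sizeof(struct fcp_cmnd):

	#include <stdint.h>
	#include <stddef.h>
	#include <arpa/inet.h>	/* htonl(): userspace stand-in for cpu_to_be32() */

	/* Copy src into dst one 32-bit word at a time, converting each
	 * word to big-endian byte order for transmission on the wire. */
	static void copy_words_be32(uint8_t *dst, const void *src, size_t len)
	{
		const uint32_t *s = src;
		uint32_t *d = (uint32_t *)dst;
		size_t i;

		for (i = 0; i < len / sizeof(uint32_t); i++)
			d[i] = htonl(s[i]);
	}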
@@ -4225,6 +4223,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, fcp_cmnd->fcpCntl3 = 0; phba->fc4ControlRequests++; } + if (phba->sli_rev == 3 && + !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) + lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); /* * Finish initializing those IOCB fields that are independent * of the scsi_cmnd request_buffer diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 68c2002e78bf..e32d24ec7a11 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -1020,8 +1021,7 @@ static void tcm_qla2xxx_depend_tpg(struct work_struct *work) struct se_portal_group *se_tpg = &base_tpg->se_tpg; struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha; - if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, - &se_tpg->tpg_group.cg_item)) { + if (!target_depend_item(&se_tpg->tpg_group.cg_item)) { atomic_set(&base_tpg->lport_tpg_enabled, 1); qlt_enable_vha(base_vha); } @@ -1037,8 +1037,7 @@ static void tcm_qla2xxx_undepend_tpg(struct work_struct *work) if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) { atomic_set(&base_tpg->lport_tpg_enabled, 0); - configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, - &se_tpg->tpg_group.cg_item); + target_undepend_item(&se_tpg->tpg_group.cg_item); } complete(&base_tpg->tpg_base_comp); } diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 8f6d0fb2cd80..a7cfc270bd08 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 79beebf53302..7f9d65fe4fd9 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1600,6 +1600,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) { u64 start_lba = blk_rq_pos(scmd->request); u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512); + u64 factor = scmd->device->sector_size / 512; u64 bad_lba; int info_valid; /* @@ -1621,16 +1622,9 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) if (scsi_bufflen(scmd) <= scmd->device->sector_size) return 0; - if (scmd->device->sector_size < 512) { - /* only legitimate sector_size here is 256 */ - start_lba <<= 1; - end_lba <<= 1; - } else { - /* be careful ... don't want any overflows */ - unsigned int factor = scmd->device->sector_size / 512; - do_div(start_lba, factor); - do_div(end_lba, factor); - } + /* be careful ... don't want any overflows */ + do_div(start_lba, factor); + do_div(end_lba, factor); /* The bad lba was reported incorrectly, we have no idea where * the error is. 
@@ -2188,8 +2182,7 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) if (sector_size != 512 && sector_size != 1024 && sector_size != 2048 && - sector_size != 4096 && - sector_size != 256) { + sector_size != 4096) { sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n", sector_size); /* @@ -2244,8 +2237,6 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) sdkp->capacity <<= 2; else if (sector_size == 1024) sdkp->capacity <<= 1; - else if (sector_size == 256) - sdkp->capacity >>= 1; blk_queue_physical_block_size(sdp->request_queue, sdkp->physical_block_size); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index d9dad90344d5..3c6584ff65c1 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1600,8 +1600,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) break; default: vm_srb->data_in = UNKNOWN_TYPE; - vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN | - SRB_FLAGS_DATA_OUT); + vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER; break; } diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index bcdb22d5e215..3c1850332a90 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig @@ -4,6 +4,7 @@ config MTK_PMIC_WRAP tristate "MediaTek PMIC Wrapper Support" depends on ARCH_MEDIATEK + depends on RESET_CONTROLLER select REGMAP help Say yes here to add support for MediaTek PMIC Wrapper found diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index db5be1eec54c..f432291feee9 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -443,11 +443,6 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp, static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) { int ret; - u32 val; - - val = pwrap_readl(wrp, PWRAP_WACS2_RDATA); - if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR) - pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR); ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); if (ret) @@ -462,11 +457,6 @@ static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) { int ret; - u32 val; - - val = pwrap_readl(wrp, PWRAP_WACS2_RDATA); - if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR) - pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR); ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); if (ret) @@ -480,6 +470,8 @@ static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA)); + pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR); + return 0; } @@ -563,45 +555,17 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp) static int pwrap_init_reg_clock(struct pmic_wrapper *wrp) { - unsigned long rate_spi; - int ck_mhz; - - rate_spi = clk_get_rate(wrp->clk_spi); - - if (rate_spi > 26000000) - ck_mhz = 26; - else if (rate_spi > 18000000) - ck_mhz = 18; - else - ck_mhz = 0; - - switch (ck_mhz) { - case 18: - if (pwrap_is_mt8135(wrp)) - pwrap_writel(wrp, 0xc, PWRAP_CSHEXT); - pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE); - pwrap_writel(wrp, 0xc, PWRAP_CSHEXT_READ); - pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START); - pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END); - break; - case 26: - if (pwrap_is_mt8135(wrp)) - pwrap_writel(wrp, 0x4, PWRAP_CSHEXT); + if (pwrap_is_mt8135(wrp)) { + pwrap_writel(wrp, 0x4, PWRAP_CSHEXT); pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE); pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ); 
pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START); pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END); - break; - case 0: - if (pwrap_is_mt8135(wrp)) - pwrap_writel(wrp, 0xf, PWRAP_CSHEXT); - pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_WRITE); - pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_READ); - pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_START); - pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_END); - break; - default: - return -EINVAL; + } else { + pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE); + pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ); + pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START); + pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END); } return 0; diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c index 09428412139e..c5352ea4821e 100644 --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c @@ -621,8 +621,8 @@ static u32 ssb_pmu_get_alp_clock_clk0(struct ssb_chipcommon *cc) u32 crystalfreq; const struct pmu0_plltab_entry *e = NULL; - crystalfreq = chipco_read32(cc, SSB_CHIPCO_PMU_CTL) & - SSB_CHIPCO_PMU_CTL_XTALFREQ >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT; + crystalfreq = (chipco_read32(cc, SSB_CHIPCO_PMU_CTL) & + SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT; e = pmu0_plltab_find_entry(crystalfreq); BUG_ON(!e); return e->freq * 1000; @@ -634,7 +634,7 @@ u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc) switch (bus->chip_id) { case 0x5354: - ssb_pmu_get_alp_clock_clk0(cc); + return ssb_pmu_get_alp_clock_clk0(cc); default: ssb_err("ERROR: PMU alp clock unknown for device %04X\n", bus->chip_id); diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c index 15a7ee3859dd..5fe1c22e289b 100644 --- a/drivers/ssb/driver_pcicore.c +++ b/drivers/ssb/driver_pcicore.c @@ -359,12 +359,13 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc) /* * Accessing PCI config without a proper delay after devices reset (not - * GPIO reset) was causing reboots on WRT300N v1.0. + * GPIO reset) was causing reboots on WRT300N v1.0 (BCM4704). * Tested delay 850 us lowered reboot chance to 50-80%, 1000 us fixed it * completely. Flushing all writes was also tested but with no luck. + * The same problem was reported for WRT350N v1 (BCM4705), so we just + * sleep here unconditionally. 
*/ - if (pc->dev->bus->chip_id == 0x4704) - usleep_range(1000, 2000); + usleep_range(1000, 2000); /* Enable PCI bridge BAR0 prefetch and burst */ val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c index 5ff4716b72c3..784b5ecfa849 100644 --- a/drivers/staging/ozwpan/ozhcd.c +++ b/drivers/staging/ozwpan/ozhcd.c @@ -746,8 +746,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport) /* * Context: softirq */ -void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc, - int length, int offset, int total_size) +void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc, + u8 length, u16 offset, u16 total_size) { struct oz_port *port = hport; struct urb *urb; @@ -759,8 +759,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc, if (!urb) return; if (status == 0) { - int copy_len; - int required_size = urb->transfer_buffer_length; + unsigned int copy_len; + unsigned int required_size = urb->transfer_buffer_length; if (required_size > total_size) required_size = total_size; diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h index 4249fa374012..d2a6085345be 100644 --- a/drivers/staging/ozwpan/ozusbif.h +++ b/drivers/staging/ozwpan/ozusbif.h @@ -29,8 +29,8 @@ void oz_usb_request_heartbeat(void *hpd); /* Confirmation functions. */ -void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, - const u8 *desc, int length, int offset, int total_size); +void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, + const u8 *desc, u8 length, u16 offset, u16 total_size); void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data, int data_len); diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c index d434d8c6fff6..f660bb198c65 100644 --- a/drivers/staging/ozwpan/ozusbsvc1.c +++ b/drivers/staging/ozwpan/ozusbsvc1.c @@ -326,7 +326,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx, struct oz_multiple_fixed *body = (struct oz_multiple_fixed *)data_hdr; u8 *data = body->data; - int n = (len - sizeof(struct oz_multiple_fixed)+1) + unsigned int n; + if (!body->unit_size || + len < sizeof(struct oz_multiple_fixed) - 1) + break; + n = (len - (sizeof(struct oz_multiple_fixed) - 1)) / body->unit_size; while (n--) { oz_hcd_data_ind(usb_ctx->hport, body->endpoint, @@ -390,10 +394,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt) case OZ_GET_DESC_RSP: { struct oz_get_desc_rsp *body = (struct oz_get_desc_rsp *)usb_hdr; - int data_len = elt->length - - sizeof(struct oz_get_desc_rsp) + 1; - u16 offs = le16_to_cpu(get_unaligned(&body->offset)); - u16 total_size = + u16 offs, total_size; + u8 data_len; + + if (elt->length < sizeof(struct oz_get_desc_rsp) - 1) + break; + data_len = elt->length - + (sizeof(struct oz_get_desc_rsp) - 1); + offs = le16_to_cpu(get_unaligned(&body->offset)); + total_size = le16_to_cpu(get_unaligned(&body->total_size)); oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n"); oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id, diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c index f1d47a0676c3..ada8d5dafd49 100644 --- a/drivers/staging/rtl8712/rtl8712_led.c +++ b/drivers/staging/rtl8712/rtl8712_led.c @@ -898,11 +898,11 @@ static void SwLedControlMode1(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); 
pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -921,11 +921,11 @@ static void SwLedControlMode1(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedLinkBlinkInProgress = true; @@ -946,15 +946,15 @@ static void SwLedControlMode1(struct _adapter *padapter, if (IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -975,11 +975,11 @@ static void SwLedControlMode1(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } pLed->bLedBlinkInProgress = true; @@ -998,19 +998,19 @@ static void SwLedControlMode1(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1025,23 +1025,23 @@ static void SwLedControlMode1(struct _adapter *padapter, break; case LED_CTL_STOP_WPS: if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); else pLed->bLedWPSBlinkInProgress = true; pLed->CurrLedState = LED_BLINK_WPS_STOP; @@ -1057,7 +1057,7 @@ static void SwLedControlMode1(struct _adapter 
*padapter, break; case LED_CTL_STOP_WPS_FAIL: if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1073,23 +1073,23 @@ static void SwLedControlMode1(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedNoLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } mod_timer(&pLed->BlinkTimer, @@ -1116,7 +1116,7 @@ static void SwLedControlMode2(struct _adapter *padapter, return; if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -1154,11 +1154,11 @@ static void SwLedControlMode2(struct _adapter *padapter, pLed->CurrLedState = LED_ON; pLed->BlinkingLedState = LED_ON; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } @@ -1170,11 +1170,11 @@ static void SwLedControlMode2(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1214,15 +1214,15 @@ static void SwLedControlMode2(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } mod_timer(&pLed->BlinkTimer, @@ -1248,7 +1248,7 @@ static void SwLedControlMode3(struct _adapter *padapter, if (IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -1286,11 +1286,11 @@ static void SwLedControlMode3(struct _adapter *padapter, pLed->CurrLedState = LED_ON; pLed->BlinkingLedState = LED_ON; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if 
(pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } mod_timer(&pLed->BlinkTimer, @@ -1300,11 +1300,11 @@ static void SwLedControlMode3(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1319,7 +1319,7 @@ static void SwLedControlMode3(struct _adapter *padapter, break; case LED_CTL_STOP_WPS: if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } else pLed->bLedWPSBlinkInProgress = true; @@ -1336,7 +1336,7 @@ static void SwLedControlMode3(struct _adapter *padapter, break; case LED_CTL_STOP_WPS_FAIL: if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->CurrLedState = LED_OFF; @@ -1357,15 +1357,15 @@ static void SwLedControlMode3(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } mod_timer(&pLed->BlinkTimer, @@ -1388,7 +1388,7 @@ static void SwLedControlMode4(struct _adapter *padapter, case LED_CTL_START_TO_LINK: if (pLed1->bLedWPSBlinkInProgress) { pLed1->bLedWPSBlinkInProgress = false; - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); pLed1->BlinkingLedState = LED_OFF; pLed1->CurrLedState = LED_OFF; if (pLed1->bLedOn) @@ -1400,11 +1400,11 @@ static void SwLedControlMode4(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } pLed->bLedStartToLinkBlinkInProgress = true; @@ -1426,7 +1426,7 @@ static void SwLedControlMode4(struct _adapter *padapter, if (LedAction == LED_CTL_LINK) { if (pLed1->bLedWPSBlinkInProgress) { pLed1->bLedWPSBlinkInProgress = false; - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); pLed1->BlinkingLedState = LED_OFF; pLed1->CurrLedState = LED_OFF; if (pLed1->bLedOn) @@ -1439,7 +1439,7 @@ static void SwLedControlMode4(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1460,11 +1460,11 @@ static void SwLedControlMode4(struct _adapter *padapter, if (IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); 
pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -1485,7 +1485,7 @@ static void SwLedControlMode4(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } pLed->bLedBlinkInProgress = true; @@ -1503,7 +1503,7 @@ static void SwLedControlMode4(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed1->bLedWPSBlinkInProgress) { pLed1->bLedWPSBlinkInProgress = false; - del_timer_sync(&(pLed1->BlinkTimer)); + del_timer(&pLed1->BlinkTimer); pLed1->BlinkingLedState = LED_OFF; pLed1->CurrLedState = LED_OFF; if (pLed1->bLedOn) @@ -1512,15 +1512,15 @@ static void SwLedControlMode4(struct _adapter *padapter, } if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1538,7 +1538,7 @@ static void SwLedControlMode4(struct _adapter *padapter, break; case LED_CTL_STOP_WPS: /*WPS connect success*/ if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1552,7 +1552,7 @@ static void SwLedControlMode4(struct _adapter *padapter, break; case LED_CTL_STOP_WPS_FAIL: /*WPS authentication fail*/ if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1565,7 +1565,7 @@ static void SwLedControlMode4(struct _adapter *padapter, msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA)); /*LED1 settings*/ if (pLed1->bLedWPSBlinkInProgress) - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); else pLed1->bLedWPSBlinkInProgress = true; pLed1->CurrLedState = LED_BLINK_WPS_STOP; @@ -1578,7 +1578,7 @@ static void SwLedControlMode4(struct _adapter *padapter, break; case LED_CTL_STOP_WPS_FAIL_OVERLAP: /*WPS session overlap*/ if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1591,7 +1591,7 @@ static void SwLedControlMode4(struct _adapter *padapter, msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA)); /*LED1 settings*/ if (pLed1->bLedWPSBlinkInProgress) - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); else pLed1->bLedWPSBlinkInProgress = true; pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP; @@ -1607,31 +1607,31 @@ static void SwLedControlMode4(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedNoLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + 
del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } if (pLed->bLedStartToLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedStartToLinkBlinkInProgress = false; } if (pLed1->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); pLed1->bLedWPSBlinkInProgress = false; } pLed1->BlinkingLedState = LED_UNKNOWN; @@ -1671,7 +1671,7 @@ static void SwLedControlMode5(struct _adapter *padapter, ; /* dummy branch */ else if (pLed->bLedScanBlinkInProgress == false) { if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -1705,7 +1705,7 @@ static void SwLedControlMode5(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } SwLedOff(padapter, pLed); @@ -1756,7 +1756,7 @@ static void SwLedControlMode6(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1772,7 +1772,7 @@ static void SwLedControlMode6(struct _adapter *padapter, case LED_CTL_STOP_WPS_FAIL: case LED_CTL_STOP_WPS: if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->CurrLedState = LED_ON; @@ -1784,11 +1784,11 @@ static void SwLedControlMode6(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } SwLedOff(padapter, pLed); diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c index 1a1c38f885d6..e35854d28f90 100644 --- a/drivers/staging/rtl8712/rtl871x_cmd.c +++ b/drivers/staging/rtl8712/rtl871x_cmd.c @@ -910,7 +910,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter, if (pcmd->res != H2C_SUCCESS) mod_timer(&pmlmepriv->assoc_timer, jiffies + msecs_to_jiffies(1)); - del_timer_sync(&pmlmepriv->assoc_timer); + del_timer(&pmlmepriv->assoc_timer); #ifdef __BIG_ENDIAN /* endian_convert */ pnetwork->Length = le32_to_cpu(pnetwork->Length); diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c index fb2b195b90af..c044b0e55ba9 100644 --- a/drivers/staging/rtl8712/rtl871x_mlme.c +++ b/drivers/staging/rtl8712/rtl871x_mlme.c @@ -582,7 +582,7 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf) spin_lock_irqsave(&pmlmepriv->lock, irqL); if (check_fwstate(pmlmepriv, 
_FW_UNDER_SURVEY) == true) { - del_timer_sync(&pmlmepriv->scan_to_timer); + del_timer(&pmlmepriv->scan_to_timer); _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); } @@ -696,7 +696,7 @@ void r8712_ind_disconnect(struct _adapter *padapter) } if (padapter->pwrctrlpriv.pwr_mode != padapter->registrypriv.power_mgnt) { - del_timer_sync(&pmlmepriv->dhcp_timer); + del_timer(&pmlmepriv->dhcp_timer); r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt, padapter->registrypriv.smart_ps); } @@ -910,7 +910,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf) if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) r8712_indicate_connect(adapter); - del_timer_sync(&pmlmepriv->assoc_timer); + del_timer(&pmlmepriv->assoc_timer); } else goto ignore_joinbss_callback; } else { diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c index aaa584435c87..9bc04f474d18 100644 --- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c +++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c @@ -103,7 +103,7 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter, if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80)) return; - del_timer_sync(&padapter->pwrctrlpriv.rpwm_check_timer); + del_timer(&padapter->pwrctrlpriv.rpwm_check_timer); _enter_pwrlock(&pwrpriv->lock); pwrpriv->cpwm = (preportpwrstate->state) & 0xf; if (pwrpriv->cpwm >= PS_STATE_S2) { diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c index 7bb96c47f188..a9b93d0f6f56 100644 --- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c +++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c @@ -198,7 +198,7 @@ void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta) * cancel reordering_ctrl_timer */ for (i = 0; i < 16; i++) { preorder_ctrl = &psta->recvreorder_ctrl[i]; - del_timer_sync(&preorder_ctrl->reordering_ctrl_timer); + del_timer(&preorder_ctrl->reordering_ctrl_timer); } spin_lock(&(pfree_sta_queue->lock)); /* insert into free_sta_queue; 20061114 */ diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c index bc95ce89af06..5ab2f6978209 100644 --- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c @@ -379,7 +379,7 @@ void rtw_cfg80211_indicate_disconnect(struct rtw_adapter *padapter) GFP_ATOMIC); } else { cfg80211_disconnected(padapter->pnetdev, 0, NULL, - 0, GFP_ATOMIC); + 0, false, GFP_ATOMIC); } } } diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 7c87aecf4744..342e2b30c48f 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -722,7 +722,7 @@ void prism2_connect_result(wlandevice_t *wlandev, u8 failed) void prism2_disconnected(wlandevice_t *wlandev) { cfg80211_disconnected(wlandev->netdev, 0, NULL, - 0, GFP_KERNEL); + 0, false, GFP_KERNEL); } void prism2_roamed(wlandevice_t *wlandev) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 34871a628b11..eb66d36db5f7 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -230,7 +231,7 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg) * Here we serialize access across the TIQN+TPG Tuple. 
*/ ret = down_interruptible(&tpg->np_login_sem); - if ((ret != 0) || signal_pending(current)) + if (ret != 0) return -1; spin_lock_bh(&tpg->tpg_state_lock); diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 8ce94ff744e6..70d799dfab03 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -346,6 +346,7 @@ static int iscsi_login_zero_tsih_s1( if (IS_ERR(sess->se_sess)) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); + kfree(sess->sess_ops); kfree(sess); return -ENOMEM; } diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index e8a240818353..5e3295fe404d 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -161,10 +161,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np( int iscsit_get_tpg( struct iscsi_portal_group *tpg) { - int ret; - - ret = mutex_lock_interruptible(&tpg->tpg_access_lock); - return ((ret != 0) || signal_pending(current)) ? -1 : 0; + return mutex_lock_interruptible(&tpg->tpg_access_lock); } void iscsit_put_tpg(struct iscsi_portal_group *tpg) diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 75cbde1f7c5b..4f8d4d459aa4 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -704,7 +704,7 @@ target_alua_state_check(struct se_cmd *cmd) if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) return 0; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; if (!port) @@ -2377,7 +2377,7 @@ ssize_t core_alua_store_secondary_write_metadata( int core_setup_alua(struct se_device *dev) { - if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV && + if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { struct t10_alua_lu_gp_member *lu_gp_mem; diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index ddaf76a4ac2a..e7b0430a0575 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -212,10 +212,6 @@ static struct config_group *target_core_register_fabric( pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" " %s\n", tf->tf_group.cg_item.ci_name); - /* - * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item() - */ - tf->tf_ops.tf_subsys = tf->tf_subsys; tf->tf_fabric = &tf->tf_group.cg_item; pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" " for %s\n", name); @@ -291,10 +287,17 @@ static struct configfs_subsystem target_core_fabrics = { }, }; -struct configfs_subsystem *target_core_subsystem[] = { - &target_core_fabrics, - NULL, -}; +int target_depend_item(struct config_item *item) +{ + return configfs_depend_item(&target_core_fabrics, item); +} +EXPORT_SYMBOL(target_depend_item); + +void target_undepend_item(struct config_item *item) +{ + return configfs_undepend_item(&target_core_fabrics, item); +} +EXPORT_SYMBOL(target_undepend_item); /*############################################################################## // Start functions called by external Target Fabrics Modules @@ -467,7 +470,6 @@ int target_register_template(const struct target_core_fabric_ops *fo) * struct target_fabric_configfs->tf_cit_tmpl */ tf->tf_module = fo->module; - tf->tf_subsys = target_core_subsystem[0]; 
snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name); tf->tf_ops = *fo; @@ -809,7 +811,7 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev, { int ret; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return sprintf(page, "Passthrough\n"); spin_lock(&dev->dev_reservation_lock); @@ -960,7 +962,7 @@ SE_DEV_PR_ATTR_RO(res_pr_type); static ssize_t target_core_dev_pr_show_attr_res_type( struct se_device *dev, char *page) { - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return sprintf(page, "SPC_PASSTHROUGH\n"); else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) return sprintf(page, "SPC2_RESERVATIONS\n"); @@ -973,7 +975,7 @@ SE_DEV_PR_ATTR_RO(res_type); static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( struct se_device *dev, char *page) { - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; return sprintf(page, "APTPL Bit Status: %s\n", @@ -988,7 +990,7 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active); static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( struct se_device *dev, char *page) { - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; return sprintf(page, "Ready to process PR APTPL metadata..\n"); @@ -1035,7 +1037,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( u16 port_rpti = 0, tpgt = 0; u8 type = 0, scope; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) return 0; @@ -2870,7 +2872,7 @@ static int __init target_core_init_configfs(void) { struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; struct config_group *lu_gp_cg = NULL; - struct configfs_subsystem *subsys; + struct configfs_subsystem *subsys = &target_core_fabrics; struct t10_alua_lu_gp *lu_gp; int ret; @@ -2878,7 +2880,6 @@ static int __init target_core_init_configfs(void) " Engine: %s on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); - subsys = target_core_subsystem[0]; config_group_init(&subsys->su_group); mutex_init(&subsys->su_mutex); @@ -3008,13 +3009,10 @@ static int __init target_core_init_configfs(void) static void __exit target_core_exit_configfs(void) { - struct configfs_subsystem *subsys; struct config_group *hba_cg, *alua_cg, *lu_gp_cg; struct config_item *item; int i; - subsys = target_core_subsystem[0]; - lu_gp_cg = &alua_lu_gps_group; for (i = 0; lu_gp_cg->default_groups[i]; i++) { item = &lu_gp_cg->default_groups[i]->cg_item; @@ -3045,8 +3043,8 @@ static void __exit target_core_exit_configfs(void) * We expect subsys->su_group.default_groups to be released * by configfs subsystem provider logic.. 
*/ - configfs_unregister_subsystem(subsys); - kfree(subsys->su_group.default_groups); + configfs_unregister_subsystem(&target_core_fabrics); + kfree(target_core_fabrics.su_group.default_groups); core_alua_free_lu_gp(default_lu_gp); default_lu_gp = NULL; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 7faa6aef9a4d..ce5f768181ff 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -527,7 +528,7 @@ static void core_export_port( list_add_tail(&port->sep_list, &dev->dev_sep_list); spin_unlock(&dev->se_port_lock); - if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV && + if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { @@ -1603,7 +1604,7 @@ int target_configure_device(struct se_device *dev) * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI * passthrough because this is being provided by the backend LLD. */ - if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { + if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) { strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8); strncpy(&dev->t10_wwn.model[0], dev->transport->inquiry_prod, 16); @@ -1707,3 +1708,76 @@ void core_dev_release_virtual_lun0(void) target_free_device(g_lun0_dev); core_delete_hba(hba); } + +/* + * Common CDB parsing for kernel and user passthrough. + */ +sense_reason_t +passthrough_parse_cdb(struct se_cmd *cmd, + sense_reason_t (*exec_cmd)(struct se_cmd *cmd)) +{ + unsigned char *cdb = cmd->t_task_cdb; + + /* + * Clear a lun set in the cdb if the initiator talking to us spoke + * an old standards version, as we can't assume the underlying device + * won't choke up on it. + */ + switch (cdb[0]) { + case READ_10: /* SBC - RDProtect */ + case READ_12: /* SBC - RDProtect */ + case READ_16: /* SBC - RDProtect */ + case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ + case VERIFY: /* SBC - VRProtect */ + case VERIFY_16: /* SBC - VRProtect */ + case WRITE_VERIFY: /* SBC - VRProtect */ + case WRITE_VERIFY_12: /* SBC - VRProtect */ + case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ + break; + default: + cdb[1] &= 0x1f; /* clear logical unit number */ + break; + } + + /* + * For REPORT LUNS we always need to emulate the response, for everything + * else, pass it up.
+ */ + if (cdb[0] == REPORT_LUNS) { + cmd->execute_cmd = spc_emulate_report_luns; + return TCM_NO_SENSE; + } + + /* Set DATA_CDB flag for ops that should have it */ + switch (cdb[0]) { + case READ_6: + case READ_10: + case READ_12: + case READ_16: + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + case WRITE_VERIFY: + case WRITE_VERIFY_12: + case 0x8e: /* WRITE_VERIFY_16 */ + case COMPARE_AND_WRITE: + case XDWRITEREAD_10: + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + break; + case VARIABLE_LENGTH_CMD: + switch (get_unaligned_be16(&cdb[8])) { + case READ_32: + case WRITE_32: + case 0x0c: /* WRITE_VERIFY_32 */ + case XDWRITEREAD_32: + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + break; + } + } + + cmd->execute_cmd = exec_cmd; + + return TCM_NO_SENSE; +} +EXPORT_SYMBOL(passthrough_parse_cdb); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index f7e6e51aed36..a3a3d85142e5 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -958,7 +959,6 @@ static struct se_subsystem_api fileio_template = { .inquiry_prod = "FILEIO", .inquiry_rev = FD_VERSION, .owner = THIS_MODULE, - .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, .attach_hba = fd_attach_hba, .detach_hba = fd_detach_hba, .alloc_device = fd_alloc_device, diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 1b7947c2510f..8c965683789f 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -904,7 +904,6 @@ static struct se_subsystem_api iblock_template = { .inquiry_prod = "IBLOCK", .inquiry_rev = IBLOCK_VERSION, .owner = THIS_MODULE, - .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, .attach_hba = iblock_attach_hba, .detach_hba = iblock_detach_hba, .alloc_device = iblock_alloc_device, diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 874a9bc988d8..68bd7f5d9f73 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -4,9 +4,6 @@ /* target_core_alua.c */ extern struct t10_alua_lu_gp *default_lu_gp; -/* target_core_configfs.c */ -extern struct configfs_subsystem *target_core_subsystem[]; - /* target_core_device.c */ extern struct mutex g_device_mutex; extern struct list_head g_device_list; diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index c1aa9655e96e..61dac494423e 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -1367,41 +1368,26 @@ void core_scsi3_free_all_registrations( static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) { - return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, - &tpg->tpg_group.cg_item); + return target_depend_item(&tpg->tpg_group.cg_item); } static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) { - configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, - &tpg->tpg_group.cg_item); - + target_undepend_item(&tpg->tpg_group.cg_item); atomic_dec_mb(&tpg->tpg_pr_ref_count); } static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) { - struct se_portal_group *tpg = nacl->se_tpg; - if (nacl->dynamic_node_acl) return 0; - - return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, - &nacl->acl_group.cg_item); + return target_depend_item(&nacl->acl_group.cg_item); } static void core_scsi3_nodeacl_undepend_item(struct 
se_node_acl *nacl) { - struct se_portal_group *tpg = nacl->se_tpg; - - if (nacl->dynamic_node_acl) { - atomic_dec_mb(&nacl->acl_pr_ref_count); - return; - } - - configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, - &nacl->acl_group.cg_item); - + if (!nacl->dynamic_node_acl) + target_undepend_item(&nacl->acl_group.cg_item); atomic_dec_mb(&nacl->acl_pr_ref_count); } @@ -1419,8 +1405,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) nacl = lun_acl->se_lun_nacl; tpg = nacl->se_tpg; - return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, - &lun_acl->se_lun_group.cg_item); + return target_depend_item(&lun_acl->se_lun_group.cg_item); } static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) @@ -1438,9 +1423,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) nacl = lun_acl->se_lun_nacl; tpg = nacl->se_tpg; - configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, - &lun_acl->se_lun_group.cg_item); - + target_undepend_item(&lun_acl->se_lun_group.cg_item); atomic_dec_mb(&se_deve->pr_ref_count); } @@ -4111,7 +4094,7 @@ target_check_reservation(struct se_cmd *cmd) return 0; if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) return 0; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; spin_lock(&dev->dev_reservation_lock); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index f6c954c4635f..ecc5eaef13d6 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -521,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev) " pdv_host_id: %d\n", pdv->pdv_host_id); return -EINVAL; } + pdv->pdv_lld_host = sh; } } else { if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) { @@ -603,6 +604,8 @@ static void pscsi_free_device(struct se_device *dev) if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && (phv->phv_lld_host != NULL)) scsi_host_put(phv->phv_lld_host); + else if (pdv->pdv_lld_host) + scsi_host_put(pdv->pdv_lld_host); if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) scsi_device_put(sd); @@ -970,64 +973,13 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } -/* - * Clear a lun set in the cdb if the initiator talking to use spoke - * and old standards version, as we can't assume the underlying device - * won't choke up on it. - */ -static inline void pscsi_clear_cdb_lun(unsigned char *cdb) -{ - switch (cdb[0]) { - case READ_10: /* SBC - RDProtect */ - case READ_12: /* SBC - RDProtect */ - case READ_16: /* SBC - RDProtect */ - case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ - case VERIFY: /* SBC - VRProtect */ - case VERIFY_16: /* SBC - VRProtect */ - case WRITE_VERIFY: /* SBC - VRProtect */ - case WRITE_VERIFY_12: /* SBC - VRProtect */ - case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ - break; - default: - cdb[1] &= 0x1f; /* clear logical unit number */ - break; - } -} - static sense_reason_t pscsi_parse_cdb(struct se_cmd *cmd) { - unsigned char *cdb = cmd->t_task_cdb; - if (cmd->se_cmd_flags & SCF_BIDI) return TCM_UNSUPPORTED_SCSI_OPCODE; - pscsi_clear_cdb_lun(cdb); - - /* - * For REPORT LUNS we always need to emulate the response, for everything - * else the default for pSCSI is to pass the command to the underlying - * LLD / physical hardware. 
- */ - switch (cdb[0]) { - case REPORT_LUNS: - cmd->execute_cmd = spc_emulate_report_luns; - return 0; - case READ_6: - case READ_10: - case READ_12: - case READ_16: - case WRITE_6: - case WRITE_10: - case WRITE_12: - case WRITE_16: - case WRITE_VERIFY: - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - /* FALLTHROUGH*/ - default: - cmd->execute_cmd = pscsi_execute_cmd; - return 0; - } + return passthrough_parse_cdb(cmd, pscsi_execute_cmd); } static sense_reason_t @@ -1189,7 +1141,7 @@ static struct configfs_attribute *pscsi_backend_dev_attrs[] = { static struct se_subsystem_api pscsi_template = { .name = "pscsi", .owner = THIS_MODULE, - .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, + .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, .attach_hba = pscsi_attach_hba, .detach_hba = pscsi_detach_hba, .pmode_enable_hba = pscsi_pmode_enable_hba, diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index 1bd757dff8ee..820d3052b775 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -45,6 +45,7 @@ struct pscsi_dev_virt { int pdv_lun_id; struct block_device *pdv_bd; struct scsi_device *pdv_sd; + struct Scsi_Host *pdv_lld_host; } ____cacheline_aligned; typedef enum phv_modes { diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index a263bf5fab8d..d16489b6a1a4 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -733,7 +733,6 @@ static struct se_subsystem_api rd_mcp_template = { .name = "rd_mcp", .inquiry_prod = "RAMDISK-MCP", .inquiry_rev = RD_MCP_VERSION, - .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, .attach_hba = rd_attach_hba, .detach_hba = rd_detach_hba, .alloc_device = rd_alloc_device, diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 8855781ac653..733824e3825f 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -568,7 +568,7 @@ sbc_compare_and_write(struct se_cmd *cmd) * comparision using SGLs at cmd->t_bidi_data_sg.. 
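 * (down_interruptible() itself returns -EINTR when it is interrupted by
 * a signal, so a separate signal_pending() check is not needed here)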
*/ rc = down_interruptible(&dev->caw_sem); - if ((rc != 0) || signal_pending(current)) { + if (rc != 0) { cmd->transport_complete_callback = NULL; return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 3fe5cb240b6f..2b17bddeff0f 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -1196,7 +1197,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd) * Check if SAM Task Attribute emulation is enabled for this * struct se_device storage object */ - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; if (cmd->sam_task_attr == TCM_ACA_TAG) { @@ -1770,7 +1771,7 @@ static int target_write_prot_action(struct se_cmd *cmd) sectors, 0, NULL, 0); if (unlikely(cmd->pi_err)) { spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; + cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); spin_unlock_irq(&cmd->t_state_lock); transport_generic_request_failure(cmd, cmd->pi_err); return -1; @@ -1787,7 +1788,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return false; /* @@ -1868,7 +1869,7 @@ void target_execute_cmd(struct se_cmd *cmd) if (target_handle_task_attr(cmd)) { spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; + cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT); spin_unlock_irq(&cmd->t_state_lock); return; } @@ -1912,7 +1913,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return; if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { @@ -1957,8 +1958,7 @@ static void transport_complete_qf(struct se_cmd *cmd) case DMA_TO_DEVICE: if (cmd->se_cmd_flags & SCF_BIDI) { ret = cmd->se_tfo->queue_data_in(cmd); - if (ret < 0) - break; + break; } /* Fall through for DMA_TO_DEVICE */ case DMA_NONE: diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index dbc872a6c981..edc955558250 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -71,13 +72,6 @@ struct tcmu_hba { u32 host_id; }; -/* User wants all cmds or just some */ -enum passthru_level { - TCMU_PASS_ALL = 0, - TCMU_PASS_IO, - TCMU_PASS_INVALID, -}; - #define TCMU_CONFIG_LEN 256 struct tcmu_dev { @@ -89,7 +83,6 @@ struct tcmu_dev { #define TCMU_DEV_BIT_OPEN 0 #define TCMU_DEV_BIT_BROKEN 1 unsigned long flags; - enum passthru_level pass_level; struct uio_info uio_info; @@ -683,8 +676,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) setup_timer(&udev->timeout, tcmu_device_timedout, (unsigned long)udev); - udev->pass_level = TCMU_PASS_ALL; - return &udev->se_dev; } @@ -948,13 +939,13 @@ static void tcmu_free_device(struct se_device *dev) } enum { - Opt_dev_config, Opt_dev_size, Opt_err, Opt_pass_level, + Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err, }; static match_table_t tokens = { {Opt_dev_config, "dev_config=%s"}, 
{Opt_dev_size, "dev_size=%u"}, - {Opt_pass_level, "pass_level=%u"}, + {Opt_hw_block_size, "hw_block_size=%u"}, {Opt_err, NULL} }; @@ -965,7 +956,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, char *orig, *ptr, *opts, *arg_p; substring_t args[MAX_OPT_ARGS]; int ret = 0, token; - int arg; + unsigned long tmp_ul; opts = kstrdup(page, GFP_KERNEL); if (!opts) @@ -998,15 +989,23 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, if (ret < 0) pr_err("kstrtoul() failed for dev_size=\n"); break; - case Opt_pass_level: - match_int(args, &arg); - if (arg >= TCMU_PASS_INVALID) { - pr_warn("TCMU: Invalid pass_level: %d\n", arg); + case Opt_hw_block_size: + arg_p = match_strdup(&args[0]); + if (!arg_p) { + ret = -ENOMEM; break; } - - pr_debug("TCMU: Setting pass_level to %d\n", arg); - udev->pass_level = arg; + ret = kstrtoul(arg_p, 0, &tmp_ul); + kfree(arg_p); + if (ret < 0) { + pr_err("kstrtoul() failed for hw_block_size=\n"); + break; + } + if (!tmp_ul) { + pr_err("hw_block_size must be nonzero\n"); + break; + } + dev->dev_attrib.hw_block_size = tmp_ul; break; default: break; @@ -1024,8 +1023,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) bl = sprintf(b + bl, "Config: %s ", udev->dev_config[0] ? udev->dev_config : "NULL"); - bl += sprintf(b + bl, "Size: %zu PassLevel: %u\n", - udev->dev_size, udev->pass_level); + bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size); return bl; } @@ -1038,20 +1036,6 @@ static sector_t tcmu_get_blocks(struct se_device *dev) dev->dev_attrib.block_size); } -static sense_reason_t -tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents, - enum dma_data_direction data_direction) -{ - int ret; - - ret = tcmu_queue_cmd(se_cmd); - - if (ret != 0) - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - else - return TCM_NO_SENSE; -} - static sense_reason_t tcmu_pass_op(struct se_cmd *se_cmd) { @@ -1063,91 +1047,29 @@ tcmu_pass_op(struct se_cmd *se_cmd) return TCM_NO_SENSE; } -static struct sbc_ops tcmu_sbc_ops = { - .execute_rw = tcmu_execute_rw, - .execute_sync_cache = tcmu_pass_op, - .execute_write_same = tcmu_pass_op, - .execute_write_same_unmap = tcmu_pass_op, - .execute_unmap = tcmu_pass_op, -}; - static sense_reason_t tcmu_parse_cdb(struct se_cmd *cmd) { - unsigned char *cdb = cmd->t_task_cdb; - struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev); - sense_reason_t ret; - - switch (udev->pass_level) { - case TCMU_PASS_ALL: - /* We're just like pscsi, then */ - /* - * For REPORT LUNS we always need to emulate the response, for everything - * else, pass it up. 
- */ - switch (cdb[0]) { - case REPORT_LUNS: - cmd->execute_cmd = spc_emulate_report_luns; - break; - case READ_6: - case READ_10: - case READ_12: - case READ_16: - case WRITE_6: - case WRITE_10: - case WRITE_12: - case WRITE_16: - case WRITE_VERIFY: - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - /* FALLTHROUGH */ - default: - cmd->execute_cmd = tcmu_pass_op; - } - ret = TCM_NO_SENSE; - break; - case TCMU_PASS_IO: - ret = sbc_parse_cdb(cmd, &tcmu_sbc_ops); - break; - default: - pr_err("Unknown tcm-user pass level %d\n", udev->pass_level); - ret = TCM_CHECK_CONDITION_ABORT_CMD; - } - - return ret; + return passthrough_parse_cdb(cmd, tcmu_pass_op); } -DEF_TB_DEFAULT_ATTRIBS(tcmu); +DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type); +TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type); + +DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size); +TB_DEV_ATTR_RO(tcmu, hw_block_size); + +DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors); +TB_DEV_ATTR_RO(tcmu, hw_max_sectors); + +DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth); +TB_DEV_ATTR_RO(tcmu, hw_queue_depth); static struct configfs_attribute *tcmu_backend_dev_attrs[] = { - &tcmu_dev_attrib_emulate_model_alias.attr, - &tcmu_dev_attrib_emulate_dpo.attr, - &tcmu_dev_attrib_emulate_fua_write.attr, - &tcmu_dev_attrib_emulate_fua_read.attr, - &tcmu_dev_attrib_emulate_write_cache.attr, - &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr, - &tcmu_dev_attrib_emulate_tas.attr, - &tcmu_dev_attrib_emulate_tpu.attr, - &tcmu_dev_attrib_emulate_tpws.attr, - &tcmu_dev_attrib_emulate_caw.attr, - &tcmu_dev_attrib_emulate_3pc.attr, - &tcmu_dev_attrib_pi_prot_type.attr, &tcmu_dev_attrib_hw_pi_prot_type.attr, - &tcmu_dev_attrib_pi_prot_format.attr, - &tcmu_dev_attrib_enforce_pr_isids.attr, - &tcmu_dev_attrib_is_nonrot.attr, - &tcmu_dev_attrib_emulate_rest_reord.attr, - &tcmu_dev_attrib_force_pr_aptpl.attr, &tcmu_dev_attrib_hw_block_size.attr, - &tcmu_dev_attrib_block_size.attr, &tcmu_dev_attrib_hw_max_sectors.attr, - &tcmu_dev_attrib_optimal_sectors.attr, &tcmu_dev_attrib_hw_queue_depth.attr, - &tcmu_dev_attrib_queue_depth.attr, - &tcmu_dev_attrib_max_unmap_lba_count.attr, - &tcmu_dev_attrib_max_unmap_block_desc_count.attr, - &tcmu_dev_attrib_unmap_granularity.attr, - &tcmu_dev_attrib_unmap_granularity_alignment.attr, - &tcmu_dev_attrib_max_write_same_len.attr, NULL, }; @@ -1156,7 +1078,7 @@ static struct se_subsystem_api tcmu_template = { .inquiry_prod = "USER", .inquiry_rev = TCMU_VERSION, .owner = THIS_MODULE, - .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, + .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, .attach_hba = tcmu_attach_hba, .detach_hba = tcmu_detach_hba, .alloc_device = tcmu_alloc_device, diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index a600ff15dcfd..8fd680ac941b 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -58,7 +58,6 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op bool src) { struct se_device *se_dev; - struct configfs_subsystem *subsys = target_core_subsystem[0]; unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; int rc; @@ -90,8 +89,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op " se_dev\n", xop->src_dev); } - rc = configfs_depend_item(subsys, - &se_dev->dev_group.cg_item); + rc = target_depend_item(&se_dev->dev_group.cg_item); if (rc != 0) { pr_err("configfs_depend_item attempt failed:" " %d for se_dev: %p\n", rc, se_dev); @@ -99,8 +97,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op 
return rc; } - pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p" - " se_dev->se_dev_group: %p\n", subsys, se_dev, + pr_debug("Called configfs_depend_item for se_dev: %p" + " se_dev->se_dev_group: %p\n", se_dev, &se_dev->dev_group); mutex_unlock(&g_device_mutex); @@ -373,7 +371,6 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) { - struct configfs_subsystem *subsys = target_core_subsystem[0]; struct se_device *remote_dev; if (xop->op_origin == XCOL_SOURCE_RECV_OP) @@ -381,11 +378,11 @@ static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) else remote_dev = xop->src_dev; - pr_debug("Calling configfs_undepend_item for subsys: %p" + pr_debug("Calling configfs_undepend_item for" " remote_dev: %p remote_dev->dev_group: %p\n", - subsys, remote_dev, &remote_dev->dev_group.cg_item); + remote_dev, &remote_dev->dev_group.cg_item); - configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item); + target_undepend_item(&remote_dev->dev_group.cg_item); } static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c index 04d9e23d1ee1..358323c83b4f 100644 --- a/drivers/tty/mips_ejtag_fdc.c +++ b/drivers/tty/mips_ejtag_fdc.c @@ -174,13 +174,13 @@ struct mips_ejtag_fdc_tty { static inline void mips_ejtag_fdc_write(struct mips_ejtag_fdc_tty *priv, unsigned int offs, unsigned int data) { - iowrite32(data, priv->reg + offs); + __raw_writel(data, priv->reg + offs); } static inline unsigned int mips_ejtag_fdc_read(struct mips_ejtag_fdc_tty *priv, unsigned int offs) { - return ioread32(priv->reg + offs); + return __raw_readl(priv->reg + offs); } /* Encoding of byte stream in FDC words */ @@ -347,9 +347,9 @@ static void mips_ejtag_fdc_console_write(struct console *c, const char *s, s += inc[word.bytes - 1]; /* Busy wait until there's space in fifo */ - while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF) + while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF) ; - iowrite32(word.word, regs + REG_FDTX(c->index)); + __raw_writel(word.word, regs + REG_FDTX(c->index)); } out: local_irq_restore(flags); @@ -1227,7 +1227,7 @@ static int kgdbfdc_read_char(void) /* Read next word from KGDB channel */ do { - stat = ioread32(regs + REG_FDSTAT); + stat = __raw_readl(regs + REG_FDSTAT); /* No data waiting? 
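 * (the REG_FDSTAT_RXE status bit indicates an empty receive FIFO)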
*/ if (stat & REG_FDSTAT_RXE) @@ -1236,7 +1236,7 @@ static int kgdbfdc_read_char(void) /* Read next word */ channel = (stat & REG_FDSTAT_RXCHAN) >> REG_FDSTAT_RXCHAN_SHIFT; - data = ioread32(regs + REG_FDRX); + data = __raw_readl(regs + REG_FDRX); } while (channel != CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN); /* Decode into rbuf */ @@ -1266,9 +1266,10 @@ static void kgdbfdc_push_one(void) return; /* Busy wait until there's space in fifo */ - while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF) + while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF) ; - iowrite32(word.word, regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN)); + __raw_writel(word.word, + regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN)); } /* flush the whole write buffer to the TX FIFO */ diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index cc57a3a6b02b..396344cb011f 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -162,6 +162,17 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x, return put_user(x, ptr); } +static inline int tty_copy_to_user(struct tty_struct *tty, + void __user *to, + const void *from, + unsigned long n) +{ + struct n_tty_data *ldata = tty->disc_data; + + tty_audit_add_data(tty, to, n, ldata->icanon); + return copy_to_user(to, from, n); +} + /** * n_tty_kick_worker - start input worker (if required) * @tty: terminal @@ -2070,8 +2081,8 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, size = N_TTY_BUF_SIZE - tail; n = eol - tail; - if (n > 4096) - n += 4096; + if (n > N_TTY_BUF_SIZE) + n += N_TTY_BUF_SIZE; n += found; c = n; @@ -2084,12 +2095,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, __func__, eol, found, n, c, size, more); if (n > size) { - ret = copy_to_user(*b, read_buf_addr(ldata, tail), size); + ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size); if (ret) return -EFAULT; - ret = copy_to_user(*b + size, ldata->read_buf, n - size); + ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size); } else - ret = copy_to_user(*b, read_buf_addr(ldata, tail), n); + ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n); if (ret) return -EFAULT; diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 9289999cb7c6..dce1a23706e8 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -562,12 +562,36 @@ static irqreturn_t omap_wake_irq(int irq, void *dev_id) return IRQ_NONE; } +#ifdef CONFIG_SERIAL_8250_DMA +static int omap_8250_dma_handle_irq(struct uart_port *port); +#endif + +static irqreturn_t omap8250_irq(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + struct uart_8250_port *up = up_to_u8250p(port); + unsigned int iir; + int ret; + +#ifdef CONFIG_SERIAL_8250_DMA + if (up->dma) { + ret = omap_8250_dma_handle_irq(port); + return IRQ_RETVAL(ret); + } +#endif + + serial8250_rpm_get(up); + iir = serial_port_in(port, UART_IIR); + ret = serial8250_handle_irq(port, iir); + serial8250_rpm_put(up); + + return IRQ_RETVAL(ret); +} + static int omap_8250_startup(struct uart_port *port) { - struct uart_8250_port *up = - container_of(port, struct uart_8250_port, port); + struct uart_8250_port *up = up_to_u8250p(port); struct omap8250_priv *priv = port->private_data; - int ret; if (priv->wakeirq) { @@ -580,10 +604,31 @@ static int omap_8250_startup(struct uart_port *port) pm_runtime_get_sync(port->dev); - ret = serial8250_do_startup(port); - if (ret) + up->mcr = 0; + serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); + + 
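	/* 8N1: eight data bits, no parity, one stop bit */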
serial_out(up, UART_LCR, UART_LCR_WLEN8); + + up->lsr_saved_flags = 0; + up->msr_saved_flags = 0; + + if (up->dma) { + ret = serial8250_request_dma(up); + if (ret) { + dev_warn_ratelimited(port->dev, + "failed to request DMA\n"); + up->dma = NULL; + } + } + + ret = request_irq(port->irq, omap8250_irq, IRQF_SHARED, + dev_name(port->dev), port); + if (ret < 0) goto err; + up->ier = UART_IER_RLSI | UART_IER_RDI; + serial_out(up, UART_IER, up->ier); + #ifdef CONFIG_PM up->capabilities |= UART_CAP_RPM; #endif @@ -610,8 +655,7 @@ static int omap_8250_startup(struct uart_port *port) static void omap_8250_shutdown(struct uart_port *port) { - struct uart_8250_port *up = - container_of(port, struct uart_8250_port, port); + struct uart_8250_port *up = up_to_u8250p(port); struct omap8250_priv *priv = port->private_data; flush_work(&priv->qos_work); @@ -621,11 +665,24 @@ static void omap_8250_shutdown(struct uart_port *port) pm_runtime_get_sync(port->dev); serial_out(up, UART_OMAP_WER, 0); - serial8250_do_shutdown(port); + + up->ier = 0; + serial_out(up, UART_IER, 0); + + if (up->dma) + serial8250_release_dma(up); + + /* + * Disable break condition and FIFOs + */ + if (up->lcr & UART_LCR_SBC) + serial_out(up, UART_LCR, up->lcr & ~UART_LCR_SBC); + serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); pm_runtime_mark_last_busy(port->dev); pm_runtime_put_autosuspend(port->dev); + free_irq(port->irq, port); if (priv->wakeirq) free_irq(priv->wakeirq, port); } @@ -974,6 +1031,13 @@ static inline int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir) } #endif +static int omap8250_no_handle_irq(struct uart_port *port) +{ + /* IRQ has not been requested but handling irq? */ + WARN_ONCE(1, "Unexpected irq handling before port startup\n"); + return 0; +} + static int omap8250_probe(struct platform_device *pdev) { struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1075,6 +1139,7 @@ static int omap8250_probe(struct platform_device *pdev) pm_runtime_get_sync(&pdev->dev); omap_serial_fill_features_erratas(&up, priv); + up.port.handle_irq = omap8250_no_handle_irq; #ifdef CONFIG_SERIAL_8250_DMA if (pdev->dev.of_node) { /* @@ -1088,7 +1153,6 @@ static int omap8250_probe(struct platform_device *pdev) ret = of_property_count_strings(pdev->dev.of_node, "dma-names"); if (ret == 2) { up.dma = &priv->omap8250_dma; - up.port.handle_irq = omap_8250_dma_handle_irq; priv->omap8250_dma.fn = the_no_dma_filter_fn; priv->omap8250_dma.tx_dma = omap_8250_tx_dma; priv->omap8250_dma.rx_dma = omap_8250_rx_dma; diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 6f5a0720a8c8..763eb20fe321 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1249,20 +1249,19 @@ __acquires(&uap->port.lock) /* * Transmit a character - * There must be at least one free entry in the TX FIFO to accept the char. * - * Returns true if the FIFO might have space in it afterwards; - * returns false if the FIFO definitely became full. + * Returns true if the character was successfully queued to the FIFO. + * Returns false otherwise. 
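+ * (a false return means the TX FIFO was full, so the character was not
+ * written)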
*/ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c) { + if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) + return false; /* unable to transmit character */ + writew(c, uap->port.membase + UART01x_DR); uap->port.icount.tx++; - if (likely(uap->tx_irq_seen > 1)) - return true; - - return !(readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF); + return true; } static bool pl011_tx_chars(struct uart_amba_port *uap) @@ -1296,7 +1295,8 @@ static bool pl011_tx_chars(struct uart_amba_port *uap) return false; if (uap->port.x_char) { - pl011_tx_char(uap, uap->port.x_char); + if (!pl011_tx_char(uap, uap->port.x_char)) + goto done; uap->port.x_char = 0; --count; } diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index c8cfa0637128..88250395b0ce 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -911,6 +911,14 @@ static void dma_rx_callback(void *data) status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state); count = RX_BUF_SIZE - state.residue; + + if (readl(sport->port.membase + USR2) & USR2_IDLE) { + /* In condition [3] the SDMA counted up too early */ + count--; + + writel(USR2_IDLE, sport->port.membase + USR2); + } + dev_dbg(sport->port.dev, "We get %d bytes.\n", count); if (count) { diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index fdab715a0631..c0eafa6fd403 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -339,7 +339,7 @@ #define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c #define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10 -#define DWC3_DGCMD_STATUS(n) (((n) >> 15) & 1) +#define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F) #define DWC3_DGCMD_CMDACT (1 << 10) #define DWC3_DGCMD_CMDIOC (1 << 8) @@ -355,7 +355,7 @@ #define DWC3_DEPCMD_PARAM_SHIFT 16 #define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT) #define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f) -#define DWC3_DEPCMD_STATUS(x) (((x) >> 15) & 1) +#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F) #define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11) #define DWC3_DEPCMD_CMDACT (1 << 10) #define DWC3_DEPCMD_CMDIOC (1 << 8) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 6bdb57069044..3507f880eb74 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -315,7 +315,6 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf, return ret; } - set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags); return len; } break; @@ -847,7 +846,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) ret = ep->status; if (io_data->read && ret > 0) { ret = copy_to_iter(data, ret, &io_data->data); - if (unlikely(iov_iter_count(&io_data->data))) + if (!ret) ret = -EFAULT; } } @@ -1463,8 +1462,7 @@ static void ffs_data_clear(struct ffs_data *ffs) { ENTER(); - if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags)) - ffs_closed(ffs); + ffs_closed(ffs); BUG_ON(ffs->gadget); @@ -3422,9 +3420,13 @@ static int ffs_ready(struct ffs_data *ffs) ffs_obj->desc_ready = true; ffs_obj->ffs_data = ffs; - if (ffs_obj->ffs_ready_callback) + if (ffs_obj->ffs_ready_callback) { ret = ffs_obj->ffs_ready_callback(ffs); + if (ret) + goto done; + } + set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags); done: ffs_dev_unlock(); return ret; @@ -3443,7 +3445,8 @@ static void ffs_closed(struct ffs_data *ffs) ffs_obj->desc_ready = false; - if (ffs_obj->ffs_closed_callback) + if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) && + 
ffs_obj->ffs_closed_callback) ffs_obj->ffs_closed_callback(ffs); if (!ffs_obj->opts || ffs_obj->opts->no_configfs diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 259b656c0b3e..6316aa5b1c49 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -973,7 +973,13 @@ static ssize_t f_midi_opts_id_show(struct f_midi_opts *opts, char *page) int result; mutex_lock(&opts->lock); - result = strlcpy(page, opts->id, PAGE_SIZE); + if (opts->id) { + result = strlcpy(page, opts->id, PAGE_SIZE); + } else { + page[0] = 0; + result = 0; + } + mutex_unlock(&opts->lock); return result; diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c index 9719abfb6145..7856b3394494 100644 --- a/drivers/usb/gadget/function/f_uac1.c +++ b/drivers/usb/gadget/function/f_uac1.c @@ -588,7 +588,10 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt) if (intf == 1) { if (alt == 1) { - config_ep_by_speed(cdev->gadget, f, out_ep); + err = config_ep_by_speed(cdev->gadget, f, out_ep); + if (err) + return err; + usb_ep_enable(out_ep); out_ep->driver_data = audio; audio->copy_buf = f_audio_buffer_alloc(audio_buf_size); diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c index 7b9ef7e257d2..e821931c965c 100644 --- a/drivers/usb/gadget/legacy/g_ffs.c +++ b/drivers/usb/gadget/legacy/g_ffs.c @@ -304,8 +304,10 @@ static int functionfs_ready_callback(struct ffs_data *ffs) gfs_registered = true; ret = usb_composite_probe(&gfs_driver); - if (unlikely(ret < 0)) + if (unlikely(ret < 0)) { + ++missing_funcs; gfs_registered = false; + } return ret; } diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c index b808951491cc..99fd9a5667df 100644 --- a/drivers/usb/gadget/udc/s3c2410_udc.c +++ b/drivers/usb/gadget/udc/s3c2410_udc.c @@ -1487,7 +1487,7 @@ static int s3c2410_udc_pullup(struct usb_gadget *gadget, int is_on) dprintk(DEBUG_NORMAL, "%s()\n", __func__); - s3c2410_udc_set_pullup(udc, is_on ? 
0 : 1); + s3c2410_udc_set_pullup(udc, is_on); return 0; } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index ec8ac1674854..36bf089b708f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3682,18 +3682,21 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); unsigned long flags; - int ret; + int ret, slot_id; struct xhci_command *command; command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); if (!command) return 0; + /* xhci->slot_id and xhci->addr_dev are not thread-safe */ + mutex_lock(&xhci->mutex); spin_lock_irqsave(&xhci->lock, flags); command->completion = &xhci->addr_dev; ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); + mutex_unlock(&xhci->mutex); xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); kfree(command); return 0; @@ -3702,8 +3705,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) spin_unlock_irqrestore(&xhci->lock, flags); wait_for_completion(command->completion); + slot_id = xhci->slot_id; + mutex_unlock(&xhci->mutex); - if (!xhci->slot_id || command->status != COMP_SUCCESS) { + if (!slot_id || command->status != COMP_SUCCESS) { xhci_err(xhci, "Error while assigning device slot ID\n"); xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", HCS_MAX_SLOTS( @@ -3728,11 +3733,11 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) * xhci_discover_or_reset_device(), which may be called as part of * mass storage driver error handling. */ - if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { + if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); goto disable_slot; } - udev->slot_id = xhci->slot_id; + udev->slot_id = slot_id; #ifndef CONFIG_USB_DEFAULT_PERSIST /* @@ -3778,12 +3783,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, struct xhci_slot_ctx *slot_ctx; struct xhci_input_control_ctx *ctrl_ctx; u64 temp_64; - struct xhci_command *command; + struct xhci_command *command = NULL; + + mutex_lock(&xhci->mutex); if (!udev->slot_id) { xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Bad Slot ID %d", udev->slot_id); - return -EINVAL; + ret = -EINVAL; + goto out; } virt_dev = xhci->devs[udev->slot_id]; @@ -3796,7 +3804,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, */ xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", udev->slot_id); - return -EINVAL; + ret = -EINVAL; + goto out; } if (setup == SETUP_CONTEXT_ONLY) { @@ -3804,13 +3813,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == SLOT_STATE_DEFAULT) { xhci_dbg(xhci, "Slot already in default state\n"); - return 0; + goto out; } } command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); - if (!command) - return -ENOMEM; + if (!command) { + ret = -ENOMEM; + goto out; + } command->in_ctx = virt_dev->in_ctx; command->completion = &xhci->addr_dev; @@ -3820,8 +3831,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, if (!ctrl_ctx) { xhci_warn(xhci, "%s: Could not get input context, bad type.\n", __func__); - kfree(command); - return -EINVAL; + ret = -EINVAL; + goto out; } /* * If this is the first Set Address since device plug-in or @@ -3848,8 +3859,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device 
*udev, spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg_trace(xhci, trace_xhci_dbg_address, "FIXME: allocate a command ring segment"); - kfree(command); - return ret; + goto out; } xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); @@ -3896,10 +3906,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, ret = -EINVAL; break; } - if (ret) { - kfree(command); - return ret; - } + if (ret) + goto out; temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Op regs DCBAA ptr = %#016llx", temp_64); @@ -3932,8 +3940,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Internal device address = %d", le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); +out: + mutex_unlock(&xhci->mutex); kfree(command); - return 0; + return ret; } int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) @@ -4855,6 +4865,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) return 0; } + mutex_init(&xhci->mutex); xhci->cap_regs = hcd->regs; xhci->op_regs = hcd->regs + HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); @@ -5011,4 +5022,12 @@ static int __init xhci_hcd_init(void) BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); return 0; } + +/* + * If an init function is provided, an exit function must also be provided + * to allow module unload. + */ +static void __exit xhci_hcd_fini(void) { } + module_init(xhci_hcd_init); +module_exit(xhci_hcd_fini); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index ea75e8ccd3c1..6977f8491fa7 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1497,6 +1497,8 @@ struct xhci_hcd { struct list_head lpm_failed_devs; /* slot enabling and address device helpers */ + /* these are not thread safe so use mutex */ + struct mutex mutex; struct completion addr_dev; int slot_id; /* For USB 3.0 LPM enable/disable. */ diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 3789b08ef67b..6dca3d794ced 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2021,13 +2021,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) if (musb->ops->quirks) musb->io.quirks = musb->ops->quirks; - /* At least tusb6010 has it's own offsets.. 
*/ - if (musb->ops->ep_offset) - musb->io.ep_offset = musb->ops->ep_offset; - if (musb->ops->ep_select) - musb->io.ep_select = musb->ops->ep_select; - - /* ..and some devices use indexed offset or flat offset */ + /* Most devices use indexed offset or flat offset */ if (musb->io.quirks & MUSB_INDEXED_EP) { musb->io.ep_offset = musb_indexed_ep_offset; musb->io.ep_select = musb_indexed_ep_select; @@ -2036,6 +2030,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb->io.ep_select = musb_flat_ep_select; } + /* At least tusb6010 has its own offsets */ + if (musb->ops->ep_offset) + musb->io.ep_offset = musb->ops->ep_offset; + if (musb->ops->ep_select) + musb->io.ep_select = musb->ops->ep_select; + if (musb->ops->fifo_mode) fifo_mode = musb->ops->fifo_mode; else diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c index 7225d526df04..03ab0c699f74 100644 --- a/drivers/usb/phy/phy-ab8500-usb.c +++ b/drivers/usb/phy/phy-ab8500-usb.c @@ -1179,7 +1179,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev, } err = devm_request_threaded_irq(&pdev->dev, irq, NULL, ab8500_usb_link_status_irq, - IRQF_NO_SUSPEND | IRQF_SHARED, + IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT, "usb-link-status", ab); if (err < 0) { dev_err(ab->dev, "request_irq failed for link status irq\n"); @@ -1195,7 +1195,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev, } err = devm_request_threaded_irq(&pdev->dev, irq, NULL, ab8500_usb_disconnect_irq, - IRQF_NO_SUSPEND | IRQF_SHARED, + IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT, "usb-id-fall", ab); if (err < 0) { dev_err(ab->dev, "request_irq failed for ID fall irq\n"); @@ -1211,7 +1211,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev, } err = devm_request_threaded_irq(&pdev->dev, irq, NULL, ab8500_usb_disconnect_irq, - IRQF_NO_SUSPEND | IRQF_SHARED, + IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT, "usb-vbus-fall", ab); if (err < 0) { dev_err(ab->dev, "request_irq failed for Vbus fall irq\n"); diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c index 845f658276b1..2b28443d07b9 100644 --- a/drivers/usb/phy/phy-tahvo.c +++ b/drivers/usb/phy/phy-tahvo.c @@ -401,7 +401,8 @@ static int tahvo_usb_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, tu); tu->irq = platform_get_irq(pdev, 0); - ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, 0, + ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, + IRQF_ONESHOT, "tahvo-vbus", tu); if (ret) { dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n", diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 8597cf9cfceb..c0f5c652d272 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -611,6 +611,8 @@ struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = { static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) { struct usbhs_pipe *pipe = pkt->pipe; + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); if (usbhs_pipe_is_busy(pipe)) return 0; @@ -624,6 +626,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) usbhs_pipe_data_sequence(pipe, pkt->sequence); pkt->sequence = -1; /* -1 sequence will be ignored */ + if (usbhs_pipe_is_dcp(pipe)) + usbhsf_fifo_clear(pipe, fifo); + usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length); usbhs_pipe_enable(pipe); usbhs_pipe_running(pipe, 1); @@ -673,7 +678,14 @@ static int 
usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
 		*is_done = 1;
 		usbhsf_rx_irq_ctrl(pipe, 0);
 		usbhs_pipe_running(pipe, 0);
-		usbhs_pipe_disable(pipe);	/* disable pipe first */
+		/*
+		 * In function mode the controller may enter the Control Write
+		 * status stage at this point, so this driver must not disable
+		 * the pipe here. If it did, the controller would be unable to
+		 * complete the status stage.
+		 */
+		if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe))
+			usbhs_pipe_disable(pipe);	/* disable pipe first */
 	}

 	/*
@@ -1227,15 +1239,21 @@ static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
 {
 	char name[16];

-	snprintf(name, sizeof(name), "tx%d", channel);
-	fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
-	if (IS_ERR(fifo->tx_chan))
-		fifo->tx_chan = NULL;
-
-	snprintf(name, sizeof(name), "rx%d", channel);
-	fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
-	if (IS_ERR(fifo->rx_chan))
-		fifo->rx_chan = NULL;
+	/*
+	 * To avoid complex handling for DnFIFOs, the driver uses each
+	 * DnFIFO in a single direction, TX or RX (not bi-directional):
+	 * odd channels are used for TX, even channels for RX.
+	 */
+	snprintf(name, sizeof(name), "ch%d", channel);
+	if (channel & 1) {
+		fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
+		if (IS_ERR(fifo->tx_chan))
+			fifo->tx_chan = NULL;
+	} else {
+		fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
+		if (IS_ERR(fifo->rx_chan))
+			fifo->rx_chan = NULL;
+	}
 }

 static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 9031750e7404..ffd739e31bfc 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
 	{ USB_DEVICE(0x10C4, 0x8977) },	/* CEL MeshWorks DevKit Device */
 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8eb68a31cab6..4c8b3b82103d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -699,6 +699,7 @@ static const struct usb_device_id id_table_combined[] = {
 	{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
 	{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
 	{ USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
+	{ USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
 	{ USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
 	{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4e4f46f3c89c..792e054126de 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -155,6 +155,7 @@
 #define XSENS_AWINDA_STATION_PID 0x0101
 #define XSENS_AWINDA_DONGLE_PID 0x0102
 #define XSENS_MTW_PID		0x0200	/* Xsens MTw */
+#define XSENS_MTDEVBOARD_PID	0x0300	/* Motion Tracker Development Board */
 #define XSENS_CONVERTER_PID	0xD00D	/* Xsens USB-serial converter */

 /* Xsens devices using FTDI VID */
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 5e19bb53b3a9..83bbb26f3183 100644
---
a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -1409,8 +1410,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, * dependency now. */ se_tpg = &tpg->se_tpg; - ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, - &se_tpg->tpg_group.cg_item); + ret = target_depend_item(&se_tpg->tpg_group.cg_item); if (ret) { pr_warn("configfs_depend_item() failed: %d\n", ret); kfree(vs_tpg); @@ -1513,8 +1513,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. */ se_tpg = &tpg->se_tpg; - configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, - &se_tpg->tpg_group.cg_item); + target_undepend_item(&se_tpg->tpg_group.cg_item); } if (match) { for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 3a145a643e0d..6897f1c1bc73 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -274,6 +274,10 @@ static int pwm_backlight_probe(struct platform_device *pdev) pb->pwm = devm_pwm_get(&pdev->dev, NULL); if (IS_ERR(pb->pwm)) { + ret = PTR_ERR(pb->pwm); + if (ret == -EPROBE_DEFER) + goto err_alloc; + dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n"); pb->legacy = true; pb->pwm = pwm_request(data->pwm_id, "pwm-backlight"); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index e894eb278d83..eba1b7ac7294 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -423,6 +423,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) if (cpu == -1) irq_set_affinity_hint(irq, NULL); else { + cpumask_clear(mask); cpumask_set_cpu(cpu, mask); irq_set_affinity_hint(irq, mask); } diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 241ef68d2893..cd46e4158830 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -918,7 +918,7 @@ static int load_elf_binary(struct linux_binprm *bprm) total_size = total_mapping_size(elf_phdata, loc->elf_ex.e_phnum); if (!total_size) { - error = -EINVAL; + retval = -EINVAL; goto out_free_dentry; } } diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 9de772ee0031..614aaa1969bd 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -880,6 +880,8 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info, * indirect refs to their parent bytenr. * When roots are found, they're added to the roots list * + * NOTE: This can return values > 0 + * * FIXME some caching might speed things up */ static int find_parent_nodes(struct btrfs_trans_handle *trans, @@ -1198,6 +1200,19 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans, return ret; } +/** + * btrfs_check_shared - tell us whether an extent is shared + * + * @trans: optional trans handle + * + * btrfs_check_shared uses the backref walking code but will short + * circuit as soon as it finds a root or inode that doesn't match the + * one passed in. This provides a significant performance benefit for + * callers (such as fiemap) which want to know whether the extent is + * shared but do not need a ref count. + * + * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error. 
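+ *
+ * A minimal usage sketch (the caller and its local variables below are
+ * illustrative assumptions, not part of this patch):
+ *
+ *	ret = btrfs_check_shared(NULL, fs_info, root_objectid, inum, bytenr);
+ *	if (ret < 0)
+ *		return ret;
+ *	if (ret)
+ *		flags |= FIEMAP_EXTENT_SHARED;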
+ */ int btrfs_check_shared(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 root_objectid, u64 inum, u64 bytenr) @@ -1226,11 +1241,13 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans, ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp, roots, NULL, root_objectid, inum); if (ret == BACKREF_FOUND_SHARED) { + /* this is the only condition under which we return 1 */ ret = 1; break; } if (ret < 0 && ret != -ENOENT) break; + ret = 0; node = ulist_next(tmp, &uiter); if (!node) break; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7effed6f2fa6..0ec3acd14cbf 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8829,6 +8829,24 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, goto again; } + /* + * if we are changing raid levels, try to allocate a corresponding + * block group with the new raid level. + */ + alloc_flags = update_block_group_flags(root, cache->flags); + if (alloc_flags != cache->flags) { + ret = do_chunk_alloc(trans, root, alloc_flags, + CHUNK_ALLOC_FORCE); + /* + * ENOSPC is allowed here, we may have enough space + * already allocated at the new raid level to + * carry on + */ + if (ret == -ENOSPC) + ret = 0; + if (ret < 0) + goto out; + } ret = set_block_group_ro(cache, 0); if (!ret) @@ -8842,7 +8860,9 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, out: if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { alloc_flags = update_block_group_flags(root, cache->flags); + lock_chunks(root->fs_info->chunk_root); check_system_chunk(trans, root, alloc_flags); + unlock_chunks(root->fs_info->chunk_root); } mutex_unlock(&root->fs_info->ro_block_group_mutex); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 96aebf3bcd5b..174f5e1e00ab 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4625,6 +4625,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, { u64 chunk_offset; + ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex)); chunk_offset = find_next_chunk(extent_root->fs_info); return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type); } diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 430e0348c99e..7dc886c9a78f 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -24,6 +24,7 @@ #include "cifsfs.h" #include "dns_resolve.h" #include "cifs_debug.h" +#include "cifs_unicode.h" static LIST_HEAD(cifs_dfs_automount_list); @@ -312,7 +313,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) xid = get_xid(); rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls, &num_referrals, &referrals, - cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + cifs_remap(cifs_sb)); free_xid(xid); cifs_put_tlink(tlink); diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 0303c6793d90..5a53ac6b1e02 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -27,41 +27,6 @@ #include "cifsglob.h" #include "cifs_debug.h" -/* - * cifs_utf16_bytes - how long will a string be after conversion? - * @utf16 - pointer to input string - * @maxbytes - don't go past this many bytes of input string - * @codepage - destination codepage - * - * Walk a utf16le string and return the number of bytes that the string will - * be after being converted to the given charset, not including any null - * termination required. Don't walk past maxbytes in the source buffer. 
- */ -int -cifs_utf16_bytes(const __le16 *from, int maxbytes, - const struct nls_table *codepage) -{ - int i; - int charlen, outlen = 0; - int maxwords = maxbytes / 2; - char tmp[NLS_MAX_CHARSET_SIZE]; - __u16 ftmp; - - for (i = 0; i < maxwords; i++) { - ftmp = get_unaligned_le16(&from[i]); - if (ftmp == 0) - break; - - charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE); - if (charlen > 0) - outlen += charlen; - else - outlen++; - } - - return outlen; -} - int cifs_remap(struct cifs_sb_info *cifs_sb) { int map_type; @@ -155,10 +120,13 @@ convert_sfm_char(const __u16 src_char, char *target) * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE). */ static int -cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp, +cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp, int maptype) { int len = 1; + __u16 src_char; + + src_char = *from; if ((maptype == SFM_MAP_UNI_RSVD) && convert_sfm_char(src_char, target)) return len; @@ -168,10 +136,23 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp, /* if character not one of seven in special remap set */ len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE); - if (len <= 0) { - *target = '?'; - len = 1; - } + if (len <= 0) + goto surrogate_pair; + + return len; + +surrogate_pair: + /* convert SURROGATE_PAIR and IVS */ + if (strcmp(cp->charset, "utf8")) + goto unknown; + len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6); + if (len <= 0) + goto unknown; + return len; + +unknown: + *target = '?'; + len = 1; return len; } @@ -206,7 +187,7 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen, int nullsize = nls_nullsize(codepage); int fromwords = fromlen / 2; char tmp[NLS_MAX_CHARSET_SIZE]; - __u16 ftmp; + __u16 ftmp[3]; /* ftmp[3] = 3array x 2bytes = 6bytes UTF-16 */ /* * because the chars can be of varying widths, we need to take care @@ -217,9 +198,17 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen, safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize); for (i = 0; i < fromwords; i++) { - ftmp = get_unaligned_le16(&from[i]); - if (ftmp == 0) + ftmp[0] = get_unaligned_le16(&from[i]); + if (ftmp[0] == 0) break; + if (i + 1 < fromwords) + ftmp[1] = get_unaligned_le16(&from[i + 1]); + else + ftmp[1] = 0; + if (i + 2 < fromwords) + ftmp[2] = get_unaligned_le16(&from[i + 2]); + else + ftmp[2] = 0; /* * check to see if converting this character might make the @@ -234,6 +223,17 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen, /* put converted char into 'to' buffer */ charlen = cifs_mapchar(&to[outlen], ftmp, codepage, map_type); outlen += charlen; + + /* charlen (=bytes of UTF-8 for 1 character) + * 4bytes UTF-8(surrogate pair) is charlen=4 + * (4bytes UTF-16 code) + * 7-8bytes UTF-8(IVS) is charlen=3+4 or 4+4 + * (2 UTF-8 pairs divided to 2 UTF-16 pairs) */ + if (charlen == 4) + i++; + else if (charlen >= 5) + /* 5-6bytes UTF-8 */ + i += 2; } /* properly null-terminate string */ @@ -295,6 +295,46 @@ cifs_strtoUTF16(__le16 *to, const char *from, int len, return i; } +/* + * cifs_utf16_bytes - how long will a string be after conversion? + * @utf16 - pointer to input string + * @maxbytes - don't go past this many bytes of input string + * @codepage - destination codepage + * + * Walk a utf16le string and return the number of bytes that the string will + * be after being converted to the given charset, not including any null + * termination required. 
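For example (an illustrative value, not taken from this patch): the UTF-16LE surrogate pair d83d de00 (U+1F600) converts to the 4-byte UTF-8 sequence f0 9f 98 80, so it adds 4 to the returned length.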
Don't walk past maxbytes in the source buffer. + */ +int +cifs_utf16_bytes(const __le16 *from, int maxbytes, + const struct nls_table *codepage) +{ + int i; + int charlen, outlen = 0; + int maxwords = maxbytes / 2; + char tmp[NLS_MAX_CHARSET_SIZE]; + __u16 ftmp[3]; + + for (i = 0; i < maxwords; i++) { + ftmp[0] = get_unaligned_le16(&from[i]); + if (ftmp[0] == 0) + break; + if (i + 1 < maxwords) + ftmp[1] = get_unaligned_le16(&from[i + 1]); + else + ftmp[1] = 0; + if (i + 2 < maxwords) + ftmp[2] = get_unaligned_le16(&from[i + 2]); + else + ftmp[2] = 0; + + charlen = cifs_mapchar(tmp, ftmp, codepage, NO_MAP_UNI_RSVD); + outlen += charlen; + } + + return outlen; +} + /* * cifs_strndup_from_utf16 - copy a string from wire format to the local * codepage @@ -409,10 +449,15 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, char src_char; __le16 dst_char; wchar_t tmp; + wchar_t *wchar_to; /* UTF-16 */ + int ret; + unicode_t u; if (map_chars == NO_MAP_UNI_RSVD) return cifs_strtoUTF16(target, source, PATH_MAX, cp); + wchar_to = kzalloc(6, GFP_KERNEL); + for (i = 0; i < srclen; j++) { src_char = source[i]; charlen = 1; @@ -441,11 +486,55 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, * if no match, use question mark, which at least in * some cases serves as wild card */ - if (charlen < 1) { - dst_char = cpu_to_le16(0x003f); - charlen = 1; + if (charlen > 0) + goto ctoUTF16; + + /* convert SURROGATE_PAIR */ + if (strcmp(cp->charset, "utf8") || !wchar_to) + goto unknown; + if (*(source + i) & 0x80) { + charlen = utf8_to_utf32(source + i, 6, &u); + if (charlen < 0) + goto unknown; + } else + goto unknown; + ret = utf8s_to_utf16s(source + i, charlen, + UTF16_LITTLE_ENDIAN, + wchar_to, 6); + if (ret < 0) + goto unknown; + + i += charlen; + dst_char = cpu_to_le16(*wchar_to); + if (charlen <= 3) + /* 1-3bytes UTF-8 to 2bytes UTF-16 */ + put_unaligned(dst_char, &target[j]); + else if (charlen == 4) { + /* 4bytes UTF-8(surrogate pair) to 4bytes UTF-16 + * 7-8bytes UTF-8(IVS) divided to 2 UTF-16 + * (charlen=3+4 or 4+4) */ + put_unaligned(dst_char, &target[j]); + dst_char = cpu_to_le16(*(wchar_to + 1)); + j++; + put_unaligned(dst_char, &target[j]); + } else if (charlen >= 5) { + /* 5-6bytes UTF-8 to 6bytes UTF-16 */ + put_unaligned(dst_char, &target[j]); + dst_char = cpu_to_le16(*(wchar_to + 1)); + j++; + put_unaligned(dst_char, &target[j]); + dst_char = cpu_to_le16(*(wchar_to + 2)); + j++; + put_unaligned(dst_char, &target[j]); } + continue; + +unknown: + dst_char = cpu_to_le16(0x003f); + charlen = 1; } + +ctoUTF16: /* * character may take more than one byte in the source string, * but will take exactly two bytes in the target string @@ -456,6 +545,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, ctoUTF16_out: put_unaligned(0, &target[j]); /* Null terminate target unicode string */ + kfree(wchar_to); return j; } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f5089bde3635..0a9fb6b53126 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -469,6 +469,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root) seq_puts(s, ",nouser_xattr"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) seq_puts(s, ",mapchars"); + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR) + seq_puts(s, ",mapposix"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) seq_puts(s, ",sfu"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index c31ce98c1704..c63fd1dde25b 100644 --- 
a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -361,11 +361,11 @@ extern int CIFSUnixCreateHardLink(const unsigned int xid, extern int CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon, const char *fromName, const char *toName, - const struct nls_table *nls_codepage); + const struct nls_table *nls_codepage, int remap); extern int CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, const unsigned char *searchName, char **syminfo, - const struct nls_table *nls_codepage); + const struct nls_table *nls_codepage, int remap); extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid, char **symlinkinfo, const struct nls_table *nls_codepage); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 84650a51c7c4..f26ffbfc64d8 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -2784,7 +2784,7 @@ CIFSSMBCopy(const unsigned int xid, struct cifs_tcon *tcon, int CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon, const char *fromName, const char *toName, - const struct nls_table *nls_codepage) + const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; @@ -2804,9 +2804,9 @@ CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon, if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = - cifs_strtoUTF16((__le16 *) pSMB->FileName, fromName, - /* find define for this maxpathcomponent */ - PATH_MAX, nls_codepage); + cifsConvertToUTF16((__le16 *) pSMB->FileName, fromName, + /* find define for this maxpathcomponent */ + PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; @@ -2828,9 +2828,9 @@ CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon, data_offset = (char *) (&pSMB->hdr.Protocol) + offset; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len_target = - cifs_strtoUTF16((__le16 *) data_offset, toName, PATH_MAX - /* find define for this maxpathcomponent */ - , nls_codepage); + cifsConvertToUTF16((__le16 *) data_offset, toName, + /* find define for this maxpathcomponent */ + PATH_MAX, nls_codepage, remap); name_len_target++; /* trailing null */ name_len_target *= 2; } else { /* BB improve the check for buffer overruns BB */ @@ -3034,7 +3034,7 @@ CIFSCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon, int CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, const unsigned char *searchName, char **symlinkinfo, - const struct nls_table *nls_codepage) + const struct nls_table *nls_codepage, int remap) { /* SMB_QUERY_FILE_UNIX_LINK */ TRANSACTION2_QPI_REQ *pSMB = NULL; @@ -3055,8 +3055,9 @@ CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = - cifs_strtoUTF16((__le16 *) pSMB->FileName, searchName, - PATH_MAX, nls_codepage); + cifsConvertToUTF16((__le16 *) pSMB->FileName, + searchName, PATH_MAX, nls_codepage, + remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ @@ -4917,7 +4918,7 @@ CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses, strncpy(pSMB->RequestFileName, search_name, name_len); } - if (ses->server && ses->server->sign) + if (ses->server->sign) pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; pSMB->hdr.Uid = ses->Suid; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index f3bfe08e177b..8383d5ea4202 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -386,6 +386,7 @@ cifs_reconnect(struct TCP_Server_Info 
*server) rc = generic_ip_connect(server); if (rc) { cifs_dbg(FYI, "reconnect error %d\n", rc); + mutex_unlock(&server->srv_mutex); msleep(3000); } else { atomic_inc(&tcpSesReconnectCount); @@ -393,8 +394,8 @@ cifs_reconnect(struct TCP_Server_Info *server) if (server->tcpStatus != CifsExiting) server->tcpStatus = CifsNeedNegotiate; spin_unlock(&GlobalMid_Lock); + mutex_unlock(&server->srv_mutex); } - mutex_unlock(&server->srv_mutex); } while (server->tcpStatus == CifsNeedReconnect); return rc; diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 338d56936f6a..c3eb998a99bd 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -620,8 +620,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode, } rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); + cifs_remap(cifs_sb)); if (rc) goto mknod_out; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index cafbf10521d5..3f50cee79df9 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -140,8 +140,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode, posix_flags = cifs_posix_convert_flags(f_flags); rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data, poplock, full_path, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); + cifs_remap(cifs_sb)); cifs_put_tlink(tlink); if (rc) @@ -1553,8 +1552,8 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, rc = server->ops->mand_unlock_range(cfile, flock, xid); out: - if (flock->fl_flags & FL_POSIX) - posix_lock_file_wait(file, flock); + if (flock->fl_flags & FL_POSIX && !rc) + rc = posix_lock_file_wait(file, flock); return rc; } diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 55b58112d122..f621b44cb800 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -373,8 +373,7 @@ int cifs_get_inode_info_unix(struct inode **pinode, /* could have done a find first instead but this returns more info */ rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data, - cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); + cifs_sb->local_nls, cifs_remap(cifs_sb)); cifs_put_tlink(tlink); if (!rc) { @@ -402,9 +401,25 @@ int cifs_get_inode_info_unix(struct inode **pinode, rc = -ENOMEM; } else { /* we already have inode, update it */ + + /* if uniqueid is different, return error */ + if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM && + CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) { + rc = -ESTALE; + goto cgiiu_exit; + } + + /* if filetype is different, return error */ + if (unlikely(((*pinode)->i_mode & S_IFMT) != + (fattr.cf_mode & S_IFMT))) { + rc = -ESTALE; + goto cgiiu_exit; + } + cifs_fattr_to_inode(*pinode, &fattr); } +cgiiu_exit: return rc; } @@ -839,6 +854,15 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, if (!*inode) rc = -ENOMEM; } else { + /* we already have inode, update it */ + + /* if filetype is different, return error */ + if (unlikely(((*inode)->i_mode & S_IFMT) != + (fattr.cf_mode & S_IFMT))) { + rc = -ESTALE; + goto cgii_exit; + } + cifs_fattr_to_inode(*inode, &fattr); } @@ -2215,8 +2239,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) pTcon = tlink_tcon(tlink); rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); + cifs_remap(cifs_sb)); cifs_put_tlink(tlink); } diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 252e672d5604..e6c707cc62b3 100644 --- a/fs/cifs/link.c +++ 
b/fs/cifs/link.c @@ -717,7 +717,8 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname); else if (pTcon->unix_ext) rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname, - cifs_sb->local_nls); + cifs_sb->local_nls, + cifs_remap(cifs_sb)); /* else rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName, cifs_sb_target->local_nls); */ diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index b4a47237486b..b1eede3678a9 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -90,6 +90,8 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, if (dentry) { inode = d_inode(dentry); if (inode) { + if (d_mountpoint(dentry)) + goto out; /* * If we're generating inode numbers, then we don't * want to clobber the existing one with the one that diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 7bfdd6066276..fc537c29044e 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -960,7 +960,8 @@ cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, /* Check for unix extensions */ if (cap_unix(tcon->ses)) { rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path, - cifs_sb->local_nls); + cifs_sb->local_nls, + cifs_remap(cifs_sb)); if (rc == -EREMOTE) rc = cifs_unix_dfs_readlink(xid, tcon, full_path, target_path, diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 65cd7a84c8bc..54cbe19d9c08 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -110,7 +110,7 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ , /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */ /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */ - if ((tcon->ses) && + if ((tcon->ses) && (tcon->ses->server) && (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) hdr->CreditCharge = cpu_to_le16(1); /* else CreditCharge MBZ */ diff --git a/fs/dcache.c b/fs/dcache.c index 656ce522a218..37b5afdaf698 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1239,13 +1239,13 @@ static void d_walk(struct dentry *parent, void *data, /* might go back up the wrong parent if we have had a rename. 
*/ if (need_seqretry(&rename_lock, seq)) goto rename_retry; - next = child->d_child.next; - while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) { + /* go into the first sibling still alive */ + do { + next = child->d_child.next; if (next == &this_parent->d_subdirs) goto ascend; child = list_entry(next, struct dentry, d_child); - next = next->next; - } + } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)); rcu_read_unlock(); goto resume; } diff --git a/fs/fhandle.c b/fs/fhandle.c index 999ff5c3cab0..d59712dfa3e7 100644 --- a/fs/fhandle.c +++ b/fs/fhandle.c @@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh, goto out_err; } /* copy the full handle */ - if (copy_from_user(handle, ufh, - sizeof(struct file_handle) + + *handle = f_handle; + if (copy_from_user(&handle->f_handle, + &ufh->f_handle, f_handle.handle_bytes)) { retval = -EFAULT; goto out_handle; diff --git a/fs/omfs/bitmap.c b/fs/omfs/bitmap.c index 082234581d05..83f4e76511c2 100644 --- a/fs/omfs/bitmap.c +++ b/fs/omfs/bitmap.c @@ -159,7 +159,7 @@ int omfs_allocate_range(struct super_block *sb, goto out; found: - *return_block = i * bits_per_entry + bit; + *return_block = (u64) i * bits_per_entry + bit; *return_size = run; ret = set_run(sb, i, bits_per_entry, bit, run, 1); diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c index 138321b0c6c2..3d935c81789a 100644 --- a/fs/omfs/inode.c +++ b/fs/omfs/inode.c @@ -306,7 +306,8 @@ static const struct super_operations omfs_sops = { */ static int omfs_get_imap(struct super_block *sb) { - unsigned int bitmap_size, count, array_size; + unsigned int bitmap_size, array_size; + int count; struct omfs_sb_info *sbi = OMFS_SB(sb); struct buffer_head *bh; unsigned long **ptr; @@ -359,7 +360,7 @@ static int omfs_get_imap(struct super_block *sb) } enum { - Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask + Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err }; static const match_table_t tokens = { @@ -368,6 +369,7 @@ static const match_table_t tokens = { {Opt_umask, "umask=%o"}, {Opt_dmask, "dmask=%o"}, {Opt_fmask, "fmask=%o"}, + {Opt_err, NULL}, }; static int parse_options(char *options, struct omfs_sb_info *sbi) @@ -548,8 +550,10 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent) } sb->s_root = d_make_root(root); - if (!sb->s_root) { + if (!sb->s_root) { + ret = -ENOMEM; goto out_brelse_bh2; + } printk(KERN_DEBUG "omfs: Mounted volume %s\n", omfs_rb->r_name); ret = 0;
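A quick aside on the omfs/bitmap.c hunk above: with 32-bit operands the product i * bits_per_entry is evaluated and truncated in 32 bits before the widening assignment, so the (u64) cast must be applied to an operand, not to the result. A standalone sketch (plain userspace C, not from the patch) demonstrating the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t i = 70000, bits_per_entry = 70000, bit = 1;

	/* 32-bit multiply wraps before the widening assignment */
	uint64_t wrong = i * bits_per_entry + bit;

	/* casting one operand promotes the whole expression to 64 bits */
	uint64_t right = (uint64_t) i * bits_per_entry + bit;

	printf("wrong = %llu, right = %llu\n",
	       (unsigned long long) wrong, (unsigned long long) right);
	return 0;
}

Both variants compile without warning in most configurations; only the cast-first form yields the full 64-bit block number.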
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 24f640441bd9..84d693d37428 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -299,6 +299,9 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, struct cred *override_cred; char *link = NULL; + if (WARN_ON(!workdir)) + return -EROFS; + ovl_path_upper(parent, &parentpath); upperdir = parentpath.dentry; diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index d139405d2bfa..692ceda3bc21 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -222,6 +222,9 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry, struct kstat stat; int err; + if (WARN_ON(!workdir)) + return ERR_PTR(-EROFS); + err = ovl_lock_rename_workdir(workdir, upperdir); if (err) goto out; @@ -322,6 +325,9 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, struct dentry *newdentry; int err; + if (WARN_ON(!workdir)) + return -EROFS; + err = ovl_lock_rename_workdir(workdir, upperdir); if (err) goto out; @@ -506,11 +512,28 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir) struct dentry *opaquedir = NULL; int err; - if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) { - opaquedir = ovl_check_empty_and_clear(dentry); - err = PTR_ERR(opaquedir); - if (IS_ERR(opaquedir)) - goto out; + if (WARN_ON(!workdir)) + return -EROFS; + + if (is_dir) { + if (OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) { + opaquedir = ovl_check_empty_and_clear(dentry); + err = PTR_ERR(opaquedir); + if (IS_ERR(opaquedir)) + goto out; + } else { + LIST_HEAD(list); + + /* + * When removing an empty opaque directory, it + * makes no sense to replace it with an exact replica of + * itself. But emptiness still needs to be checked. + */ + err = ovl_check_empty_dir(dentry, &list); + ovl_cache_free(&list); + if (err) + goto out; + } } err = ovl_lock_rename_workdir(workdir, upperdir); diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 5f0d1993e6e3..bf8537c7f455 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -529,7 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data) { struct ovl_fs *ufs = sb->s_fs_info; - if (!(*flags & MS_RDONLY) && !ufs->upper_mnt) + if (!(*flags & MS_RDONLY) && (!ufs->upper_mnt || !ufs->workdir)) return -EROFS; return 0; @@ -925,9 +925,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); err = PTR_ERR(ufs->workdir); if (IS_ERR(ufs->workdir)) { - pr_err("overlayfs: failed to create directory %s/%s\n", - ufs->config.workdir, OVL_WORKDIR_NAME); - goto out_put_upper_mnt; + pr_warn("overlayfs: failed to create directory %s/%s (errno: %i); mounting read-only\n", + ufs->config.workdir, OVL_WORKDIR_NAME, -err); + sb->s_flags |= MS_RDONLY; + ufs->workdir = NULL; } } @@ -997,7 +998,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) kfree(ufs->lower_mnt); out_put_workdir: dput(ufs->workdir); -out_put_upper_mnt: mntput(ufs->upper_mnt); out_put_lowerpath: for (i = 0; i < numlower; i++) diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index 04e79d57bca6..e9d401ce93bb 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -574,8 +574,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff) * After the last attribute is removed revert to original inode format, * making all literal area available to the data fork once more.
*/ -STATIC void -xfs_attr_fork_reset( +void +xfs_attr_fork_remove( struct xfs_inode *ip, struct xfs_trans *tp) { @@ -641,7 +641,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) (mp->m_flags & XFS_MOUNT_ATTR2) && (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) && !(args->op_flags & XFS_DA_OP_ADDNAME)) { - xfs_attr_fork_reset(dp, args->trans); + xfs_attr_fork_remove(dp, args->trans); } else { xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize); @@ -905,7 +905,7 @@ xfs_attr3_leaf_to_shortform( if (forkoff == -1) { ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2); ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE); - xfs_attr_fork_reset(dp, args->trans); + xfs_attr_fork_remove(dp, args->trans); goto out; } diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h index 025c4b820c03..882c8d338891 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.h +++ b/fs/xfs/libxfs/xfs_attr_leaf.h @@ -53,7 +53,7 @@ int xfs_attr_shortform_remove(struct xfs_da_args *args); int xfs_attr_shortform_list(struct xfs_attr_list_context *context); int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp); int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes); - +void xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp); /* * Internal routines when attribute fork size == XFS_LBSIZE(mp). diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index aeffeaaac0ec..f1026e86dabc 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -3224,12 +3224,24 @@ xfs_bmap_extsize_align( align_alen += temp; align_off -= temp; } + + /* Same adjustment for the end of the requested area. */ + temp = (align_alen % extsz); + if (temp) + align_alen += extsz - temp; + /* - * Same adjustment for the end of the requested area. + * For large extent hint sizes, the aligned extent might be larger than + * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls + * the length back under MAXEXTLEN. The outer allocation loops handle + * short allocation just fine, so it is safe to do this. We only want to + * do it when we are forced to, though, because it means more allocation + * operations are required. */ - if ((temp = (align_alen % extsz))) { - align_alen += extsz - temp; - } + while (align_alen > MAXEXTLEN) + align_alen -= extsz; + ASSERT(align_alen <= MAXEXTLEN); + /* * If the previous block overlaps with this proposed allocation * then move the start forward without adjusting the length. @@ -3318,7 +3330,9 @@ xfs_bmap_extsize_align( return -EINVAL; } else { ASSERT(orig_off >= align_off); - ASSERT(orig_end <= align_off + align_alen); + /* see MAXEXTLEN handling above */ + ASSERT(orig_end <= align_off + align_alen || + align_alen + extsz > MAXEXTLEN); } #ifdef DEBUG @@ -4099,13 +4113,6 @@ xfs_bmapi_reserve_delalloc( /* Figure out the extent size, adjust alen */ extsz = xfs_get_extsz_hint(ip); if (extsz) { - /* - * Make sure we don't exceed a single extent length when we - * align the extent by reducing length we are going to - * allocate by the maximum amount extent size aligment may - * require. 
- */ - alen = XFS_FILBLKS_MIN(len, MAXEXTLEN - (2 * extsz - 1)); error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof, 1, 0, &aoff, &alen); ASSERT(!error); diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 07349a183a11..1c9e75521250 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -376,7 +376,7 @@ xfs_ialloc_ag_alloc( */ newlen = args.mp->m_ialloc_inos; if (args.mp->m_maxicount && - percpu_counter_read(&args.mp->m_icount) + newlen > + percpu_counter_read_positive(&args.mp->m_icount) + newlen > args.mp->m_maxicount) return -ENOSPC; args.minlen = args.maxlen = args.mp->m_ialloc_blks; @@ -1339,10 +1339,13 @@ xfs_dialloc( * If we have already hit the ceiling of inode blocks then clear * okalloc so we scan all available agi structures for a free * inode. + * + * Read a rough value of mp->m_icount via percpu_counter_read_positive, + * which sacrifices precision for performance. */ if (mp->m_maxicount && - percpu_counter_read(&mp->m_icount) + mp->m_ialloc_inos > - mp->m_maxicount) { + percpu_counter_read_positive(&mp->m_icount) + mp->m_ialloc_inos + > mp->m_maxicount) { noroom = 1; okalloc = 0; }
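Both xfs_ialloc.c hunks above make the same trade: percpu_counter_read_positive() takes no lock and folds no per-CPU deltas, so the ceiling test may be off by the counter's batch slack. That is acceptable here because an over-optimistic answer only leads to an allocation attempt that fails cleanly. A minimal sketch of the pattern (illustrative names, not from the patch):

#include <linux/percpu_counter.h>

/* Approximate "would this allocation exceed the ceiling?" check. */
static bool icount_would_exceed(struct percpu_counter *icount,
				u64 new_inos, u64 maxicount)
{
	/* cheap, possibly stale read, clamped at zero */
	return maxicount &&
	       percpu_counter_read_positive(icount) + new_inos > maxicount;
}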
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c index f9c1c64782d3..3fbf167cfb4c 100644 --- a/fs/xfs/xfs_attr_inactive.c +++ b/fs/xfs/xfs_attr_inactive.c @@ -380,23 +380,31 @@ xfs_attr3_root_inactive( return error; } +/* + * xfs_attr_inactive kills all traces of an attribute fork on an inode. It + * removes both the on-disk and in-memory inode fork. Note that this also has to + * handle the condition of inodes without attributes but with an attribute fork + * configured, so we can't use xfs_inode_hasattr() here. + * + * The in-memory attribute fork is removed even on error. + */ int -xfs_attr_inactive(xfs_inode_t *dp) +xfs_attr_inactive( + struct xfs_inode *dp) { - xfs_trans_t *trans; - xfs_mount_t *mp; - int error; + struct xfs_trans *trans; + struct xfs_mount *mp; + int cancel_flags = 0; + int lock_mode = XFS_ILOCK_SHARED; + int error = 0; mp = dp->i_mount; ASSERT(! XFS_NOT_DQATTACHED(mp, dp)); - xfs_ilock(dp, XFS_ILOCK_SHARED); - if (!xfs_inode_hasattr(dp) || - dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { - xfs_iunlock(dp, XFS_ILOCK_SHARED); - return 0; - } - xfs_iunlock(dp, XFS_ILOCK_SHARED); + xfs_ilock(dp, lock_mode); + if (!XFS_IFORK_Q(dp)) + goto out_destroy_fork; + xfs_iunlock(dp, lock_mode); /* * Start our first transaction of the day. @@ -408,13 +416,18 @@ xfs_attr_inactive(xfs_inode_t *dp) * the inode in every transaction to let it float upward through * the log. */ + lock_mode = 0; trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL); error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0); - if (error) { - xfs_trans_cancel(trans, 0); - return error; - } - xfs_ilock(dp, XFS_ILOCK_EXCL); + if (error) + goto out_cancel; + + lock_mode = XFS_ILOCK_EXCL; + cancel_flags = XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT; + xfs_ilock(dp, lock_mode); + + if (!XFS_IFORK_Q(dp)) + goto out_cancel; /* * No need to make quota reservations here. We expect to release some @@ -422,29 +435,31 @@ */ xfs_trans_ijoin(trans, dp, 0); - /* - * Decide on what work routines to call based on the inode size. - */ - if (!xfs_inode_hasattr(dp) || - dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { - error = 0; - goto out; + /* invalidate and truncate the attribute fork extents */ + if (dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) { + error = xfs_attr3_root_inactive(&trans, dp); + if (error) + goto out_cancel; + + error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0); + if (error) + goto out_cancel; } - error = xfs_attr3_root_inactive(&trans, dp); - if (error) - goto out; - error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0); - if (error) - goto out; + /* Reset the attribute fork - this also destroys the in-core fork */ + xfs_attr_fork_remove(dp, trans); error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES); - xfs_iunlock(dp, XFS_ILOCK_EXCL); - + xfs_iunlock(dp, lock_mode); return error; -out: - xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); - xfs_iunlock(dp, XFS_ILOCK_EXCL); +out_cancel: + xfs_trans_cancel(trans, cancel_flags); +out_destroy_fork: + /* kill the in-core attr fork before we drop the inode lock */ + if (dp->i_afp) + xfs_idestroy_fork(dp, XFS_ATTR_FORK); + if (lock_mode) + xfs_iunlock(dp, lock_mode); return error; } diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 8121e75352ee..3b7591224f4a 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -124,7 +124,7 @@ xfs_iozero( status = 0; } while (count); - return (-status); + return status; } int diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index d6ebc85192b7..539a85fddbc2 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1946,21 +1946,17 @@ xfs_inactive( /* * If there are attributes associated with the file then blow them away * now. The code calls a routine that recursively deconstructs the - * attribute fork. We need to just commit the current transaction - * because we can't use it for xfs_attr_inactive(). + * attribute fork. It also blows away the in-core attribute fork. */ - if (ip->i_d.di_anextents > 0) { - ASSERT(ip->i_d.di_forkoff != 0); - + if (XFS_IFORK_Q(ip)) { error = xfs_attr_inactive(ip); if (error) return; } - if (ip->i_afp) - xfs_idestroy_fork(ip, XFS_ATTR_FORK); - + ASSERT(!ip->i_afp); ASSERT(ip->i_d.di_anextents == 0); + ASSERT(ip->i_d.di_forkoff == 0); /* * Free the inode. @@ -2883,7 +2879,13 @@ xfs_rename_alloc_whiteout( if (error) return error; - /* Satisfy xfs_bumplink that this is a real tmpfile */ + /* + * Prepare the tmpfile inode as if it were created through the VFS. + * Otherwise, the link increment paths will complain about nlink 0->1. + * Drop the link count as done by d_tmpfile(), complete the inode setup + * and flag it as linkable. + */ + drop_nlink(VFS_I(tmpfile)); xfs_finish_inode_setup(tmpfile); VFS_I(tmpfile)->i_state |= I_LINKABLE; @@ -3151,7 +3153,7 @@ xfs_rename( * intermediate state on disk. */ if (wip) { - ASSERT(wip->i_d.di_nlink == 0); + ASSERT(VFS_I(wip)->i_nlink == 0 && wip->i_d.di_nlink == 0); error = xfs_bumplink(tp, wip); if (error) goto out_trans_abort; diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 2ce7ee3b4ec1..6f23fbdfb365 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1084,14 +1084,18 @@ xfs_log_sbcount(xfs_mount_t *mp) return xfs_sync_sb(mp, true); } +/* + * Deltas for the inode count are +/-64, hence we use a large batch size + * of 128 so we don't need to take the counter lock on every update. + */ +#define XFS_ICOUNT_BATCH 128 int xfs_mod_icount( struct xfs_mount *mp, int64_t delta) { - /* deltas are +/-64, hence the large batch size of 128.
*/ - __percpu_counter_add(&mp->m_icount, delta, 128); - if (percpu_counter_compare(&mp->m_icount, 0) < 0) { + __percpu_counter_add(&mp->m_icount, delta, XFS_ICOUNT_BATCH); + if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) { ASSERT(0); percpu_counter_add(&mp->m_icount, -delta); return -EINVAL; } @@ -1113,6 +1117,14 @@ xfs_mod_ifree( return 0; } +/* + * Deltas for the block count can vary from 1 to very large, but lock contention + * only occurs on frequent small block count updates such as in the delayed + * allocation path for buffered writes (a page at a time). Hence we set + * a large batch count (1024) to minimise global counter updates except when + * we get near to ENOSPC and we have to be very accurate with our updates. + */ +#define XFS_FDBLOCKS_BATCH 1024 int xfs_mod_fdblocks( struct xfs_mount *mp, @@ -1151,25 +1163,19 @@ xfs_mod_fdblocks( * Taking blocks away, need to be more accurate the closer we * are to zero. * - * batch size is set to a maximum of 1024 blocks - if we are - * allocating of freeing extents larger than this then we aren't - * going to be hammering the counter lock so a lock per update - * is not a problem. - * * If the counter has a value of less than 2 * max batch size, * then make everything serialise as we are real close to * ENOSPC. */ -#define __BATCH 1024 - if (percpu_counter_compare(&mp->m_fdblocks, 2 * __BATCH) < 0) + if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH, + XFS_FDBLOCKS_BATCH) < 0) batch = 1; else - batch = __BATCH; -#undef __BATCH + batch = XFS_FDBLOCKS_BATCH; __percpu_counter_add(&mp->m_fdblocks, delta, batch); - if (percpu_counter_compare(&mp->m_fdblocks, - XFS_ALLOC_SET_ASIDE(mp)) >= 0) { + if (__percpu_counter_compare(&mp->m_fdblocks, XFS_ALLOC_SET_ASIDE(mp), + XFS_FDBLOCKS_BATCH) >= 0) { /* we had space! */ return 0; }
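The key invariant in the xfs_mod_fdblocks() hunks above is that the add and the compare use the same batch: __percpu_counter_compare() can only trust the global sum to within num_online_cpus() * batch, so it must be told which batch the updates were made with. A condensed sketch of the pattern (illustrative names, not the patch itself):

#include <linux/errno.h>
#include <linux/percpu_counter.h>

#define MY_BATCH	1024	/* assumed: large batch for hot small updates */

static int take_space(struct percpu_counter *free, s64 delta, s64 set_aside)
{
	s32 batch = MY_BATCH;

	/* inside the error bound of the limit: serialise every update */
	if (__percpu_counter_compare(free, 2 * MY_BATCH, MY_BATCH) < 0)
		batch = 1;

	__percpu_counter_add(free, -delta, batch);
	if (__percpu_counter_compare(free, set_aside, MY_BATCH) >= 0)
		return 0;	/* we had space */

	__percpu_counter_add(free, delta, batch);	/* undo and fail */
	return -ENOSPC;
}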
+ */ + +#ifndef _DT_BINDINGS_TI_DP83867_H +#define _DT_BINDINGS_TI_DP83867_H + +/* PHY CTRL bits */ +#define DP83867_PHYCR_FIFO_DEPTH_3_B_NIB 0x00 +#define DP83867_PHYCR_FIFO_DEPTH_4_B_NIB 0x01 +#define DP83867_PHYCR_FIFO_DEPTH_6_B_NIB 0x02 +#define DP83867_PHYCR_FIFO_DEPTH_8_B_NIB 0x03 + +/* RGMIIDCTL internal delay for rx and tx */ +#define DP83867_RGMIIDCTL_250_PS 0x0 +#define DP83867_RGMIIDCTL_500_PS 0x1 +#define DP83867_RGMIIDCTL_750_PS 0x2 +#define DP83867_RGMIIDCTL_1_NS 0x3 +#define DP83867_RGMIIDCTL_1_25_NS 0x4 +#define DP83867_RGMIIDCTL_1_50_NS 0x5 +#define DP83867_RGMIIDCTL_1_75_NS 0x6 +#define DP83867_RGMIIDCTL_2_00_NS 0x7 +#define DP83867_RGMIIDCTL_2_25_NS 0x8 +#define DP83867_RGMIIDCTL_2_50_NS 0x9 +#define DP83867_RGMIIDCTL_2_75_NS 0xa +#define DP83867_RGMIIDCTL_3_00_NS 0xb +#define DP83867_RGMIIDCTL_3_25_NS 0xc +#define DP83867_RGMIIDCTL_3_50_NS 0xd +#define DP83867_RGMIIDCTL_3_75_NS 0xe +#define DP83867_RGMIIDCTL_4_00_NS 0xf + +#endif diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h index 0ad5ef930b5d..de9c8140931a 100644 --- a/include/dt-bindings/reset/qcom,gcc-ipq806x.h +++ b/include/dt-bindings/reset/qcom,gcc-ipq806x.h @@ -129,4 +129,47 @@ #define USB30_1_PHY_RESET 112 #define NSSFB0_RESET 113 #define NSSFB1_RESET 114 +#define UBI32_CORE1_CLKRST_CLAMP_RESET 115 +#define UBI32_CORE1_CLAMP_RESET 116 +#define UBI32_CORE1_AHB_RESET 117 +#define UBI32_CORE1_AXI_RESET 118 +#define UBI32_CORE2_CLKRST_CLAMP_RESET 119 +#define UBI32_CORE2_CLAMP_RESET 120 +#define UBI32_CORE2_AHB_RESET 121 +#define UBI32_CORE2_AXI_RESET 122 +#define GMAC_CORE1_RESET 123 +#define GMAC_CORE2_RESET 124 +#define GMAC_CORE3_RESET 125 +#define GMAC_CORE4_RESET 126 +#define GMAC_AHB_RESET 127 +#define NSS_CH0_RST_RX_CLK_N_RESET 128 +#define NSS_CH0_RST_TX_CLK_N_RESET 129 +#define NSS_CH0_RST_RX_125M_N_RESET 130 +#define NSS_CH0_HW_RST_RX_125M_N_RESET 131 +#define NSS_CH0_RST_TX_125M_N_RESET 132 +#define NSS_CH1_RST_RX_CLK_N_RESET 133 +#define NSS_CH1_RST_TX_CLK_N_RESET 134 +#define NSS_CH1_RST_RX_125M_N_RESET 135 +#define NSS_CH1_HW_RST_RX_125M_N_RESET 136 +#define NSS_CH1_RST_TX_125M_N_RESET 137 +#define NSS_CH2_RST_RX_CLK_N_RESET 138 +#define NSS_CH2_RST_TX_CLK_N_RESET 139 +#define NSS_CH2_RST_RX_125M_N_RESET 140 +#define NSS_CH2_HW_RST_RX_125M_N_RESET 141 +#define NSS_CH2_RST_TX_125M_N_RESET 142 +#define NSS_CH3_RST_RX_CLK_N_RESET 143 +#define NSS_CH3_RST_TX_CLK_N_RESET 144 +#define NSS_CH3_RST_RX_125M_N_RESET 145 +#define NSS_CH3_HW_RST_RX_125M_N_RESET 146 +#define NSS_CH3_RST_TX_125M_N_RESET 147 +#define NSS_RST_RX_250M_125M_N_RESET 148 +#define NSS_RST_TX_250M_125M_N_RESET 149 +#define NSS_QSGMII_TXPI_RST_N_RESET 150 +#define NSS_QSGMII_CDR_RST_N_RESET 151 +#define NSS_SGMII2_CDR_RST_N_RESET 152 +#define NSS_SGMII3_CDR_RST_N_RESET 153 +#define NSS_CAL_PRBS_RST_N_RESET 154 +#define NSS_LCKDT_RST_N_RESET 155 +#define NSS_SRDS_N_RESET 156 + #endif diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index aff923ae8c4b..d87d8eced064 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -116,7 +116,6 @@ __printf(3, 4) int bdi_register(struct backing_dev_info *bdi, struct device *parent, const char *fmt, ...); int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); -void bdi_unregister(struct backing_dev_info *bdi); int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, enum wb_reason reason); diff --git 
a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index e34f906647d3..2ff4a9961e1d 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -305,6 +305,15 @@ int __bcma_driver_register(struct bcma_driver *drv, struct module *owner); extern void bcma_driver_unregister(struct bcma_driver *drv); +/* module_bcma_driver() - Helper macro for drivers that don't do + * anything special in module init/exit. This eliminates a lot of + * boilerplate. Each module may only use this macro once, and + * calling it replaces module_init() and module_exit() + */ +#define module_bcma_driver(__bcma_driver) \ + module_driver(__bcma_driver, bcma_driver_register, \ + bcma_driver_unregister) + /* Set a fallback SPROM. * See kdoc at the function definition for complete documentation. */ extern int bcma_arch_register_fallback_sprom( diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8821b9a8689e..2235aee8096a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -105,7 +105,8 @@ struct bpf_verifier_ops { */ bool (*is_valid_access)(int off, int size, enum bpf_access_type type); - u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off, + u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, + int src_reg, int ctx_off, struct bpf_insn *insn); }; @@ -123,7 +124,10 @@ struct bpf_prog_aux { const struct bpf_verifier_ops *ops; struct bpf_map **used_maps; struct bpf_prog *prog; - struct work_struct work; + union { + struct work_struct work; + struct rcu_head rcu; + }; }; struct bpf_array { @@ -153,6 +157,7 @@ void bpf_register_map_type(struct bpf_map_type_list *tl); struct bpf_prog *bpf_prog_get(u32 ufd); void bpf_prog_put(struct bpf_prog *prog); +void bpf_prog_put_rcu(struct bpf_prog *prog); struct bpf_map *bpf_map_get(struct fd f); void bpf_map_put(struct bpf_map *map); @@ -182,5 +187,6 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto; extern const struct bpf_func_proto bpf_get_prandom_u32_proto; extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; extern const struct bpf_func_proto bpf_tail_call_proto; +extern const struct bpf_func_proto bpf_ktime_get_ns_proto; #endif /* _LINUX_BPF_H */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index ae2982c0f7a6..656da2a12ffe 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -17,7 +17,7 @@ #define PHY_ID_BCM7250 0xae025280 #define PHY_ID_BCM7364 0xae025260 #define PHY_ID_BCM7366 0x600d8490 -#define PHY_ID_BCM7425 0x03625e60 +#define PHY_ID_BCM7425 0x600d86b0 #define PHY_ID_BCM7429 0x600d8730 #define PHY_ID_BCM7439 0x600d8480 #define PHY_ID_BCM7439_2 0xae025080 diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 27e285b92b5f..59915ea5373c 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -151,10 +151,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask, return 1; } -static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) +static inline unsigned int cpumask_local_spread(unsigned int i, int node) { - set_bit(0, cpumask_bits(dstp)); - return 0; } @@ -208,7 +206,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); -int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp); +unsigned int cpumask_local_spread(unsigned int i, int node); /** * for_each_cpu - iterate over every cpu in a mask 
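The cpumask change above swaps an API rather than tweaking one: cpumask_set_cpu_local_first() filled in a caller-supplied mask and could fail, while cpumask_local_spread() simply returns the i-th CPU, preferring CPUs close to the given NUMA node and wrapping as needed. A hedged usage sketch for the common consumer, spreading per-queue IRQ affinity (names are illustrative, not from the patch):

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static void assign_queue_irqs(const int *irqs, int nqueues, int numa_node)
{
	int q;

	for (q = 0; q < nqueues; q++) {
		/* q-th CPU, NUMA-local CPUs first */
		unsigned int cpu = cpumask_local_spread(q, numa_node);

		irq_set_affinity_hint(irqs[q], get_cpu_mask(cpu));
	}
}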
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index b9ab677c0c0a..67ce5bd3b56a 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -416,7 +416,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, /** * __vlan_get_tag - get the VLAN ID that is part of the payload * @skb: skbuff to query - * @vlan_tci: buffer to store vlaue + * @vlan_tci: buffer to store value * * Returns error if the skb is not of VLAN type */ @@ -435,7 +435,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) /** * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[] * @skb: skbuff to query - * @vlan_tci: buffer to store vlaue + * @vlan_tci: buffer to store value * * Returns error if @skb->vlan_tci is not set correctly */ @@ -456,7 +456,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, /** * vlan_get_tag - get the VLAN ID from the skb * @skb: skbuff to query - * @vlan_tci: buffer to store vlaue + * @vlan_tci: buffer to store value * * Returns error if the skb is not VLAN tagged */ @@ -628,4 +628,24 @@ static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, return features; } +/** + * compare_vlan_header - Compare two vlan headers + * @h1: Pointer to vlan header + * @h2: Pointer to vlan header + * + * Compare two vlan headers, returns 0 if equal. + * + * Please note that alignment of h1 & h2 is only guaranteed to be 16 bits. + */ +static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1, + const struct vlan_hdr *h2) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return *(u32 *)h1 ^ *(u32 *)h2; +#else + return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) | + ((__force u32)h1->h_vlan_encapsulated_proto ^ + (__force u32)h2->h_vlan_encapsulated_proto); +#endif +} #endif /* !(_LINUX_IF_VLAN_H_) */
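Note that compare_vlan_header() returns an XOR/OR residue, not an ordering: only a zero versus non-zero test is meaningful. On architectures with efficient unaligned loads the two 4-byte headers collapse into a single 32-bit XOR; the fallback ORs per-field XORs so neither path needs a branch. A trivial wrapper showing the intended use (illustrative, not from the patch):

#include <linux/if_vlan.h>

static bool vlan_hdrs_equal(const struct vlan_hdr *h1,
			    const struct vlan_hdr *h2)
{
	/* zero residue means both TCI and encapsulated proto match */
	return compare_vlan_header(h1, h2) == 0;
}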
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 796ef9645827..a240e61a7700 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -115,13 +115,14 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) * Extended Capability Register */ +#define ecap_pasid(e) ((e >> 40) & 0x1) #define ecap_pss(e) ((e >> 35) & 0x1f) #define ecap_eafs(e) ((e >> 34) & 0x1) #define ecap_nwfs(e) ((e >> 33) & 0x1) #define ecap_srs(e) ((e >> 31) & 0x1) #define ecap_ers(e) ((e >> 30) & 0x1) #define ecap_prs(e) ((e >> 29) & 0x1) -#define ecap_pasid(e) ((e >> 28) & 0x1) +/* PASID support used to be on bit 28 */ #define ecap_dis(e) ((e >> 27) & 0x1) #define ecap_nest(e) ((e >> 26) & 0x1) #define ecap_mts(e) ((e >> 25) & 0x1) diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 5fc3d1083071..2b6a204bd8d4 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -166,19 +166,34 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2) } #if BITS_PER_LONG < 64 -extern u64 __ktime_divns(const ktime_t kt, s64 div); -static inline u64 ktime_divns(const ktime_t kt, s64 div) +extern s64 __ktime_divns(const ktime_t kt, s64 div); +static inline s64 ktime_divns(const ktime_t kt, s64 div) { + /* + * Negative divisors could cause an infinite loop, + * so bug out here. + */ + BUG_ON(div < 0); if (__builtin_constant_p(div) && !(div >> 32)) { - u64 ns = kt.tv64; - do_div(ns, div); - return ns; + s64 ns = kt.tv64; + u64 tmp = ns < 0 ? -ns : ns; + + do_div(tmp, div); + return ns < 0 ? -tmp : tmp; } else { return __ktime_divns(kt, div); } } #else /* BITS_PER_LONG < 64 */ -# define ktime_divns(kt, div) (u64)((kt).tv64 / (div)) +static inline s64 ktime_divns(const ktime_t kt, s64 div) +{ + /* + * 32-bit implementation cannot handle negative divisors, + * so catch them on 64-bit as well. + */ + WARN_ON(div < 0); + return kt.tv64 / div; +} #endif static inline s64 ktime_to_us(const ktime_t kt) diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 83e80ab94500..ad31e476873f 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -46,8 +46,9 @@ #define MAX_MSIX_P_PORT 17 #define MAX_MSIX 64 -#define MSIX_LEGACY_SZ 4 #define MIN_MSIX_P_PORT 5 +#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \ + (dev_cap).num_ports * MIN_MSIX_P_PORT) #define MLX4_MAX_100M_UNITS_VAL 255 /* * work around: can't set values @@ -528,7 +529,6 @@ struct mlx4_caps { int num_eqs; int reserved_eqs; int num_comp_vectors; - int comp_pool; int num_mpts; int max_fmr_maps; int num_mtts; @@ -1332,10 +1332,13 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); int mlx4_SYNC_TPT(struct mlx4_dev *dev); int mlx4_test_interrupts(struct mlx4_dev *dev); -int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, - int *vector); +u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port); +bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector); +struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port); +int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector); void mlx4_release_eq(struct mlx4_dev *dev, int vec); +int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector); int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec); int mlx4_get_phys_port_id(struct mlx4_dev *dev); diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 2695ced222df..abc4767695e4 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -169,6 +169,9 @@ int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, struct mlx5_query_cq_mbox_out *out); int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, struct mlx5_modify_cq_mbox_in *in, int in_sz); +int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, + struct mlx5_core_cq *cq, u16 cq_period, + u16 cq_max_count); int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index abf65c790421..b2c43508a737 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -35,6 +35,7 @@ #include #include +#include #if defined(__LITTLE_ENDIAN) #define MLX5_SET_HOST_ENDIANNESS 0 @@ -58,6 +59,8 @@ #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) +#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8) +#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32) #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld)) @@ -70,6 +73,14 @@ << __mlx5_dw_bit_off(typ, fld))); \ } while (0) +#define MLX5_SET_TO_ONES(typ, p, fld) do { \ + BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ + *((__be32 *)(p) + __mlx5_dw_off(typ,
fld)) = \ + cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ + (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \ + << __mlx5_dw_bit_off(typ, fld))); \ +} while (0) + #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\ __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \ __mlx5_mask(typ, fld)) @@ -88,6 +99,12 @@ __mlx5_mask(typ, fld)) #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld))) +#define MLX5_GET64_PR(typ, p, fld) ({ \ + u64 ___t = MLX5_GET64(typ, p, fld); \ + pr_debug(#fld " = 0x%llx\n", ___t); \ + ___t; \ +}) + enum { MLX5_MAX_COMMANDS = 32, MLX5_CMD_DATA_BLOCK_SIZE = 512, @@ -264,6 +281,7 @@ enum { MLX5_OPCODE_RDMA_WRITE_IMM = 0x09, MLX5_OPCODE_SEND = 0x0a, MLX5_OPCODE_SEND_IMM = 0x0b, + MLX5_OPCODE_LSO = 0x0e, MLX5_OPCODE_RDMA_READ = 0x10, MLX5_OPCODE_ATOMIC_CS = 0x11, MLX5_OPCODE_ATOMIC_FA = 0x12, @@ -312,13 +330,6 @@ enum { MLX5_CAP_OFF_CMDIF_CSUM = 46, }; -enum { - HCA_CAP_OPMOD_GET_MAX = 0, - HCA_CAP_OPMOD_GET_CUR = 1, - HCA_CAP_OPMOD_GET_ODP_MAX = 4, - HCA_CAP_OPMOD_GET_ODP_CUR = 5 -}; - struct mlx5_inbox_hdr { __be16 opcode; u8 rsvd[4]; @@ -541,6 +552,10 @@ struct mlx5_cmd_prot_block { u8 sig; }; +enum { + MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5, +}; + struct mlx5_err_cqe { u8 rsvd0[32]; __be32 srqn; @@ -554,13 +569,22 @@ struct mlx5_err_cqe { }; struct mlx5_cqe64 { - u8 rsvd0[17]; + u8 rsvd0[4]; + u8 lro_tcppsh_abort_dupack; + u8 lro_min_ttl; + __be16 lro_tcp_win; + __be32 lro_ack_seq_num; + __be32 rss_hash_result; + u8 rss_hash_type; u8 ml_path; - u8 rsvd20[4]; + u8 rsvd20[2]; + __be16 check_sum; __be16 slid; __be32 flags_rqpn; - u8 rsvd28[4]; - __be32 srqn; + u8 hds_ip_ext; + u8 l4_hdr_type_etc; + __be16 vlan_info; + __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */ __be32 imm_inval_pkey; u8 rsvd40[4]; __be32 byte_cnt; @@ -571,6 +595,40 @@ struct mlx5_cqe64 { u8 op_own; }; +static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) +{ + return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; +} + +static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) +{ + return (cqe->l4_hdr_type_etc >> 4) & 0x7; +} + +static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe) +{ + return !!(cqe->l4_hdr_type_etc & 0x1); +} + +enum { + CQE_L4_HDR_TYPE_NONE = 0x0, + CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1, + CQE_L4_HDR_TYPE_UDP = 0x2, + CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3, + CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4, +}; + +enum { + CQE_RSS_HTYPE_IP = 0x3 << 6, + CQE_RSS_HTYPE_L4 = 0x3 << 2, +}; + +enum { + CQE_L2_OK = 1 << 0, + CQE_L3_OK = 1 << 1, + CQE_L4_OK = 1 << 2, +}; + struct mlx5_sig_err_cqe { u8 rsvd0[16]; __be32 expected_trans_sig; @@ -996,4 +1054,135 @@ struct mlx5_destroy_psv_out { u8 rsvd[8]; }; +#define MLX5_CMD_OP_MAX 0x920 + +enum { + VPORT_STATE_DOWN = 0x0, + VPORT_STATE_UP = 0x1, +}; + +enum { + MLX5_L3_PROT_TYPE_IPV4 = 0, + MLX5_L3_PROT_TYPE_IPV6 = 1, +}; + +enum { + MLX5_L4_PROT_TYPE_TCP = 0, + MLX5_L4_PROT_TYPE_UDP = 1, +}; + +enum { + MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0, + MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1, + MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2, + MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3, + MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4, +}; + +enum { + MLX5_MATCH_OUTER_HEADERS = 1 << 0, + MLX5_MATCH_MISC_PARAMETERS = 1 << 1, + MLX5_MATCH_INNER_HEADERS = 1 << 2, + +}; + +enum { + MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0, + MLX5_FLOW_TABLE_TYPE_ESWITCH = 4, +}; + +enum { + MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0, + MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1, + MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2, +}; + +enum { 
+ MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, + MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1, +}; + +/* MLX5 DEV CAPs */ + +/* TODO: EAT.ME */ +enum mlx5_cap_mode { + HCA_CAP_OPMOD_GET_MAX = 0, + HCA_CAP_OPMOD_GET_CUR = 1, +}; + +enum mlx5_cap_type { + MLX5_CAP_GENERAL = 0, + MLX5_CAP_ETHERNET_OFFLOADS, + MLX5_CAP_ODP, + MLX5_CAP_ATOMIC, + MLX5_CAP_ROCE, + MLX5_CAP_IPOIB_OFFLOADS, + MLX5_CAP_EOIB_OFFLOADS, + MLX5_CAP_FLOW_TABLE, + /* NUM OF CAP Types */ + MLX5_CAP_NUM +}; + +/* GET Dev Caps macros */ +#define MLX5_CAP_GEN(mdev, cap) \ + MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap) + +#define MLX5_CAP_GEN_MAX(mdev, cap) \ + MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap) + +#define MLX5_CAP_ETH(mdev, cap) \ + MLX5_GET(per_protocol_networking_offload_caps,\ + mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) + +#define MLX5_CAP_ETH_MAX(mdev, cap) \ + MLX5_GET(per_protocol_networking_offload_caps,\ + mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) + +#define MLX5_CAP_ROCE(mdev, cap) \ + MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap) + +#define MLX5_CAP_ROCE_MAX(mdev, cap) \ + MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap) + +#define MLX5_CAP_ATOMIC(mdev, cap) \ + MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap) + +#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ + MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap) + +#define MLX5_CAP_FLOWTABLE(mdev, cap) \ + MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap) + +#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ + MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) + +#define MLX5_CAP_ODP(mdev, cap)\ + MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap) + +enum { + MLX5_CMD_STAT_OK = 0x0, + MLX5_CMD_STAT_INT_ERR = 0x1, + MLX5_CMD_STAT_BAD_OP_ERR = 0x2, + MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, + MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, + MLX5_CMD_STAT_BAD_RES_ERR = 0x5, + MLX5_CMD_STAT_RES_BUSY = 0x6, + MLX5_CMD_STAT_LIM_ERR = 0x8, + MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, + MLX5_CMD_STAT_IX_ERR = 0xa, + MLX5_CMD_STAT_NO_RES_ERR = 0xf, + MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, + MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, + MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, + MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, + MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, +}; + +static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) +{ + if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) + return 0; + return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; +} + #endif /* MLX5_DEVICE_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 9a90e7523dc2..6093bde16b94 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -44,7 +44,6 @@ #include #include -#include enum { MLX5_BOARD_ID_LEN = 64, @@ -85,7 +84,7 @@ enum { }; enum { - MLX5_MAX_EQ_NAME = 32 + MLX5_MAX_IRQ_NAME = 32 }; enum { @@ -108,6 +107,7 @@ enum { MLX5_REG_PUDE = 0x5009, MLX5_REG_PMPE = 0x5010, MLX5_REG_PELC = 0x500e, + MLX5_REG_PVLC = 0x500f, MLX5_REG_PMLP = 0, /* TBD */ MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, @@ -150,6 +150,11 @@ enum mlx5_dev_event { MLX5_DEV_EVENT_CLIENT_REREG, }; +enum mlx5_port_status { + MLX5_PORT_UP = 1 << 1, + MLX5_PORT_DOWN = 1 << 2, +}; + struct mlx5_uuar_info { struct mlx5_uar *uars; int num_uars; @@ -269,56 +274,7 @@ struct mlx5_cmd { struct mlx5_port_caps { int gid_table_len; int pkey_table_len; -}; - -struct mlx5_general_caps { - u8 log_max_eq; - u8 log_max_cq; - u8 log_max_qp; - u8 log_max_mkey; - u8 log_max_pd; - u8 
log_max_srq; - u8 log_max_strq; - u8 log_max_mrw_sz; - u8 log_max_bsf_list_size; - u8 log_max_klm_list_size; - u32 max_cqes; - int max_wqes; - u32 max_eqes; - u32 max_indirection; - int max_sq_desc_sz; - int max_rq_desc_sz; - int max_dc_sq_desc_sz; - u64 flags; - u16 stat_rate_support; - int log_max_msg; - int num_ports; - u8 log_max_ra_res_qp; - u8 log_max_ra_req_qp; - int max_srq_wqes; - int bf_reg_size; - int bf_regs_per_page; - struct mlx5_port_caps port[MLX5_MAX_PORTS]; - u8 ext_port_cap[MLX5_MAX_PORTS]; - int max_vf; - u32 reserved_lkey; - u8 local_ca_ack_delay; - u8 log_max_mcg; - u32 max_qp_mcg; - int min_page_sz; - int pd_cap; - u32 max_qp_counters; - u32 pkey_table_size; - u8 log_max_ra_req_dc; - u8 log_max_ra_res_dc; - u32 uar_sz; - u8 min_log_pg_sz; - u8 log_max_xrcd; - u16 log_uar_page_sz; -}; - -struct mlx5_caps { - struct mlx5_general_caps gen; + u8 ext_port_cap; }; struct mlx5_cmd_mailbox { @@ -334,8 +290,6 @@ struct mlx5_buf_list { struct mlx5_buf { struct mlx5_buf_list direct; - struct mlx5_buf_list *page_list; - int nbufs; int npages; int size; u8 page_shift; @@ -351,7 +305,6 @@ struct mlx5_eq { u8 eqn; int nent; u64 mask; - char name[MLX5_MAX_EQ_NAME]; struct list_head list; int index; struct mlx5_rsc_debug *dbg; @@ -387,6 +340,8 @@ struct mlx5_core_mr { enum mlx5_res_type { MLX5_RES_QP, + MLX5_RES_SRQ, + MLX5_RES_XSRQ, }; struct mlx5_core_rsc_common { @@ -396,6 +351,7 @@ struct mlx5_core_rsc_common { }; struct mlx5_core_srq { + struct mlx5_core_rsc_common common; /* must be first */ u32 srqn; int max; int max_gs; @@ -414,7 +370,6 @@ struct mlx5_eq_table { struct mlx5_eq pages_eq; struct mlx5_eq async_eq; struct mlx5_eq cmd_eq; - struct msix_entry *msix_arr; int num_comp_vectors; /* protect EQs list */ @@ -467,9 +422,16 @@ struct mlx5_mr_table { struct radix_tree_root tree; }; +struct mlx5_irq_info { + cpumask_var_t mask; + char name[MLX5_MAX_IRQ_NAME]; +}; + struct mlx5_priv { char name[MLX5_MAX_NAME_LEN]; struct mlx5_eq_table eq_table; + struct msix_entry *msix_arr; + struct mlx5_irq_info *irq_info; struct mlx5_uuar_info uuari; MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); @@ -520,7 +482,9 @@ struct mlx5_core_dev { u8 rev_id; char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; - struct mlx5_caps caps; + struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; + u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; + u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; void (*event) (struct mlx5_core_dev *dev, @@ -529,6 +493,7 @@ struct mlx5_core_dev { struct mlx5_priv priv; struct mlx5_profile *profile; atomic_t num_qps; + u32 issi; }; struct mlx5_db { @@ -549,6 +514,11 @@ enum { MLX5_COMP_EQ_SIZE = 1024, }; +enum { + MLX5_PTYS_IB = 1 << 0, + MLX5_PTYS_EN = 1 << 2, +}; + struct mlx5_db_pgdir { struct list_head list; DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); @@ -584,13 +554,44 @@ struct mlx5_pas { u8 log_sz; }; +enum port_state_policy { + MLX5_AAA_000 +}; + +enum phy_port_state { + MLX5_AAA_111 +}; + +struct mlx5_hca_vport_context { + u32 field_select; + bool sm_virt_aware; + bool has_smi; + bool has_raw; + enum port_state_policy policy; + enum phy_port_state phys_state; + enum ib_port_state vport_state; + u8 port_physical_state; + u64 sys_image_guid; + u64 port_guid; + u64 node_guid; + u32 cap_mask1; + u32 cap_mask1_perm; + u32 cap_mask2; + u32 cap_mask2_perm; + u16 lid; + u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */ + u8 lmc; + u8 subnet_timeout; + u16 sm_lid; + u8 sm_sl; + u16 
qkey_violation_counter; + u16 pkey_violation_counter; + bool grh_required; +}; + static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) { - if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1)) return buf->direct.buf + offset; - else - return buf->page_list[offset >> PAGE_SHIFT].buf + - (offset & (PAGE_SIZE - 1)); } extern struct workqueue_struct *mlx5_core_wq; @@ -654,8 +655,8 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); int mlx5_cmd_status_to_err_v2(void *ptr); -int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, - u16 opmod); +int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, + enum mlx5_cap_mode cap_mode); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, @@ -665,19 +666,21 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); +int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); +void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); void mlx5_health_cleanup(void); void __init mlx5_health_init(void); void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev); -int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, - struct mlx5_buf *buf); +int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, gfp_t flags, int npages); void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, struct mlx5_cmd_mailbox *head); int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, int inlen); + struct mlx5_create_srq_mbox_in *in, int inlen, + int is_xrc); int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, struct mlx5_query_srq_mbox_out *out); @@ -734,7 +737,32 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); + int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); +int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, + int ptys_size, int proto_mask, u8 local_port); +int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, + u32 *proto_cap, int proto_mask); +int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, + u32 *proto_admin, int proto_mask); +int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, + u8 *link_width_oper, u8 local_port); +int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, + u8 *proto_oper, int proto_mask, + u8 local_port); +int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, + int proto_mask); +int mlx5_set_port_status(struct mlx5_core_dev *dev, + enum mlx5_port_status status); +int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status); + +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int 
mtu); +int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, + u8 local_port); +int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, + u8 local_port); +int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, + u8 *vl_hw_cap, u8 local_port); int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); @@ -799,6 +827,7 @@ struct mlx5_interface { void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); +int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); struct mlx5_profile { u64 mask; @@ -809,4 +838,14 @@ struct mlx5_profile { } mr_cache[MAX_MR_CACHE_ENTRIES]; }; +static inline int mlx5_get_gid_table_len(u16 param) +{ + if (param > 4) { + pr_warn("gid table length is zero\n"); + return 0; + } + + return 8 * (1 << param); +} + #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h new file mode 100644 index 000000000000..5f922c6d4fc2 --- /dev/null +++ b/include/linux/mlx5/flow_table.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef MLX5_FLOW_TABLE_H +#define MLX5_FLOW_TABLE_H + +#include + +struct mlx5_flow_table_group { + u8 log_sz; + u8 match_criteria_enable; + u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; +}; + +void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type, + u16 num_groups, + struct mlx5_flow_table_group *group); +void mlx5_destroy_flow_table(void *flow_table); +int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable, + void *match_criteria, void *flow_context, + u32 *flow_index); +void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index); +u32 mlx5_get_flow_table_id(void *flow_table); + +#endif /* MLX5_FLOW_TABLE_H */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index cb3ad17edd1f..6d2f6fee041c 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -28,11 +28,44 @@ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. - */ - +*/ #ifndef MLX5_IFC_H #define MLX5_IFC_H +enum { + MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0, + MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1, + MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED = 0x2, + MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED = 0x3, + MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED = 0x13, + MLX5_EVENT_TYPE_CODING_SRQ_LIMIT = 0x14, + MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED = 0x1c, + MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION = 0x1d, + MLX5_EVENT_TYPE_CODING_CQ_ERROR = 0x4, + MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR = 0x5, + MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED = 0x7, + MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT = 0xc, + MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR = 0x10, + MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR = 0x11, + MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR = 0x12, + MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR = 0x8, + MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE = 0x9, + MLX5_EVENT_TYPE_CODING_GPIO_EVENT = 0x15, + MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19, + MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a, + MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b, + MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f, + MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa, + MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb +}; + +enum { + MLX5_MODIFY_TIR_BITMASK_LRO = 0x0, + MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1, + MLX5_MODIFY_TIR_BITMASK_HASH = 0x2, + MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3 +}; + enum { MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, MLX5_CMD_OP_QUERY_ADAPTER = 0x101, @@ -43,6 +76,8 @@ enum { MLX5_CMD_OP_QUERY_PAGES = 0x107, MLX5_CMD_OP_MANAGE_PAGES = 0x108, MLX5_CMD_OP_SET_HCA_CAP = 0x109, + MLX5_CMD_OP_QUERY_ISSI = 0x10a, + MLX5_CMD_OP_SET_ISSI = 0x10b, MLX5_CMD_OP_CREATE_MKEY = 0x200, MLX5_CMD_OP_QUERY_MKEY = 0x201, MLX5_CMD_OP_DESTROY_MKEY = 0x202, @@ -66,6 +101,7 @@ enum { MLX5_CMD_OP_2ERR_QP = 0x507, MLX5_CMD_OP_2RST_QP = 0x50a, MLX5_CMD_OP_QUERY_QP = 0x50b, + MLX5_CMD_OP_SQD_RTS_QP = 0x50c, MLX5_CMD_OP_INIT2INIT_QP = 0x50e, MLX5_CMD_OP_CREATE_PSV = 0x600, MLX5_CMD_OP_DESTROY_PSV = 0x601, @@ -73,7 +109,10 @@ enum { 
MLX5_CMD_OP_DESTROY_SRQ = 0x701, MLX5_CMD_OP_QUERY_SRQ = 0x702, MLX5_CMD_OP_ARM_RQ = 0x703, - MLX5_CMD_OP_RESIZE_SRQ = 0x704, + MLX5_CMD_OP_CREATE_XRC_SRQ = 0x705, + MLX5_CMD_OP_DESTROY_XRC_SRQ = 0x706, + MLX5_CMD_OP_QUERY_XRC_SRQ = 0x707, + MLX5_CMD_OP_ARM_XRC_SRQ = 0x708, MLX5_CMD_OP_CREATE_DCT = 0x710, MLX5_CMD_OP_DESTROY_DCT = 0x711, MLX5_CMD_OP_DRAIN_DCT = 0x712, @@ -85,8 +124,12 @@ enum { MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753, MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755, - MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760, + MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760, MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761, + MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762, + MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763, + MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764, + MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765, MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, @@ -98,7 +141,7 @@ enum { MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, MLX5_CMD_OP_ACCESS_REG = 0x805, MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, - MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, + MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807, MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, MLX5_CMD_OP_MAD_IFC = 0x50d, MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, @@ -106,23 +149,22 @@ enum { MLX5_CMD_OP_NOP = 0x80d, MLX5_CMD_OP_ALLOC_XRCD = 0x80e, MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, - MLX5_CMD_OP_SET_BURST_SIZE = 0x812, - MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813, - MLX5_CMD_OP_ACTIVATE_TRACER = 0x814, - MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815, - MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820, - MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821, - MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822, - MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823, - MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824, + MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816, + MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817, + MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822, + MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823, + MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824, + MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x825, + MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x826, + MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT = 0x827, + MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT = 0x828, + MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829, + MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a, + MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b, MLX5_CMD_OP_CREATE_TIR = 0x900, MLX5_CMD_OP_MODIFY_TIR = 0x901, MLX5_CMD_OP_DESTROY_TIR = 0x902, MLX5_CMD_OP_QUERY_TIR = 0x903, - MLX5_CMD_OP_CREATE_TIS = 0x912, - MLX5_CMD_OP_MODIFY_TIS = 0x913, - MLX5_CMD_OP_DESTROY_TIS = 0x914, - MLX5_CMD_OP_QUERY_TIS = 0x915, MLX5_CMD_OP_CREATE_SQ = 0x904, MLX5_CMD_OP_MODIFY_SQ = 0x905, MLX5_CMD_OP_DESTROY_SQ = 0x906, @@ -135,9 +177,430 @@ enum { MLX5_CMD_OP_MODIFY_RMP = 0x90d, MLX5_CMD_OP_DESTROY_RMP = 0x90e, MLX5_CMD_OP_QUERY_RMP = 0x90f, - MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910, - MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911, - MLX5_CMD_OP_MAX = 0x911 + MLX5_CMD_OP_CREATE_TIS = 0x912, + MLX5_CMD_OP_MODIFY_TIS = 0x913, + MLX5_CMD_OP_DESTROY_TIS = 0x914, + MLX5_CMD_OP_QUERY_TIS = 0x915, + MLX5_CMD_OP_CREATE_RQT = 0x916, + MLX5_CMD_OP_MODIFY_RQT = 0x917, + MLX5_CMD_OP_DESTROY_RQT = 0x918, + MLX5_CMD_OP_QUERY_RQT = 0x919, + MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930, + MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931, + MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932, + MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933, + MLX5_CMD_OP_DESTROY_FLOW_GROUP = 0x934, + MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935, + MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936, + MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937, + MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938 
+}; + +struct mlx5_ifc_flow_table_fields_supported_bits { + u8 outer_dmac[0x1]; + u8 outer_smac[0x1]; + u8 outer_ether_type[0x1]; + u8 reserved_0[0x1]; + u8 outer_first_prio[0x1]; + u8 outer_first_cfi[0x1]; + u8 outer_first_vid[0x1]; + u8 reserved_1[0x1]; + u8 outer_second_prio[0x1]; + u8 outer_second_cfi[0x1]; + u8 outer_second_vid[0x1]; + u8 reserved_2[0x1]; + u8 outer_sip[0x1]; + u8 outer_dip[0x1]; + u8 outer_frag[0x1]; + u8 outer_ip_protocol[0x1]; + u8 outer_ip_ecn[0x1]; + u8 outer_ip_dscp[0x1]; + u8 outer_udp_sport[0x1]; + u8 outer_udp_dport[0x1]; + u8 outer_tcp_sport[0x1]; + u8 outer_tcp_dport[0x1]; + u8 outer_tcp_flags[0x1]; + u8 outer_gre_protocol[0x1]; + u8 outer_gre_key[0x1]; + u8 outer_vxlan_vni[0x1]; + u8 reserved_3[0x5]; + u8 source_eswitch_port[0x1]; + + u8 inner_dmac[0x1]; + u8 inner_smac[0x1]; + u8 inner_ether_type[0x1]; + u8 reserved_4[0x1]; + u8 inner_first_prio[0x1]; + u8 inner_first_cfi[0x1]; + u8 inner_first_vid[0x1]; + u8 reserved_5[0x1]; + u8 inner_second_prio[0x1]; + u8 inner_second_cfi[0x1]; + u8 inner_second_vid[0x1]; + u8 reserved_6[0x1]; + u8 inner_sip[0x1]; + u8 inner_dip[0x1]; + u8 inner_frag[0x1]; + u8 inner_ip_protocol[0x1]; + u8 inner_ip_ecn[0x1]; + u8 inner_ip_dscp[0x1]; + u8 inner_udp_sport[0x1]; + u8 inner_udp_dport[0x1]; + u8 inner_tcp_sport[0x1]; + u8 inner_tcp_dport[0x1]; + u8 inner_tcp_flags[0x1]; + u8 reserved_7[0x9]; + + u8 reserved_8[0x40]; +}; + +struct mlx5_ifc_flow_table_prop_layout_bits { + u8 ft_support[0x1]; + u8 reserved_0[0x1f]; + + u8 reserved_1[0x2]; + u8 log_max_ft_size[0x6]; + u8 reserved_2[0x10]; + u8 max_ft_level[0x8]; + + u8 reserved_3[0x20]; + + u8 reserved_4[0x18]; + u8 log_max_ft_num[0x8]; + + u8 reserved_5[0x18]; + u8 log_max_destination[0x8]; + + u8 reserved_6[0x18]; + u8 log_max_flow[0x8]; + + u8 reserved_7[0x40]; + + struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support; + + struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support; +}; + +struct mlx5_ifc_odp_per_transport_service_cap_bits { + u8 send[0x1]; + u8 receive[0x1]; + u8 write[0x1]; + u8 read[0x1]; + u8 reserved_0[0x1]; + u8 srq_receive[0x1]; + u8 reserved_1[0x1a]; +}; + +struct mlx5_ifc_fte_match_set_lyr_2_4_bits { + u8 smac_47_16[0x20]; + + u8 smac_15_0[0x10]; + u8 ethertype[0x10]; + + u8 dmac_47_16[0x20]; + + u8 dmac_15_0[0x10]; + u8 first_prio[0x3]; + u8 first_cfi[0x1]; + u8 first_vid[0xc]; + + u8 ip_protocol[0x8]; + u8 ip_dscp[0x6]; + u8 ip_ecn[0x2]; + u8 vlan_tag[0x1]; + u8 reserved_0[0x1]; + u8 frag[0x1]; + u8 reserved_1[0x4]; + u8 tcp_flags[0x9]; + + u8 tcp_sport[0x10]; + u8 tcp_dport[0x10]; + + u8 reserved_2[0x20]; + + u8 udp_sport[0x10]; + u8 udp_dport[0x10]; + + u8 src_ip[4][0x20]; + + u8 dst_ip[4][0x20]; +}; + +struct mlx5_ifc_fte_match_set_misc_bits { + u8 reserved_0[0x20]; + + u8 reserved_1[0x10]; + u8 source_port[0x10]; + + u8 outer_second_prio[0x3]; + u8 outer_second_cfi[0x1]; + u8 outer_second_vid[0xc]; + u8 inner_second_prio[0x3]; + u8 inner_second_cfi[0x1]; + u8 inner_second_vid[0xc]; + + u8 outer_second_vlan_tag[0x1]; + u8 inner_second_vlan_tag[0x1]; + u8 reserved_2[0xe]; + u8 gre_protocol[0x10]; + + u8 gre_key_h[0x18]; + u8 gre_key_l[0x8]; + + u8 vxlan_vni[0x18]; + u8 reserved_3[0x8]; + + u8 reserved_4[0x20]; + + u8 reserved_5[0xc]; + u8 outer_ipv6_flow_label[0x14]; + + u8 reserved_6[0xc]; + u8 inner_ipv6_flow_label[0x14]; + + u8 reserved_7[0xe0]; +}; + +struct mlx5_ifc_cmd_pas_bits { + u8 pa_h[0x20]; + + u8 pa_l[0x14]; + u8 reserved_0[0xc]; +}; + +struct mlx5_ifc_uint64_bits { + u8 hi[0x20]; + + u8 lo[0x20]; +}; + 
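+/*
+ * Usage sketch (illustrative only; assumes the existing MLX5_SET()
+ * accessor from linux/mlx5/device.h, mlx5_cmd_exec() from
+ * linux/mlx5/driver.h, and an mlx5_core_dev handle named "dev"):
+ * every mlx5_ifc_*_bits layout in this file describes a firmware
+ * structure bit by bit, so each u8 array length is a bit count
+ * (0x20 == 32 bits), not a byte count. Command mailboxes are built
+ * with the accessor macros rather than by direct member access,
+ * e.g. for the SET_ISSI command added above:
+ *
+ *	u32 in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
+ *	u32 out[MLX5_ST_SZ_DW(set_issi_out)] = {0};
+ *	int err;
+ *
+ *	MLX5_SET(set_issi_in, in, opcode, MLX5_CMD_OP_SET_ISSI);
+ *	MLX5_SET(set_issi_in, in, current_issi, 1);
+ *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ */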
+enum { + MLX5_ADS_STAT_RATE_NO_LIMIT = 0x0, + MLX5_ADS_STAT_RATE_2_5GBPS = 0x7, + MLX5_ADS_STAT_RATE_10GBPS = 0x8, + MLX5_ADS_STAT_RATE_30GBPS = 0x9, + MLX5_ADS_STAT_RATE_5GBPS = 0xa, + MLX5_ADS_STAT_RATE_20GBPS = 0xb, + MLX5_ADS_STAT_RATE_40GBPS = 0xc, + MLX5_ADS_STAT_RATE_60GBPS = 0xd, + MLX5_ADS_STAT_RATE_80GBPS = 0xe, + MLX5_ADS_STAT_RATE_120GBPS = 0xf, +}; + +struct mlx5_ifc_ads_bits { + u8 fl[0x1]; + u8 free_ar[0x1]; + u8 reserved_0[0xe]; + u8 pkey_index[0x10]; + + u8 reserved_1[0x8]; + u8 grh[0x1]; + u8 mlid[0x7]; + u8 rlid[0x10]; + + u8 ack_timeout[0x5]; + u8 reserved_2[0x3]; + u8 src_addr_index[0x8]; + u8 reserved_3[0x4]; + u8 stat_rate[0x4]; + u8 hop_limit[0x8]; + + u8 reserved_4[0x4]; + u8 tclass[0x8]; + u8 flow_label[0x14]; + + u8 rgid_rip[16][0x8]; + + u8 reserved_5[0x4]; + u8 f_dscp[0x1]; + u8 f_ecn[0x1]; + u8 reserved_6[0x1]; + u8 f_eth_prio[0x1]; + u8 ecn[0x2]; + u8 dscp[0x6]; + u8 udp_sport[0x10]; + + u8 dei_cfi[0x1]; + u8 eth_prio[0x3]; + u8 sl[0x4]; + u8 port[0x8]; + u8 rmac_47_32[0x10]; + + u8 rmac_31_0[0x20]; +}; + +struct mlx5_ifc_flow_table_nic_cap_bits { + u8 reserved_0[0x200]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; + + u8 reserved_1[0x200]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit; + + u8 reserved_2[0x200]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; + + u8 reserved_3[0x7200]; +}; + +struct mlx5_ifc_per_protocol_networking_offload_caps_bits { + u8 csum_cap[0x1]; + u8 vlan_cap[0x1]; + u8 lro_cap[0x1]; + u8 lro_psh_flag[0x1]; + u8 lro_time_stamp[0x1]; + u8 reserved_0[0x6]; + u8 max_lso_cap[0x5]; + u8 reserved_1[0x4]; + u8 rss_ind_tbl_cap[0x4]; + u8 reserved_2[0x3]; + u8 tunnel_lso_const_out_ip_id[0x1]; + u8 reserved_3[0x2]; + u8 tunnel_statless_gre[0x1]; + u8 tunnel_stateless_vxlan[0x1]; + + u8 reserved_4[0x20]; + + u8 reserved_5[0x10]; + u8 lro_min_mss_size[0x10]; + + u8 reserved_6[0x120]; + + u8 lro_timer_supported_periods[4][0x20]; + + u8 reserved_7[0x600]; +}; + +struct mlx5_ifc_roce_cap_bits { + u8 roce_apm[0x1]; + u8 reserved_0[0x1f]; + + u8 reserved_1[0x60]; + + u8 reserved_2[0xc]; + u8 l3_type[0x4]; + u8 reserved_3[0x8]; + u8 roce_version[0x8]; + + u8 reserved_4[0x10]; + u8 r_roce_dest_udp_port[0x10]; + + u8 r_roce_max_src_udp_port[0x10]; + u8 r_roce_min_src_udp_port[0x10]; + + u8 reserved_5[0x10]; + u8 roce_address_table_size[0x10]; + + u8 reserved_6[0x700]; +}; + +enum { + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES = 0x4, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES = 0x8, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES = 0x10, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES = 0x20, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES = 0x40, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES = 0x80, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES = 0x100, +}; + +enum { + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE = 0x1, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES = 0x2, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES = 0x4, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES = 0x8, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES = 0x10, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES = 0x20, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES = 0x40, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES = 0x80, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES = 0x100, +}; + +struct mlx5_ifc_atomic_caps_bits { + u8 
reserved_0[0x40]; + + u8 atomic_req_endianness[0x1]; + u8 reserved_1[0x1f]; + + u8 reserved_2[0x20]; + + u8 reserved_3[0x10]; + u8 atomic_operations[0x10]; + + u8 reserved_4[0x10]; + u8 atomic_size_qp[0x10]; + + u8 reserved_5[0x10]; + u8 atomic_size_dc[0x10]; + + u8 reserved_6[0x720]; +}; + +struct mlx5_ifc_odp_cap_bits { + u8 reserved_0[0x40]; + + u8 sig[0x1]; + u8 reserved_1[0x1f]; + + u8 reserved_2[0x20]; + + struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps; + + struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps; + + struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps; + + u8 reserved_3[0x720]; +}; + +enum { + MLX5_WQ_TYPE_LINKED_LIST = 0x0, + MLX5_WQ_TYPE_CYCLIC = 0x1, + MLX5_WQ_TYPE_STRQ = 0x2, +}; + +enum { + MLX5_WQ_END_PAD_MODE_NONE = 0x0, + MLX5_WQ_END_PAD_MODE_ALIGN = 0x1, +}; + +enum { + MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES = 0x0, + MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES = 0x1, + MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES = 0x2, + MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES = 0x3, + MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES = 0x4, +}; + +enum { + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES = 0x0, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES = 0x1, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES = 0x2, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES = 0x3, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES = 0x4, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES = 0x5, +}; + +enum { + MLX5_CMD_HCA_CAP_PORT_TYPE_IB = 0x0, + MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET = 0x1, +}; + +enum { + MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED = 0x0, + MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE = 0x1, + MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED = 0x3, +}; + +enum { + MLX5_CAP_PORT_TYPE_IB = 0x0, + MLX5_CAP_PORT_TYPE_ETH = 0x1, }; struct mlx5_ifc_cmd_hca_cap_bits { @@ -148,9 +611,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_1[0xb]; u8 log_max_qp[0x5]; - u8 log_max_strq_sz[0x8]; - u8 reserved_2[0x3]; - u8 log_max_srqs[0x5]; + u8 reserved_2[0xb]; + u8 log_max_srq[0x5]; u8 reserved_3[0x10]; u8 reserved_4[0x8]; @@ -185,165 +647,6137 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 pad_cap[0x1]; u8 cc_query_allowed[0x1]; u8 cc_modify_allowed[0x1]; - u8 reserved_15[0x1d]; + u8 reserved_15[0xd]; + u8 gid_table_size[0x10]; - u8 reserved_16[0x6]; + u8 out_of_seq_cnt[0x1]; + u8 vport_counters[0x1]; + u8 reserved_16[0x4]; u8 max_qp_cnt[0xa]; u8 pkey_table_size[0x10]; - u8 eswitch_owner[0x1]; - u8 reserved_17[0xa]; + u8 vport_group_manager[0x1]; + u8 vhca_group_manager[0x1]; + u8 ib_virt[0x1]; + u8 eth_virt[0x1]; + u8 reserved_17[0x1]; + u8 ets[0x1]; + u8 nic_flow_table[0x1]; + u8 reserved_18[0x4]; u8 local_ca_ack_delay[0x5]; - u8 reserved_18[0x8]; + u8 reserved_19[0x6]; + u8 port_type[0x2]; u8 num_ports[0x8]; - u8 reserved_19[0x3]; + u8 reserved_20[0x3]; u8 log_max_msg[0x5]; - u8 reserved_20[0x18]; + u8 reserved_21[0x18]; u8 stat_rate_support[0x10]; - u8 reserved_21[0x10]; + u8 reserved_22[0xc]; + u8 cqe_version[0x4]; - u8 reserved_22[0x10]; + u8 compact_address_vector[0x1]; + u8 reserved_23[0xe]; + u8 drain_sigerr[0x1]; u8 cmdif_checksum[0x2]; u8 sigerr_cqe[0x1]; - u8 reserved_23[0x1]; + u8 reserved_24[0x1]; u8 wq_signature[0x1]; u8 sctr_data_cqe[0x1]; - u8 reserved_24[0x1]; + u8 reserved_25[0x1]; u8 sho[0x1]; u8 tph[0x1]; u8 rf[0x1]; - u8 dc[0x1]; - u8 reserved_25[0x2]; + u8 dct[0x1]; + u8 reserved_26[0x1]; + u8 eth_net_offloads[0x1]; u8 roce[0x1]; u8 atomic[0x1]; - u8 rsz_srq[0x1]; + u8 reserved_27[0x1]; u8 cq_oi[0x1]; u8 cq_resize[0x1]; u8 
cq_moderation[0x1]; - u8 sniffer_rule_flow[0x1]; - u8 sniffer_rule_vport[0x1]; - u8 sniffer_rule_phy[0x1]; - u8 reserved_26[0x1]; + u8 reserved_28[0x3]; + u8 cq_eq_remap[0x1]; u8 pg[0x1]; u8 block_lb_mc[0x1]; - u8 reserved_27[0x3]; + u8 reserved_29[0x1]; + u8 scqe_break_moderation[0x1]; + u8 reserved_30[0x1]; u8 cd[0x1]; - u8 reserved_28[0x1]; + u8 reserved_31[0x1]; u8 apm[0x1]; - u8 reserved_29[0x7]; + u8 reserved_32[0x7]; u8 qkv[0x1]; u8 pkv[0x1]; - u8 reserved_30[0x4]; + u8 reserved_33[0x4]; u8 xrc[0x1]; u8 ud[0x1]; u8 uc[0x1]; u8 rc[0x1]; - u8 reserved_31[0xa]; + u8 reserved_34[0xa]; u8 uar_sz[0x6]; - u8 reserved_32[0x8]; + u8 reserved_35[0x8]; u8 log_pg_sz[0x8]; u8 bf[0x1]; - u8 reserved_33[0xa]; + u8 reserved_36[0x1]; + u8 pad_tx_eth_packet[0x1]; + u8 reserved_37[0x8]; u8 log_bf_reg_size[0x5]; - u8 reserved_34[0x10]; + u8 reserved_38[0x10]; - u8 reserved_35[0x10]; + u8 reserved_39[0x10]; u8 max_wqe_sz_sq[0x10]; - u8 reserved_36[0x10]; + u8 reserved_40[0x10]; u8 max_wqe_sz_rq[0x10]; - u8 reserved_37[0x10]; + u8 reserved_41[0x10]; u8 max_wqe_sz_sq_dc[0x10]; - u8 reserved_38[0x7]; + u8 reserved_42[0x7]; u8 max_qp_mcg[0x19]; - u8 reserved_39[0x18]; + u8 reserved_43[0x18]; u8 log_max_mcg[0x8]; - u8 reserved_40[0xb]; + u8 reserved_44[0x3]; + u8 log_max_transport_domain[0x5]; + u8 reserved_45[0x3]; u8 log_max_pd[0x5]; - u8 reserved_41[0xb]; + u8 reserved_46[0xb]; u8 log_max_xrcd[0x5]; - u8 reserved_42[0x20]; + u8 reserved_47[0x20]; - u8 reserved_43[0x3]; + u8 reserved_48[0x3]; u8 log_max_rq[0x5]; - u8 reserved_44[0x3]; + u8 reserved_49[0x3]; u8 log_max_sq[0x5]; - u8 reserved_45[0x3]; + u8 reserved_50[0x3]; u8 log_max_tir[0x5]; - u8 reserved_46[0x3]; + u8 reserved_51[0x3]; u8 log_max_tis[0x5]; - u8 reserved_47[0x13]; - u8 log_max_rq_per_tir[0x5]; - u8 reserved_48[0x3]; + u8 basic_cyclic_rcv_wqe[0x1]; + u8 reserved_52[0x2]; + u8 log_max_rmp[0x5]; + u8 reserved_53[0x3]; + u8 log_max_rqt[0x5]; + u8 reserved_54[0x3]; + u8 log_max_rqt_size[0x5]; + u8 reserved_55[0x3]; u8 log_max_tis_per_sq[0x5]; - u8 reserved_49[0xe0]; + u8 reserved_56[0x3]; + u8 log_max_stride_sz_rq[0x5]; + u8 reserved_57[0x3]; + u8 log_min_stride_sz_rq[0x5]; + u8 reserved_58[0x3]; + u8 log_max_stride_sz_sq[0x5]; + u8 reserved_59[0x3]; + u8 log_min_stride_sz_sq[0x5]; + + u8 reserved_60[0x1b]; + u8 log_max_wq_sz[0x5]; + + u8 reserved_61[0xa0]; - u8 reserved_50[0x10]; + u8 reserved_62[0x3]; + u8 log_max_l2_table[0x5]; + u8 reserved_63[0x8]; u8 log_uar_page_sz[0x10]; - u8 reserved_51[0x100]; + u8 reserved_64[0x100]; - u8 reserved_52[0x1f]; + u8 reserved_65[0x1f]; u8 cqe_zip[0x1]; u8 cqe_zip_timeout[0x10]; u8 cqe_zip_max_num[0x10]; - u8 reserved_53[0x220]; + u8 reserved_66[0x220]; }; -struct mlx5_ifc_set_hca_cap_in_bits { - u8 opcode[0x10]; - u8 reserved_0[0x10]; +enum { + MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_ = 0x1, + MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR = 0x2, +}; - u8 reserved_1[0x10]; - u8 op_mod[0x10]; +struct mlx5_ifc_dest_format_struct_bits { + u8 destination_type[0x8]; + u8 destination_id[0x18]; - u8 reserved_2[0x40]; + u8 reserved_0[0x20]; +}; + +struct mlx5_ifc_fte_match_param_bits { + struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers; + + struct mlx5_ifc_fte_match_set_misc_bits misc_parameters; + + struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers; - struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct; + u8 reserved_0[0xa00]; }; -struct mlx5_ifc_query_hca_cap_in_bits { - u8 opcode[0x10]; - u8 reserved_0[0x10]; +enum { + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0, + 
MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1, + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2, + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3, + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4, +}; - u8 reserved_1[0x10]; - u8 op_mod[0x10]; +struct mlx5_ifc_rx_hash_field_select_bits { + u8 l3_prot_type[0x1]; + u8 l4_prot_type[0x1]; + u8 selected_fields[0x1e]; +}; - u8 reserved_2[0x40]; +enum { + MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST = 0x0, + MLX5_WQ_WQ_TYPE_WQ_CYCLIC = 0x1, }; -struct mlx5_ifc_query_hca_cap_out_bits { - u8 status[0x8]; +enum { + MLX5_WQ_END_PADDING_MODE_END_PAD_NONE = 0x0, + MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN = 0x1, +}; + +struct mlx5_ifc_wq_bits { + u8 wq_type[0x4]; + u8 wq_signature[0x1]; + u8 end_padding_mode[0x2]; + u8 cd_slave[0x1]; u8 reserved_0[0x18]; - u8 syndrome[0x20]; + u8 hds_skip_first_sge[0x1]; + u8 log2_hds_buf_size[0x3]; + u8 reserved_1[0x7]; + u8 page_offset[0x5]; + u8 lwm[0x10]; - u8 reserved_1[0x40]; + u8 reserved_2[0x8]; + u8 pd[0x18]; + + u8 reserved_3[0x8]; + u8 uar_page[0x18]; + + u8 dbr_addr[0x40]; + + u8 hw_counter[0x20]; + + u8 sw_counter[0x20]; + + u8 reserved_4[0xc]; + u8 log_wq_stride[0x4]; + u8 reserved_5[0x3]; + u8 log_wq_pg_sz[0x5]; + u8 reserved_6[0x3]; + u8 log_wq_sz[0x5]; + + u8 reserved_7[0x4e0]; - u8 capability_struct[256][0x8]; + struct mlx5_ifc_cmd_pas_bits pas[0]; }; -struct mlx5_ifc_set_hca_cap_out_bits { - u8 status[0x8]; - u8 reserved_0[0x18]; +struct mlx5_ifc_rq_num_bits { + u8 reserved_0[0x8]; + u8 rq_num[0x18]; +}; - u8 syndrome[0x20]; +struct mlx5_ifc_mac_address_layout_bits { + u8 reserved_0[0x10]; + u8 mac_addr_47_32[0x10]; - u8 reserved_1[0x40]; + u8 mac_addr_31_0[0x20]; +}; + +struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { + u8 reserved_0[0xa0]; + + u8 min_time_between_cnps[0x20]; + + u8 reserved_1[0x12]; + u8 cnp_dscp[0x6]; + u8 reserved_2[0x5]; + u8 cnp_802p_prio[0x3]; + + u8 reserved_3[0x720]; +}; + +struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits { + u8 reserved_0[0x60]; + + u8 reserved_1[0x4]; + u8 clamp_tgt_rate[0x1]; + u8 reserved_2[0x3]; + u8 clamp_tgt_rate_after_time_inc[0x1]; + u8 reserved_3[0x17]; + + u8 reserved_4[0x20]; + + u8 rpg_time_reset[0x20]; + + u8 rpg_byte_reset[0x20]; + + u8 rpg_threshold[0x20]; + + u8 rpg_max_rate[0x20]; + + u8 rpg_ai_rate[0x20]; + + u8 rpg_hai_rate[0x20]; + + u8 rpg_gd[0x20]; + + u8 rpg_min_dec_fac[0x20]; + + u8 rpg_min_rate[0x20]; + + u8 reserved_5[0xe0]; + + u8 rate_to_set_on_first_cnp[0x20]; + + u8 dce_tcp_g[0x20]; + + u8 dce_tcp_rtt[0x20]; + + u8 rate_reduce_monitor_period[0x20]; + + u8 reserved_6[0x20]; + + u8 initial_alpha_value[0x20]; + + u8 reserved_7[0x4a0]; +}; + +struct mlx5_ifc_cong_control_802_1qau_rp_bits { + u8 reserved_0[0x80]; + + u8 rppp_max_rps[0x20]; + + u8 rpg_time_reset[0x20]; + + u8 rpg_byte_reset[0x20]; + + u8 rpg_threshold[0x20]; + + u8 rpg_max_rate[0x20]; + + u8 rpg_ai_rate[0x20]; + + u8 rpg_hai_rate[0x20]; + + u8 rpg_gd[0x20]; + + u8 rpg_min_dec_fac[0x20]; + + u8 rpg_min_rate[0x20]; + + u8 reserved_1[0x640]; +}; + +enum { + MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE = 0x1, + MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET = 0x2, + MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE = 0x4, +}; + +struct mlx5_ifc_resize_field_select_bits { + u8 resize_field_select[0x20]; +}; + +enum { + MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1, + MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2, + MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI = 0x4, + 
MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN = 0x8, +}; + +struct mlx5_ifc_modify_field_select_bits { + u8 modify_field_select[0x20]; +}; + +struct mlx5_ifc_field_select_r_roce_np_bits { + u8 field_select_r_roce_np[0x20]; +}; + +struct mlx5_ifc_field_select_r_roce_rp_bits { + u8 field_select_r_roce_rp[0x20]; +}; + +enum { + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS = 0x4, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET = 0x8, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET = 0x10, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD = 0x20, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE = 0x40, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE = 0x80, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE = 0x100, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD = 0x200, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC = 0x400, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE = 0x800, +}; + +struct mlx5_ifc_field_select_802_1qau_rp_bits { + u8 field_select_8021qaurp[0x20]; +}; + +struct mlx5_ifc_phys_layer_cntrs_bits { + u8 time_since_last_clear_high[0x20]; + + u8 time_since_last_clear_low[0x20]; + + u8 symbol_errors_high[0x20]; + + u8 symbol_errors_low[0x20]; + + u8 sync_headers_errors_high[0x20]; + + u8 sync_headers_errors_low[0x20]; + + u8 edpl_bip_errors_lane0_high[0x20]; + + u8 edpl_bip_errors_lane0_low[0x20]; + + u8 edpl_bip_errors_lane1_high[0x20]; + + u8 edpl_bip_errors_lane1_low[0x20]; + + u8 edpl_bip_errors_lane2_high[0x20]; + + u8 edpl_bip_errors_lane2_low[0x20]; + + u8 edpl_bip_errors_lane3_high[0x20]; + + u8 edpl_bip_errors_lane3_low[0x20]; + + u8 fc_fec_corrected_blocks_lane0_high[0x20]; + + u8 fc_fec_corrected_blocks_lane0_low[0x20]; + + u8 fc_fec_corrected_blocks_lane1_high[0x20]; + + u8 fc_fec_corrected_blocks_lane1_low[0x20]; + + u8 fc_fec_corrected_blocks_lane2_high[0x20]; + + u8 fc_fec_corrected_blocks_lane2_low[0x20]; + + u8 fc_fec_corrected_blocks_lane3_high[0x20]; + + u8 fc_fec_corrected_blocks_lane3_low[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane0_high[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane0_low[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane1_high[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane1_low[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane2_high[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane2_low[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane3_high[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane3_low[0x20]; + + u8 rs_fec_corrected_blocks_high[0x20]; + + u8 rs_fec_corrected_blocks_low[0x20]; + + u8 rs_fec_uncorrectable_blocks_high[0x20]; + + u8 rs_fec_uncorrectable_blocks_low[0x20]; + + u8 rs_fec_no_errors_blocks_high[0x20]; + + u8 rs_fec_no_errors_blocks_low[0x20]; + + u8 rs_fec_single_error_blocks_high[0x20]; + + u8 rs_fec_single_error_blocks_low[0x20]; + + u8 rs_fec_corrected_symbols_total_high[0x20]; + + u8 rs_fec_corrected_symbols_total_low[0x20]; + + u8 rs_fec_corrected_symbols_lane0_high[0x20]; + + u8 rs_fec_corrected_symbols_lane0_low[0x20]; + + u8 rs_fec_corrected_symbols_lane1_high[0x20]; + + u8 rs_fec_corrected_symbols_lane1_low[0x20]; + + u8 rs_fec_corrected_symbols_lane2_high[0x20]; + + u8 rs_fec_corrected_symbols_lane2_low[0x20]; + + u8 rs_fec_corrected_symbols_lane3_high[0x20]; + + u8 rs_fec_corrected_symbols_lane3_low[0x20]; + + u8 link_down_events[0x20]; + + u8 successful_recovery_events[0x20]; + + u8 reserved_0[0x180]; +}; + +struct 
mlx5_ifc_eth_per_traffic_grp_data_layout_bits { + u8 transmit_queue_high[0x20]; + + u8 transmit_queue_low[0x20]; + + u8 reserved_0[0x780]; +}; + +struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { + u8 rx_octets_high[0x20]; + + u8 rx_octets_low[0x20]; + + u8 reserved_0[0xc0]; + + u8 rx_frames_high[0x20]; + + u8 rx_frames_low[0x20]; + + u8 tx_octets_high[0x20]; + + u8 tx_octets_low[0x20]; + + u8 reserved_1[0xc0]; + + u8 tx_frames_high[0x20]; + + u8 tx_frames_low[0x20]; + + u8 rx_pause_high[0x20]; + + u8 rx_pause_low[0x20]; + + u8 rx_pause_duration_high[0x20]; + + u8 rx_pause_duration_low[0x20]; + + u8 tx_pause_high[0x20]; + + u8 tx_pause_low[0x20]; + + u8 tx_pause_duration_high[0x20]; + + u8 tx_pause_duration_low[0x20]; + + u8 rx_pause_transition_high[0x20]; + + u8 rx_pause_transition_low[0x20]; + + u8 reserved_2[0x400]; +}; + +struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { + u8 port_transmit_wait_high[0x20]; + + u8 port_transmit_wait_low[0x20]; + + u8 reserved_0[0x780]; +}; + +struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { + u8 dot3stats_alignment_errors_high[0x20]; + + u8 dot3stats_alignment_errors_low[0x20]; + + u8 dot3stats_fcs_errors_high[0x20]; + + u8 dot3stats_fcs_errors_low[0x20]; + + u8 dot3stats_single_collision_frames_high[0x20]; + + u8 dot3stats_single_collision_frames_low[0x20]; + + u8 dot3stats_multiple_collision_frames_high[0x20]; + + u8 dot3stats_multiple_collision_frames_low[0x20]; + + u8 dot3stats_sqe_test_errors_high[0x20]; + + u8 dot3stats_sqe_test_errors_low[0x20]; + + u8 dot3stats_deferred_transmissions_high[0x20]; + + u8 dot3stats_deferred_transmissions_low[0x20]; + + u8 dot3stats_late_collisions_high[0x20]; + + u8 dot3stats_late_collisions_low[0x20]; + + u8 dot3stats_excessive_collisions_high[0x20]; + + u8 dot3stats_excessive_collisions_low[0x20]; + + u8 dot3stats_internal_mac_transmit_errors_high[0x20]; + + u8 dot3stats_internal_mac_transmit_errors_low[0x20]; + + u8 dot3stats_carrier_sense_errors_high[0x20]; + + u8 dot3stats_carrier_sense_errors_low[0x20]; + + u8 dot3stats_frame_too_longs_high[0x20]; + + u8 dot3stats_frame_too_longs_low[0x20]; + + u8 dot3stats_internal_mac_receive_errors_high[0x20]; + + u8 dot3stats_internal_mac_receive_errors_low[0x20]; + + u8 dot3stats_symbol_errors_high[0x20]; + + u8 dot3stats_symbol_errors_low[0x20]; + + u8 dot3control_in_unknown_opcodes_high[0x20]; + + u8 dot3control_in_unknown_opcodes_low[0x20]; + + u8 dot3in_pause_frames_high[0x20]; + + u8 dot3in_pause_frames_low[0x20]; + + u8 dot3out_pause_frames_high[0x20]; + + u8 dot3out_pause_frames_low[0x20]; + + u8 reserved_0[0x3c0]; +}; + +struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits { + u8 ether_stats_drop_events_high[0x20]; + + u8 ether_stats_drop_events_low[0x20]; + + u8 ether_stats_octets_high[0x20]; + + u8 ether_stats_octets_low[0x20]; + + u8 ether_stats_pkts_high[0x20]; + + u8 ether_stats_pkts_low[0x20]; + + u8 ether_stats_broadcast_pkts_high[0x20]; + + u8 ether_stats_broadcast_pkts_low[0x20]; + + u8 ether_stats_multicast_pkts_high[0x20]; + + u8 ether_stats_multicast_pkts_low[0x20]; + + u8 ether_stats_crc_align_errors_high[0x20]; + + u8 ether_stats_crc_align_errors_low[0x20]; + + u8 ether_stats_undersize_pkts_high[0x20]; + + u8 ether_stats_undersize_pkts_low[0x20]; + + u8 ether_stats_oversize_pkts_high[0x20]; + + u8 ether_stats_oversize_pkts_low[0x20]; + + u8 ether_stats_fragments_high[0x20]; + + u8 ether_stats_fragments_low[0x20]; + + u8 ether_stats_jabbers_high[0x20]; + + u8 ether_stats_jabbers_low[0x20]; + + u8 ether_stats_collisions_high[0x20]; + + 
u8 ether_stats_collisions_low[0x20]; + + u8 ether_stats_pkts64octets_high[0x20]; + + u8 ether_stats_pkts64octets_low[0x20]; + + u8 ether_stats_pkts65to127octets_high[0x20]; + + u8 ether_stats_pkts65to127octets_low[0x20]; + + u8 ether_stats_pkts128to255octets_high[0x20]; + + u8 ether_stats_pkts128to255octets_low[0x20]; + + u8 ether_stats_pkts256to511octets_high[0x20]; + + u8 ether_stats_pkts256to511octets_low[0x20]; + + u8 ether_stats_pkts512to1023octets_high[0x20]; + + u8 ether_stats_pkts512to1023octets_low[0x20]; + + u8 ether_stats_pkts1024to1518octets_high[0x20]; + + u8 ether_stats_pkts1024to1518octets_low[0x20]; + + u8 ether_stats_pkts1519to2047octets_high[0x20]; + + u8 ether_stats_pkts1519to2047octets_low[0x20]; + + u8 ether_stats_pkts2048to4095octets_high[0x20]; + + u8 ether_stats_pkts2048to4095octets_low[0x20]; + + u8 ether_stats_pkts4096to8191octets_high[0x20]; + + u8 ether_stats_pkts4096to8191octets_low[0x20]; + + u8 ether_stats_pkts8192to10239octets_high[0x20]; + + u8 ether_stats_pkts8192to10239octets_low[0x20]; + + u8 reserved_0[0x280]; +}; + +struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits { + u8 if_in_octets_high[0x20]; + + u8 if_in_octets_low[0x20]; + + u8 if_in_ucast_pkts_high[0x20]; + + u8 if_in_ucast_pkts_low[0x20]; + + u8 if_in_discards_high[0x20]; + + u8 if_in_discards_low[0x20]; + + u8 if_in_errors_high[0x20]; + + u8 if_in_errors_low[0x20]; + + u8 if_in_unknown_protos_high[0x20]; + + u8 if_in_unknown_protos_low[0x20]; + + u8 if_out_octets_high[0x20]; + + u8 if_out_octets_low[0x20]; + + u8 if_out_ucast_pkts_high[0x20]; + + u8 if_out_ucast_pkts_low[0x20]; + + u8 if_out_discards_high[0x20]; + + u8 if_out_discards_low[0x20]; + + u8 if_out_errors_high[0x20]; + + u8 if_out_errors_low[0x20]; + + u8 if_in_multicast_pkts_high[0x20]; + + u8 if_in_multicast_pkts_low[0x20]; + + u8 if_in_broadcast_pkts_high[0x20]; + + u8 if_in_broadcast_pkts_low[0x20]; + + u8 if_out_multicast_pkts_high[0x20]; + + u8 if_out_multicast_pkts_low[0x20]; + + u8 if_out_broadcast_pkts_high[0x20]; + + u8 if_out_broadcast_pkts_low[0x20]; + + u8 reserved_0[0x480]; +}; + +struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { + u8 a_frames_transmitted_ok_high[0x20]; + + u8 a_frames_transmitted_ok_low[0x20]; + + u8 a_frames_received_ok_high[0x20]; + + u8 a_frames_received_ok_low[0x20]; + + u8 a_frame_check_sequence_errors_high[0x20]; + + u8 a_frame_check_sequence_errors_low[0x20]; + + u8 a_alignment_errors_high[0x20]; + + u8 a_alignment_errors_low[0x20]; + + u8 a_octets_transmitted_ok_high[0x20]; + + u8 a_octets_transmitted_ok_low[0x20]; + + u8 a_octets_received_ok_high[0x20]; + + u8 a_octets_received_ok_low[0x20]; + + u8 a_multicast_frames_xmitted_ok_high[0x20]; + + u8 a_multicast_frames_xmitted_ok_low[0x20]; + + u8 a_broadcast_frames_xmitted_ok_high[0x20]; + + u8 a_broadcast_frames_xmitted_ok_low[0x20]; + + u8 a_multicast_frames_received_ok_high[0x20]; + + u8 a_multicast_frames_received_ok_low[0x20]; + + u8 a_broadcast_frames_received_ok_high[0x20]; + + u8 a_broadcast_frames_received_ok_low[0x20]; + + u8 a_in_range_length_errors_high[0x20]; + + u8 a_in_range_length_errors_low[0x20]; + + u8 a_out_of_range_length_field_high[0x20]; + + u8 a_out_of_range_length_field_low[0x20]; + + u8 a_frame_too_long_errors_high[0x20]; + + u8 a_frame_too_long_errors_low[0x20]; + + u8 a_symbol_error_during_carrier_high[0x20]; + + u8 a_symbol_error_during_carrier_low[0x20]; + + u8 a_mac_control_frames_transmitted_high[0x20]; + + u8 a_mac_control_frames_transmitted_low[0x20]; + + u8 a_mac_control_frames_received_high[0x20]; + + 
u8 a_mac_control_frames_received_low[0x20]; + + u8 a_unsupported_opcodes_received_high[0x20]; + + u8 a_unsupported_opcodes_received_low[0x20]; + + u8 a_pause_mac_ctrl_frames_received_high[0x20]; + + u8 a_pause_mac_ctrl_frames_received_low[0x20]; + + u8 a_pause_mac_ctrl_frames_transmitted_high[0x20]; + + u8 a_pause_mac_ctrl_frames_transmitted_low[0x20]; + + u8 reserved_0[0x300]; +}; + +struct mlx5_ifc_cmd_inter_comp_event_bits { + u8 command_completion_vector[0x20]; + + u8 reserved_0[0xc0]; +}; + +struct mlx5_ifc_stall_vl_event_bits { + u8 reserved_0[0x18]; + u8 port_num[0x1]; + u8 reserved_1[0x3]; + u8 vl[0x4]; + + u8 reserved_2[0xa0]; +}; + +struct mlx5_ifc_db_bf_congestion_event_bits { + u8 event_subtype[0x8]; + u8 reserved_0[0x8]; + u8 congestion_level[0x8]; + u8 reserved_1[0x8]; + + u8 reserved_2[0xa0]; +}; + +struct mlx5_ifc_gpio_event_bits { + u8 reserved_0[0x60]; + + u8 gpio_event_hi[0x20]; + + u8 gpio_event_lo[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_port_state_change_event_bits { + u8 reserved_0[0x40]; + + u8 port_num[0x4]; + u8 reserved_1[0x1c]; + + u8 reserved_2[0x80]; +}; + +struct mlx5_ifc_dropped_packet_logged_bits { + u8 reserved_0[0xe0]; +}; + +enum { + MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1, + MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2, +}; + +struct mlx5_ifc_cq_error_bits { + u8 reserved_0[0x8]; + u8 cqn[0x18]; + + u8 reserved_1[0x20]; + + u8 reserved_2[0x18]; + u8 syndrome[0x8]; + + u8 reserved_3[0x80]; +}; + +struct mlx5_ifc_rdma_page_fault_event_bits { + u8 bytes_committed[0x20]; + + u8 r_key[0x20]; + + u8 reserved_0[0x10]; + u8 packet_len[0x10]; + + u8 rdma_op_len[0x20]; + + u8 rdma_va[0x40]; + + u8 reserved_1[0x5]; + u8 rdma[0x1]; + u8 write[0x1]; + u8 requestor[0x1]; + u8 qp_number[0x18]; +}; + +struct mlx5_ifc_wqe_associated_page_fault_event_bits { + u8 bytes_committed[0x20]; + + u8 reserved_0[0x10]; + u8 wqe_index[0x10]; + + u8 reserved_1[0x10]; + u8 len[0x10]; + + u8 reserved_2[0x60]; + + u8 reserved_3[0x5]; + u8 rdma[0x1]; + u8 write_read[0x1]; + u8 requestor[0x1]; + u8 qpn[0x18]; +}; + +struct mlx5_ifc_qp_events_bits { + u8 reserved_0[0xa0]; + + u8 type[0x8]; + u8 reserved_1[0x18]; + + u8 reserved_2[0x8]; + u8 qpn_rqn_sqn[0x18]; +}; + +struct mlx5_ifc_dct_events_bits { + u8 reserved_0[0xc0]; + + u8 reserved_1[0x8]; + u8 dct_number[0x18]; +}; + +struct mlx5_ifc_comp_event_bits { + u8 reserved_0[0xc0]; + + u8 reserved_1[0x8]; + u8 cq_number[0x18]; +}; + +enum { + MLX5_QPC_STATE_RST = 0x0, + MLX5_QPC_STATE_INIT = 0x1, + MLX5_QPC_STATE_RTR = 0x2, + MLX5_QPC_STATE_RTS = 0x3, + MLX5_QPC_STATE_SQER = 0x4, + MLX5_QPC_STATE_ERR = 0x6, + MLX5_QPC_STATE_SQD = 0x7, + MLX5_QPC_STATE_SUSPENDED = 0x9, +}; + +enum { + MLX5_QPC_ST_RC = 0x0, + MLX5_QPC_ST_UC = 0x1, + MLX5_QPC_ST_UD = 0x2, + MLX5_QPC_ST_XRC = 0x3, + MLX5_QPC_ST_DCI = 0x5, + MLX5_QPC_ST_QP0 = 0x7, + MLX5_QPC_ST_QP1 = 0x8, + MLX5_QPC_ST_RAW_DATAGRAM = 0x9, + MLX5_QPC_ST_REG_UMR = 0xc, +}; + +enum { + MLX5_QPC_PM_STATE_ARMED = 0x0, + MLX5_QPC_PM_STATE_REARM = 0x1, + MLX5_QPC_PM_STATE_RESERVED = 0x2, + MLX5_QPC_PM_STATE_MIGRATED = 0x3, +}; + +enum { + MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0, + MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1, +}; + +enum { + MLX5_QPC_MTU_256_BYTES = 0x1, + MLX5_QPC_MTU_512_BYTES = 0x2, + MLX5_QPC_MTU_1K_BYTES = 0x3, + MLX5_QPC_MTU_2K_BYTES = 0x4, + MLX5_QPC_MTU_4K_BYTES = 0x5, + MLX5_QPC_MTU_RAW_ETHERNET_QP = 0x7, +}; + +enum { + MLX5_QPC_ATOMIC_MODE_IB_SPEC = 0x1, + MLX5_QPC_ATOMIC_MODE_ONLY_8B = 0x2, + MLX5_QPC_ATOMIC_MODE_UP_TO_8B = 0x3, 
+ MLX5_QPC_ATOMIC_MODE_UP_TO_16B = 0x4, + MLX5_QPC_ATOMIC_MODE_UP_TO_32B = 0x5, + MLX5_QPC_ATOMIC_MODE_UP_TO_64B = 0x6, + MLX5_QPC_ATOMIC_MODE_UP_TO_128B = 0x7, + MLX5_QPC_ATOMIC_MODE_UP_TO_256B = 0x8, +}; + +enum { + MLX5_QPC_CS_REQ_DISABLE = 0x0, + MLX5_QPC_CS_REQ_UP_TO_32B = 0x11, + MLX5_QPC_CS_REQ_UP_TO_64B = 0x22, +}; + +enum { + MLX5_QPC_CS_RES_DISABLE = 0x0, + MLX5_QPC_CS_RES_UP_TO_32B = 0x1, + MLX5_QPC_CS_RES_UP_TO_64B = 0x2, +}; + +struct mlx5_ifc_qpc_bits { + u8 state[0x4]; + u8 reserved_0[0x4]; + u8 st[0x8]; + u8 reserved_1[0x3]; + u8 pm_state[0x2]; + u8 reserved_2[0x7]; + u8 end_padding_mode[0x2]; + u8 reserved_3[0x2]; + + u8 wq_signature[0x1]; + u8 block_lb_mc[0x1]; + u8 atomic_like_write_en[0x1]; + u8 latency_sensitive[0x1]; + u8 reserved_4[0x1]; + u8 drain_sigerr[0x1]; + u8 reserved_5[0x2]; + u8 pd[0x18]; + + u8 mtu[0x3]; + u8 log_msg_max[0x5]; + u8 reserved_6[0x1]; + u8 log_rq_size[0x4]; + u8 log_rq_stride[0x3]; + u8 no_sq[0x1]; + u8 log_sq_size[0x4]; + u8 reserved_7[0x6]; + u8 rlky[0x1]; + u8 reserved_8[0x4]; + + u8 counter_set_id[0x8]; + u8 uar_page[0x18]; + + u8 reserved_9[0x8]; + u8 user_index[0x18]; + + u8 reserved_10[0x3]; + u8 log_page_size[0x5]; + u8 remote_qpn[0x18]; + + struct mlx5_ifc_ads_bits primary_address_path; + + struct mlx5_ifc_ads_bits secondary_address_path; + + u8 log_ack_req_freq[0x4]; + u8 reserved_11[0x4]; + u8 log_sra_max[0x3]; + u8 reserved_12[0x2]; + u8 retry_count[0x3]; + u8 rnr_retry[0x3]; + u8 reserved_13[0x1]; + u8 fre[0x1]; + u8 cur_rnr_retry[0x3]; + u8 cur_retry_count[0x3]; + u8 reserved_14[0x5]; + + u8 reserved_15[0x20]; + + u8 reserved_16[0x8]; + u8 next_send_psn[0x18]; + + u8 reserved_17[0x8]; + u8 cqn_snd[0x18]; + + u8 reserved_18[0x40]; + + u8 reserved_19[0x8]; + u8 last_acked_psn[0x18]; + + u8 reserved_20[0x8]; + u8 ssn[0x18]; + + u8 reserved_21[0x8]; + u8 log_rra_max[0x3]; + u8 reserved_22[0x1]; + u8 atomic_mode[0x4]; + u8 rre[0x1]; + u8 rwe[0x1]; + u8 rae[0x1]; + u8 reserved_23[0x1]; + u8 page_offset[0x6]; + u8 reserved_24[0x3]; + u8 cd_slave_receive[0x1]; + u8 cd_slave_send[0x1]; + u8 cd_master[0x1]; + + u8 reserved_25[0x3]; + u8 min_rnr_nak[0x5]; + u8 next_rcv_psn[0x18]; + + u8 reserved_26[0x8]; + u8 xrcd[0x18]; + + u8 reserved_27[0x8]; + u8 cqn_rcv[0x18]; + + u8 dbr_addr[0x40]; + + u8 q_key[0x20]; + + u8 reserved_28[0x5]; + u8 rq_type[0x3]; + u8 srqn_rmpn[0x18]; + + u8 reserved_29[0x8]; + u8 rmsn[0x18]; + + u8 hw_sq_wqebb_counter[0x10]; + u8 sw_sq_wqebb_counter[0x10]; + + u8 hw_rq_counter[0x20]; + + u8 sw_rq_counter[0x20]; + + u8 reserved_30[0x20]; + + u8 reserved_31[0xf]; + u8 cgs[0x1]; + u8 cs_req[0x8]; + u8 cs_res[0x8]; + + u8 dc_access_key[0x40]; + + u8 reserved_32[0xc0]; +}; + +struct mlx5_ifc_roce_addr_layout_bits { + u8 source_l3_address[16][0x8]; + + u8 reserved_0[0x3]; + u8 vlan_valid[0x1]; + u8 vlan_id[0xc]; + u8 source_mac_47_32[0x10]; + + u8 source_mac_31_0[0x20]; + + u8 reserved_1[0x14]; + u8 roce_l3_type[0x4]; + u8 roce_version[0x8]; + + u8 reserved_2[0x20]; +}; + +union mlx5_ifc_hca_cap_union_bits { + struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap; + struct mlx5_ifc_odp_cap_bits odp_cap; + struct mlx5_ifc_atomic_caps_bits atomic_caps; + struct mlx5_ifc_roce_cap_bits roce_cap; + struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps; + struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; + u8 reserved_0[0x8000]; +}; + +enum { + MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1, + MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, +}; + +struct 
mlx5_ifc_flow_context_bits { + u8 reserved_0[0x20]; + + u8 group_id[0x20]; + + u8 reserved_1[0x8]; + u8 flow_tag[0x18]; + + u8 reserved_2[0x10]; + u8 action[0x10]; + + u8 reserved_3[0x8]; + u8 destination_list_size[0x18]; + + u8 reserved_4[0x160]; + + struct mlx5_ifc_fte_match_param_bits match_value; + + u8 reserved_5[0x600]; + + struct mlx5_ifc_dest_format_struct_bits destination[0]; +}; + +enum { + MLX5_XRC_SRQC_STATE_GOOD = 0x0, + MLX5_XRC_SRQC_STATE_ERROR = 0x1, +}; + +struct mlx5_ifc_xrc_srqc_bits { + u8 state[0x4]; + u8 log_xrc_srq_size[0x4]; + u8 reserved_0[0x18]; + + u8 wq_signature[0x1]; + u8 cont_srq[0x1]; + u8 reserved_1[0x1]; + u8 rlky[0x1]; + u8 basic_cyclic_rcv_wqe[0x1]; + u8 log_rq_stride[0x3]; + u8 xrcd[0x18]; + + u8 page_offset[0x6]; + u8 reserved_2[0x2]; + u8 cqn[0x18]; + + u8 reserved_3[0x20]; + + u8 user_index_equal_xrc_srqn[0x1]; + u8 reserved_4[0x1]; + u8 log_page_size[0x6]; + u8 user_index[0x18]; + + u8 reserved_5[0x20]; + + u8 reserved_6[0x8]; + u8 pd[0x18]; + + u8 lwm[0x10]; + u8 wqe_cnt[0x10]; + + u8 reserved_7[0x40]; + + u8 db_record_addr_h[0x20]; + + u8 db_record_addr_l[0x1e]; + u8 reserved_8[0x2]; + + u8 reserved_9[0x80]; +}; + +struct mlx5_ifc_traffic_counter_bits { + u8 packets[0x40]; + + u8 octets[0x40]; +}; + +struct mlx5_ifc_tisc_bits { + u8 reserved_0[0xc]; + u8 prio[0x4]; + u8 reserved_1[0x10]; + + u8 reserved_2[0x100]; + + u8 reserved_3[0x8]; + u8 transport_domain[0x18]; + + u8 reserved_4[0x3c0]; +}; + +enum { + MLX5_TIRC_DISP_TYPE_DIRECT = 0x0, + MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1, +}; + +enum { + MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1, + MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2, +}; + +enum { + MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0, + MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1, + MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2, +}; + +enum { + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1, + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2, +}; + +struct mlx5_ifc_tirc_bits { + u8 reserved_0[0x20]; + + u8 disp_type[0x4]; + u8 reserved_1[0x1c]; + + u8 reserved_2[0x40]; + + u8 reserved_3[0x4]; + u8 lro_timeout_period_usecs[0x10]; + u8 lro_enable_mask[0x4]; + u8 lro_max_ip_payload_size[0x8]; + + u8 reserved_4[0x40]; + + u8 reserved_5[0x8]; + u8 inline_rqn[0x18]; + + u8 rx_hash_symmetric[0x1]; + u8 reserved_6[0x1]; + u8 tunneled_offload_en[0x1]; + u8 reserved_7[0x5]; + u8 indirect_table[0x18]; + + u8 rx_hash_fn[0x4]; + u8 reserved_8[0x2]; + u8 self_lb_block[0x2]; + u8 transport_domain[0x18]; + + u8 rx_hash_toeplitz_key[10][0x20]; + + struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer; + + struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner; + + u8 reserved_9[0x4c0]; +}; + +enum { + MLX5_SRQC_STATE_GOOD = 0x0, + MLX5_SRQC_STATE_ERROR = 0x1, +}; + +struct mlx5_ifc_srqc_bits { + u8 state[0x4]; + u8 log_srq_size[0x4]; + u8 reserved_0[0x18]; + + u8 wq_signature[0x1]; + u8 cont_srq[0x1]; + u8 reserved_1[0x1]; + u8 rlky[0x1]; + u8 reserved_2[0x1]; + u8 log_rq_stride[0x3]; + u8 xrcd[0x18]; + + u8 page_offset[0x6]; + u8 reserved_3[0x2]; + u8 cqn[0x18]; + + u8 reserved_4[0x20]; + + u8 reserved_5[0x2]; + u8 log_page_size[0x6]; + u8 reserved_6[0x18]; + + u8 reserved_7[0x20]; + + u8 reserved_8[0x8]; + u8 pd[0x18]; + + u8 lwm[0x10]; + u8 wqe_cnt[0x10]; + + u8 reserved_9[0x40]; + + u8 dbr_addr[0x40]; + + u8 reserved_10[0x80]; +}; + +enum { + MLX5_SQC_STATE_RST = 0x0, + MLX5_SQC_STATE_RDY = 0x1, + MLX5_SQC_STATE_ERR = 0x3, +}; + +struct mlx5_ifc_sqc_bits { + u8 rlky[0x1]; + u8 cd_master[0x1]; + u8 fre[0x1]; + u8 flush_in_error_en[0x1]; + 
u8 reserved_0[0x4]; + u8 state[0x4]; + u8 reserved_1[0x14]; + + u8 reserved_2[0x8]; + u8 user_index[0x18]; + + u8 reserved_3[0x8]; + u8 cqn[0x18]; + + u8 reserved_4[0xa0]; + + u8 tis_lst_sz[0x10]; + u8 reserved_5[0x10]; + + u8 reserved_6[0x40]; + + u8 reserved_7[0x8]; + u8 tis_num_0[0x18]; + + struct mlx5_ifc_wq_bits wq; +}; + +struct mlx5_ifc_rqtc_bits { + u8 reserved_0[0xa0]; + + u8 reserved_1[0x10]; + u8 rqt_max_size[0x10]; + + u8 reserved_2[0x10]; + u8 rqt_actual_size[0x10]; + + u8 reserved_3[0x6a0]; + + struct mlx5_ifc_rq_num_bits rq_num[0]; +}; + +enum { + MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, + MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1, +}; + +enum { + MLX5_RQC_STATE_RST = 0x0, + MLX5_RQC_STATE_RDY = 0x1, + MLX5_RQC_STATE_ERR = 0x3, +}; + +struct mlx5_ifc_rqc_bits { + u8 rlky[0x1]; + u8 reserved_0[0x2]; + u8 vsd[0x1]; + u8 mem_rq_type[0x4]; + u8 state[0x4]; + u8 reserved_1[0x1]; + u8 flush_in_error_en[0x1]; + u8 reserved_2[0x12]; + + u8 reserved_3[0x8]; + u8 user_index[0x18]; + + u8 reserved_4[0x8]; + u8 cqn[0x18]; + + u8 counter_set_id[0x8]; + u8 reserved_5[0x18]; + + u8 reserved_6[0x8]; + u8 rmpn[0x18]; + + u8 reserved_7[0xe0]; + + struct mlx5_ifc_wq_bits wq; +}; + +enum { + MLX5_RMPC_STATE_RDY = 0x1, + MLX5_RMPC_STATE_ERR = 0x3, +}; + +struct mlx5_ifc_rmpc_bits { + u8 reserved_0[0x8]; + u8 state[0x4]; + u8 reserved_1[0x14]; + + u8 basic_cyclic_rcv_wqe[0x1]; + u8 reserved_2[0x1f]; + + u8 reserved_3[0x140]; + + struct mlx5_ifc_wq_bits wq; +}; + +enum { + MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0, +}; + +struct mlx5_ifc_nic_vport_context_bits { + u8 reserved_0[0x1f]; + u8 roce_en[0x1]; + + u8 reserved_1[0x760]; + + u8 reserved_2[0x5]; + u8 allowed_list_type[0x3]; + u8 reserved_3[0xc]; + u8 allowed_list_size[0xc]; + + struct mlx5_ifc_mac_address_layout_bits permanent_address; + + u8 reserved_4[0x20]; + + u8 current_uc_mac_address[0][0x40]; +}; + +enum { + MLX5_MKC_ACCESS_MODE_PA = 0x0, + MLX5_MKC_ACCESS_MODE_MTT = 0x1, + MLX5_MKC_ACCESS_MODE_KLMS = 0x2, +}; + +struct mlx5_ifc_mkc_bits { + u8 reserved_0[0x1]; + u8 free[0x1]; + u8 reserved_1[0xd]; + u8 small_fence_on_rdma_read_response[0x1]; + u8 umr_en[0x1]; + u8 a[0x1]; + u8 rw[0x1]; + u8 rr[0x1]; + u8 lw[0x1]; + u8 lr[0x1]; + u8 access_mode[0x2]; + u8 reserved_2[0x8]; + + u8 qpn[0x18]; + u8 mkey_7_0[0x8]; + + u8 reserved_3[0x20]; + + u8 length64[0x1]; + u8 bsf_en[0x1]; + u8 sync_umr[0x1]; + u8 reserved_4[0x2]; + u8 expected_sigerr_count[0x1]; + u8 reserved_5[0x1]; + u8 en_rinval[0x1]; + u8 pd[0x18]; + + u8 start_addr[0x40]; + + u8 len[0x40]; + + u8 bsf_octword_size[0x20]; + + u8 reserved_6[0x80]; + + u8 translations_octword_size[0x20]; + + u8 reserved_7[0x1b]; + u8 log_page_size[0x5]; + + u8 reserved_8[0x20]; +}; + +struct mlx5_ifc_pkey_bits { + u8 reserved_0[0x10]; + u8 pkey[0x10]; +}; + +struct mlx5_ifc_array128_auto_bits { + u8 array128_auto[16][0x8]; +}; + +struct mlx5_ifc_hca_vport_context_bits { + u8 field_select[0x20]; + + u8 reserved_0[0xe0]; + + u8 sm_virt_aware[0x1]; + u8 has_smi[0x1]; + u8 has_raw[0x1]; + u8 grh_required[0x1]; + u8 reserved_1[0xc]; + u8 port_physical_state[0x4]; + u8 vport_state_policy[0x4]; + u8 port_state[0x4]; + u8 vport_state[0x4]; + + u8 reserved_2[0x20]; + + u8 system_image_guid[0x40]; + + u8 port_guid[0x40]; + + u8 node_guid[0x40]; + + u8 cap_mask1[0x20]; + + u8 cap_mask1_field_select[0x20]; + + u8 cap_mask2[0x20]; + + u8 cap_mask2_field_select[0x20]; + + u8 reserved_3[0x80]; + + u8 lid[0x10]; + u8 reserved_4[0x4]; + u8 init_type_reply[0x4]; + u8 lmc[0x3]; + u8 
subnet_timeout[0x5]; + + u8 sm_lid[0x10]; + u8 sm_sl[0x4]; + u8 reserved_5[0xc]; + + u8 qkey_violation_counter[0x10]; + u8 pkey_violation_counter[0x10]; + + u8 reserved_6[0xca0]; +}; + +enum { + MLX5_EQC_STATUS_OK = 0x0, + MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa, +}; + +enum { + MLX5_EQC_ST_ARMED = 0x9, + MLX5_EQC_ST_FIRED = 0xa, +}; + +struct mlx5_ifc_eqc_bits { + u8 status[0x4]; + u8 reserved_0[0x9]; + u8 ec[0x1]; + u8 oi[0x1]; + u8 reserved_1[0x5]; + u8 st[0x4]; + u8 reserved_2[0x8]; + + u8 reserved_3[0x20]; + + u8 reserved_4[0x14]; + u8 page_offset[0x6]; + u8 reserved_5[0x6]; + + u8 reserved_6[0x3]; + u8 log_eq_size[0x5]; + u8 uar_page[0x18]; + + u8 reserved_7[0x20]; + + u8 reserved_8[0x18]; + u8 intr[0x8]; + + u8 reserved_9[0x3]; + u8 log_page_size[0x5]; + u8 reserved_10[0x18]; + + u8 reserved_11[0x60]; + + u8 reserved_12[0x8]; + u8 consumer_counter[0x18]; + + u8 reserved_13[0x8]; + u8 producer_counter[0x18]; + + u8 reserved_14[0x80]; +}; + +enum { + MLX5_DCTC_STATE_ACTIVE = 0x0, + MLX5_DCTC_STATE_DRAINING = 0x1, + MLX5_DCTC_STATE_DRAINED = 0x2, +}; + +enum { + MLX5_DCTC_CS_RES_DISABLE = 0x0, + MLX5_DCTC_CS_RES_NA = 0x1, + MLX5_DCTC_CS_RES_UP_TO_64B = 0x2, +}; + +enum { + MLX5_DCTC_MTU_256_BYTES = 0x1, + MLX5_DCTC_MTU_512_BYTES = 0x2, + MLX5_DCTC_MTU_1K_BYTES = 0x3, + MLX5_DCTC_MTU_2K_BYTES = 0x4, + MLX5_DCTC_MTU_4K_BYTES = 0x5, +}; + +struct mlx5_ifc_dctc_bits { + u8 reserved_0[0x4]; + u8 state[0x4]; + u8 reserved_1[0x18]; + + u8 reserved_2[0x8]; + u8 user_index[0x18]; + + u8 reserved_3[0x8]; + u8 cqn[0x18]; + + u8 counter_set_id[0x8]; + u8 atomic_mode[0x4]; + u8 rre[0x1]; + u8 rwe[0x1]; + u8 rae[0x1]; + u8 atomic_like_write_en[0x1]; + u8 latency_sensitive[0x1]; + u8 rlky[0x1]; + u8 free_ar[0x1]; + u8 reserved_4[0xd]; + + u8 reserved_5[0x8]; + u8 cs_res[0x8]; + u8 reserved_6[0x3]; + u8 min_rnr_nak[0x5]; + u8 reserved_7[0x8]; + + u8 reserved_8[0x8]; + u8 srqn[0x18]; + + u8 reserved_9[0x8]; + u8 pd[0x18]; + + u8 tclass[0x8]; + u8 reserved_10[0x4]; + u8 flow_label[0x14]; + + u8 dc_access_key[0x40]; + + u8 reserved_11[0x5]; + u8 mtu[0x3]; + u8 port[0x8]; + u8 pkey_index[0x10]; + + u8 reserved_12[0x8]; + u8 my_addr_index[0x8]; + u8 reserved_13[0x8]; + u8 hop_limit[0x8]; + + u8 dc_access_key_violation_count[0x20]; + + u8 reserved_14[0x14]; + u8 dei_cfi[0x1]; + u8 eth_prio[0x3]; + u8 ecn[0x2]; + u8 dscp[0x6]; + + u8 reserved_15[0x40]; +}; + +enum { + MLX5_CQC_STATUS_OK = 0x0, + MLX5_CQC_STATUS_CQ_OVERFLOW = 0x9, + MLX5_CQC_STATUS_CQ_WRITE_FAIL = 0xa, +}; + +enum { + MLX5_CQC_CQE_SZ_64_BYTES = 0x0, + MLX5_CQC_CQE_SZ_128_BYTES = 0x1, +}; + +enum { + MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED = 0x6, + MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED = 0x9, + MLX5_CQC_ST_FIRED = 0xa, +}; + +struct mlx5_ifc_cqc_bits { + u8 status[0x4]; + u8 reserved_0[0x4]; + u8 cqe_sz[0x3]; + u8 cc[0x1]; + u8 reserved_1[0x1]; + u8 scqe_break_moderation_en[0x1]; + u8 oi[0x1]; + u8 reserved_2[0x2]; + u8 cqe_zip_en[0x1]; + u8 mini_cqe_res_format[0x2]; + u8 st[0x4]; + u8 reserved_3[0x8]; + + u8 reserved_4[0x20]; + + u8 reserved_5[0x14]; + u8 page_offset[0x6]; + u8 reserved_6[0x6]; + + u8 reserved_7[0x3]; + u8 log_cq_size[0x5]; + u8 uar_page[0x18]; + + u8 reserved_8[0x4]; + u8 cq_period[0xc]; + u8 cq_max_count[0x10]; + + u8 reserved_9[0x18]; + u8 c_eqn[0x8]; + + u8 reserved_10[0x3]; + u8 log_page_size[0x5]; + u8 reserved_11[0x18]; + + u8 reserved_12[0x20]; + + u8 reserved_13[0x8]; + u8 last_notified_index[0x18]; + + u8 reserved_14[0x8]; + u8 last_solicit_index[0x18]; + + u8 reserved_15[0x8]; + u8 consumer_counter[0x18]; 
+ + u8 reserved_16[0x8]; + u8 producer_counter[0x18]; + + u8 reserved_17[0x40]; + + u8 dbr_addr[0x40]; +}; + +union mlx5_ifc_cong_control_roce_ecn_auto_bits { + struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp; + struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp; + struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np; + u8 reserved_0[0x800]; +}; + +struct mlx5_ifc_query_adapter_param_block_bits { + u8 reserved_0[0xc0]; + + u8 reserved_1[0x8]; + u8 ieee_vendor_id[0x18]; + + u8 reserved_2[0x10]; + u8 vsd_vendor_id[0x10]; + + u8 vsd[208][0x8]; + + u8 vsd_contd_psid[16][0x8]; +}; + +union mlx5_ifc_modify_field_select_resize_field_select_auto_bits { + struct mlx5_ifc_modify_field_select_bits modify_field_select; + struct mlx5_ifc_resize_field_select_bits resize_field_select; + u8 reserved_0[0x20]; +}; + +union mlx5_ifc_field_select_802_1_r_roce_auto_bits { + struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp; + struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp; + struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np; + u8 reserved_0[0x20]; +}; + +union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { + struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; + struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout; + struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; + struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; + struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; + struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; + struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; + struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; + u8 reserved_0[0x7c0]; +}; + +union mlx5_ifc_event_auto_bits { + struct mlx5_ifc_comp_event_bits comp_event; + struct mlx5_ifc_dct_events_bits dct_events; + struct mlx5_ifc_qp_events_bits qp_events; + struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event; + struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event; + struct mlx5_ifc_cq_error_bits cq_error; + struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged; + struct mlx5_ifc_port_state_change_event_bits port_state_change_event; + struct mlx5_ifc_gpio_event_bits gpio_event; + struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event; + struct mlx5_ifc_stall_vl_event_bits stall_vl_event; + struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event; + u8 reserved_0[0xe0]; +}; + +struct mlx5_ifc_health_buffer_bits { + u8 reserved_0[0x100]; + + u8 assert_existptr[0x20]; + + u8 assert_callra[0x20]; + + u8 reserved_1[0x40]; + + u8 fw_version[0x20]; + + u8 hw_id[0x20]; + + u8 reserved_2[0x20]; + + u8 irisc_index[0x8]; + u8 synd[0x8]; + u8 ext_synd[0x10]; +}; + +struct mlx5_ifc_register_loopback_control_bits { + u8 no_lb[0x1]; + u8 reserved_0[0x7]; + u8 port[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0x60]; +}; + +struct mlx5_ifc_teardown_hca_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +enum { + MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0, + MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1, +}; + +struct mlx5_ifc_teardown_hca_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 
profile[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_sqerr2rts_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_sqerr2rts_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_4[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_sqd2rts_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_sqd2rts_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_4[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_set_roce_address_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_roce_address_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 roce_address_index[0x10]; + u8 reserved_2[0x10]; + + u8 reserved_3[0x20]; + + struct mlx5_ifc_roce_addr_layout_bits roce_address; +}; + +struct mlx5_ifc_set_mad_demux_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +enum { + MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL = 0x0, + MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE = 0x2, +}; + +struct mlx5_ifc_set_mad_demux_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x20]; + + u8 reserved_3[0x6]; + u8 demux_mode[0x2]; + u8 reserved_4[0x18]; +}; + +struct mlx5_ifc_set_l2_table_entry_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_l2_table_entry_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x60]; + + u8 reserved_3[0x8]; + u8 table_index[0x18]; + + u8 reserved_4[0x20]; + + u8 reserved_5[0x13]; + u8 vlan_valid[0x1]; + u8 vlan[0xc]; + + struct mlx5_ifc_mac_address_layout_bits mac_address; + + u8 reserved_6[0xc0]; +}; + +struct mlx5_ifc_set_issi_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_issi_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 current_issi[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_set_hca_cap_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_hca_cap_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + union mlx5_ifc_hca_cap_union_bits capability; +}; + +struct mlx5_ifc_set_fte_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_fte_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 reserved_5[0x40]; + + u8 flow_index[0x20]; + + u8 reserved_6[0xe0]; + + struct mlx5_ifc_flow_context_bits flow_context; +}; + +struct 
mlx5_ifc_rts2rts_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_rts2rts_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_4[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_rtr2rts_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_rtr2rts_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_4[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_rst2init_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_rst2init_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_4[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_query_xrc_srq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; + + u8 reserved_2[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_xrc_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 xrc_srqn[0x18]; + + u8 reserved_3[0x20]; +}; + +enum { + MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN = 0x0, + MLX5_QUERY_VPORT_STATE_OUT_STATE_UP = 0x1, +}; + +struct mlx5_ifc_query_vport_state_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x20]; + + u8 reserved_2[0x18]; + u8 admin_state[0x4]; + u8 state[0x4]; +}; + +enum { + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0, +}; + +struct mlx5_ifc_query_vport_state_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_vport_counter_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_traffic_counter_bits received_errors; + + struct mlx5_ifc_traffic_counter_bits transmit_errors; + + struct mlx5_ifc_traffic_counter_bits received_ib_unicast; + + struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast; + + struct mlx5_ifc_traffic_counter_bits received_ib_multicast; + + struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast; + + struct mlx5_ifc_traffic_counter_bits received_eth_broadcast; + + struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast; + + struct mlx5_ifc_traffic_counter_bits received_eth_unicast; + + struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast; + + struct mlx5_ifc_traffic_counter_bits received_eth_multicast; + + struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast; + + u8 reserved_2[0xa00]; +}; + +enum { + MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS = 0x0, +}; + +struct mlx5_ifc_query_vport_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 
reserved_2[0xf]; + u8 vport_number[0x10]; + + u8 reserved_3[0x60]; + + u8 clear[0x1]; + u8 reserved_4[0x1f]; + + u8 reserved_5[0x20]; +}; + +struct mlx5_ifc_query_tis_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_tisc_bits tis_context; +}; + +struct mlx5_ifc_query_tis_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 tisn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_tir_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0xc0]; + + struct mlx5_ifc_tirc_bits tir_context; +}; + +struct mlx5_ifc_query_tir_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 tirn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_srq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_srqc_bits srq_context_entry; + + u8 reserved_2[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 srqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_sq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0xc0]; + + struct mlx5_ifc_sqc_bits sq_context; +}; + +struct mlx5_ifc_query_sq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 sqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_special_contexts_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x20]; + + u8 resd_lkey[0x20]; +}; + +struct mlx5_ifc_query_special_contexts_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_query_rqt_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0xc0]; + + struct mlx5_ifc_rqtc_bits rqt_context; +}; + +struct mlx5_ifc_query_rqt_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 rqtn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_rq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0xc0]; + + struct mlx5_ifc_rqc_bits rq_context; +}; + +struct mlx5_ifc_query_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 rqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_roce_address_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_roce_addr_layout_bits roce_address; +}; + +struct mlx5_ifc_query_roce_address_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 roce_address_index[0x10]; + u8 reserved_2[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_rmp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0xc0]; + + struct mlx5_ifc_rmpc_bits rmp_context; +}; + +struct mlx5_ifc_query_rmp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 rmpn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct 
mlx5_ifc_query_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 opt_param_mask[0x20]; + + u8 reserved_2[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_3[0x80]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_q_counter_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 rx_write_requests[0x20]; + + u8 reserved_2[0x20]; + + u8 rx_read_requests[0x20]; + + u8 reserved_3[0x20]; + + u8 rx_atomic_requests[0x20]; + + u8 reserved_4[0x20]; + + u8 rx_dct_connect[0x20]; + + u8 reserved_5[0x20]; + + u8 out_of_buffer[0x20]; + + u8 reserved_6[0x20]; + + u8 out_of_sequence[0x20]; + + u8 reserved_7[0x620]; +}; + +struct mlx5_ifc_query_q_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x80]; + + u8 clear[0x1]; + u8 reserved_3[0x1f]; + + u8 reserved_4[0x18]; + u8 counter_set_id[0x8]; +}; + +struct mlx5_ifc_query_pages_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x10]; + u8 function_id[0x10]; + + u8 num_pages[0x20]; +}; + +enum { + MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES = 0x1, + MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES = 0x2, + MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES = 0x3, +}; + +struct mlx5_ifc_query_pages_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 function_id[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_nic_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_nic_vport_context_bits nic_vport_context; +}; + +struct mlx5_ifc_query_nic_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + u8 reserved_3[0x5]; + u8 allowed_list_type[0x3]; + u8 reserved_4[0x18]; +}; + +struct mlx5_ifc_query_mkey_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_mkc_bits memory_key_mkey_entry; + + u8 reserved_2[0x600]; + + u8 bsf0_klm0_pas_mtt0_1[16][0x8]; + + u8 bsf1_klm1_pas_mtt2_3[16][0x8]; +}; + +struct mlx5_ifc_query_mkey_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 mkey_index[0x18]; + + u8 pg_access[0x1]; + u8 reserved_3[0x1f]; +}; + +struct mlx5_ifc_query_mad_demux_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 mad_dumux_parameters_block[0x20]; +}; + +struct mlx5_ifc_query_mad_demux_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_query_l2_table_entry_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0xa0]; + + u8 reserved_2[0x13]; + u8 vlan_valid[0x1]; + u8 vlan[0xc]; + + struct mlx5_ifc_mac_address_layout_bits mac_address; + + u8 reserved_3[0xc0]; +}; + +struct mlx5_ifc_query_l2_table_entry_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x60]; + + u8 reserved_3[0x8]; + u8 
table_index[0x18]; + + u8 reserved_4[0x140]; +}; + +struct mlx5_ifc_query_issi_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x10]; + u8 current_issi[0x10]; + + u8 reserved_2[0xa0]; + + u8 supported_issi_reserved[76][0x8]; + u8 supported_issi_dw0[0x20]; +}; + +struct mlx5_ifc_query_issi_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_query_hca_vport_pkey_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_pkey_bits pkey[0]; +}; + +struct mlx5_ifc_query_hca_vport_pkey_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xb]; + u8 port_num[0x4]; + u8 vport_number[0x10]; + + u8 reserved_3[0x10]; + u8 pkey_index[0x10]; +}; + +struct mlx5_ifc_query_hca_vport_gid_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x20]; + + u8 gids_num[0x10]; + u8 reserved_2[0x10]; + + struct mlx5_ifc_array128_auto_bits gid[0]; +}; + +struct mlx5_ifc_query_hca_vport_gid_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xb]; + u8 port_num[0x4]; + u8 vport_number[0x10]; + + u8 reserved_3[0x10]; + u8 gid_index[0x10]; +}; + +struct mlx5_ifc_query_hca_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_hca_vport_context_bits hca_vport_context; +}; + +struct mlx5_ifc_query_hca_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xb]; + u8 port_num[0x4]; + u8 vport_number[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_hca_cap_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + union mlx5_ifc_hca_cap_union_bits capability; +}; + +struct mlx5_ifc_query_hca_cap_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_query_flow_table_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x80]; + + u8 reserved_2[0x8]; + u8 level[0x8]; + u8 reserved_3[0x8]; + u8 log_size[0x8]; + + u8 reserved_4[0x120]; +}; + +struct mlx5_ifc_query_flow_table_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 reserved_5[0x140]; +}; + +struct mlx5_ifc_query_fte_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x1c0]; + + struct mlx5_ifc_flow_context_bits flow_context; +}; + +struct mlx5_ifc_query_fte_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 reserved_5[0x40]; + + u8 flow_index[0x20]; + + u8 reserved_6[0xe0]; +}; + +enum { + MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, + MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, + MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, +}; + +struct 
mlx5_ifc_query_flow_group_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0xa0]; + + u8 start_flow_index[0x20]; + + u8 reserved_2[0x20]; + + u8 end_flow_index[0x20]; + + u8 reserved_3[0xa0]; + + u8 reserved_4[0x18]; + u8 match_criteria_enable[0x8]; + + struct mlx5_ifc_fte_match_param_bits match_criteria; + + u8 reserved_5[0xe00]; +}; + +struct mlx5_ifc_query_flow_group_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 group_id[0x20]; + + u8 reserved_5[0x120]; +}; + +struct mlx5_ifc_query_eq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_eqc_bits eq_context_entry; + + u8 reserved_2[0x40]; + + u8 event_bitmask[0x40]; + + u8 reserved_3[0x580]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_eq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x18]; + u8 eq_number[0x8]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_dct_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_dctc_bits dct_context_entry; + + u8 reserved_2[0x180]; +}; + +struct mlx5_ifc_query_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 dctn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_cq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_cqc_bits cq_context; + + u8 reserved_2[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_cq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 cqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_cong_status_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x20]; + + u8 enable[0x1]; + u8 tag_enable[0x1]; + u8 reserved_2[0x1e]; +}; + +struct mlx5_ifc_query_cong_status_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x18]; + u8 priority[0x4]; + u8 cong_protocol[0x4]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_cong_statistics_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 cur_flows[0x20]; + + u8 sum_flows[0x20]; + + u8 cnp_ignored_high[0x20]; + + u8 cnp_ignored_low[0x20]; + + u8 cnp_handled_high[0x20]; + + u8 cnp_handled_low[0x20]; + + u8 reserved_2[0x100]; + + u8 time_stamp_high[0x20]; + + u8 time_stamp_low[0x20]; + + u8 accumulators_period[0x20]; + + u8 ecn_marked_roce_packets_high[0x20]; + + u8 ecn_marked_roce_packets_low[0x20]; + + u8 cnps_sent_high[0x20]; + + u8 cnps_sent_low[0x20]; + + u8 reserved_3[0x560]; +}; + +struct mlx5_ifc_query_cong_statistics_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 clear[0x1]; + u8 reserved_2[0x1f]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_cong_params_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; +}; + +struct mlx5_ifc_query_cong_params_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; 
+ u8 op_mod[0x10]; + + u8 reserved_2[0x1c]; + u8 cong_protocol[0x4]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_adapter_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct; +}; + +struct mlx5_ifc_query_adapter_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_qp_2rst_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_qp_2rst_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_qp_2err_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_qp_2err_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_page_fault_resume_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_page_fault_resume_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 error[0x1]; + u8 reserved_2[0x4]; + u8 rdma[0x1]; + u8 read_write[0x1]; + u8 req_res[0x1]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_nop_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_nop_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_modify_vport_state_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_vport_state_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + u8 reserved_3[0x18]; + u8 admin_state[0x4]; + u8 reserved_4[0x4]; +}; + +struct mlx5_ifc_modify_tis_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_tis_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 tisn[0x18]; + + u8 reserved_3[0x20]; + + u8 modify_bitmask[0x40]; + + u8 reserved_4[0x40]; + + struct mlx5_ifc_tisc_bits ctx; +}; + +struct mlx5_ifc_modify_tir_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_tir_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 tirn[0x18]; + + u8 reserved_3[0x20]; + + u8 modify_bitmask[0x40]; + + u8 reserved_4[0x40]; + + struct mlx5_ifc_tirc_bits ctx; +}; + +struct mlx5_ifc_modify_sq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_sq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 sq_state[0x4]; + u8 reserved_2[0x4]; + u8 sqn[0x18]; + + u8 reserved_3[0x20]; + + u8 modify_bitmask[0x40]; + + u8 reserved_4[0x40]; + + struct mlx5_ifc_sqc_bits ctx; +}; + +struct mlx5_ifc_modify_rqt_out_bits { + u8 
status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_rqt_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 rqtn[0x18]; + + u8 reserved_3[0x20]; + + u8 modify_bitmask[0x40]; + + u8 reserved_4[0x40]; + + struct mlx5_ifc_rqtc_bits ctx; +}; + +struct mlx5_ifc_modify_rq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 rq_state[0x4]; + u8 reserved_2[0x4]; + u8 rqn[0x18]; + + u8 reserved_3[0x20]; + + u8 modify_bitmask[0x40]; + + u8 reserved_4[0x40]; + + struct mlx5_ifc_rqc_bits ctx; +}; + +struct mlx5_ifc_modify_rmp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_rmp_bitmask_bits { + u8 reserved[0x20]; + + u8 reserved1[0x1f]; + u8 lwm[0x1]; +}; + +struct mlx5_ifc_modify_rmp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 rmp_state[0x4]; + u8 reserved_2[0x4]; + u8 rmpn[0x18]; + + u8 reserved_3[0x20]; + + struct mlx5_ifc_rmp_bitmask_bits bitmask; + + u8 reserved_4[0x40]; + + struct mlx5_ifc_rmpc_bits ctx; +}; + +struct mlx5_ifc_modify_nic_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_nic_vport_field_select_bits { + u8 reserved_0[0x1c]; + u8 permanent_address[0x1]; + u8 addresses_list[0x1]; + u8 roce_en[0x1]; + u8 reserved_1[0x1]; +}; + +struct mlx5_ifc_modify_nic_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + struct mlx5_ifc_modify_nic_vport_field_select_bits field_select; + + u8 reserved_3[0x780]; + + struct mlx5_ifc_nic_vport_context_bits nic_vport_context; +}; + +struct mlx5_ifc_modify_hca_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_hca_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xb]; + u8 port_num[0x4]; + u8 vport_number[0x10]; + + u8 reserved_3[0x20]; + + struct mlx5_ifc_hca_vport_context_bits hca_vport_context; +}; + +struct mlx5_ifc_modify_cq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +enum { + MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ = 0x0, + MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ = 0x1, +}; + +struct mlx5_ifc_modify_cq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 cqn[0x18]; + + union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select; + + struct mlx5_ifc_cqc_bits cq_context; + + u8 reserved_3[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_modify_cong_status_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_cong_status_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x18]; + u8 priority[0x4]; + u8 cong_protocol[0x4]; + + u8 enable[0x1]; + u8 tag_enable[0x1]; + u8 reserved_3[0x1e]; +}; 
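
Every layout above follows the same command convention: each *_out_bits layout opens with an 8-bit status and a 32-bit syndrome, each *_in_bits layout opens with a 16-bit opcode and a 16-bit op_mod, and all field sizes are bit counts inside a big-endian blob. As a hedged illustration (not part of this patch), such a layout would typically be filled and decoded with the MLX5_SET()/MLX5_GET() accessors and the MLX5_ST_SZ_DW() size helper from linux/mlx5/device.h; the opcode constant is assumed to come from the command-opcode enum earlier in this header:

	/* Illustrative sketch only: builds a MODIFY_CONG_STATUS mailbox using
	 * the modify_cong_status_{in,out} layouts defined just above. */
	u32 in[MLX5_ST_SZ_DW(modify_cong_status_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_cong_status_out)] = {0};
	u32 syndrome;

	MLX5_SET(modify_cong_status_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_CONG_STATUS);	/* assumed opcode name */
	MLX5_SET(modify_cong_status_in, in, cong_protocol, 0);
	MLX5_SET(modify_cong_status_in, in, enable, 1);

	/* ... execute the command, then check the shared output header ... */
	if (MLX5_GET(modify_cong_status_out, out, status))
		syndrome = MLX5_GET(modify_cong_status_out, out, syndrome);
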
+ +struct mlx5_ifc_modify_cong_params_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_cong_params_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x1c]; + u8 cong_protocol[0x4]; + + union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select; + + u8 reserved_3[0x80]; + + union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; +}; + +struct mlx5_ifc_manage_pages_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 output_num_entries[0x20]; + + u8 reserved_1[0x20]; + + u8 pas[0][0x40]; +}; + +enum { + MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL = 0x0, + MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS = 0x1, + MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES = 0x2, +}; + +struct mlx5_ifc_manage_pages_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 function_id[0x10]; + + u8 input_num_entries[0x20]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_mad_ifc_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 response_mad_packet[256][0x8]; +}; + +struct mlx5_ifc_mad_ifc_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 remote_lid[0x10]; + u8 reserved_2[0x8]; + u8 port[0x8]; + + u8 reserved_3[0x20]; + + u8 mad[256][0x8]; +}; + +struct mlx5_ifc_init_hca_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_init_hca_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_init2rtr_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_init2rtr_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_4[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_init2init_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_init2init_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_4[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_get_dropped_packet_log_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 packet_headers_log[128][0x8]; + + u8 packet_syndrome[64][0x8]; +}; + +struct mlx5_ifc_get_dropped_packet_log_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_gen_eqe_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x18]; + u8 eq_number[0x8]; + + u8 reserved_3[0x20]; + + u8 eqe[64][0x8]; +}; + +struct mlx5_ifc_gen_eq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_enable_hca_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x20]; +}; + 
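
The ENABLE_HCA pair here (the matching input layout follows immediately below) makes a convenient end-to-end example. A minimal sketch of issuing the command, assuming mlx5_cmd_exec() from linux/mlx5/driver.h; this is an illustration, not the driver's actual implementation:

	/* Sketch only: enable the HCA for a given function id. */
	static int example_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
	{
		u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
		u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
		int err;

		MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
		MLX5_SET(enable_hca_in, in, function_id, func_id);

		err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
		if (err)
			return err;
		return MLX5_GET(enable_hca_out, out, status) ? -EIO : 0;
	}
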
+struct mlx5_ifc_enable_hca_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 function_id[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_drain_dct_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_drain_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 dctn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_disable_hca_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x20]; +}; + +struct mlx5_ifc_disable_hca_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 function_id[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_detach_from_mcg_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_detach_from_mcg_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 multicast_gid[16][0x8]; +}; + +struct mlx5_ifc_destroy_xrc_srq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_xrc_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 xrc_srqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_tis_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_tis_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 tisn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_tir_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_tir_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 tirn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_srq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 srqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_sq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_sq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 sqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_rqt_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_rqt_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 rqtn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_rq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 rqn[0x18]; + + u8 
reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_rmp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_rmp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 rmpn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_psv_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_psv_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 psvn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_mkey_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_mkey_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 mkey_index[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_flow_table_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_flow_table_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 reserved_5[0x140]; +}; + +struct mlx5_ifc_destroy_flow_group_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_flow_group_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 group_id[0x20]; + + u8 reserved_5[0x120]; +}; + +struct mlx5_ifc_destroy_eq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_eq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x18]; + u8 eq_number[0x8]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_dct_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 dctn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_cq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_cq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 cqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_delete_vxlan_udp_dport_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_delete_vxlan_udp_dport_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x20]; + + u8 reserved_3[0x10]; 
+ u8 vxlan_udp_port[0x10]; +}; + +struct mlx5_ifc_delete_l2_table_entry_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_delete_l2_table_entry_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x60]; + + u8 reserved_3[0x8]; + u8 table_index[0x18]; + + u8 reserved_4[0x140]; +}; + +struct mlx5_ifc_delete_fte_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_delete_fte_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 reserved_5[0x40]; + + u8 flow_index[0x20]; + + u8 reserved_6[0xe0]; +}; + +struct mlx5_ifc_dealloc_xrcd_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_dealloc_xrcd_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 xrcd[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_dealloc_uar_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_dealloc_uar_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 uar[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_dealloc_transport_domain_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_dealloc_transport_domain_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 transport_domain[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_dealloc_q_counter_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_dealloc_q_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x18]; + u8 counter_set_id[0x8]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_dealloc_pd_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_dealloc_pd_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 pd[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_create_xrc_srq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 xrc_srqn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_xrc_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; + + u8 reserved_3[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_create_tis_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 tisn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_tis_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0xc0]; + + struct mlx5_ifc_tisc_bits ctx; +}; + +struct mlx5_ifc_create_tir_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 tirn[0x18]; + + u8 
reserved_2[0x20]; +}; + +struct mlx5_ifc_create_tir_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0xc0]; + + struct mlx5_ifc_tirc_bits ctx; +}; + +struct mlx5_ifc_create_srq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 srqn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + struct mlx5_ifc_srqc_bits srq_context_entry; + + u8 reserved_3[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_create_sq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 sqn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_sq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0xc0]; + + struct mlx5_ifc_sqc_bits ctx; +}; + +struct mlx5_ifc_create_rqt_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 rqtn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_rqt_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0xc0]; + + struct mlx5_ifc_rqtc_bits rqt_context; +}; + +struct mlx5_ifc_create_rq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 rqn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0xc0]; + + struct mlx5_ifc_rqc_bits ctx; +}; + +struct mlx5_ifc_create_rmp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 rmpn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_rmp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0xc0]; + + struct mlx5_ifc_rmpc_bits ctx; +}; + +struct mlx5_ifc_create_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 qpn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 opt_param_mask[0x20]; + + u8 reserved_3[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_4[0x80]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_create_psv_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 reserved_2[0x8]; + u8 psv0_index[0x18]; + + u8 reserved_3[0x8]; + u8 psv1_index[0x18]; + + u8 reserved_4[0x8]; + u8 psv2_index[0x18]; + + u8 reserved_5[0x8]; + u8 psv3_index[0x18]; +}; + +struct mlx5_ifc_create_psv_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 num_psv[0x4]; + u8 reserved_2[0x4]; + u8 pd[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_create_mkey_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 mkey_index[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_mkey_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x20]; + + u8 pg_access[0x1]; + u8 reserved_3[0x1f]; + + struct mlx5_ifc_mkc_bits memory_key_mkey_entry; + + u8 reserved_4[0x80]; + + 
u8 translations_octword_actual_size[0x20]; + + u8 reserved_5[0x560]; + + u8 klm_pas_mtt[0][0x20]; +}; + +struct mlx5_ifc_create_flow_table_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 table_id[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_flow_table_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x20]; + + u8 reserved_5[0x8]; + u8 level[0x8]; + u8 reserved_6[0x8]; + u8 log_size[0x8]; + + u8 reserved_7[0x120]; +}; + +struct mlx5_ifc_create_flow_group_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 group_id[0x18]; + + u8 reserved_2[0x20]; +}; + +enum { + MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, + MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, + MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, +}; + +struct mlx5_ifc_create_flow_group_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 reserved_5[0x20]; + + u8 start_flow_index[0x20]; + + u8 reserved_6[0x20]; + + u8 end_flow_index[0x20]; + + u8 reserved_7[0xa0]; + + u8 reserved_8[0x18]; + u8 match_criteria_enable[0x8]; + + struct mlx5_ifc_fte_match_param_bits match_criteria; + + u8 reserved_9[0xe00]; +}; + +struct mlx5_ifc_create_eq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x18]; + u8 eq_number[0x8]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_eq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + struct mlx5_ifc_eqc_bits eq_context_entry; + + u8 reserved_3[0x40]; + + u8 event_bitmask[0x40]; + + u8 reserved_4[0x580]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_create_dct_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 dctn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + struct mlx5_ifc_dctc_bits dct_context_entry; + + u8 reserved_3[0x180]; +}; + +struct mlx5_ifc_create_cq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 cqn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_cq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + struct mlx5_ifc_cqc_bits cq_context; + + u8 reserved_3[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_config_int_moderation_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x4]; + u8 min_delay[0xc]; + u8 int_vector[0x10]; + + u8 reserved_2[0x20]; +}; + +enum { + MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE = 0x0, + MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ = 0x1, +}; + +struct mlx5_ifc_config_int_moderation_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x4]; + u8 min_delay[0xc]; + u8 int_vector[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_attach_to_mcg_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 
reserved_1[0x40]; +}; + +struct mlx5_ifc_attach_to_mcg_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 multicast_gid[16][0x8]; +}; + +struct mlx5_ifc_arm_xrc_srq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +enum { + MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ = 0x1, +}; + +struct mlx5_ifc_arm_xrc_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 xrc_srqn[0x18]; + + u8 reserved_3[0x10]; + u8 lwm[0x10]; +}; + +struct mlx5_ifc_arm_rq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +enum { + MLX5_ARM_RQ_IN_OP_MOD_SRQ_ = 0x1, +}; + +struct mlx5_ifc_arm_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 srq_number[0x18]; + + u8 reserved_3[0x10]; + u8 lwm[0x10]; +}; + +struct mlx5_ifc_arm_dct_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_arm_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 dct_number[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_alloc_xrcd_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 xrcd[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_alloc_xrcd_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_alloc_uar_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 uar[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_alloc_uar_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_alloc_transport_domain_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 transport_domain[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_alloc_transport_domain_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_alloc_q_counter_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x18]; + u8 counter_set_id[0x8]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_alloc_q_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_alloc_pd_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 pd[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_alloc_pd_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_add_vxlan_udp_dport_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_add_vxlan_udp_dport_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x20]; + + u8 reserved_3[0x10]; + u8 vxlan_udp_port[0x10]; +}; + +struct mlx5_ifc_access_register_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + 
u8 reserved_1[0x40]; + + u8 register_data[0][0x20]; +}; + +enum { + MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0, + MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1, +}; + +struct mlx5_ifc_access_register_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 register_id[0x10]; + + u8 argument[0x20]; + + u8 register_data[0][0x20]; +}; + +struct mlx5_ifc_sltp_reg_bits { + u8 status[0x4]; + u8 version[0x4]; + u8 local_port[0x8]; + u8 pnat[0x2]; + u8 reserved_0[0x2]; + u8 lane[0x4]; + u8 reserved_1[0x8]; + + u8 reserved_2[0x20]; + + u8 reserved_3[0x7]; + u8 polarity[0x1]; + u8 ob_tap0[0x8]; + u8 ob_tap1[0x8]; + u8 ob_tap2[0x8]; + + u8 reserved_4[0xc]; + u8 ob_preemp_mode[0x4]; + u8 ob_reg[0x8]; + u8 ob_bias[0x8]; + + u8 reserved_5[0x20]; +}; + +struct mlx5_ifc_slrg_reg_bits { + u8 status[0x4]; + u8 version[0x4]; + u8 local_port[0x8]; + u8 pnat[0x2]; + u8 reserved_0[0x2]; + u8 lane[0x4]; + u8 reserved_1[0x8]; + + u8 time_to_link_up[0x10]; + u8 reserved_2[0xc]; + u8 grade_lane_speed[0x4]; + + u8 grade_version[0x8]; + u8 grade[0x18]; + + u8 reserved_3[0x4]; + u8 height_grade_type[0x4]; + u8 height_grade[0x18]; + + u8 height_dz[0x10]; + u8 height_dv[0x10]; + + u8 reserved_4[0x10]; + u8 height_sigma[0x10]; + + u8 reserved_5[0x20]; + + u8 reserved_6[0x4]; + u8 phase_grade_type[0x4]; + u8 phase_grade[0x18]; + + u8 reserved_7[0x8]; + u8 phase_eo_pos[0x8]; + u8 reserved_8[0x8]; + u8 phase_eo_neg[0x8]; + + u8 ffe_set_tested[0x10]; + u8 test_errors_per_lane[0x10]; +}; + +struct mlx5_ifc_pvlc_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0x1c]; + u8 vl_hw_cap[0x4]; + + u8 reserved_3[0x1c]; + u8 vl_admin[0x4]; + + u8 reserved_4[0x1c]; + u8 vl_operational[0x4]; +}; + +struct mlx5_ifc_pude_reg_bits { + u8 swid[0x8]; + u8 local_port[0x8]; + u8 reserved_0[0x4]; + u8 admin_status[0x4]; + u8 reserved_1[0x4]; + u8 oper_status[0x4]; + + u8 reserved_2[0x60]; +}; + +struct mlx5_ifc_ptys_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0xd]; + u8 proto_mask[0x3]; + + u8 reserved_2[0x40]; + + u8 eth_proto_capability[0x20]; + + u8 ib_link_width_capability[0x10]; + u8 ib_proto_capability[0x10]; + + u8 reserved_3[0x20]; + + u8 eth_proto_admin[0x20]; + + u8 ib_link_width_admin[0x10]; + u8 ib_proto_admin[0x10]; + + u8 reserved_4[0x20]; + + u8 eth_proto_oper[0x20]; + + u8 ib_link_width_oper[0x10]; + u8 ib_proto_oper[0x10]; + + u8 reserved_5[0x20]; + + u8 eth_proto_lp_advertise[0x20]; + + u8 reserved_6[0x60]; +}; + +struct mlx5_ifc_ptas_reg_bits { + u8 reserved_0[0x20]; + + u8 algorithm_options[0x10]; + u8 reserved_1[0x4]; + u8 repetitions_mode[0x4]; + u8 num_of_repetitions[0x8]; + + u8 grade_version[0x8]; + u8 height_grade_type[0x4]; + u8 phase_grade_type[0x4]; + u8 height_grade_weight[0x8]; + u8 phase_grade_weight[0x8]; + + u8 gisim_measure_bits[0x10]; + u8 adaptive_tap_measure_bits[0x10]; + + u8 ber_bath_high_error_threshold[0x10]; + u8 ber_bath_mid_error_threshold[0x10]; + + u8 ber_bath_low_error_threshold[0x10]; + u8 one_ratio_high_threshold[0x10]; + + u8 one_ratio_high_mid_threshold[0x10]; + u8 one_ratio_low_mid_threshold[0x10]; + + u8 one_ratio_low_threshold[0x10]; + u8 ndeo_error_threshold[0x10]; + + u8 mixer_offset_step_size[0x10]; + u8 reserved_2[0x8]; + u8 mix90_phase_for_voltage_bath[0x8]; + + u8 mixer_offset_start[0x10]; + u8 mixer_offset_end[0x10]; + + u8 reserved_3[0x15]; + u8 ber_test_time[0xb]; +}; + +struct mlx5_ifc_pspa_reg_bits { + u8 swid[0x8]; + u8 local_port[0x8]; + u8 
sub_port[0x8]; + u8 reserved_0[0x8]; + + u8 reserved_1[0x20]; +}; + +struct mlx5_ifc_pqdr_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x5]; + u8 prio[0x3]; + u8 reserved_2[0x6]; + u8 mode[0x2]; + + u8 reserved_3[0x20]; + + u8 reserved_4[0x10]; + u8 min_threshold[0x10]; + + u8 reserved_5[0x10]; + u8 max_threshold[0x10]; + + u8 reserved_6[0x10]; + u8 mark_probability_denominator[0x10]; + + u8 reserved_7[0x60]; +}; + +struct mlx5_ifc_ppsc_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0x60]; + + u8 reserved_3[0x1c]; + u8 wrps_admin[0x4]; + + u8 reserved_4[0x1c]; + u8 wrps_status[0x4]; + + u8 reserved_5[0x8]; + u8 up_threshold[0x8]; + u8 reserved_6[0x8]; + u8 down_threshold[0x8]; + + u8 reserved_7[0x20]; + + u8 reserved_8[0x1c]; + u8 srps_admin[0x4]; + + u8 reserved_9[0x1c]; + u8 srps_status[0x4]; + + u8 reserved_10[0x40]; +}; + +struct mlx5_ifc_pplr_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0x8]; + u8 lb_cap[0x8]; + u8 reserved_3[0x8]; + u8 lb_en[0x8]; +}; + +struct mlx5_ifc_pplm_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0x20]; + + u8 port_profile_mode[0x8]; + u8 static_port_profile[0x8]; + u8 active_port_profile[0x8]; + u8 reserved_3[0x8]; + + u8 retransmission_active[0x8]; + u8 fec_mode_active[0x18]; + + u8 reserved_4[0x20]; +}; + +struct mlx5_ifc_ppcnt_reg_bits { + u8 swid[0x8]; + u8 local_port[0x8]; + u8 pnat[0x2]; + u8 reserved_0[0x8]; + u8 grp[0x6]; + + u8 clr[0x1]; + u8 reserved_1[0x1c]; + u8 prio_tc[0x3]; + + union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; +}; + +struct mlx5_ifc_ppad_reg_bits { + u8 reserved_0[0x3]; + u8 single_mac[0x1]; + u8 reserved_1[0x4]; + u8 local_port[0x8]; + u8 mac_47_32[0x10]; + + u8 mac_31_0[0x20]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_pmtu_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 max_mtu[0x10]; + u8 reserved_2[0x10]; + + u8 admin_mtu[0x10]; + u8 reserved_3[0x10]; + + u8 oper_mtu[0x10]; + u8 reserved_4[0x10]; +}; + +struct mlx5_ifc_pmpr_reg_bits { + u8 reserved_0[0x8]; + u8 module[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0x18]; + u8 attenuation_5g[0x8]; + + u8 reserved_3[0x18]; + u8 attenuation_7g[0x8]; + + u8 reserved_4[0x18]; + u8 attenuation_12g[0x8]; +}; + +struct mlx5_ifc_pmpe_reg_bits { + u8 reserved_0[0x8]; + u8 module[0x8]; + u8 reserved_1[0xc]; + u8 module_status[0x4]; + + u8 reserved_2[0x60]; +}; + +struct mlx5_ifc_pmpc_reg_bits { + u8 module_state_updated[32][0x8]; +}; + +struct mlx5_ifc_pmlpn_reg_bits { + u8 reserved_0[0x4]; + u8 mlpn_status[0x4]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 e[0x1]; + u8 reserved_2[0x1f]; +}; + +struct mlx5_ifc_pmlp_reg_bits { + u8 rxtx[0x1]; + u8 reserved_0[0x7]; + u8 local_port[0x8]; + u8 reserved_1[0x8]; + u8 width[0x8]; + + u8 lane0_module_mapping[0x20]; + + u8 lane1_module_mapping[0x20]; + + u8 lane2_module_mapping[0x20]; + + u8 lane3_module_mapping[0x20]; + + u8 reserved_2[0x160]; +}; + +struct mlx5_ifc_pmaos_reg_bits { + u8 reserved_0[0x8]; + u8 module[0x8]; + u8 reserved_1[0x4]; + u8 admin_status[0x4]; + u8 reserved_2[0x4]; + u8 oper_status[0x4]; + + u8 ase[0x1]; + u8 ee[0x1]; + u8 reserved_3[0x1c]; + u8 e[0x2]; + + u8 reserved_4[0x40]; +}; + +struct mlx5_ifc_plpc_reg_bits { + u8 reserved_0[0x4]; + u8 profile_id[0xc]; + u8 reserved_1[0x4]; + u8 proto_mask[0x4]; + u8 reserved_2[0x8]; + + u8 reserved_3[0x10]; + u8 lane_speed[0x10]; + + u8 
reserved_4[0x17]; + u8 lpbf[0x1]; + u8 fec_mode_policy[0x8]; + + u8 retransmission_capability[0x8]; + u8 fec_mode_capability[0x18]; + + u8 retransmission_support_admin[0x8]; + u8 fec_mode_support_admin[0x18]; + + u8 retransmission_request_admin[0x8]; + u8 fec_mode_request_admin[0x18]; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_plib_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x8]; + u8 ib_port[0x8]; + + u8 reserved_2[0x60]; +}; + +struct mlx5_ifc_plbf_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0xd]; + u8 lbf_mode[0x3]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_pipg_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 dic[0x1]; + u8 reserved_2[0x19]; + u8 ipg[0x4]; + u8 reserved_3[0x2]; +}; + +struct mlx5_ifc_pifr_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0xe0]; + + u8 port_filter[8][0x20]; + + u8 port_filter_update_en[8][0x20]; +}; + +struct mlx5_ifc_pfcc_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 ppan[0x4]; + u8 reserved_2[0x4]; + u8 prio_mask_tx[0x8]; + u8 reserved_3[0x8]; + u8 prio_mask_rx[0x8]; + + u8 pptx[0x1]; + u8 aptx[0x1]; + u8 reserved_4[0x6]; + u8 pfctx[0x8]; + u8 reserved_5[0x10]; + + u8 pprx[0x1]; + u8 aprx[0x1]; + u8 reserved_6[0x6]; + u8 pfcrx[0x8]; + u8 reserved_7[0x10]; + + u8 reserved_8[0x80]; +}; + +struct mlx5_ifc_pelc_reg_bits { + u8 op[0x4]; + u8 reserved_0[0x4]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 op_admin[0x8]; + u8 op_capability[0x8]; + u8 op_request[0x8]; + u8 op_active[0x8]; + + u8 admin[0x40]; + + u8 capability[0x40]; + + u8 request[0x40]; + + u8 active[0x40]; + + u8 reserved_2[0x80]; +}; + +struct mlx5_ifc_peir_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0xc]; + u8 error_count[0x4]; + u8 reserved_3[0x10]; + + u8 reserved_4[0xc]; + u8 lane[0x4]; + u8 reserved_5[0x8]; + u8 error_type[0x8]; +}; + +struct mlx5_ifc_pcap_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 port_capability_mask[4][0x20]; +}; + +struct mlx5_ifc_paos_reg_bits { + u8 swid[0x8]; + u8 local_port[0x8]; + u8 reserved_0[0x4]; + u8 admin_status[0x4]; + u8 reserved_1[0x4]; + u8 oper_status[0x4]; + + u8 ase[0x1]; + u8 ee[0x1]; + u8 reserved_2[0x1c]; + u8 e[0x2]; + + u8 reserved_3[0x40]; +}; + +struct mlx5_ifc_pamp_reg_bits { + u8 reserved_0[0x8]; + u8 opamp_group[0x8]; + u8 reserved_1[0xc]; + u8 opamp_group_type[0x4]; + + u8 start_index[0x10]; + u8 reserved_2[0x4]; + u8 num_of_indices[0xc]; + + u8 index_data[18][0x10]; +}; + +struct mlx5_ifc_lane_2_module_mapping_bits { + u8 reserved_0[0x6]; + u8 rx_lane[0x2]; + u8 reserved_1[0x6]; + u8 tx_lane[0x2]; + u8 reserved_2[0x8]; + u8 module[0x8]; +}; + +struct mlx5_ifc_bufferx_reg_bits { + u8 reserved_0[0x6]; + u8 lossy[0x1]; + u8 epsb[0x1]; + u8 reserved_1[0xc]; + u8 size[0xc]; + + u8 xoff_threshold[0x10]; + u8 xon_threshold[0x10]; +}; + +struct mlx5_ifc_set_node_in_bits { + u8 node_description[64][0x8]; +}; + +struct mlx5_ifc_register_power_settings_bits { + u8 reserved_0[0x18]; + u8 power_settings_level[0x8]; + + u8 reserved_1[0x60]; +}; + +struct mlx5_ifc_register_host_endianness_bits { + u8 he[0x1]; + u8 reserved_0[0x1f]; + + u8 reserved_1[0x60]; +}; + +struct mlx5_ifc_umr_pointer_desc_argument_bits { + u8 reserved_0[0x20]; + + u8 mkey[0x20]; + + u8 addressh_63_32[0x20]; + + u8 addressl_31_0[0x20]; +}; + +struct mlx5_ifc_ud_adrs_vector_bits { + u8 
dc_key[0x40]; + + u8 ext[0x1]; + u8 reserved_0[0x7]; + u8 destination_qp_dct[0x18]; + + u8 static_rate[0x4]; + u8 sl_eth_prio[0x4]; + u8 fl[0x1]; + u8 mlid[0x7]; + u8 rlid_udp_sport[0x10]; + + u8 reserved_1[0x20]; + + u8 rmac_47_16[0x20]; + + u8 rmac_15_0[0x10]; + u8 tclass[0x8]; + u8 hop_limit[0x8]; + + u8 reserved_2[0x1]; + u8 grh[0x1]; + u8 reserved_3[0x2]; + u8 src_addr_index[0x8]; + u8 flow_label[0x14]; + + u8 rgid_rip[16][0x8]; +}; + +struct mlx5_ifc_pages_req_event_bits { + u8 reserved_0[0x10]; + u8 function_id[0x10]; + + u8 num_pages[0x20]; + + u8 reserved_1[0xa0]; +}; + +struct mlx5_ifc_eqe_bits { + u8 reserved_0[0x8]; + u8 event_type[0x8]; + u8 reserved_1[0x8]; + u8 event_sub_type[0x8]; + + u8 reserved_2[0xe0]; + + union mlx5_ifc_event_auto_bits event_data; + + u8 reserved_3[0x10]; + u8 signature[0x8]; + u8 reserved_4[0x7]; + u8 owner[0x1]; +}; + +enum { + MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT = 0x7, +}; + +struct mlx5_ifc_cmd_queue_entry_bits { + u8 type[0x8]; + u8 reserved_0[0x18]; + + u8 input_length[0x20]; + + u8 input_mailbox_pointer_63_32[0x20]; + + u8 input_mailbox_pointer_31_9[0x17]; + u8 reserved_1[0x9]; + + u8 command_input_inline_data[16][0x8]; + + u8 command_output_inline_data[16][0x8]; + + u8 output_mailbox_pointer_63_32[0x20]; + + u8 output_mailbox_pointer_31_9[0x17]; + u8 reserved_2[0x9]; + + u8 output_length[0x20]; + + u8 token[0x8]; + u8 signature[0x8]; + u8 reserved_3[0x8]; + u8 status[0x7]; + u8 ownership[0x1]; +}; + +struct mlx5_ifc_cmd_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 command_output[0x20]; +}; + +struct mlx5_ifc_cmd_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 command[0][0x20]; +}; + +struct mlx5_ifc_cmd_if_box_bits { + u8 mailbox_data[512][0x8]; + + u8 reserved_0[0x180]; + + u8 next_pointer_63_32[0x20]; + + u8 next_pointer_31_10[0x16]; + u8 reserved_1[0xa]; + + u8 block_number[0x20]; + + u8 reserved_2[0x8]; + u8 token[0x8]; + u8 ctrl_signature[0x8]; + u8 signature[0x8]; +}; + +struct mlx5_ifc_mtt_bits { + u8 ptag_63_32[0x20]; + + u8 ptag_31_8[0x18]; + u8 reserved_0[0x6]; + u8 wr_en[0x1]; + u8 rd_en[0x1]; +}; + +enum { + MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0, + MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1, + MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2, +}; + +enum { + MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER = 0x0, + MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED = 0x1, + MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC = 0x2, +}; + +enum { + MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR = 0x1, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC = 0x7, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR = 0x8, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR = 0x9, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR = 0xa, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR = 0xb, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN = 0xc, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR = 0xd, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10, +}; + +struct mlx5_ifc_initial_seg_bits { + u8 fw_rev_minor[0x10]; + u8 fw_rev_major[0x10]; + + u8 cmd_interface_rev[0x10]; + u8 fw_rev_subminor[0x10]; + + u8 reserved_0[0x40]; + + u8 cmdq_phy_addr_63_32[0x20]; + + u8 cmdq_phy_addr_31_12[0x14]; + u8 reserved_1[0x2]; + u8 nic_interface[0x2]; + u8 log_cmdq_size[0x4]; + u8 log_cmdq_stride[0x4]; + + u8 
command_doorbell_vector[0x20]; + + u8 reserved_2[0xf00]; + + u8 initializing[0x1]; + u8 reserved_3[0x4]; + u8 nic_interface_supported[0x3]; + u8 reserved_4[0x18]; + + struct mlx5_ifc_health_buffer_bits health_buffer; + + u8 no_dram_nic_offset[0x20]; + + u8 reserved_5[0x6e40]; + + u8 reserved_6[0x1f]; + u8 clear_int[0x1]; + + u8 health_syndrome[0x8]; + u8 health_counter[0x18]; + + u8 reserved_7[0x17fc0]; +}; + +union mlx5_ifc_ports_control_registers_document_bits { + struct mlx5_ifc_bufferx_reg_bits bufferx_reg; + struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; + struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout; + struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; + struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; + struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; + struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; + struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; + struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping; + struct mlx5_ifc_pamp_reg_bits pamp_reg; + struct mlx5_ifc_paos_reg_bits paos_reg; + struct mlx5_ifc_pcap_reg_bits pcap_reg; + struct mlx5_ifc_peir_reg_bits peir_reg; + struct mlx5_ifc_pelc_reg_bits pelc_reg; + struct mlx5_ifc_pfcc_reg_bits pfcc_reg; + struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; + struct mlx5_ifc_pifr_reg_bits pifr_reg; + struct mlx5_ifc_pipg_reg_bits pipg_reg; + struct mlx5_ifc_plbf_reg_bits plbf_reg; + struct mlx5_ifc_plib_reg_bits plib_reg; + struct mlx5_ifc_plpc_reg_bits plpc_reg; + struct mlx5_ifc_pmaos_reg_bits pmaos_reg; + struct mlx5_ifc_pmlp_reg_bits pmlp_reg; + struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg; + struct mlx5_ifc_pmpc_reg_bits pmpc_reg; + struct mlx5_ifc_pmpe_reg_bits pmpe_reg; + struct mlx5_ifc_pmpr_reg_bits pmpr_reg; + struct mlx5_ifc_pmtu_reg_bits pmtu_reg; + struct mlx5_ifc_ppad_reg_bits ppad_reg; + struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; + struct mlx5_ifc_pplm_reg_bits pplm_reg; + struct mlx5_ifc_pplr_reg_bits pplr_reg; + struct mlx5_ifc_ppsc_reg_bits ppsc_reg; + struct mlx5_ifc_pqdr_reg_bits pqdr_reg; + struct mlx5_ifc_pspa_reg_bits pspa_reg; + struct mlx5_ifc_ptas_reg_bits ptas_reg; + struct mlx5_ifc_ptys_reg_bits ptys_reg; + struct mlx5_ifc_pude_reg_bits pude_reg; + struct mlx5_ifc_pvlc_reg_bits pvlc_reg; + struct mlx5_ifc_slrg_reg_bits slrg_reg; + struct mlx5_ifc_sltp_reg_bits sltp_reg; + u8 reserved_0[0x60e0]; +}; + +union mlx5_ifc_debug_enhancements_document_bits { + struct mlx5_ifc_health_buffer_bits health_buffer; + u8 reserved_0[0x200]; +}; + +union mlx5_ifc_uplink_pci_interface_document_bits { + struct mlx5_ifc_initial_seg_bits initial_seg; + u8 reserved_0[0x20060]; }; #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 310b5f7fd6ae..f079fb1a31f7 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -134,13 +134,21 @@ enum { enum { MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2, + MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2, MLX5_WQE_CTRL_SOLICITED = 1 << 1, }; enum { + MLX5_SEND_WQE_DS = 16, MLX5_SEND_WQE_BB = 64, }; +#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS) + +enum { + MLX5_SEND_WQE_MAX_WQEBBS = 16, +}; + enum { MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27, MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28, @@ -200,6 +208,23 @@ struct mlx5_wqe_ctrl_seg { #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 +enum { + MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4, + MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5, + MLX5_ETH_WQE_L3_CSUM = 1 << 6, + MLX5_ETH_WQE_L4_CSUM = 1 << 7, +}; + +struct mlx5_wqe_eth_seg { + u8 rsvd0[4]; + u8 cs_flags; + u8 rsvd1; + __be16 mss; + __be32 rsvd2; + __be16 inline_hdr_sz; + u8 inline_hdr_start[2]; +}; + struct mlx5_wqe_xrc_seg { __be32 xrc_srqn; u8 rsvd[12]; diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h new file mode 100644 index 000000000000..967e0fd06e89 --- /dev/null +++ b/include/linux/mlx5/vport.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5_VPORT_H__ +#define __MLX5_VPORT_H__ + +#include + +u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod); +void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr); +int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 gid_index, + union ib_gid *gid); +int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 pkey_index, + u16 *pkey); +int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, + u8 other_vport, u8 port_num, + u16 vf_num, + struct mlx5_hca_vport_context *rep); +int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, + u64 *sys_image_guid); +int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, + u64 *node_guid); + +#endif /* __MLX5_VPORT_H__ */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 51f8d2f5dc3f..6f5f71ff5169 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1997,6 +1997,7 @@ struct offload_callbacks { struct packet_offload { __be16 type; /* This is really htons(ether_type). 
*/ + u16 priority; struct offload_callbacks callbacks; struct list_head list; }; diff --git a/include/linux/of.h b/include/linux/of.h index ddeaae6d2083..b871ff9d81d7 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -121,6 +121,8 @@ extern struct device_node *of_stdout; extern raw_spinlock_t devtree_lock; #ifdef CONFIG_OF +void of_core_init(void); + static inline bool is_of_node(struct fwnode_handle *fwnode) { return fwnode && fwnode->type == FWNODE_OF; @@ -376,6 +378,10 @@ bool of_console_check(struct device_node *dn, char *name, int index); #else /* CONFIG_OF */ +static inline void of_core_init(void) +{ +} + static inline bool is_of_node(struct fwnode_handle *fwnode) { return false; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 2f7b9a40f627..2972c7f3aa1d 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2329,6 +2329,8 @@ #define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea #define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb +#define PCI_VENDOR_ID_CAVIUM 0x177d + #define PCI_VENDOR_ID_BELKIN 0x1799 #define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 50e50095c8d1..84a109449610 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -41,7 +41,12 @@ void percpu_counter_destroy(struct percpu_counter *fbc); void percpu_counter_set(struct percpu_counter *fbc, s64 amount); void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); s64 __percpu_counter_sum(struct percpu_counter *fbc); -int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs); +int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); + +static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) +{ + return __percpu_counter_compare(fbc, rhs, percpu_counter_batch); +} static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { @@ -116,6 +121,12 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) return 0; } +static inline int +__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) +{ + return percpu_counter_compare(fbc, rhs); +} + static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 61992cf2e977..d8a82a89f35a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -92,8 +92,6 @@ struct hw_perf_event_extra { int idx; /* index in shared_regs->regs[] */ }; -struct event_constraint; - /** * struct hw_perf_event - performance event hardware details: */ @@ -112,8 +110,6 @@ struct hw_perf_event { struct hw_perf_event_extra extra_reg; struct hw_perf_event_extra branch_reg; - - struct event_constraint *constraint; }; struct { /* software */ struct hrtimer hrtimer; diff --git a/include/linux/phy.h b/include/linux/phy.h index 701c7a3946e0..a26c3f84b8dd 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -677,6 +677,17 @@ static inline bool phy_is_internal(struct phy_device *phydev) return phydev->is_internal; } +/** + * phy_interface_is_rgmii - Convenience function for testing if a PHY interface + * is RGMII (all variants) + * @phydev: the phy_device struct + */ +static inline bool phy_interface_is_rgmii(struct phy_device *phydev) +{ + return phydev->interface >= PHY_INTERFACE_MODE_RGMII && + phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID; +} + /** * phy_write_mmd - Convenience function for writing a register * on an MMD on a given PHY. 
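A quick aside on the new phy_interface_is_rgmii() helper above: it works because the four RGMII variants (PHY_INTERFACE_MODE_RGMII, _RGMII_ID, _RGMII_RXID, _RGMII_TXID) occupy a contiguous range of enum phy_interface_t, so a single range test covers all of them. A minimal sketch of how a PHY driver might use it; foo_enable_rgmii_delays() is a hypothetical driver helper, not part of this series:

	static int foo_config_init(struct phy_device *phydev)
	{
		/* true for RGMII, RGMII_ID, RGMII_RXID and RGMII_TXID alike */
		if (phy_interface_is_rgmii(phydev))
			foo_enable_rgmii_delays(phydev); /* hypothetical */

		return 0;
	}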
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6b41c15efa27..cc612fc0a894 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1943,7 +1943,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb, if (skb_transport_header_was_set(skb)) return; else if (skb_flow_dissect_flow_keys(skb, &keys)) - skb_set_transport_header(skb, keys.basic.thoff); + skb_set_transport_header(skb, keys.control.thoff); else skb_set_transport_header(skb, offset_hint); } diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 7f484a239f53..c735f5c91eea 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -99,6 +99,7 @@ struct plat_stmmacenet_data { int phy_addr; int interface; struct stmmac_mdio_bus_data *mdio_bus_data; + struct device_node *phy_node; struct stmmac_dma_cfg *dma_cfg; int clk_csr; int has_gmac; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index d63ecec73090..a741678f24a2 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4575,13 +4575,15 @@ void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss, * @ie: information elements of the deauth/disassoc frame (may be %NULL) * @ie_len: length of IEs * @reason: reason code for the disconnection, set it to 0 if unknown + * @locally_generated: disconnection was requested locally * @gfp: allocation flags * * After it calls this function, the driver should enter an idle state * and not try to connect to any AP any more. */ void cfg80211_disconnected(struct net_device *dev, u16 reason, - const u8 *ie, size_t ie_len, gfp_t gfp); + const u8 *ie, size_t ie_len, + bool locally_generated, gfp_t gfp); /** * cfg80211_ready_on_channel - notification of remain_on_channel start diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 6ea16c84293b..290a9a69af07 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -44,6 +44,8 @@ struct cfg802154_ops { int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel); int (*set_cca_mode)(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca); + int (*set_cca_ed_level)(struct wpan_phy *wpan_phy, s32 ed_level); + int (*set_tx_power)(struct wpan_phy *wpan_phy, s32 power); int (*set_pan_id)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le16 pan_id); int (*set_short_addr)(struct wpan_phy *wpan_phy, @@ -61,14 +63,66 @@ struct cfg802154_ops { struct wpan_dev *wpan_dev, bool mode); }; +static inline bool +wpan_phy_supported_bool(bool b, enum nl802154_supported_bool_states st) +{ + switch (st) { + case NL802154_SUPPORTED_BOOL_TRUE: + return b; + case NL802154_SUPPORTED_BOOL_FALSE: + return !b; + case NL802154_SUPPORTED_BOOL_BOTH: + return true; + default: + WARN_ON(1); + } + + return false; +} + +struct wpan_phy_supported { + u32 channels[IEEE802154_MAX_PAGE + 1], + cca_modes, cca_opts, iftypes; + enum nl802154_supported_bool_states lbt; + u8 min_minbe, max_minbe, min_maxbe, max_maxbe, + min_csma_backoffs, max_csma_backoffs; + s8 min_frame_retries, max_frame_retries; + size_t tx_powers_size, cca_ed_levels_size; + const s32 *tx_powers, *cca_ed_levels; +}; + struct wpan_phy_cca { enum nl802154_cca_modes mode; enum nl802154_cca_opts opt; }; -struct wpan_phy { - struct mutex pib_lock; +static inline bool +wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b) +{ + if (a->mode != b->mode) + return false; + + if (a->mode == NL802154_CCA_ENERGY_CARRIER) + return a->opt == b->opt; + return true; +} + +/** + * @WPAN_PHY_FLAG_TXPOWER: Indicates that
transceiver will support + * transmit power setting. + * @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that transceiver will support cca ed + * level setting. + * @WPAN_PHY_FLAG_CCA_MODE: Indicates that transceiver will support cca mode + * setting. + */ +enum wpan_phy_flags { + WPAN_PHY_FLAG_TXPOWER = BIT(1), + WPAN_PHY_FLAG_CCA_ED_LEVEL = BIT(2), + WPAN_PHY_FLAG_CCA_MODE = BIT(3), +}; + +struct wpan_phy { /* If multiple wpan_phys are registered and you're handed e.g. * a regular netdev with assigned ieee802154_ptr, you won't * know whether it points to a wpan_phy your driver has registered @@ -77,6 +131,8 @@ struct wpan_phy { */ const void *privid; + u32 flags; + /* * This is a PIB according to 802.15.4-2011. * We do not provide timing-related variables, as they @@ -84,12 +140,14 @@ struct wpan_phy { */ u8 current_channel; u8 current_page; - u32 channels_supported[IEEE802154_MAX_PAGE + 1]; - s8 transmit_power; + struct wpan_phy_supported supported; + /* current transmit_power in mBm */ + s32 transmit_power; struct wpan_phy_cca cca; __le64 perm_extended_addr; + /* current cca ed threshold in mBm */ s32 cca_ed_level; /* PHY depended MAC PIB values */ @@ -121,9 +179,9 @@ struct wpan_dev { __le64 extended_addr; /* MAC BSN field */ - u8 bsn; + atomic_t bsn; /* MAC DSN field */ - u8 dsn; + atomic_t dsn; u8 min_be; u8 max_be; diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index bac9c1421f58..1a8c22419936 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -6,6 +6,15 @@ #include #include +/** + * struct flow_dissector_key_control: + * @thoff: Transport header offset + */ +struct flow_dissector_key_control { + u16 thoff; + u16 addr_type; +}; + /** * struct flow_dissector_key_basic: * @thoff: Transport header offset @@ -13,24 +22,63 @@ * @ip_proto: Transport header protocol (eg. 
TCP/UDP) */ struct flow_dissector_key_basic { - u16 thoff; __be16 n_proto; u8 ip_proto; + u8 padding; +}; + +struct flow_dissector_key_tags { + u32 vlan_id:12, + flow_label:20; +}; + +struct flow_dissector_key_keyid { + __be32 keyid; }; /** - * struct flow_dissector_key_addrs: - * @src: source ip address in case of IPv4 - * For IPv6 it contains 32bit hash of src address - * @dst: destination ip address in case of IPv4 - * For IPv6 it contains 32bit hash of dst address + * struct flow_dissector_key_ipv4_addrs: + * @src: source ip address + * @dst: destination ip address */ -struct flow_dissector_key_addrs { +struct flow_dissector_key_ipv4_addrs { /* (src,dst) must be grouped, in the same way than in IP header */ __be32 src; __be32 dst; }; +/** + * struct flow_dissector_key_ipv6_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct flow_dissector_key_ipv6_addrs { + /* (src,dst) must be grouped, in the same way as in the IP header */ + struct in6_addr src; + struct in6_addr dst; +}; + +/** + * struct flow_dissector_key_tipc_addrs: + * @srcnode: source node address + */ +struct flow_dissector_key_tipc_addrs { + __be32 srcnode; +}; + +/** + * struct flow_dissector_key_addrs: + * @v4addrs: IPv4 addresses + * @v6addrs: IPv6 addresses + */ +struct flow_dissector_key_addrs { + union { + struct flow_dissector_key_ipv4_addrs v4addrs; + struct flow_dissector_key_ipv6_addrs v6addrs; + struct flow_dissector_key_tipc_addrs tipcaddrs; + }; +}; + /** * flow_dissector_key_tp_ports: * @ports: port numbers of Transport header @@ -47,16 +95,6 @@ struct flow_dissector_key_ports { }; }; -/** - * struct flow_dissector_key_ipv6_addrs: - * @src: source ip address - * @dst: destination ip address - */ -struct flow_dissector_key_ipv6_addrs { - /* (src,dst) must be grouped, in the same way than in IP header */ - struct in6_addr src; - struct in6_addr dst; -}; /** * struct flow_dissector_key_eth_addrs: @@ -70,12 +108,17 @@ struct flow_dissector_key_eth_addrs { }; enum flow_dissector_key_id { + FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ - FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_addrs */ - FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, /* struct flow_dissector_key_addrs */ - FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */ + FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */ FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */ + FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ + FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */ + FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_tags */ + FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_tags */ + FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */ + FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */ FLOW_DISSECTOR_KEY_MAX, }; @@ -109,11 +152,21 @@ static inline bool skb_flow_dissect(const struct sk_buff *skb, } struct flow_keys { - struct flow_dissector_key_addrs addrs; - struct flow_dissector_key_ports ports; + struct flow_dissector_key_control control; +#define FLOW_KEYS_HASH_START_FIELD basic struct flow_dissector_key_basic basic; + struct flow_dissector_key_tags tags; + struct flow_dissector_key_keyid keyid; + struct flow_dissector_key_ports ports; + struct flow_dissector_key_addrs addrs; }; +#define
FLOW_KEYS_HASH_OFFSET \ + offsetof(struct flow_keys, FLOW_KEYS_HASH_START_FIELD) + +__be32 flow_get_u32_src(const struct flow_keys *flow); +__be32 flow_get_u32_dst(const struct flow_keys *flow); + extern struct flow_dissector flow_keys_dissector; extern struct flow_dissector flow_keys_buf_dissector; diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index 94a297052442..0a87975128ec 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -422,16 +422,6 @@ struct ieee802154_mlme_ops { struct ieee802154_mac_params *params); struct ieee802154_llsec_ops *llsec; - - /* The fields below are required. */ - - /* - * FIXME: these should become the part of PIB/MIB interface. - * However we still don't have IB interface of any kind - */ - __le16 (*get_pan_id)(const struct net_device *dev); - __le16 (*get_short_addr)(const struct net_device *dev); - u8 (*get_dsn)(const struct net_device *dev); }; static inline struct ieee802154_mlme_ops * @@ -440,10 +430,4 @@ ieee802154_mlme_ops(const struct net_device *dev) return dev->ml_priv; } -static inline struct ieee802154_reduced_mlme_ops * -ieee802154_reduced_mlme_ops(const struct net_device *dev) -{ - return dev->ml_priv; -} - #endif diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 497bc14cdb85..0320bbb7d7b5 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -98,7 +98,8 @@ struct inet_connection_sock { const struct tcp_congestion_ops *icsk_ca_ops; const struct inet_connection_sock_af_ops *icsk_af_ops; unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); - __u8 icsk_ca_state:7, + __u8 icsk_ca_state:6, + icsk_ca_setsockopt:1, icsk_ca_dst_locked:1; __u8 icsk_retransmits; __u8 icsk_pending; diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 8d1765577acc..e1300b3dd597 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -43,7 +43,7 @@ enum { * @len: total length of the original datagram * @meat: length of received fragments so far * @flags: fragment queue flags - * @max_size: (ipv4 only) maximum received fragment size with IP_DF set + * @max_size: maximum received fragment size * @net: namespace that this frag belongs to */ struct inet_frag_queue { diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 774d24151d4a..b73c88a19dd4 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -164,52 +163,12 @@ static inline spinlock_t *inet_ehash_lockp( return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask]; } -static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) -{ - unsigned int i, size = 256; -#if defined(CONFIG_PROVE_LOCKING) - unsigned int nr_pcpus = 2; -#else - unsigned int nr_pcpus = num_possible_cpus(); -#endif - if (nr_pcpus >= 4) - size = 512; - if (nr_pcpus >= 8) - size = 1024; - if (nr_pcpus >= 16) - size = 2048; - if (nr_pcpus >= 32) - size = 4096; - if (sizeof(spinlock_t) != 0) { -#ifdef CONFIG_NUMA - if (size * sizeof(spinlock_t) > PAGE_SIZE) - hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t)); - else -#endif - hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t), - GFP_KERNEL); - if (!hashinfo->ehash_locks) - return ENOMEM; - for (i = 0; i < size; i++) - spin_lock_init(&hashinfo->ehash_locks[i]); - } - hashinfo->ehash_locks_mask = size - 1; - return 0; -} +int inet_ehash_locks_alloc(struct inet_hashinfo 
*hashinfo); static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo) { - if (hashinfo->ehash_locks) { -#ifdef CONFIG_NUMA - unsigned int size = (hashinfo->ehash_locks_mask + 1) * - sizeof(spinlock_t); - if (size > PAGE_SIZE) - vfree(hashinfo->ehash_locks); - else -#endif - kfree(hashinfo->ehash_locks); - hashinfo->ehash_locks = NULL; - } + kvfree(hashinfo->ehash_locks); + hashinfo->ehash_locks = NULL; } struct inet_bind_bucket * diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index b6c3737da4e9..47eb67b08abd 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -187,6 +187,7 @@ struct inet_sock { transparent:1, mc_all:1, nodefrag:1; + __u8 bind_address_no_port:1; __u8 rcv_tos; __u8 convert_csum; int uc_index; diff --git a/include/net/ip.h b/include/net/ip.h index 7921a36b805c..0750a186ea63 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -45,6 +45,7 @@ struct inet_skb_parm { #define IPSKB_FRAG_COMPLETE BIT(3) #define IPSKB_REROUTED BIT(4) #define IPSKB_DOREDIRECT BIT(5) +#define IPSKB_FRAG_PMTU BIT(6) u16 frag_max_size; }; @@ -354,13 +355,30 @@ static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto) skb->len, proto, 0); } +/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store + * Equivalent to : flow->v4addrs.src = iph->saddr; + * flow->v4addrs.dst = iph->daddr; + */ +static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow, + const struct iphdr *iph) +{ + BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) != + offsetof(typeof(flow->addrs), v4addrs.src) + + sizeof(flow->addrs.v4addrs.src)); + memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs)); + flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; +} + static inline void inet_set_txhash(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct flow_keys keys; - keys.addrs.src = inet->inet_saddr; - keys.addrs.dst = inet->inet_daddr; + memset(&keys, 0, sizeof(keys)); + + keys.addrs.v4addrs.src = inet->inet_saddr; + keys.addrs.v4addrs.dst = inet->inet_daddr; + keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; keys.ports.src = inet->inet_sport; keys.ports.dst = inet->inet_dport; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 35d485c78080..82dbdb092a5d 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -692,6 +692,20 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6, return hlimit; } +/* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store + * Equivalent to : flow->v6addrs.src = iph->saddr; + * flow->v6addrs.dst = iph->daddr; + */ +static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow, + const struct ipv6hdr *iph) +{ + BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) != + offsetof(typeof(flow->addrs), v6addrs.src) + + sizeof(flow->addrs.v6addrs.src)); + memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs)); + flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; +} + #if IS_ENABLED(CONFIG_IPV6) static inline void ip6_set_txhash(struct sock *sk) { @@ -699,8 +713,13 @@ static inline void ip6_set_txhash(struct sock *sk) struct ipv6_pinfo *np = inet6_sk(sk); struct flow_keys keys; - keys.addrs.src = (__force __be32)ipv6_addr_hash(&np->saddr); - keys.addrs.dst = (__force __be32)ipv6_addr_hash(&sk->sk_v6_daddr); + memset(&keys, 0, sizeof(keys)); + + memcpy(&keys.addrs.v6addrs.src, &np->saddr, + sizeof(keys.addrs.v6addrs.src)); + memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr, + 
sizeof(keys.addrs.v6addrs.dst)); + keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; keys.ports.src = inet->inet_sport; keys.ports.dst = inet->inet_dport; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 2f8b7decace0..887fe95b9805 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1728,8 +1728,7 @@ struct ieee80211_tx_control { * @sta: station table entry, %NULL for per-vif queue * @tid: the TID for this queue (unused for per-vif queue) * @ac: the AC for this queue - * @drv_priv: data area for driver use, will always be aligned to - * sizeof(void *). + * @drv_priv: driver private area, sized by hw->txq_data_size * * The driver can obtain packets from this queue by calling * ieee80211_tx_dequeue(). diff --git a/include/net/mac802154.h b/include/net/mac802154.h index 7df28a4c23f9..9605c7f7453f 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -89,41 +89,26 @@ struct ieee802154_hw { #define IEEE802154_HW_TX_OMIT_CKSUM 0x00000001 /* Indicates that receiver will autorespond with ACK frames. */ #define IEEE802154_HW_AACK 0x00000002 -/* Indicates that transceiver will support transmit power setting. */ -#define IEEE802154_HW_TXPOWER 0x00000004 /* Indicates that transceiver will support listen before transmit. */ -#define IEEE802154_HW_LBT 0x00000008 -/* Indicates that transceiver will support cca mode setting. */ -#define IEEE802154_HW_CCA_MODE 0x00000010 -/* Indicates that transceiver will support cca ed level setting. */ -#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020 +#define IEEE802154_HW_LBT 0x00000004 /* Indicates that transceiver will support csma (max_be, min_be, csma retries) * settings. */ -#define IEEE802154_HW_CSMA_PARAMS 0x00000040 +#define IEEE802154_HW_CSMA_PARAMS 0x00000008 /* Indicates that transceiver will support ARET frame retries setting. */ -#define IEEE802154_HW_FRAME_RETRIES 0x00000080 +#define IEEE802154_HW_FRAME_RETRIES 0x00000010 /* Indicates that transceiver will support hardware address filter setting. */ -#define IEEE802154_HW_AFILT 0x00000100 +#define IEEE802154_HW_AFILT 0x00000020 /* Indicates that transceiver will support promiscuous mode setting. */ -#define IEEE802154_HW_PROMISCUOUS 0x00000200 +#define IEEE802154_HW_PROMISCUOUS 0x00000040 /* Indicates that receiver omits FCS. */ -#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000400 +#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000080 /* Indicates that receiver will not filter frames with bad checksum. */ -#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000800 +#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000100 /* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */ #define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \ IEEE802154_HW_RX_OMIT_CKSUM) -/* This groups the most common CSMA support fields into one. */ -#define IEEE802154_HW_CSMA (IEEE802154_HW_CCA_MODE | \ - IEEE802154_HW_CCA_ED_LEVEL | \ - IEEE802154_HW_CSMA_PARAMS) - -/* This groups the most common ARET support fields into one. */ -#define IEEE802154_HW_ARET (IEEE802154_HW_CSMA | \ - IEEE802154_HW_FRAME_RETRIES) - /* struct ieee802154_ops - callbacks from mac802154 to the driver * * This structure contains various callbacks that the driver may @@ -171,7 +156,7 @@ struct ieee802154_hw { * Returns either zero, or negative errno. * * set_txpower: - * Set radio transmit power in dB. Called with pib_lock held. + * Set radio transmit power in mBm. Called with pib_lock held. * Returns either zero, or negative errno. 
* * set_lbt @@ -184,7 +169,7 @@ struct ieee802154_hw { * Returns either zero, or negative errno. * * set_cca_ed_level - * Sets the CCA energy detection threshold in dBm. Called with pib_lock + * Sets the CCA energy detection threshold in mBm. Called with pib_lock * held. * Returns either zero, or negative errno. * @@ -213,12 +198,11 @@ struct ieee802154_ops { int (*set_hw_addr_filt)(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed); - int (*set_txpower)(struct ieee802154_hw *hw, s8 dbm); + int (*set_txpower)(struct ieee802154_hw *hw, s32 mbm); int (*set_lbt)(struct ieee802154_hw *hw, bool on); int (*set_cca_mode)(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca); - int (*set_cca_ed_level)(struct ieee802154_hw *hw, - s32 level); + int (*set_cca_ed_level)(struct ieee802154_hw *hw, s32 mbm); int (*set_csma_params)(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries); int (*set_frame_retries)(struct ieee802154_hw *hw, diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index e6bcf55dcf20..3d6f48ca40a7 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -819,6 +819,7 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, * @use: number of chain references to this table * @flags: table flag (see enum nft_table_flags) * @name: name of the table + * @dev: this table is bound to this device (if any) */ struct nft_table { struct list_head list; @@ -828,6 +829,11 @@ struct nft_table { u32 use; u16 flags; char name[NFT_TABLE_MAXNAMELEN]; + struct net_device *dev; +}; + +enum nft_af_flags { + NFT_AF_NEEDS_DEV = (1 << 0), }; /** @@ -838,6 +844,7 @@ struct nft_table { * @nhooks: number of hooks in this family * @owner: module owner * @tables: used internally + * @flags: family flags * @nops: number of hook ops in this family * @hook_ops_init: initialization function for chain hook ops * @hooks: hookfn overrides for packet validation @@ -848,6 +855,7 @@ struct nft_af_info { unsigned int nhooks; struct module *owner; struct list_head tables; + u32 flags; unsigned int nops; void (*hook_ops_init)(struct nf_hook_ops *, unsigned int); diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 6848b8bb2e63..c68926b4899c 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -19,6 +19,7 @@ struct sock; struct local_ports { seqlock_t lock; int range[2]; + bool warned; }; struct ping_group_range { diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h index eee608b12cc9..c80781146019 100644 --- a/include/net/netns/nftables.h +++ b/include/net/netns/nftables.h @@ -13,6 +13,7 @@ struct netns_nftables { struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; + struct nft_af_info *netdev; unsigned int base_seq; u8 gencursor; }; diff --git a/include/net/nl802154.h b/include/net/nl802154.h index f8b5bc997959..0badebd1de7f 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -100,6 +100,8 @@ enum nl802154_attrs { NL802154_ATTR_EXTENDED_ADDR, + NL802154_ATTR_WPAN_PHY_CAPS, + /* add attributes here, update the policy in nl802154.c */ __NL802154_ATTR_AFTER_LAST, @@ -119,6 +121,61 @@ enum nl802154_iftype { NL802154_IFTYPE_MAX = NUM_NL802154_IFTYPES - 1 }; +/** + * enum nl802154_wpan_phy_capability_attr - wpan phy capability attributes + * + * @__NL802154_CAP_ATTR_INVALID: attribute number 0 is reserved + * @NL802154_CAP_ATTR_CHANNELS: a nested attribute for nl802154_channel_attr + * @NL802154_CAP_ATTR_TX_POWERS: a nested 
attribute for + * nl802154_wpan_phy_tx_power + * @NL802154_CAP_ATTR_CCA_ED_LEVELS: a nested attribute for cca ed levels + * @NL802154_CAP_ATTR_CCA_MODES: nl802154_cca_modes flags + * @NL802154_CAP_ATTR_CCA_OPTS: nl802154_cca_opts flags + * @NL802154_CAP_ATTR_MIN_MINBE: minimum of minbe value + * @NL802154_CAP_ATTR_MAX_MINBE: maximum of minbe value + * @NL802154_CAP_ATTR_MIN_MAXBE: minimum of maxbe value + * @NL802154_CAP_ATTR_MAX_MAXBE: maximum of maxbe value + * @NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS: minimum of csma backoff value + * @NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS: maximum of csma backoffs value + * @NL802154_CAP_ATTR_MIN_FRAME_RETRIES: minimum of frame retries value + * @NL802154_CAP_ATTR_MAX_FRAME_RETRIES: maximum of frame retries value + * @NL802154_CAP_ATTR_IFTYPES: nl802154_iftype flags + * @NL802154_CAP_ATTR_LBT: nl802154_supported_bool_states flags + * @NL802154_CAP_ATTR_MAX: highest cap attribute currently defined + * @__NL802154_CAP_ATTR_AFTER_LAST: internal use + */ +enum nl802154_wpan_phy_capability_attr { + __NL802154_CAP_ATTR_INVALID, + + NL802154_CAP_ATTR_IFTYPES, + + NL802154_CAP_ATTR_CHANNELS, + NL802154_CAP_ATTR_TX_POWERS, + + NL802154_CAP_ATTR_CCA_ED_LEVELS, + NL802154_CAP_ATTR_CCA_MODES, + NL802154_CAP_ATTR_CCA_OPTS, + + NL802154_CAP_ATTR_MIN_MINBE, + NL802154_CAP_ATTR_MAX_MINBE, + + NL802154_CAP_ATTR_MIN_MAXBE, + NL802154_CAP_ATTR_MAX_MAXBE, + + NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS, + NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS, + + NL802154_CAP_ATTR_MIN_FRAME_RETRIES, + NL802154_CAP_ATTR_MAX_FRAME_RETRIES, + + NL802154_CAP_ATTR_LBT, + + /* keep last */ + __NL802154_CAP_ATTR_AFTER_LAST, + NL802154_CAP_ATTR_MAX = __NL802154_CAP_ATTR_AFTER_LAST - 1 +}; + /** * enum nl802154_cca_modes - cca modes * @@ -162,4 +219,26 @@ enum nl802154_cca_opts { NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1 }; +/** + * enum nl802154_supported_bool_states - bool states for bool capability entry + * + * @NL802154_SUPPORTED_BOOL_FALSE: indicates to set false + * @NL802154_SUPPORTED_BOOL_TRUE: indicates to set true + * @__NL802154_SUPPORTED_BOOL_INVALD: reserved + * @NL802154_SUPPORTED_BOOL_BOTH: indicates to set true and false + * @__NL802154_SUPPORTED_BOOL_AFTER_LAST: Internal + * @NL802154_SUPPORTED_BOOL_MAX: highest value for bool states + */ +enum nl802154_supported_bool_states { + NL802154_SUPPORTED_BOOL_FALSE, + NL802154_SUPPORTED_BOOL_TRUE, + /* to handle them in a mask */ + __NL802154_SUPPORTED_BOOL_INVALD, + NL802154_SUPPORTED_BOOL_BOTH, + + /* keep last */ + __NL802154_SUPPORTED_BOOL_AFTER_LAST, + NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1 +}; + #endif /* __NL802154_H */ diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index c56a438c3a1e..ce13cf20f625 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -574,11 +574,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr) /* Map v4 address to v4-mapped v6 address */ static inline void sctp_v4_map_v6(union sctp_addr *addr) { + __be16 port; + + port = addr->v4.sin_port; + addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr; + addr->v6.sin6_port = port; addr->v6.sin6_family = AF_INET6; addr->v6.sin6_flowinfo = 0; addr->v6.sin6_scope_id = 0; - addr->v6.sin6_port = addr->v4.sin_port; - addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr; addr->v6.sin6_addr.s6_addr32[0] = 0; addr->v6.sin6_addr.s6_addr32[1] = 0; addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff); diff --git
a/include/net/tcp.h b/include/net/tcp.h index 2bb2bad21d5c..978cebedd3fc 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -469,6 +469,9 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size); void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb); /* From syncookies.c */ +struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, + struct request_sock *req, + struct dst_entry *dst); int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, u32 cookie); struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb); diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h index 53a18b3635e2..df705908480a 100644 --- a/include/sound/hda_regmap.h +++ b/include/sound/hda_regmap.h @@ -9,6 +9,8 @@ #include #include +#define AC_AMP_FAKE_MUTE 0x10 /* fake mute bit set to amp verbs */ + int snd_hdac_regmap_init(struct hdac_device *codec); void snd_hdac_regmap_exit(struct hdac_device *codec); int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec, diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index d61be7297b2c..5f1225706993 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -1,9 +1,7 @@ #ifndef TARGET_CORE_BACKEND_H #define TARGET_CORE_BACKEND_H -#define TRANSPORT_PLUGIN_PHBA_PDEV 1 -#define TRANSPORT_PLUGIN_VHBA_PDEV 2 -#define TRANSPORT_PLUGIN_VHBA_VDEV 3 +#define TRANSPORT_FLAG_PASSTHROUGH 1 struct target_backend_cits { struct config_item_type tb_dev_cit; @@ -22,7 +20,7 @@ struct se_subsystem_api { char inquiry_rev[4]; struct module *owner; - u8 transport_type; + u8 transport_flags; int (*attach_hba)(struct se_hba *, u32); void (*detach_hba)(struct se_hba *); @@ -138,5 +136,7 @@ int se_dev_set_queue_depth(struct se_device *, u32); int se_dev_set_max_sectors(struct se_device *, u32); int se_dev_set_optimal_sectors(struct se_device *, u32); int se_dev_set_block_size(struct se_device *, u32); +sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, + sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); #endif /* TARGET_CORE_BACKEND_H */ diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h index 25bb04c4209e..b99c01170392 100644 --- a/include/target/target_core_configfs.h +++ b/include/target/target_core_configfs.h @@ -40,8 +40,6 @@ struct target_fabric_configfs { struct config_item *tf_fabric; /* Passed from fabric modules */ struct config_item_type *tf_fabric_cit; - /* Pointer to target core subsystem */ - struct configfs_subsystem *tf_subsys; /* Pointer to fabric's struct module */ struct module *tf_module; struct target_core_fabric_ops tf_ops; diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 17c7f5ac7ea0..0f4dc3768587 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -4,7 +4,6 @@ struct target_core_fabric_ops { struct module *module; const char *name; - struct configfs_subsystem *tf_subsys; char *(*get_fabric_name)(void); u8 (*get_fabric_proto_ident)(struct se_portal_group *); char *(*tpg_get_wwn)(struct se_portal_group *); @@ -109,6 +108,9 @@ struct target_core_fabric_ops { int target_register_template(const struct target_core_fabric_ops *fo); void target_unregister_template(const struct target_core_fabric_ops *fo); +int target_depend_item(struct config_item *item); +void target_undepend_item(struct config_item *item); + struct se_session *transport_init_session(enum target_prot_op); int 
transport_alloc_session_tags(struct se_session *, unsigned int, unsigned int); diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 81ea59812117..f7554fd7fc62 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -140,19 +140,42 @@ DEFINE_EVENT(kmem_free, kfree, TP_ARGS(call_site, ptr) ); -DEFINE_EVENT(kmem_free, kmem_cache_free, +DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free, TP_PROTO(unsigned long call_site, const void *ptr), - TP_ARGS(call_site, ptr) + TP_ARGS(call_site, ptr), + + /* + * This trace can be potentially called from an offlined cpu. + * Since trace points use RCU and RCU should not be used from + * offline cpus, filter such calls out. + * While this trace can be called from a preemptible section, + * it has no impact on the condition since tasks can migrate + * only from online cpus to other online cpus. Thus it's safe + * to use raw_smp_processor_id. + */ + TP_CONDITION(cpu_online(raw_smp_processor_id())) ); -TRACE_EVENT(mm_page_free, +TRACE_EVENT_CONDITION(mm_page_free, TP_PROTO(struct page *page, unsigned int order), TP_ARGS(page, order), + + /* + * This trace can be potentially called from an offlined cpu. + * Since trace points use RCU and RCU should not be used from + * offline cpus, filter such calls out. + * While this trace can be called from a preemptible section, + * it has no impact on the condition since tasks can migrate + * only from online cpus to other online cpus. Thus it's safe + * to use raw_smp_processor_id. + */ + TP_CONDITION(cpu_online(raw_smp_processor_id())), + TP_STRUCT__entry( __field( unsigned long, pfn ) __field( unsigned int, order ) @@ -253,12 +276,35 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked, TP_ARGS(page, order, migratetype) ); -DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain, +TRACE_EVENT_CONDITION(mm_page_pcpu_drain, TP_PROTO(struct page *page, unsigned int order, int migratetype), TP_ARGS(page, order, migratetype), + /* + * This trace can be potentially called from an offlined cpu. + * Since trace points use RCU and RCU should not be used from + * offline cpus, filter such calls out. + * While this trace can be called from a preemptible section, + * it has no impact on the condition since tasks can migrate + * only from online cpus to other online cpus. Thus it's safe + * to use raw_smp_processor_id. + */ + TP_CONDITION(cpu_online(raw_smp_processor_id())), + + TP_STRUCT__entry( + __field( unsigned long, pfn ) + __field( unsigned int, order ) + __field( int, migratetype ) + ), + + TP_fast_assign( + __entry->pfn = page ?
page_to_pfn(page) : -1UL; + __entry->order = order; + __entry->migratetype = migratetype; + ), + TP_printk("page=%p pfn=%lu order=%d migratetype=%d", pfn_to_page(__entry->pfn), __entry->pfn, __entry->order, __entry->migratetype) diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 880dd7437172..c178d13d6f4c 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -250,7 +250,6 @@ DEFINE_EVENT(writeback_class, name, \ DEFINE_WRITEBACK_EVENT(writeback_nowork); DEFINE_WRITEBACK_EVENT(writeback_wake_background); DEFINE_WRITEBACK_EVENT(writeback_bdi_register); -DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister); DECLARE_EVENT_CLASS(wbc_class, TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 871e73f99a4d..94d44ab2fda1 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -1038,6 +1038,7 @@ struct drm_radeon_cs { #define RADEON_INFO_CURRENT_GPU_SCLK 0x22 #define RADEON_INFO_CURRENT_GPU_MCLK 0x23 #define RADEON_INFO_READ_REG 0x24 +#define RADEON_INFO_VA_UNMAP_WORKING 0x25 struct drm_radeon_info { uint32_t request; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index f0a9af8b4dae..602f05b7a275 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -220,6 +220,16 @@ enum bpf_func_id { * Return: 0 on success */ BPF_FUNC_tail_call, + + /** + * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev + * @skb: pointer to skb + * @ifindex: ifindex of the net device + * @flags: bit 0 - if set, redirect to ingress instead of egress + * other bits - reserved + * Return: 0 on success + */ + BPF_FUNC_clone_redirect, __BPF_FUNC_MAX_ID, }; @@ -236,6 +246,10 @@ struct __sk_buff { __u32 vlan_tci; __u32 vlan_proto; __u32 priority; + __u32 ingress_ifindex; + __u32 ifindex; + __u32 tc_index; + __u32 cb[5]; }; #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index ae832b45b44c..0594933cdf55 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -796,6 +796,31 @@ struct ethtool_rx_flow_spec { __u32 location; }; +/* How rings are laid out when accessing virtual functions or + * offloaded queues is device specific. To allow users to do flow + * steering and specify these queues the ring cookie is partitioned + * into a 32-bit queue index with an 8-bit virtual function id. + * This also leaves 3 bytes for further specifiers. It is possible + * future devices may support more than 256 virtual functions if + * devices start supporting PCIe w/ARI. However, at the moment I + * do not know of any devices that support this so I do not reserve + * space for this at this time. If a future patch consumes the next + * byte it should be aware of this possibility.
+ */ +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFLL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000LL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; + +static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; + /** * struct ethtool_rxnfc - command to get or set RX flow classification rules * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH, diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index afccc9393fef..1737b7a8272b 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -395,6 +395,8 @@ enum { IFLA_GENEVE_UNSPEC, IFLA_GENEVE_ID, IFLA_GENEVE_REMOTE, + IFLA_GENEVE_TTL, + IFLA_GENEVE_TOS, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index 589ced069e8a..83d6236a2f08 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -69,6 +69,8 @@ enum { #define IPPROTO_SCTP IPPROTO_SCTP IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */ #define IPPROTO_UDPLITE IPPROTO_UDPLITE + IPPROTO_MPLS = 137, /* MPLS in IP (RFC 4023) */ +#define IPPROTO_MPLS IPPROTO_MPLS IPPROTO_RAW = 255, /* Raw IP packets */ #define IPPROTO_RAW IPPROTO_RAW IPPROTO_MAX @@ -110,6 +112,7 @@ struct in_addr { #define IP_MINTTL 21 #define IP_NODEFRAG 22 #define IP_CHECKSUM 23 +#define IP_BIND_ADDRESS_NO_PORT 24 /* IP_MTU_DISCOVER values */ #define IP_PMTUDISC_DONT 0 /* Never send DF frames */ diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 5fa1cd04762e..89a671e0f5e7 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -146,12 +146,14 @@ enum nft_table_flags { * @NFTA_TABLE_NAME: name of the table (NLA_STRING) * @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32) * @NFTA_TABLE_USE: number of chains in this table (NLA_U32) + * @NFTA_TABLE_DEV: net device name (NLA_STRING) */ enum nft_table_attributes { NFTA_TABLE_UNSPEC, NFTA_TABLE_NAME, NFTA_TABLE_FLAGS, NFTA_TABLE_USE, + NFTA_TABLE_DEV, __NFTA_TABLE_MAX }; #define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1) diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index bbd49a0c46c7..1dab77601c21 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -153,6 +153,8 @@ enum ovs_packet_cmd { * flow key against the kernel's. * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes. + * Also used in upcall when %OVS_ACTION_ATTR_USERSPACE has optional + * %OVS_USERSPACE_ATTR_ACTIONS attribute. * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an * %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content @@ -528,6 +530,7 @@ enum ovs_sample_attr { * copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA. * @OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: If present, u32 output port to get * tunnel info. + * @OVS_USERSPACE_ATTR_ACTIONS: If present, send actions with upcall. */ enum ovs_userspace_attr { OVS_USERSPACE_ATTR_UNSPEC, @@ -535,6 +538,7 @@ enum ovs_userspace_attr { OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. 
*/ OVS_USERSPACE_ATTR_EGRESS_TUN_PORT, /* Optional, u32 output port * to get tunnel info. */ + OVS_USERSPACE_ATTR_ACTIONS, /* Optional flag to get actions. */ __OVS_USERSPACE_ATTR_MAX }; diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index 91950950aa59..0f9265cb2a96 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -38,6 +38,8 @@ #define RDS_IB_ABI_VERSION 0x301 +#define SOL_RDS 276 + /* * setsockopt/getsockopt for SOL_RDS */ @@ -48,6 +50,14 @@ #define RDS_RECVERR 5 #define RDS_CONG_MONITOR 6 #define RDS_GET_MR_FOR_DEST 7 +#define SO_RDS_TRANSPORT 8 + +/* supported values for SO_RDS_TRANSPORT */ +#define RDS_TRANS_IB 0 +#define RDS_TRANS_IWARP 1 +#define RDS_TRANS_TCP 2 +#define RDS_TRANS_COUNT 3 +#define RDS_TRANS_NONE (~0) /* * Control message types for SOL_RDS. diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index 984169a819ee..d7f1cbc3766c 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h @@ -26,6 +26,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include +#include #include #include diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 614bcd4c1d74..cb31229a6fa4 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -202,7 +202,7 @@ static int prog_array_map_update_elem(struct bpf_map *map, void *key, old_prog = xchg(array->prog + index, prog); if (old_prog) - bpf_prog_put(old_prog); + bpf_prog_put_rcu(old_prog); return 0; } @@ -218,7 +218,7 @@ static int prog_array_map_delete_elem(struct bpf_map *map, void *key) old_prog = xchg(array->prog + index, NULL); if (old_prog) { - bpf_prog_put(old_prog); + bpf_prog_put_rcu(old_prog); return 0; } else { return -ENOENT; diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index d44b25cbe460..1e00aa3316dc 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -26,9 +26,10 @@ #include #include #include -#include #include +#include + /* Registers */ #define BPF_R0 regs[BPF_REG_0] #define BPF_R1 regs[BPF_REG_1] @@ -62,6 +63,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns ptr = skb_network_header(skb) + k - SKF_NET_OFF; else if (k >= SKF_LL_OFF) ptr = skb_mac_header(skb) + k - SKF_LL_OFF; + if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) return ptr; @@ -176,15 +178,6 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) return 0; } -const struct bpf_func_proto bpf_tail_call_proto = { - .func = NULL, - .gpl_only = false, - .ret_type = RET_VOID, - .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_CONST_MAP_PTR, - .arg3_type = ARG_ANYTHING, -}; - /** * __bpf_prog_run - run eBPF program on a given context * @ctx: is the data we are operating on @@ -650,36 +643,35 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn) return 0; } -void __weak bpf_int_jit_compile(struct bpf_prog *prog) -{ -} - -bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp) +bool bpf_prog_array_compatible(struct bpf_array *array, + const struct bpf_prog *fp) { - if (array->owner_prog_type) { - if (array->owner_prog_type != fp->type) - return false; - if (array->owner_jited != fp->jited) - return false; - } else { + if (!array->owner_prog_type) { + /* There's no owner yet where we could check for + * compatibility. 
+ */ array->owner_prog_type = fp->type; array->owner_jited = fp->jited; + + return true; } - return true; + + return array->owner_prog_type == fp->type && + array->owner_jited == fp->jited; } -static int check_tail_call(const struct bpf_prog *fp) +static int bpf_check_tail_call(const struct bpf_prog *fp) { struct bpf_prog_aux *aux = fp->aux; int i; for (i = 0; i < aux->used_map_cnt; i++) { + struct bpf_map *map = aux->used_maps[i]; struct bpf_array *array; - struct bpf_map *map; - map = aux->used_maps[i]; if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) continue; + array = container_of(map, struct bpf_array, map); if (!bpf_prog_array_compatible(array, fp)) return -EINVAL; @@ -689,22 +681,25 @@ static int check_tail_call(const struct bpf_prog *fp) } /** - * bpf_prog_select_runtime - select execution runtime for BPF program + * bpf_prog_select_runtime - select exec runtime for BPF program * @fp: bpf_prog populated with internal BPF program * - * try to JIT internal BPF program, if JIT is not available select interpreter - * BPF program will be executed via BPF_PROG_RUN() macro + * Try to JIT eBPF program, if JIT is not available, use interpreter. + * The BPF program will be executed via BPF_PROG_RUN() macro. */ int bpf_prog_select_runtime(struct bpf_prog *fp) { fp->bpf_func = (void *) __bpf_prog_run; - /* Probe if internal BPF can be JITed */ bpf_int_jit_compile(fp); - /* Lock whole bpf_prog as read-only */ bpf_prog_lock_ro(fp); - return check_tail_call(fp); + /* The tail call compatibility check can only be done at + * this late stage as we need to determine, if we deal + * with JITed or non JITed program concatenations and not + * all eBPF JITs might immediately support all features. + */ + return bpf_check_tail_call(fp); } EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); @@ -734,6 +729,22 @@ const struct bpf_func_proto bpf_map_delete_elem_proto __weak; const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; +const struct bpf_func_proto bpf_ktime_get_ns_proto __weak; + +/* Always built-in helper functions. */ +const struct bpf_func_proto bpf_tail_call_proto = { + .func = NULL, + .gpl_only = false, + .ret_type = RET_VOID, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +}; + +/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */ +void __weak bpf_int_jit_compile(struct bpf_prog *prog) +{ +} /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call * skb_copy_bits(), so provide a weak definition of it for NET-less config. 
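For context on the bpf_check_tail_call()/bpf_prog_array_compatible() logic above: a BPF_MAP_TYPE_PROG_ARRAY adopts the type and JIT state of the first program stored in it, and every later insertion must match. A rough userspace sketch of the visible effect, assuming the bpf_create_map()/bpf_update_elem() wrapper signatures from samples/bpf (treat the exact signatures as assumptions):

	/* Sketch: socket_fd and kprobe_fd are fds of already-loaded BPF
	 * programs of different program types.
	 */
	static int demo_prog_array(int socket_fd, int kprobe_fd)
	{
		int map_fd, key;

		map_fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY,
					sizeof(key), sizeof(int), 4);
		if (map_fd < 0)
			return map_fd;

		key = 0;	/* first insert fixes array->owner_prog_type */
		bpf_update_elem(map_fd, &key, &socket_fd, BPF_ANY);

		key = 1;	/* a different prog type is now rejected (-EINVAL) */
		return bpf_update_elem(map_fd, &key, &kprobe_fd, BPF_ANY);
	}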
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index bd7f5988ed9c..7ad5d8842d5b 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -13,6 +13,7 @@ #include #include #include +#include /* If kernel subsystem is allowing eBPF programs to call this function, * inside its own verifier_ops->get_func_proto() callback it should return @@ -44,11 +45,11 @@ static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) } const struct bpf_func_proto bpf_map_lookup_elem_proto = { - .func = bpf_map_lookup_elem, - .gpl_only = false, - .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, - .arg1_type = ARG_CONST_MAP_PTR, - .arg2_type = ARG_PTR_TO_MAP_KEY, + .func = bpf_map_lookup_elem, + .gpl_only = false, + .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_KEY, }; static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) @@ -63,13 +64,13 @@ static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) } const struct bpf_func_proto bpf_map_update_elem_proto = { - .func = bpf_map_update_elem, - .gpl_only = false, - .ret_type = RET_INTEGER, - .arg1_type = ARG_CONST_MAP_PTR, - .arg2_type = ARG_PTR_TO_MAP_KEY, - .arg3_type = ARG_PTR_TO_MAP_VALUE, - .arg4_type = ARG_ANYTHING, + .func = bpf_map_update_elem, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_KEY, + .arg3_type = ARG_PTR_TO_MAP_VALUE, + .arg4_type = ARG_ANYTHING, }; static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) @@ -83,11 +84,11 @@ static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) } const struct bpf_func_proto bpf_map_delete_elem_proto = { - .func = bpf_map_delete_elem, - .gpl_only = false, - .ret_type = RET_INTEGER, - .arg1_type = ARG_CONST_MAP_PTR, - .arg2_type = ARG_PTR_TO_MAP_KEY, + .func = bpf_map_delete_elem, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_KEY, }; static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) @@ -111,3 +112,15 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto = { .gpl_only = false, .ret_type = RET_INTEGER, }; + +static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) +{ + /* NMI safe access to clock monotonic */ + return ktime_get_mono_fast_ns(); +} + +const struct bpf_func_proto bpf_ktime_get_ns_proto = { + .func = bpf_ktime_get_ns, + .gpl_only = true, + .ret_type = RET_INTEGER, +}; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 98a69bd83069..a1b14d197a4f 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -432,6 +432,23 @@ static void free_used_maps(struct bpf_prog_aux *aux) kfree(aux->used_maps); } +static void __prog_put_rcu(struct rcu_head *rcu) +{ + struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); + + free_used_maps(aux); + bpf_prog_free(aux->prog); +} + +/* version of bpf_prog_put() that is called after a grace period */ +void bpf_prog_put_rcu(struct bpf_prog *prog) +{ + if (atomic_dec_and_test(&prog->aux->refcnt)) { + prog->aux->prog = prog; + call_rcu(&prog->aux->rcu, __prog_put_rcu); + } +} + void bpf_prog_put(struct bpf_prog *prog) { if (atomic_dec_and_test(&prog->aux->refcnt)) { @@ -445,7 +462,7 @@ static int bpf_prog_release(struct inode *inode, struct file *filp) { struct bpf_prog *prog = filp->private_data; - bpf_prog_put(prog); + bpf_prog_put_rcu(prog); return 0; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 
cfd9a40b9a5a..039d866fd36a 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1692,6 +1692,8 @@ static int do_check(struct verifier_env *env) } } else if (class == BPF_STX) { + enum bpf_reg_type dst_reg_type; + if (BPF_MODE(insn->code) == BPF_XADD) { err = check_xadd(env, insn); if (err) @@ -1700,11 +1702,6 @@ static int do_check(struct verifier_env *env) continue; } - if (BPF_MODE(insn->code) != BPF_MEM || - insn->imm != 0) { - verbose("BPF_STX uses reserved fields\n"); - return -EINVAL; - } /* check src1 operand */ err = check_reg_arg(regs, insn->src_reg, SRC_OP); if (err) @@ -1714,6 +1711,8 @@ static int do_check(struct verifier_env *env) if (err) return err; + dst_reg_type = regs[insn->dst_reg].type; + /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, @@ -1721,6 +1720,15 @@ static int do_check(struct verifier_env *env) if (err) return err; + if (insn->imm == 0) { + insn->imm = dst_reg_type; + } else if (dst_reg_type != insn->imm && + (dst_reg_type == PTR_TO_CTX || + insn->imm == PTR_TO_CTX)) { + verbose("same insn cannot be used with different pointers\n"); + return -EINVAL; + } + } else if (class == BPF_ST) { if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) { @@ -1839,12 +1847,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env) for (i = 0; i < insn_cnt; i++, insn++) { if (BPF_CLASS(insn->code) == BPF_LDX && - (BPF_MODE(insn->code) != BPF_MEM || - insn->imm != 0)) { + (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { verbose("BPF_LDX uses reserved fields\n"); return -EINVAL; } + if (BPF_CLASS(insn->code) == BPF_STX && + ((BPF_MODE(insn->code) != BPF_MEM && + BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { + verbose("BPF_STX uses reserved fields\n"); + return -EINVAL; + } + if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { struct bpf_map *map; struct fd f; @@ -1967,12 +1981,17 @@ static int convert_ctx_accesses(struct verifier_env *env) struct bpf_prog *new_prog; u32 cnt; int i; + enum bpf_access_type type; if (!env->prog->aux->ops->convert_ctx_access) return 0; for (i = 0; i < insn_cnt; i++, insn++) { - if (insn->code != (BPF_LDX | BPF_MEM | BPF_W)) + if (insn->code == (BPF_LDX | BPF_MEM | BPF_W)) + type = BPF_READ; + else if (insn->code == (BPF_STX | BPF_MEM | BPF_W)) + type = BPF_WRITE; + else continue; if (insn->imm != PTR_TO_CTX) { @@ -1982,7 +2001,7 @@ static int convert_ctx_accesses(struct verifier_env *env) } cnt = env->prog->aux->ops-> - convert_ctx_access(insn->dst_reg, insn->src_reg, + convert_ctx_access(type, insn->dst_reg, insn->src_reg, insn->off, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose("bpf verifier is misconfigured\n"); diff --git a/kernel/compat.c b/kernel/compat.c index 24f00610c575..333d364be29d 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -912,7 +912,8 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, * bitmap. We must however ensure the end of the * kernel bitmap is zeroed. */ - if (nr_compat_longs-- > 0) { + if (nr_compat_longs) { + nr_compat_longs--; if (__get_user(um, umask)) return -EFAULT; } else { @@ -954,7 +955,8 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, * We dont want to write past the end of the userspace * bitmap. 
*/ - if (nr_compat_longs-- > 0) { + if (nr_compat_longs) { + nr_compat_longs--; if (__put_user(um, umask)) return -EFAULT; } diff --git a/kernel/events/core.c b/kernel/events/core.c index 1a3bf48743ce..eddf1ed4155e 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3442,7 +3442,6 @@ static void free_event_rcu(struct rcu_head *head) if (event->ns) put_pid_ns(event->ns); perf_event_free_filter(event); - perf_event_free_bpf_prog(event); kfree(event); } @@ -3573,6 +3572,8 @@ static void __free_event(struct perf_event *event) put_callchain_buffers(); } + perf_event_free_bpf_prog(event); + if (event->destroy) event->destroy(event); diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 232f00f273cb..725c416085e3 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -493,6 +493,20 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, rb->aux_pages[rb->aux_nr_pages] = page_address(page++); } + /* + * In overwrite mode, PMUs that don't support SG may not handle more + * than one contiguous allocation, since they rely on PMI to do double + * buffering. In this case, the entire buffer has to be one contiguous + * chunk. + */ + if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) && + overwrite) { + struct page *page = virt_to_page(rb->aux_pages[0]); + + if (page_private(page) != max_order) + goto out; + } + rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages, overwrite); if (!rb->aux_priv) diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index a0831e1b99f4..aaeae885d9af 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3900,7 +3900,8 @@ static void zap_class(struct lock_class *class) list_del_rcu(&class->hash_entry); list_del_rcu(&class->lock_entry); - class->key = NULL; + RCU_INIT_POINTER(class->key, NULL); + RCU_INIT_POINTER(class->name, NULL); } static inline int within(const void *addr, void *start, unsigned long size) diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index ef43ac4bafb5..d83d798bef95 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -426,10 +426,12 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt) static void seq_stats(struct seq_file *m, struct lock_stat_data *data) { - char name[39]; - struct lock_class *class; + struct lockdep_subclass_key *ckey; struct lock_class_stats *stats; + struct lock_class *class; + const char *cname; int i, namelen; + char name[39]; class = data->class; stats = &data->stats; @@ -440,15 +442,25 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data) if (class->subclass) namelen -= 2; - if (!class->name) { + rcu_read_lock_sched(); + cname = rcu_dereference_sched(class->name); + ckey = rcu_dereference_sched(class->key); + + if (!cname && !ckey) { + rcu_read_unlock_sched(); + return; + + } else if (!cname) { char str[KSYM_NAME_LEN]; const char *key_name; - key_name = __get_key_name(class->key, str); + key_name = __get_key_name(ckey, str); snprintf(name, namelen, "%s", key_name); } else { - snprintf(name, namelen, "%s", class->name); + snprintf(name, namelen, "%s", cname); } + rcu_read_unlock_sched(); + namelen = strlen(name); if (class->name_version > 1) { snprintf(name+namelen, 3, "#%d", class->name_version); diff --git a/kernel/module.c b/kernel/module.c index 42a1d2afb217..cfc9e843a924 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3370,6 +3370,9 @@ static int load_module(struct load_info *info, const char __user 
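The lockdep_proc.c rework pairs rcu_read_lock_sched() with rcu_dereference_sched() so that a class being zapped concurrently (see the RCU_INIT_POINTER() calls added to zap_class() above) is either seen with usable name/key pointers or skipped for the whole report line. The read-side shape, reduced to a sketch in which show_class_name() is a made-up consumer:

        static void show_class_name(struct lock_class *class)
        {
                const char *name;

                rcu_read_lock_sched();
                name = rcu_dereference_sched(class->name);
                if (name)
                        pr_info("%s\n", name);  /* stays valid in this section */
                rcu_read_unlock_sched();
        }
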
*uargs, module_bug_cleanup(mod); mutex_unlock(&module_mutex); + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_GOING, mod); + /* we can't deallocate the module until we clear memory protection */ unset_module_init_ro_nx(mod); unset_module_core_ro_nx(mod); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ffeaa4105e48..c2980e8733bc 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2181,7 +2181,7 @@ void task_numa_work(struct callback_head *work) } for (; vma; vma = vma->vm_next) { if (!vma_migratable(vma) || !vma_policy_mof(vma) || - is_vm_hugetlb_page(vma)) { + is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { continue; } diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 76d4bd962b19..93ef7190bdea 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -266,21 +266,23 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) /* * Divide a ktime value by a nanosecond value */ -u64 __ktime_divns(const ktime_t kt, s64 div) +s64 __ktime_divns(const ktime_t kt, s64 div) { - u64 dclc; int sft = 0; + s64 dclc; + u64 tmp; dclc = ktime_to_ns(kt); + tmp = dclc < 0 ? -dclc : dclc; + /* Make sure the divisor is less than 2^32: */ while (div >> 32) { sft++; div >>= 1; } - dclc >>= sft; - do_div(dclc, (unsigned long) div); - - return dclc; + tmp >>= sft; + do_div(tmp, (unsigned long) div); + return dclc < 0 ? -tmp : tmp; } EXPORT_SYMBOL_GPL(__ktime_divns); #endif /* BITS_PER_LONG >= 64 */ diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 646445e41bd4..50c4015a8ad3 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -79,18 +79,6 @@ static const struct bpf_func_proto bpf_probe_read_proto = { .arg3_type = ARG_ANYTHING, }; -static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) -{ - /* NMI safe access to clock monotonic */ - return ktime_get_mono_fast_ns(); -} - -static const struct bpf_func_proto bpf_ktime_get_ns_proto = { - .func = bpf_ktime_get_ns, - .gpl_only = true, - .ret_type = RET_INTEGER, -}; - /* * limited trace_printk() * only %d %u %x %ld %lu %lx %lld %llu %llx %p conversion specifiers allowed diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index 13d945c0d03f..1b28df2d9104 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c @@ -450,7 +450,7 @@ static int __init ring_buffer_benchmark_init(void) if (producer_fifo >= 0) { struct sched_param param = { - .sched_priority = consumer_fifo + .sched_priority = producer_fifo }; sched_setscheduler(producer, SCHED_FIFO, ¶m); } else diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index ced69da0ff55..7f2e97ce71a7 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -1369,19 +1369,26 @@ static int check_preds(struct filter_parse_state *ps) { int n_normal_preds = 0, n_logical_preds = 0; struct postfix_elt *elt; + int cnt = 0; list_for_each_entry(elt, &ps->postfix, list) { - if (elt->op == OP_NONE) + if (elt->op == OP_NONE) { + cnt++; continue; + } if (elt->op == OP_AND || elt->op == OP_OR) { n_logical_preds++; + cnt--; continue; } + if (elt->op != OP_NOT) + cnt--; n_normal_preds++; + WARN_ON_ONCE(cnt < 0); } - if (!n_normal_preds || n_logical_preds >= n_normal_preds) { + if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) { parse_error(ps, FILT_ERR_INVALID_FILTER, 0); return -EINVAL; } diff --git a/lib/cpumask.c b/lib/cpumask.c index 
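The check_preds() change above adds a running operand count to validate the postfix expression as a whole: field and literal elements push one value, binary AND/OR and the comparison operators pop two and push one (net minus one), and unary NOT is neutral. A well-formed filter must end with exactly one value on the stack and may never go negative along the way. A standalone model with single-character tokens ('p' for an operand, '&'/'|' for binary operators, '!' for NOT); postfix_ok() is an invented name:

        #include <stdio.h>

        static int postfix_ok(const char *s)
        {
                int cnt = 0;

                for (; *s; s++) {
                        if (*s == '&' || *s == '|')
                                cnt--;          /* two in, one out */
                        else if (*s != '!')
                                cnt++;          /* operand pushes a value */
                        if (cnt < 0)
                                return 0;       /* operator starved */
                }
                return cnt == 1;                /* exactly one result left */
        }

        int main(void)
        {
                printf("%d %d\n", postfix_ok("pp&"), postfix_ok("pp&p"));
                return 0;                       /* prints "1 0" */
        }
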
830dd5dec40f..5a70f6196f57 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -16,11 +16,10 @@ int cpumask_next_and(int n, const struct cpumask *src1p, const struct cpumask *src2p) { - struct cpumask tmp; - - if (cpumask_and(&tmp, src1p, src2p)) - return cpumask_next(n, &tmp); - return nr_cpu_ids; + while ((n = cpumask_next(n, src1p)) < nr_cpu_ids) + if (cpumask_test_cpu(n, src2p)) + break; + return n; } EXPORT_SYMBOL(cpumask_next_and); @@ -139,64 +138,42 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask) #endif /** - * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first - * + * cpumask_local_spread - select the i'th cpu with local numa cpu's first * @i: index number - * @numa_node: local numa_node - * @dstp: cpumask with the relevant cpu bit set according to the policy + * @node: local numa_node * - * This function sets the cpumask according to a numa aware policy. - * cpumask could be used as an affinity hint for the IRQ related to a - * queue. When the policy is to spread queues across cores - local cores - * first. + * This function selects an online CPU according to a numa aware policy; + * local cpus are returned first, followed by non-local ones, then it + * wraps around. * - * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set - * the cpu bit and need to re-call the function. + * It's not very efficient, but useful for setup. */ -int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) +unsigned int cpumask_local_spread(unsigned int i, int node) { - cpumask_var_t mask; int cpu; - int ret = 0; - - if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) - return -ENOMEM; + /* Wrap: we always want a cpu. */ i %= num_online_cpus(); - if (numa_node == -1 || !cpumask_of_node(numa_node)) { - /* Use all online cpu's for non numa aware system */ - cpumask_copy(mask, cpu_online_mask); + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) + if (i-- == 0) + return cpu; } else { - int n; - - cpumask_and(mask, - cpumask_of_node(numa_node), cpu_online_mask); - - n = cpumask_weight(mask); - if (i >= n) { - i -= n; - - /* If index > number of local cpu's, mask out local - * cpu's - */ - cpumask_andnot(mask, cpu_online_mask, mask); + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) + if (i-- == 0) + return cpu; + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. 
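cpumask_local_spread() keeps the old policy of cpumask_set_cpu_local_first(), local node first, everyone else afterwards, index wrapped over the online count, but drops the temporary cpumask allocation, so the -ENOMEM/-EAGAIN return codes disappear and the function simply returns a CPU number (the remainder of the hunk continues after this sketch). A userspace model of the selection, with toy online/local arrays standing in for the real masks:

        #include <stdio.h>

        static const int online[] = { 0, 1, 2, 3, 4, 5 };
        static const int local[]  = { 2, 3 };   /* CPUs on the target node */

        static int is_local(int cpu)
        {
                for (unsigned int i = 0; i < sizeof(local) / sizeof(local[0]); i++)
                        if (local[i] == cpu)
                                return 1;
                return 0;
        }

        static int local_spread(unsigned int i)
        {
                const unsigned int n = sizeof(online) / sizeof(online[0]);
                unsigned int j;

                i %= n;                         /* wrap: always yield a CPU */
                for (j = 0; j < n; j++)         /* local CPUs first */
                        if (is_local(online[j]) && i-- == 0)
                                return online[j];
                for (j = 0; j < n; j++)         /* then the rest */
                        if (!is_local(online[j]) && i-- == 0)
                                return online[j];
                return -1;                      /* unreachable */
        }

        int main(void)
        {
                for (unsigned int i = 0; i < 8; i++)
                        printf("queue %u -> cpu %d\n", i, local_spread(i));
                return 0;
        }
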
*/ + if (cpumask_test_cpu(cpu, cpumask_of_node(node))) + continue; + + if (i-- == 0) + return cpu; } } - - for_each_cpu(cpu, mask) { - if (--i < 0) - goto out; - } - - ret = -EAGAIN; - -out: - free_cpumask_var(mask); - - if (!ret) - cpumask_set_cpu(cpu, dstp); - - return ret; + BUG(); } -EXPORT_SYMBOL(cpumask_set_cpu_local_first); +EXPORT_SYMBOL(cpumask_local_spread); diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index aac511417ad1..a89d041592c8 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -639,7 +639,7 @@ do { \ ************** MIPS ***************** ***************************************/ #if defined(__mips__) && W_TYPE_SIZE == 32 -#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 +#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4) #define umul_ppmm(w1, w0, u, v) \ do { \ UDItype __ll = (UDItype)(u) * (v); \ @@ -671,7 +671,7 @@ do { \ ************** MIPS/64 ************** ***************************************/ #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 -#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 +#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4) #define umul_ppmm(w1, w0, u, v) \ do { \ typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 48144cdae819..f051d69f0910 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -197,13 +197,13 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb, * Compare counter against given value. * Return 1 if greater, 0 if equal and -1 if less */ -int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) +int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) { s64 count; count = percpu_counter_read(fbc); /* Check to see if rough count will be sufficient for comparison */ - if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) { + if (abs(count - rhs) > (batch * num_online_cpus())) { if (count > rhs) return 1; else @@ -218,7 +218,7 @@ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) else return 0; } -EXPORT_SYMBOL(percpu_counter_compare); +EXPORT_SYMBOL(__percpu_counter_compare); static int __init percpu_counter_startup(void) { diff --git a/lib/rhashtable.c b/lib/rhashtable.c index ca66a0e32c8e..a60a6d335a91 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -26,6 +26,7 @@ #include #include #include +#include #define HASH_DEFAULT_SIZE 64UL #define HASH_MIN_SIZE 4U diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index a28df5206d95..fe9a32591c24 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, return res + find_zero(data) + 1 - align; } res += sizeof(unsigned long); - if (unlikely(max < sizeof(unsigned long))) + /* We already handled 'unsigned long' bytes. Did we do it all ? */ + if (unlikely(max <= sizeof(unsigned long))) break; max -= sizeof(unsigned long); if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) @@ -89,8 +90,15 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, * Get the size of a NUL-terminated string in user space. * * Returns the size of the string INCLUDING the terminating NUL. - * If the string is too long, returns 'count+1'. + * If the string is too long, returns a number larger than @count. User + * has to check the return value against "> count". * On exception (or invalid count), returns 0. + * + * NOTE! You should basically never use this function. 
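The longlong.h hunks fix a classic version-check bug: "__GNUC__ >= 4 && __GNUC_MINOR__ >= 4" was meant to read "gcc 4.4 or newer" but silently excludes gcc 5.0 through 5.3, whose minor number drops back below 4. The corrected predicate handles the major bump explicitly; a tiny userspace check (old_test/new_test are invented names) makes the difference visible:

        #include <stdio.h>

        static int old_test(int major, int minor)
        {
                return major >= 4 && minor >= 4;        /* buggy */
        }

        static int new_test(int major, int minor)
        {
                return major >= 5 || (major >= 4 && minor >= 4);
        }

        int main(void)
        {
                printf("gcc 5.1: old=%d new=%d\n",
                       old_test(5, 1), new_test(5, 1)); /* old=0 new=1 */
                return 0;
        }
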
There is + * almost never any valid case for using the length of a user space + * string, since the string can be changed at any time by other + * threads. Use "strncpy_from_user()" instead to get a stable copy + * of the string. */ long strnlen_user(const char __user *str, long count) { diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 4abda074ea45..3c365ab6cf5f 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -537,8 +537,9 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single); * Allocates bounce buffer and returns its kernel virtual address. */ -phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size, - enum dma_data_direction dir) +static phys_addr_t +map_single(struct device *hwdev, phys_addr_t phys, size_t size, + enum dma_data_direction dir) { dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start); diff --git a/lib/test_bpf.c b/lib/test_bpf.c index c07b8e7db330..7f58c735d745 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -314,6 +314,47 @@ static int bpf_fill_maxinsns10(struct bpf_test *self) return 0; } +static int __bpf_fill_ja(struct bpf_test *self, unsigned int len, + unsigned int plen) +{ + struct sock_filter *insn; + unsigned int rlen; + int i, j; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + rlen = (len % plen) - 1; + + for (i = 0; i + plen < len; i += plen) + for (j = 0; j < plen; j++) + insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, + plen - 1 - j, 0, 0); + for (j = 0; j < rlen; j++) + insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, rlen - 1 - j, + 0, 0); + + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xababcbac); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns11(struct bpf_test *self) +{ + /* Hits 70 passes on x86_64, so cannot get JITed there. */ + return __bpf_fill_ja(self, BPF_MAXINSNS, 68); +} + +static int bpf_fill_ja(struct bpf_test *self) +{ + /* Hits exactly 11 passes on x86_64 JIT. */ + return __bpf_fill_ja(self, 12, 9); +} + static struct bpf_test tests[] = { { "TAX", @@ -4252,6 +4293,14 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + { + "JMP_JA: Jump, gap, jump, ...", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xababcbac } }, + .fill_helper = bpf_fill_ja, + }, { /* Mainly checking JIT here. */ "BPF_MAXINSNS: Maximum possible literals", { }, @@ -4335,6 +4384,14 @@ static struct bpf_test tests[] = { { { 0, 0xabababac } }, .fill_helper = bpf_fill_maxinsns10, }, + { + "BPF_MAXINSNS: Jump, gap, jump, ...", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xababcbac } }, + .fill_helper = bpf_fill_maxinsns11, + }, }; static struct net_device dev; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 6dc4580df2af..000e7b3b9896 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -359,23 +359,6 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi) flush_delayed_work(&bdi->wb.dwork); } -/* - * Called when the device behind @bdi has been removed or ejected. - * - * We can't really do much here except for reducing the dirty ratio at - * the moment. In the future we should be able to set a flag so that - * the filesystem can handle errors at mark_inode_dirty time instead - * of only at writeback time. 
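__bpf_fill_ja() above lays the program out as blocks of unconditional jumps in which every instruction targets the first instruction of the next block, so a single jump per block executes and the remaining slots are dead padding; that "jump, gap, jump" shape is what forces the x86_64 JIT through many shrinking passes. A userspace model of the layout arithmetic for the small len = 12, plen = 9 case (rlen = 12 % 9 - 1 = 2):

        #include <stdio.h>

        int main(void)
        {
                unsigned int len = 12, plen = 9;
                unsigned int rlen = (len % plen) - 1;
                unsigned int i, j;

                for (i = 0; i + plen < len; i += plen)  /* full blocks */
                        for (j = 0; j < plen; j++)
                                printf("insn %2u: ja +%u\n", i + j, plen - 1 - j);
                for (j = 0; j < rlen; j++)              /* remainder chain */
                        printf("insn %2u: ja +%u\n", i + j, rlen - 1 - j);
                printf("insn %2u: ret\n", len - 1);
                return 0;
        }
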
- */ -void bdi_unregister(struct backing_dev_info *bdi) -{ - if (WARN_ON_ONCE(!bdi->dev)) - return; - - bdi_set_min_ratio(bdi, 0); -} -EXPORT_SYMBOL(bdi_unregister); - static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi) { memset(wb, 0, sizeof(*wb)); @@ -443,6 +426,7 @@ void bdi_destroy(struct backing_dev_info *bdi) int i; bdi_wb_shutdown(bdi); + bdi_set_min_ratio(bdi, 0); WARN_ON(!list_empty(&bdi->work_list)); WARN_ON(delayed_work_pending(&bdi->wb.dwork)); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 14c2f2017e37..a04225d372ba 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2323,6 +2323,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, css_get_many(&memcg->css, batch); if (batch > nr_pages) refill_stock(memcg, batch - nr_pages); + if (!(gfp_mask & __GFP_WAIT)) + goto done; /* * If the hierarchy is above the normal consumption range, * make the charging task trim their excess contribution. @@ -5833,9 +5835,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) if (!mem_cgroup_is_root(memcg)) page_counter_uncharge(&memcg->memory, 1); - /* XXX: caller holds IRQ-safe mapping->tree_lock */ - VM_BUG_ON(!irqs_disabled()); - + /* Caller disabled preemption with mapping->tree_lock */ mem_cgroup_charge_statistics(memcg, page, -1); memcg_check_events(memcg, page); } diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 457bde530cbe..9e88f749aa51 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1969,8 +1969,10 @@ void try_offline_node(int nid) * wait_table may be allocated from boot memory, * here only free if it's allocated by vmalloc. */ - if (is_vmalloc_addr(zone->wait_table)) + if (is_vmalloc_addr(zone->wait_table)) { vfree(zone->wait_table); + zone->wait_table = NULL; + } } } EXPORT_SYMBOL(try_offline_node); diff --git a/mm/shmem.c b/mm/shmem.c index de981370fbc5..47d536e59fc0 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3401,7 +3401,13 @@ int shmem_zero_setup(struct vm_area_struct *vma) struct file *file; loff_t size = vma->vm_end - vma->vm_start; - file = shmem_file_setup("dev/zero", size, vma->vm_flags); + /* + * Cloning a new file under mmap_sem leads to a lock ordering conflict + * between XFS directory reading and selinux: since this file is only + * accessible to the user through its mapping, use S_PRIVATE flag to + * bypass file security, in the same way as shmem_kernel_file_setup(). 
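try_offline_node() now clears zone->wait_table right after freeing it, so a later offline/online cycle cannot trip over the stale pointer. The free-then-poison idiom in standalone form, with zone_stub as an invented stand-in for the real zone:

        #include <stdlib.h>

        struct zone_stub {
                void *wait_table;
        };

        static void offline_zone(struct zone_stub *z)
        {
                free(z->wait_table);    /* vfree() in the kernel hunk */
                z->wait_table = NULL;   /* repeated teardown is now a no-op */
        }

        int main(void)
        {
                struct zone_stub z = { .wait_table = malloc(64) };

                offline_zone(&z);
                offline_zone(&z);       /* safe: free(NULL) does nothing */
                return 0;
        }
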
+ */ + file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE); if (IS_ERR(file)) return PTR_ERR(file); diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 08bd7a3d464a..a8b5e749e84e 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -289,7 +289,8 @@ static int create_handle_cache(struct zs_pool *pool) static void destroy_handle_cache(struct zs_pool *pool) { - kmem_cache_destroy(pool->handle_cachep); + if (pool->handle_cachep) + kmem_cache_destroy(pool->handle_cachep); } static unsigned long alloc_handle(struct zs_pool *pool) diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 59555f0f8fc8..d2cd9de4b724 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -618,6 +618,92 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg) return err; } +static struct sk_buff **vlan_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + struct sk_buff *p, **pp = NULL; + struct vlan_hdr *vhdr; + unsigned int hlen, off_vlan; + const struct packet_offload *ptype; + __be16 type; + int flush = 1; + + off_vlan = skb_gro_offset(skb); + hlen = off_vlan + sizeof(*vhdr); + vhdr = skb_gro_header_fast(skb, off_vlan); + if (skb_gro_header_hard(skb, hlen)) { + vhdr = skb_gro_header_slow(skb, hlen, off_vlan); + if (unlikely(!vhdr)) + goto out; + } + + type = vhdr->h_vlan_encapsulated_proto; + + rcu_read_lock(); + ptype = gro_find_receive_by_type(type); + if (!ptype) + goto out_unlock; + + flush = 0; + + for (p = *head; p; p = p->next) { + struct vlan_hdr *vhdr2; + + if (!NAPI_GRO_CB(p)->same_flow) + continue; + + vhdr2 = (struct vlan_hdr *)(p->data + off_vlan); + if (compare_vlan_header(vhdr, vhdr2)) + NAPI_GRO_CB(p)->same_flow = 0; + } + + skb_gro_pull(skb, sizeof(*vhdr)); + skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr)); + pp = ptype->callbacks.gro_receive(head, skb); + +out_unlock: + rcu_read_unlock(); +out: + NAPI_GRO_CB(skb)->flush |= flush; + + return pp; +} + +static int vlan_gro_complete(struct sk_buff *skb, int nhoff) +{ + struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff); + __be16 type = vhdr->h_vlan_encapsulated_proto; + struct packet_offload *ptype; + int err = -ENOENT; + + rcu_read_lock(); + ptype = gro_find_complete_by_type(type); + if (ptype) + err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr)); + + rcu_read_unlock(); + return err; +} + +static struct packet_offload vlan_packet_offloads[] __read_mostly = { + { + .type = cpu_to_be16(ETH_P_8021Q), + .priority = 10, + .callbacks = { + .gro_receive = vlan_gro_receive, + .gro_complete = vlan_gro_complete, + }, + }, + { + .type = cpu_to_be16(ETH_P_8021AD), + .priority = 10, + .callbacks = { + .gro_receive = vlan_gro_receive, + .gro_complete = vlan_gro_complete, + }, + }, +}; + static int __net_init vlan_init_net(struct net *net) { struct vlan_net *vn = net_generic(net, vlan_net_id); @@ -645,6 +731,7 @@ static struct pernet_operations vlan_net_ops = { static int __init vlan_proto_init(void) { int err; + unsigned int i; pr_info("%s v%s\n", vlan_fullname, vlan_version); @@ -668,6 +755,9 @@ static int __init vlan_proto_init(void) if (err < 0) goto err5; + for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++) + dev_add_offload(&vlan_packet_offloads[i]); + vlan_ioctl_set(vlan_ioctl_handler); return 0; @@ -685,7 +775,13 @@ static int __init vlan_proto_init(void) static void __exit vlan_cleanup_module(void) { + unsigned int i; + vlan_ioctl_set(NULL); + + for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++) + dev_remove_offload(&vlan_packet_offloads[i]); + vlan_netlink_fini(); 
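The VLAN GRO receive path above only lets frames aggregate when their VLAN headers match exactly: compare_vlan_header(), added elsewhere in this series and therefore only assumed here, returns nonzero when two headers differ, which clears same_flow for the mismatching candidate. A plausible shape for such a helper, written as a sketch under that assumption (vlan_hdr_differs is an invented name):

        /* nonzero iff TCI or encapsulated protocol differ */
        static inline unsigned long vlan_hdr_differs(const struct vlan_hdr *h1,
                                                     const struct vlan_hdr *h2)
        {
                return ((unsigned long)(h1->h_vlan_TCI ^ h2->h_vlan_TCI)) |
                       ((unsigned long)(h1->h_vlan_encapsulated_proto ^
                                        h2->h_vlan_encapsulated_proto));
        }
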
unregister_netdevice_notifier(&vlan_notifier_block); diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile index eb7d8c0388e4..21434ab79d2c 100644 --- a/net/batman-adv/Makefile +++ b/net/batman-adv/Makefile @@ -1,5 +1,5 @@ # -# Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +# Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: # # Marek Lindner, Simon Wunderlich # @@ -20,7 +20,7 @@ obj-$(CONFIG_BATMAN_ADV) += batman-adv.o batman-adv-y += bat_iv_ogm.o batman-adv-y += bitarray.o batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o -batman-adv-y += debugfs.o +batman-adv-$(CONFIG_DEBUG_FS) += debugfs.o batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o batman-adv-y += fragmentation.o batman-adv-y += gateway_client.o @@ -29,6 +29,7 @@ batman-adv-y += hard-interface.o batman-adv-y += hash.o batman-adv-y += icmp_socket.o batman-adv-y += main.o +batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o batman-adv-$(CONFIG_BATMAN_ADV_NC) += network-coding.o batman-adv-y += originator.o batman-adv-y += routing.o @@ -36,4 +37,3 @@ batman-adv-y += send.o batman-adv-y += soft-interface.o batman-adv-y += sysfs.o batman-adv-y += translation-table.o -batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h index 4e49666f8c65..4e59cf3eb079 100644 --- a/net/batman-adv/bat_algo.h +++ b/net/batman-adv/bat_algo.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 00e00e09b000..4e93d2d3958b 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. 
contributors: * * Marek Lindner, Simon Wunderlich * @@ -28,7 +28,7 @@ /** * enum batadv_dup_status - duplicate status - * @BATADV_NO_DUP: the packet is a duplicate + * @BATADV_NO_DUP: the packet is no duplicate * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the * neighbor) * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor @@ -55,7 +55,7 @@ static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, } /** - * batadv_ring_buffer_set - compute the average of all non-zero values stored + * batadv_ring_buffer_avg - compute the average of all non-zero values stored * in the given ring buffer * @lq_recv: pointer to the ring buffer * @@ -64,7 +64,9 @@ static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[]) { const uint8_t *ptr; - uint16_t count = 0, i = 0, sum = 0; + uint16_t count = 0; + uint16_t i = 0; + uint16_t sum = 0; ptr = lq_recv; @@ -308,7 +310,6 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface) struct batadv_ogm_packet *batadv_ogm_packet; unsigned char *ogm_buff; uint32_t random_seqno; - int res = -ENOMEM; /* randomize initial seqno to avoid collision */ get_random_bytes(&random_seqno, sizeof(random_seqno)); @@ -317,7 +318,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface) hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN; ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC); if (!ogm_buff) - goto out; + return -ENOMEM; hard_iface->bat_iv.ogm_buff = ogm_buff; @@ -329,10 +330,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface) batadv_ogm_packet->reserved = 0; batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE; - res = 0; - -out: - return res; + return 0; } static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface) @@ -396,8 +394,8 @@ static uint8_t batadv_hop_penalty(uint8_t tq, } /* is there another aggregated packet here? */ -static int batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, - __be16 tvlv_len) +static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, + __be16 tvlv_len) { int next_buff_pos = 0; @@ -413,7 +411,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet, struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - char *fwd_str; + const char *fwd_str; uint8_t packet_num; int16_t buff_pos; struct batadv_ogm_packet *batadv_ogm_packet; @@ -548,58 +546,62 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet, * - the send time is within our MAX_AGGREGATION_MS time * - the resulting packet wont be bigger than * MAX_AGGREGATION_BYTES + * otherwise aggregation is not possible */ - if (time_before(send_time, forw_packet->send_time) && - time_after_eq(aggregation_end_time, forw_packet->send_time) && - (aggregated_bytes <= BATADV_MAX_AGGREGATION_BYTES)) { - /* check aggregation compatibility - * -> direct link packets are broadcasted on - * their interface only - * -> aggregate packet if the current packet is - * a "global" packet as well as the base - * packet - */ - primary_if = batadv_primary_if_get_selected(bat_priv); - if (!primary_if) - goto out; - - /* packet is not leaving on the same interface. 
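Among the bat_iv_ogm.c cleanups, the kernel-doc above is corrected to name batadv_ring_buffer_avg() rather than batadv_ring_buffer_set(); the helper averages only the non-zero slots of the link-quality ring, since a zero slot means "no sample" rather than a measured quality of zero. A standalone model of that computation (ring_avg is an invented name):

        #include <stdint.h>
        #include <stdio.h>

        static uint8_t ring_avg(const uint8_t *lq, unsigned int n)
        {
                uint16_t sum = 0, count = 0;

                while (n--) {
                        if (*lq) {              /* skip empty slots */
                                count++;
                                sum += *lq;
                        }
                        lq++;
                }
                return count ? sum / count : 0;
        }

        int main(void)
        {
                const uint8_t lq[] = { 0, 200, 0, 100, 250 };

                printf("avg = %u\n", ring_avg(lq, 5));  /* (200+100+250)/3 */
                return 0;
        }
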
*/ - if (forw_packet->if_outgoing != if_outgoing) - goto out; + if (!time_before(send_time, forw_packet->send_time) || + !time_after_eq(aggregation_end_time, forw_packet->send_time)) + return false; + + if (aggregated_bytes > BATADV_MAX_AGGREGATION_BYTES) + return false; + + /* packet is not leaving on the same interface. */ + if (forw_packet->if_outgoing != if_outgoing) + return false; + + /* check aggregation compatibility + * -> direct link packets are broadcasted on + * their interface only + * -> aggregate packet if the current packet is + * a "global" packet as well as the base + * packet + */ + primary_if = batadv_primary_if_get_selected(bat_priv); + if (!primary_if) + return false; - /* packets without direct link flag and high TTL - * are flooded through the net - */ - if ((!directlink) && - (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) && - (batadv_ogm_packet->ttl != 1) && - - /* own packets originating non-primary - * interfaces leave only that interface - */ - ((!forw_packet->own) || - (forw_packet->if_incoming == primary_if))) { - res = true; - goto out; - } + /* packets without direct link flag and high TTL + * are flooded through the net + */ + if (!directlink && + !(batadv_ogm_packet->flags & BATADV_DIRECTLINK) && + batadv_ogm_packet->ttl != 1 && + + /* own packets originating non-primary + * interfaces leave only that interface + */ + (!forw_packet->own || + forw_packet->if_incoming == primary_if)) { + res = true; + goto out; + } - /* if the incoming packet is sent via this one - * interface only - we still can aggregate - */ - if ((directlink) && - (new_bat_ogm_packet->ttl == 1) && - (forw_packet->if_incoming == if_incoming) && - - /* packets from direct neighbors or - * own secondary interface packets - * (= secondary interface packets in general) - */ - (batadv_ogm_packet->flags & BATADV_DIRECTLINK || - (forw_packet->own && - forw_packet->if_incoming != primary_if))) { - res = true; - goto out; - } + /* if the incoming packet is sent via this one + * interface only - we still can aggregate + */ + if (directlink && + new_bat_ogm_packet->ttl == 1 && + forw_packet->if_incoming == if_incoming && + + /* packets from direct neighbors or + * own secondary interface packets + * (= secondary interface packets in general) + */ + (batadv_ogm_packet->flags & BATADV_DIRECTLINK || + (forw_packet->own && + forw_packet->if_incoming != primary_if))) { + res = true; + goto out; } out: @@ -642,19 +644,16 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "batman packet queue full\n"); - goto out; + goto out_free_outgoing; } } forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC); - if (!forw_packet_aggr) { - if (!own_packet) - atomic_inc(&bat_priv->batman_queue_left); - goto out; - } + if (!forw_packet_aggr) + goto out_nomem; - if ((atomic_read(&bat_priv->aggregated_ogms)) && - (packet_len < BATADV_MAX_AGGREGATION_BYTES)) + if (atomic_read(&bat_priv->aggregated_ogms) && + packet_len < BATADV_MAX_AGGREGATION_BYTES) skb_size = BATADV_MAX_AGGREGATION_BYTES; else skb_size = packet_len; @@ -662,12 +661,8 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, skb_size += ETH_HLEN; forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size); - if (!forw_packet_aggr->skb) { - if (!own_packet) - atomic_inc(&bat_priv->batman_queue_left); - kfree(forw_packet_aggr); - goto out; - } + if (!forw_packet_aggr->skb) + goto 
out_free_forw_packet; forw_packet_aggr->skb->priority = TC_PRIO_CONTROL; skb_reserve(forw_packet_aggr->skb, ETH_HLEN); @@ -699,7 +694,12 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, send_time - jiffies); return; -out: +out_free_forw_packet: + kfree(forw_packet_aggr); +out_nomem: + if (!own_packet) + atomic_inc(&bat_priv->batman_queue_left); +out_free_outgoing: batadv_hardif_free_ref(if_outgoing); out_free_incoming: batadv_hardif_free_ref(if_incoming); @@ -752,13 +752,13 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv, unsigned long max_aggregation_jiffies; batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff; - direct_link = batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0; + direct_link = !!(batadv_ogm_packet->flags & BATADV_DIRECTLINK); max_aggregation_jiffies = msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS); /* find position for the packet in the forward queue */ spin_lock_bh(&bat_priv->forw_bat_list_lock); /* own packets are not to be aggregated */ - if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) { + if (atomic_read(&bat_priv->aggregated_ogms) && !own_packet) { hlist_for_each_entry(forw_packet_pos, &bat_priv->forw_bat_list, list) { if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet, @@ -1034,9 +1034,10 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, batadv_orig_node_free_ref(orig_tmp); if (!neigh_node) goto unlock; - } else + } else { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Updating existing last-hop neighbor of originator\n"); + } rcu_read_unlock(); neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); @@ -1081,7 +1082,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, * won't consider it either */ if (router_ifinfo && - (neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg)) { + neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg) { orig_node_tmp = router->orig_node; spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock); if_num = router->if_incoming->if_num; @@ -1356,8 +1357,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, out: spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); batadv_orig_node_free_ref(orig_node); - if (orig_ifinfo) - batadv_orig_ifinfo_free_ref(orig_ifinfo); + batadv_orig_ifinfo_free_ref(orig_ifinfo); return ret; } diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c index e3da07a64026..40e4a2a18e45 100644 --- a/net/batman-adv/bitarray.c +++ b/net/batman-adv/bitarray.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors: * * Simon Wunderlich, Marek Lindner * diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h index 2acaafe60188..be497be696d1 100644 --- a/net/batman-adv/bitarray.h +++ b/net/batman-adv/bitarray.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors: * * Simon Wunderlich, Marek Lindner * diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index ac4b96eccade..fa941cd7d8ad 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2015 B.A.T.M.A.N. 
contributors: * * Simon Wunderlich * diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index 43c985d92c3e..1f506d34039e 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors: * * Simon Wunderlich * diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c index a4972874c056..46118084221a 100644 --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * @@ -482,11 +482,7 @@ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface) debugfs_remove_recursive(hard_iface->debug_dir); hard_iface->debug_dir = NULL; out: -#ifdef CONFIG_DEBUG_FS return -ENOMEM; -#else - return 0; -#endif /* CONFIG_DEBUG_FS */ } /** @@ -541,11 +537,7 @@ int batadv_debugfs_add_meshif(struct net_device *dev) debugfs_remove_recursive(bat_priv->debug_dir); bat_priv->debug_dir = NULL; out: -#ifdef CONFIG_DEBUG_FS return -ENOMEM; -#else - return 0; -#endif /* CONFIG_DEBUG_FS */ } void batadv_debugfs_del_meshif(struct net_device *dev) diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h index 37c4d6ddd04d..ed25605ca732 100644 --- a/net/batman-adv/debugfs.h +++ b/net/batman-adv/debugfs.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * @@ -20,6 +20,8 @@ #define BATADV_DEBUGFS_SUBDIR "batman_adv" +#if IS_ENABLED(CONFIG_DEBUG_FS) + void batadv_debugfs_init(void); void batadv_debugfs_destroy(void); int batadv_debugfs_add_meshif(struct net_device *dev); @@ -27,4 +29,36 @@ void batadv_debugfs_del_meshif(struct net_device *dev); int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface); void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface); +#else + +static inline void batadv_debugfs_init(void) +{ +} + +static inline void batadv_debugfs_destroy(void) +{ +} + +static inline int batadv_debugfs_add_meshif(struct net_device *dev) +{ + return 0; +} + +static inline void batadv_debugfs_del_meshif(struct net_device *dev) +{ +} + +static inline +int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface) +{ + return 0; +} + +static inline +void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface) +{ +} + +#endif + #endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */ diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index aad022dd15df..da1742d9059f 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors: * * Antonio Quartulli * diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h index 2fe0764c64be..ed41b8edba18 100644 --- a/net/batman-adv/distributed-arp-table.h +++ b/net/batman-adv/distributed-arp-table.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2015 B.A.T.M.A.N. 
contributors: * * Antonio Quartulli * diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 3d1dcaa3e8b5..6ce3c84a7e55 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2013-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors: * * Martin Hundebøll * @@ -161,6 +161,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node, hlist_add_head(&frag_entry_new->list, &chain->head); chain->size = skb->len - hdr_size; chain->timestamp = jiffies; + chain->total_size = ntohs(frag_packet->total_size); ret = true; goto out; } @@ -195,9 +196,11 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node, out: if (chain->size > batadv_frag_size_limit() || - ntohs(frag_packet->total_size) > batadv_frag_size_limit()) { + chain->total_size != ntohs(frag_packet->total_size) || + chain->total_size > batadv_frag_size_limit()) { /* Clear chain if total size of either the list or the packet - * exceeds the maximum size of one merged packet. + * exceeds the maximum size of one merged packet. Don't allow + * packets to have different total_size. */ batadv_frag_clear_chain(&chain->head); chain->size = 0; @@ -228,19 +231,13 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node, * Returns the merged skb or NULL on error. */ static struct sk_buff * -batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb) +batadv_frag_merge_packets(struct hlist_head *chain) { struct batadv_frag_packet *packet; struct batadv_frag_list_entry *entry; struct sk_buff *skb_out = NULL; int size, hdr_size = sizeof(struct batadv_frag_packet); - /* Make sure incoming skb has non-bogus data. */ - packet = (struct batadv_frag_packet *)skb->data; - size = ntohs(packet->total_size); - if (size > batadv_frag_size_limit()) - goto free; - /* Remove first entry, as this is the destination for the rest of the * fragments. */ @@ -249,6 +246,9 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb) skb_out = entry->skb; kfree(entry); + packet = (struct batadv_frag_packet *)skb_out->data; + size = ntohs(packet->total_size); + /* Make room for the rest of the fragments. */ if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) { kfree_skb(skb_out); @@ -304,7 +304,7 @@ bool batadv_frag_skb_buffer(struct sk_buff **skb, if (hlist_empty(&head)) goto out; - skb_out = batadv_frag_merge_packets(&head, *skb); + skb_out = batadv_frag_merge_packets(&head); if (!skb_out) goto out_err; diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h index d848cf6676a2..ec1e86f899e8 100644 --- a/net/batman-adv/fragmentation.h +++ b/net/batman-adv/fragmentation.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2013-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors: * * Martin Hundebøll * diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 090828cf1fa7..a85eaca344e8 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h index 7ee53bb7d50f..185fb0887654 100644 --- a/net/batman-adv/gateway_client.h +++ b/net/batman-adv/gateway_client.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2014 B.A.T.M.A.N. 
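The fragmentation.c change above records the expected total_size once per chain and then requires every later fragment to advertise the same value, instead of trusting whichever skb happened to arrive last; disagreeing fragments now flush the chain rather than corrupt the merge. A toy guard with the same shape (frag_accept and struct chain are invented; the first fragment is assumed to open the chain):

        #include <stdbool.h>

        struct chain {
                unsigned int size;              /* accumulated payload */
                unsigned int total_size;        /* claim from first fragment */
        };

        static bool frag_accept(struct chain *c, unsigned int frag_len,
                                unsigned int claimed_total, bool first)
        {
                if (first)
                        c->total_size = claimed_total;

                c->size += frag_len;

                /* reject on overflow or on inconsistent size claims */
                return c->size <= c->total_size &&
                       c->total_size == claimed_total;
        }
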
contributors: +/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c index 88a1bc3804d1..0792e2f101e4 100644 --- a/net/batman-adv/gateway_common.c +++ b/net/batman-adv/gateway_common.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h index aa5116561947..df5434229675 100644 --- a/net/batman-adv/gateway_common.h +++ b/net/batman-adv/gateway_common.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index baf1f9843f2c..bdb020e29272 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h index 1918cd50b62e..e8b6ffea703d 100644 --- a/net/batman-adv/hard-interface.h +++ b/net/batman-adv/hard-interface.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c index 7c1c63080e20..3a0e1dcd1f29 100644 --- a/net/batman-adv/hash.c +++ b/net/batman-adv/hash.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors: * * Simon Wunderlich, Marek Lindner * diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h index 539fc1266793..379e32acf2b4 100644 --- a/net/batman-adv/hash.h +++ b/net/batman-adv/hash.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors: * * Simon Wunderlich, Marek Lindner * diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index 161ef8f17d2e..6c3cfb57d132 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h index 0c33950aa4aa..4815824e2f61 100644 --- a/net/batman-adv/icmp_socket.h +++ b/net/batman-adv/icmp_socket.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 12fc77bef23f..548e405d13c1 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@ -209,10 +209,13 @@ void batadv_mesh_free(struct net_device *soft_iface) * interfaces in the current mesh * @bat_priv: the bat priv with all the soft interface information * @addr: the address to check + * + * Returns 'true' if the mac address was found, false otherwise. 
*/ -int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr) +bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr) { const struct batadv_hard_iface *hard_iface; + bool is_my_mac = false; rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { @@ -223,12 +226,12 @@ int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr) continue; if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) { - rcu_read_unlock(); - return 1; + is_my_mac = true; + break; } } rcu_read_unlock(); - return 0; + return is_my_mac; } /** @@ -510,14 +513,12 @@ static struct batadv_algo_ops *batadv_algo_get(char *name) int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops) { struct batadv_algo_ops *bat_algo_ops_tmp; - int ret; bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name); if (bat_algo_ops_tmp) { pr_info("Trying to register already registered routing algorithm: %s\n", bat_algo_ops->name); - ret = -EEXIST; - goto out; + return -EEXIST; } /* all algorithms must implement all ops (for now) */ @@ -531,32 +532,26 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops) !bat_algo_ops->bat_neigh_is_equiv_or_better) { pr_info("Routing algo '%s' does not implement required ops\n", bat_algo_ops->name); - ret = -EINVAL; - goto out; + return -EINVAL; } INIT_HLIST_NODE(&bat_algo_ops->list); hlist_add_head(&bat_algo_ops->list, &batadv_algo_list); - ret = 0; -out: - return ret; + return 0; } int batadv_algo_select(struct batadv_priv *bat_priv, char *name) { struct batadv_algo_ops *bat_algo_ops; - int ret = -EINVAL; bat_algo_ops = batadv_algo_get(name); if (!bat_algo_ops) - goto out; + return -EINVAL; bat_priv->bat_algo_ops = bat_algo_ops; - ret = 0; -out: - return ret; + return 0; } int batadv_algo_seq_print_text(struct seq_file *seq, void *offset) @@ -819,15 +814,15 @@ static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff, new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC); /* keep old buffer if kmalloc should fail */ - if (new_buff) { - memcpy(new_buff, *packet_buff, min_packet_len); - kfree(*packet_buff); - *packet_buff = new_buff; - *packet_buff_len = min_packet_len + additional_packet_len; - return true; - } + if (!new_buff) + return false; + + memcpy(new_buff, *packet_buff, min_packet_len); + kfree(*packet_buff); + *packet_buff = new_buff; + *packet_buff_len = min_packet_len + additional_packet_len; - return false; + return true; } /** diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 4d2318829a34..af0a3361d4b2 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@ -24,7 +24,7 @@ #define BATADV_DRIVER_DEVICE "batman-adv" #ifndef BATADV_SOURCE_VERSION -#define BATADV_SOURCE_VERSION "2015.0" +#define BATADV_SOURCE_VERSION "2015.1" #endif /* B.A.T.M.A.N. 
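batadv_tvlv_realloc_packet_buff() is restructured above into early-return form, but the contract is unchanged: on allocation failure the caller's buffer must remain valid, so the old memory is freed only after the copy into the new allocation has succeeded. The same grow-or-keep idiom as a standalone sketch (grow_buf is an invented name):

        #include <stdbool.h>
        #include <stdlib.h>
        #include <string.h>

        static bool grow_buf(unsigned char **buf, size_t *len, size_t extra)
        {
                unsigned char *nbuf = malloc(*len + extra);

                if (!nbuf)
                        return false;           /* *buf is still usable */

                memcpy(nbuf, *buf, *len);
                free(*buf);
                *buf = nbuf;
                *len += extra;
                return true;
        }
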
parameters */ @@ -44,7 +44,7 @@ #define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */ #define BATADV_TT_WORK_PERIOD 5000 /* 5 seconds */ #define BATADV_ORIG_WORK_PERIOD 1000 /* 1 second */ -#define BATADV_DAT_ENTRY_TIMEOUT (5*60000) /* 5 mins in milliseconds */ +#define BATADV_DAT_ENTRY_TIMEOUT (5 * 60000) /* 5 mins in milliseconds */ /* sliding packet range of received originator messages in sequence numbers * (should be a multiple of our word size) */ @@ -195,7 +195,7 @@ extern struct workqueue_struct *batadv_event_workqueue; int batadv_mesh_init(struct net_device *soft_iface); void batadv_mesh_free(struct net_device *soft_iface); -int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr); +bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr); struct batadv_hard_iface * batadv_seq_print_text_primary_if_get(struct seq_file *seq); int batadv_max_header_len(void); @@ -279,7 +279,7 @@ static inline void _batadv_dbg(int type __always_unused, * * note: can't use ether_addr_equal() as it requires aligned memory */ -static inline int batadv_compare_eth(const void *data1, const void *data2) +static inline bool batadv_compare_eth(const void *data1, const void *data2) { return ether_addr_equal_unaligned(data1, data2); } diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index b24e4bb64fb5..09f2838dedf2 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2014-2015 B.A.T.M.A.N. contributors: * * Linus Lüssing * diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h index 3a44ebdb43cb..033d80e84fdf 100644 --- a/net/batman-adv/multicast.h +++ b/net/batman-adv/multicast.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2014-2015 B.A.T.M.A.N. contributors: * * Linus Lüssing * diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 127cc4d7380a..b984bc49deaf 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2012-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2012-2015 B.A.T.M.A.N. contributors: * * Martin Hundebøll, Jeppe Ledet-Pedersen * @@ -155,7 +155,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv) */ void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv) { - atomic_set(&bat_priv->network_coding, 1); + atomic_set(&bat_priv->network_coding, 0); bat_priv->nc.min_tq = 200; bat_priv->nc.max_fwd_delay = 10; bat_priv->nc.max_buffer_time = 200; @@ -275,7 +275,7 @@ static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv, * max_buffer time */ return batadv_has_timed_out(nc_path->last_valid, - bat_priv->nc.max_buffer_time*10); + bat_priv->nc.max_buffer_time * 10); } /** diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h index 358c0d686ab0..b5ab8ff544ee 100644 --- a/net/batman-adv/network-coding.h +++ b/net/batman-adv/network-coding.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2012-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2012-2015 B.A.T.M.A.N. contributors: * * Martin Hundebøll, Jeppe Ledet-Pedersen * diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 90e805aba379..e3900e452616 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2009-2015 B.A.T.M.A.N. 
contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index aa4a43696295..91339143a2f7 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index b81fbbf21a63..9468bc09c7c4 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index da83982bf974..c5d90095bc3c 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h index 557d3d12a9ab..6573f12b3ddc 100644 --- a/net/batman-adv/routing.h +++ b/net/batman-adv/routing.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 3d64ed20c393..23635bd63fec 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@ -255,8 +255,8 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, unsigned short vid) { - struct ethhdr *ethhdr; struct batadv_unicast_packet *unicast_packet; + struct ethhdr *ethhdr; int ret = NET_XMIT_DROP; if (!orig_node) diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h index 38d0ec1833ae..60c233eb35ed 100644 --- a/net/batman-adv/send.h +++ b/net/batman-adv/send.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 5ec31d7de24f..50cf722f4e1b 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. 
contributors: * * Marek Lindner, Simon Wunderlich * @@ -732,7 +732,7 @@ static int batadv_softif_init_late(struct net_device *dev) atomic_set(&bat_priv->aggregated_ogms, 1); atomic_set(&bat_priv->bonding, 0); #ifdef CONFIG_BATMAN_ADV_BLA - atomic_set(&bat_priv->bridge_loop_avoidance, 0); + atomic_set(&bat_priv->bridge_loop_avoidance, 1); #endif #ifdef CONFIG_BATMAN_ADV_DAT atomic_set(&bat_priv->distributed_arp_table, 1); @@ -818,7 +818,7 @@ static int batadv_softif_slave_add(struct net_device *dev, int ret = -EINVAL; hard_iface = batadv_hardif_get_by_netdev(slave_dev); - if (!hard_iface || hard_iface->soft_iface != NULL) + if (!hard_iface || hard_iface->soft_iface) goto out; ret = batadv_hardif_enable_interface(hard_iface, dev->name); diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h index dbab22fd89a5..9ce08049ffd0 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/soft-interface.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index a75dc12f96f8..fa8c347bf057 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h index b715b60db7cd..b9e79ad806ac 100644 --- a/net/batman-adv/sysfs.h +++ b/net/batman-adv/sysfs.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 07b263a437d1..b098e53edded 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich, Antonio Quartulli * diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index ad84d7b89e39..5769037c7e2d 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich, Antonio Quartulli * diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 9398c3fb4174..c1000c0d6b0d 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@ -132,6 +132,7 @@ struct batadv_orig_ifinfo { * @timestamp: time (jiffie) of last received fragment * @seqno: sequence number of the fragments in the list * @size: accumulated size of packets in list + * @total_size: expected size of the assembled packet */ struct batadv_frag_table_entry { struct hlist_head head; @@ -139,6 +140,7 @@ struct batadv_frag_table_entry { unsigned long timestamp; uint16_t seqno; uint16_t size; + uint16_t total_size; }; /** @@ -181,9 +183,10 @@ struct batadv_orig_node_vlan { /** * struct batadv_orig_bat_iv - B.A.T.M.A.N. 
IV private orig_node members - * @bcast_own: bitfield containing the number of our OGMs this orig_node - * rebroadcasted "back" to us (relative to last_real_seqno) - * @bcast_own_sum: counted result of bcast_own + * @bcast_own: set of bitfields (one per hard interface) where each one counts + * the number of our OGMs this orig_node rebroadcasted "back" to us (relative + * to last_real_seqno). Every bitfield is BATADV_TQ_LOCAL_WINDOW_SIZE bits long. + * @bcast_own_sum: sum of bcast_own * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum, * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count */ diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index c4802f3bd4c5..f6c99098959f 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -94,7 +94,6 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, char buf[32]; size_t buf_size = min(count, (sizeof(buf)-1)); bool enable; - int err; if (!test_bit(HCI_UP, &hdev->flags)) return -ENETDOWN; @@ -121,12 +120,8 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, if (IS_ERR(skb)) return PTR_ERR(skb); - err = -bt_to_errno(skb->data[0]); kfree_skb(skb); - if (err < 0) - return err; - hci_dev_change_flag(hdev, HCI_DUT_MODE); return count; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 7fd87e7135b5..a6f21f8c2f98 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -7577,7 +7577,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent) memset(&ev, 0, sizeof(ev)); /* Devices using resolvable or non-resolvable random addresses - * without providing an indentity resolving key don't require + * without providing an identity resolving key don't require * to store long term keys. Their addresses will change the * next time around. * @@ -7617,7 +7617,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk) /* For identity resolving keys from devices that are already * using a public address or static random address, do not * ask for storing this key. The identity resolving key really - * is only mandatory for devices using resovlable random + * is only mandatory for devices using resolvable random * addresses. * * Storing all identity resolving keys has the downside that @@ -7646,7 +7646,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, memset(&ev, 0, sizeof(ev)); /* Devices using resolvable or non-resolvable random addresses - * without providing an indentity resolving key don't require + * without providing an identity resolving key don't require * to store signature resolving keys. Their addresses will change * the next time around. 
* diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 1ab3dc9c8f99..659371af39e4 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -371,6 +371,8 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r) uint8_t tmp[16], data[16]; int err; + SMP_DBG("k %16phN r %16phN", k, r); + if (!tfm) { BT_ERR("tfm %p", tfm); return -EINVAL; @@ -400,6 +402,8 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r) /* Most significant octet of encryptedData corresponds to data[0] */ swap_buf(data, r, 16); + SMP_DBG("r %16phN", r); + return err; } @@ -410,6 +414,10 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16], u8 p1[16], p2[16]; int err; + SMP_DBG("k %16phN r %16phN", k, r); + SMP_DBG("iat %u ia %6phN rat %u ra %6phN", _iat, ia, _rat, ra); + SMP_DBG("preq %7phN pres %7phN", preq, pres); + memset(p1, 0, 16); /* p1 = pres || preq || _rat || _iat */ @@ -418,10 +426,7 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16], memcpy(p1 + 2, preq, 7); memcpy(p1 + 9, pres, 7); - /* p2 = padding || ia || ra */ - memcpy(p2, ra, 6); - memcpy(p2 + 6, ia, 6); - memset(p2 + 12, 0, 4); + SMP_DBG("p1 %16phN", p1); /* res = r XOR p1 */ u128_xor((u128 *) res, (u128 *) r, (u128 *) p1); @@ -433,6 +438,13 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16], return err; } + /* p2 = padding || ia || ra */ + memcpy(p2, ra, 6); + memcpy(p2 + 6, ia, 6); + memset(p2 + 12, 0, 4); + + SMP_DBG("p2 %16phN", p2); + /* res = res XOR p2 */ u128_xor((u128 *) res, (u128 *) res, (u128 *) p2); diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 7896cf143045..cecb482ed919 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -802,9 +802,11 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p, int err = 0; if (ndm->ndm_flags & NTF_USE) { + local_bh_disable(); rcu_read_lock(); br_fdb_update(p->br, p, addr, vid, true); rcu_read_unlock(); + local_bh_enable(); } else { spin_lock_bh(&p->br->hash_lock); err = fdb_add_entry(p, addr, ndm->ndm_state, diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 7c78b8df1d81..2e246a1a9b43 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1164,6 +1164,9 @@ static void br_multicast_add_router(struct net_bridge *br, struct net_bridge_port *p; struct hlist_node *slot = NULL; + if (!hlist_unhashed(&port->rlist)) + return; + hlist_for_each_entry(p, &br->router_list, rlist) { if ((unsigned long) port >= (unsigned long) p) break; @@ -1191,12 +1194,8 @@ static void br_multicast_mark_router(struct net_bridge *br, if (port->multicast_router != 1) return; - if (!hlist_unhashed(&port->rlist)) - goto timer; - br_multicast_add_router(br, port); -timer: mod_timer(&port->multicast_router_timer, now + br->multicast_querier_interval); } @@ -1644,7 +1643,7 @@ static void br_multicast_query_expired(struct net_bridge *br, if (query->startup_sent < br->multicast_startup_query_count) query->startup_sent++; - RCU_INIT_POINTER(querier, NULL); + RCU_INIT_POINTER(querier->port, NULL); br_multicast_send_query(br, NULL, query); spin_unlock(&br->multicast_lock); } diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index d5aba394ff6f..5149d9e71114 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1117,8 +1117,6 @@ static int do_replace(struct net *net, const void __user *user, return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) return -ENOMEM; - if (tmp.num_counters == 0) - 
return -EINVAL; tmp.name[sizeof(tmp.name) - 1] = 0; @@ -2161,8 +2159,6 @@ static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl, return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) return -ENOMEM; - if (tmp.num_counters == 0) - return -EINVAL; memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry)); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 78a04ebb113c..3cc71b9f5517 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -330,6 +330,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo) release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); + + if (sock_flag(sk, SOCK_DEAD)) + break; + clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); } @@ -373,6 +377,10 @@ static int caif_stream_recvmsg(struct socket *sock, struct msghdr *msg, struct sk_buff *skb; lock_sock(sk); + if (sock_flag(sk, SOCK_DEAD)) { + err = -ECONNRESET; + goto unlock; + } skb = skb_dequeue(&sk->sk_receive_queue); caif_check_flow_release(sk); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 41a4abc7e98e..c4ec9239249a 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1306,8 +1306,6 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc, if (list_empty(&req->r_osd_item)) req->r_osd = NULL; } - - list_del_init(&req->r_req_lru_item); /* can be on notarget */ ceph_osdc_put_request(req); } @@ -2017,20 +2015,29 @@ static void kick_requests(struct ceph_osd_client *osdc, bool force_resend, err = __map_request(osdc, req, force_resend || force_resend_writes); dout("__map_request returned %d\n", err); - if (err == 0) - continue; /* no change and no osd was specified */ if (err < 0) continue; /* hrm! */ - if (req->r_osd == NULL) { - dout("tid %llu maps to no valid osd\n", req->r_tid); - needmap++; /* request a newer map */ - continue; - } + if (req->r_osd == NULL || err > 0) { + if (req->r_osd == NULL) { + dout("lingering %p tid %llu maps to no osd\n", + req, req->r_tid); + /* + * A homeless lingering request makes + * no sense, as its job is to keep + * a particular OSD connection open. + * Request a newer map and kick the + * request, knowing that it won't be + * resent until we actually get a map + * that can tell us where to send it. + */ + needmap++; + } - dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid, - req->r_osd ? req->r_osd->o_osd : -1); - __register_request(osdc, req); - __unregister_linger_request(osdc, req); + dout("kicking lingering %p tid %llu osd%d\n", req, + req->r_tid, req->r_osd ?
req->r_osd->o_osd : -1); + __register_request(osdc, req); + __unregister_linger_request(osdc, req); + } } reset_changed_osds(osdc); mutex_unlock(&osdc->request_mutex); diff --git a/net/core/dev.c b/net/core/dev.c index 594163d0c6eb..6778a9999d52 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -469,10 +469,14 @@ EXPORT_SYMBOL(dev_remove_pack); */ void dev_add_offload(struct packet_offload *po) { - struct list_head *head = &offload_base; + struct packet_offload *elem; spin_lock(&offload_lock); - list_add_rcu(&po->list, head); + list_for_each_entry(elem, &offload_base, list) { + if (po->priority < elem->priority) + break; + } + list_add_rcu(&po->list, elem->list.prev); spin_unlock(&offload_lock); } EXPORT_SYMBOL(dev_add_offload); @@ -1719,15 +1723,8 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { - if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { - if (skb_copy_ubufs(skb, GFP_ATOMIC)) { - atomic_long_inc(&dev->rx_dropped); - kfree_skb(skb); - return NET_RX_DROP; - } - } - - if (unlikely(!is_skb_forwardable(dev, skb))) { + if (skb_orphan_frags(skb, GFP_ATOMIC) || + unlikely(!is_skb_forwardable(dev, skb))) { atomic_long_inc(&dev->rx_dropped); kfree_skb(skb); return NET_RX_DROP; diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 4f6a17ef0710..eb0c3ace7458 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -358,15 +358,7 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) int err; struct ethtool_cmd cmd; - if (!dev->ethtool_ops->get_settings) - return -EOPNOTSUPP; - - if (copy_from_user(&cmd, useraddr, sizeof(cmd))) - return -EFAULT; - - cmd.cmd = ETHTOOL_GSET; - - err = dev->ethtool_ops->get_settings(dev, &cmd); + err = __ethtool_get_settings(dev, &cmd); if (err < 0) return err; diff --git a/net/core/filter.c b/net/core/filter.c index 3adcca6f17a4..d271c06bf01f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -46,6 +46,7 @@ #include #include #include +#include /** * sk_filter - run a packet through a socket filter @@ -1238,21 +1239,6 @@ int sk_attach_bpf(u32 ufd, struct sock *sk) return 0; } -/** - * bpf_skb_clone_not_writable - is the header of a clone not writable - * @skb: buffer to check - * @len: length up to which to write, can be negative - * - * Returns true if modifying the header part of the cloned buffer - * does require the data to be copied. I.e. this version works with - * negative lengths needed for eBPF case! 
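The dev_add_offload() hunk above replaces a plain head insertion with one that keeps offload_base sorted by ascending ->priority, so lower values are matched first (eth.c later registers ETH_P_TEB at priority 10). A minimal userspace sketch of the same insertion rule; the names here are illustrative, not kernel API:

#include <stdio.h>

struct offload {
	int priority;
	struct offload *next;
};

/* Insert before the first element whose priority exceeds the new one,
 * preserving ascending order and FIFO order among equal priorities,
 * exactly as the list_for_each_entry() walk in the hunk does.
 */
static void offload_insert(struct offload **head, struct offload *po)
{
	while (*head && (*head)->priority <= po->priority)
		head = &(*head)->next;
	po->next = *head;
	*head = po;
}

int main(void)
{
	struct offload a = { 10, NULL }, b = { 0, NULL }, c = { 5, NULL };
	struct offload *head = NULL, *it;

	offload_insert(&head, &a);
	offload_insert(&head, &b);
	offload_insert(&head, &c);
	for (it = head; it; it = it->next)
		printf("prio %d\n", it->priority);	/* prints 0, 5, 10 */
	return 0;
}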
- */ -static bool bpf_skb_clone_unwritable(const struct sk_buff *skb, int len) -{ - return skb_header_cloned(skb) || - (int) skb_headroom(skb) + len > skb->hdr_len; -} - #define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1) static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) @@ -1275,9 +1261,8 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) if (unlikely((u32) offset > 0xffff || len > sizeof(buf))) return -EFAULT; - offset -= skb->data - skb_mac_header(skb); if (unlikely(skb_cloned(skb) && - bpf_skb_clone_unwritable(skb, offset + len))) + !skb_clone_writable(skb, offset + len))) return -EFAULT; ptr = skb_header_pointer(skb, offset, len, buf); @@ -1321,9 +1306,8 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) if (unlikely((u32) offset > 0xffff)) return -EFAULT; - offset -= skb->data - skb_mac_header(skb); if (unlikely(skb_cloned(skb) && - bpf_skb_clone_unwritable(skb, offset + sizeof(sum)))) + !skb_clone_writable(skb, offset + sizeof(sum)))) return -EFAULT; ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); @@ -1369,9 +1353,8 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) if (unlikely((u32) offset > 0xffff)) return -EFAULT; - offset -= skb->data - skb_mac_header(skb); if (unlikely(skb_cloned(skb) && - bpf_skb_clone_unwritable(skb, offset + sizeof(sum)))) + !skb_clone_writable(skb, offset + sizeof(sum)))) return -EFAULT; ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); @@ -1407,6 +1390,40 @@ const struct bpf_func_proto bpf_l4_csum_replace_proto = { .arg5_type = ARG_ANYTHING, }; +#define BPF_IS_REDIRECT_INGRESS(flags) ((flags) & 1) + +static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) +{ + struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2; + struct net_device *dev; + + dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex); + if (unlikely(!dev)) + return -EINVAL; + + if (unlikely(!(dev->flags & IFF_UP))) + return -EINVAL; + + skb2 = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb2)) + return -ENOMEM; + + if (BPF_IS_REDIRECT_INGRESS(flags)) + return dev_forward_skb(dev, skb2); + + skb2->dev = dev; + return dev_queue_xmit(skb2); +} + +const struct bpf_func_proto bpf_clone_redirect_proto = { + .func = bpf_clone_redirect, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +}; + static const struct bpf_func_proto * sk_filter_func_proto(enum bpf_func_id func_id) { @@ -1423,6 +1440,8 @@ sk_filter_func_proto(enum bpf_func_id func_id) return &bpf_get_smp_processor_id_proto; case BPF_FUNC_tail_call: return &bpf_tail_call_proto; + case BPF_FUNC_ktime_get_ns: + return &bpf_ktime_get_ns_proto; default: return NULL; } @@ -1438,18 +1457,15 @@ tc_cls_act_func_proto(enum bpf_func_id func_id) return &bpf_l3_csum_replace_proto; case BPF_FUNC_l4_csum_replace: return &bpf_l4_csum_replace_proto; + case BPF_FUNC_clone_redirect: + return &bpf_clone_redirect_proto; default: return sk_filter_func_proto(func_id); } } -static bool sk_filter_is_valid_access(int off, int size, - enum bpf_access_type type) +static bool __is_valid_access(int off, int size, enum bpf_access_type type) { - /* only read is allowed */ - if (type != BPF_READ) - return false; - /* check bounds */ if (off < 0 || off >= sizeof(struct __sk_buff)) return false; @@ -1465,8 +1481,42 @@ static bool sk_filter_is_valid_access(int off, int size, return true; } -static u32 sk_filter_convert_ctx_access(int dst_reg, int 
src_reg, int ctx_off, - struct bpf_insn *insn_buf) +static bool sk_filter_is_valid_access(int off, int size, + enum bpf_access_type type) +{ + if (type == BPF_WRITE) { + switch (off) { + case offsetof(struct __sk_buff, cb[0]) ... + offsetof(struct __sk_buff, cb[4]): + break; + default: + return false; + } + } + + return __is_valid_access(off, size, type); +} + +static bool tc_cls_act_is_valid_access(int off, int size, + enum bpf_access_type type) +{ + if (type == BPF_WRITE) { + switch (off) { + case offsetof(struct __sk_buff, mark): + case offsetof(struct __sk_buff, tc_index): + case offsetof(struct __sk_buff, cb[0]) ... + offsetof(struct __sk_buff, cb[4]): + break; + default: + return false; + } + } + return __is_valid_access(off, size, type); +} + +static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg, + int src_reg, int ctx_off, + struct bpf_insn *insn_buf) { struct bpf_insn *insn = insn_buf; @@ -1499,8 +1549,34 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off, offsetof(struct sk_buff, priority)); break; + case offsetof(struct __sk_buff, ingress_ifindex): + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4); + + *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, + offsetof(struct sk_buff, skb_iif)); + break; + + case offsetof(struct __sk_buff, ifindex): + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); + + *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)), + dst_reg, src_reg, + offsetof(struct sk_buff, dev)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg, + offsetof(struct net_device, ifindex)); + break; + case offsetof(struct __sk_buff, mark): - return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn); + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); + + if (type == BPF_WRITE) + *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, + offsetof(struct sk_buff, mark)); + else + *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, + offsetof(struct sk_buff, mark)); + break; case offsetof(struct __sk_buff, pkt_type): return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn); @@ -1515,6 +1591,38 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off, case offsetof(struct __sk_buff, vlan_tci): return convert_skb_access(SKF_AD_VLAN_TAG, dst_reg, src_reg, insn); + + case offsetof(struct __sk_buff, cb[0]) ... 
+ offsetof(struct __sk_buff, cb[4]): + BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20); + + ctx_off -= offsetof(struct __sk_buff, cb[0]); + ctx_off += offsetof(struct sk_buff, cb); + ctx_off += offsetof(struct qdisc_skb_cb, data); + if (type == BPF_WRITE) + *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off); + else + *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off); + break; + + case offsetof(struct __sk_buff, tc_index): +#ifdef CONFIG_NET_SCHED + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2); + + if (type == BPF_WRITE) + *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, + offsetof(struct sk_buff, tc_index)); + else + *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, + offsetof(struct sk_buff, tc_index)); + break; +#else + if (type == BPF_WRITE) + *insn++ = BPF_MOV64_REG(dst_reg, dst_reg); + else + *insn++ = BPF_MOV64_IMM(dst_reg, 0); + break; +#endif } return insn - insn_buf; @@ -1523,13 +1631,13 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off, static const struct bpf_verifier_ops sk_filter_ops = { .get_func_proto = sk_filter_func_proto, .is_valid_access = sk_filter_is_valid_access, - .convert_ctx_access = sk_filter_convert_ctx_access, + .convert_ctx_access = bpf_net_convert_ctx_access, }; static const struct bpf_verifier_ops tc_cls_act_ops = { .get_func_proto = tc_cls_act_func_proto, - .is_valid_access = sk_filter_is_valid_access, - .convert_ctx_access = sk_filter_convert_ctx_access, + .is_valid_access = tc_cls_act_is_valid_access, + .convert_ctx_access = bpf_net_convert_ctx_access, }; static struct bpf_prog_type_list sk_filter_type __read_mostly = { diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 1f2d89300b1a..77e22e4fc898 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -57,9 +58,11 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector, flow_dissector->offset[key->key_id] = key->offset; } - /* Ensure that the dissector always includes basic key. That way - * we are able to avoid handling lack of it in fast path. + /* Ensure that the dissector always includes control and basic key. + * That way we are able to avoid handling lack of these in fast path. */ + BUG_ON(!skb_flow_dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_CONTROL)); BUG_ON(!skb_flow_dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_BASIC)); } @@ -120,9 +123,12 @@ bool __skb_flow_dissect(const struct sk_buff *skb, void *target_container, void *data, __be16 proto, int nhoff, int hlen) { + struct flow_dissector_key_control *key_control; struct flow_dissector_key_basic *key_basic; struct flow_dissector_key_addrs *key_addrs; struct flow_dissector_key_ports *key_ports; + struct flow_dissector_key_tags *key_tags; + struct flow_dissector_key_keyid *key_keyid; u8 ip_proto; if (!data) { @@ -132,6 +138,13 @@ bool __skb_flow_dissect(const struct sk_buff *skb, hlen = skb_headlen(skb); } + /* It is ensured by skb_flow_dissector_init() that control key will + * be always present. + */ + key_control = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_CONTROL, + target_container); + /* It is ensured by skb_flow_dissector_init() that basic key will * be always present. 
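The tc_cls_act_is_valid_access() and bpf_net_convert_ctx_access() changes above let tc programs write skb->mark, skb->tc_index and the new cb[0..4] scratch words. A sketch of a classifier using them, in the restricted C built by LLVM's BPF backend; the SEC() helper and the return-value convention are assumptions borrowed from samples/bpf:

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

SEC("classifier")
int cls_annotate(struct __sk_buff *skb)
{
	skb->cb[0] = skb->ifindex;	/* scratch word, shared with later programs */
	skb->mark = 0xcafe;		/* writable from tc programs after this change */
	skb->tc_index = 1;		/* likewise writable (when CONFIG_NET_SCHED) */
	return 0;			/* classid/action handling omitted */
}

char _license[] SEC("license") = "GPL";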
*/ @@ -168,10 +181,12 @@ bool __skb_flow_dissect(const struct sk_buff *skb, if (!skb_flow_dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) break; + key_addrs = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - target_container); - memcpy(key_addrs, &iph->saddr, sizeof(*key_addrs)); + FLOW_DISSECTOR_KEY_IPV4_ADDRS, target_container); + memcpy(&key_addrs->v4addrs, &iph->saddr, + sizeof(key_addrs->v4addrs)); + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; break; } case htons(ETH_P_IPV6): { @@ -187,16 +202,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb, ip_proto = iph->nexthdr; nhoff += sizeof(struct ipv6hdr); - if (skb_flow_dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS)) { - key_addrs = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, - target_container); - - key_addrs->src = (__force __be32)ipv6_addr_hash(&iph->saddr); - key_addrs->dst = (__force __be32)ipv6_addr_hash(&iph->daddr); - goto flow_label; - } if (skb_flow_dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { struct flow_dissector_key_ipv6_addrs *key_ipv6_addrs; @@ -206,30 +211,18 @@ bool __skb_flow_dissect(const struct sk_buff *skb, target_container); memcpy(key_ipv6_addrs, &iph->saddr, sizeof(*key_ipv6_addrs)); - goto flow_label; + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; } - break; -flow_label: + flow_label = ip6_flowlabel(iph); if (flow_label) { - /* Awesome, IPv6 packet has a flow label so we can - * use that to represent the ports without any - * further dissection. - */ - - key_basic->n_proto = proto; - key_basic->ip_proto = ip_proto; - key_basic->thoff = (u16)nhoff; - if (skb_flow_dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_PORTS)) { - key_ports = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_PORTS, - target_container); - key_ports->ports = flow_label; + FLOW_DISSECTOR_KEY_FLOW_LABEL)) { + key_tags = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_FLOW_LABEL, + target_container); + key_tags->flow_label = ntohl(flow_label); } - - return true; } break; @@ -243,6 +236,15 @@ bool __skb_flow_dissect(const struct sk_buff *skb, if (!vlan) return false; + if (skb_flow_dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_VLANID)) { + key_tags = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_VLANID, + target_container); + + key_tags->vlan_id = skb_vlan_tag_get_id(skb); + } + proto = vlan->h_vlan_encapsulated_proto; nhoff += sizeof(*vlan); goto again; @@ -275,20 +277,51 @@ bool __skb_flow_dissect(const struct sk_buff *skb, if (!hdr) return false; key_basic->n_proto = proto; - key_basic->thoff = (u16)nhoff; + key_control->thoff = (u16)nhoff; if (skb_flow_dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS)) { + FLOW_DISSECTOR_KEY_TIPC_ADDRS)) { key_addrs = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, + FLOW_DISSECTOR_KEY_TIPC_ADDRS, target_container); - key_addrs->src = hdr->srcnode; - key_addrs->dst = 0; + key_addrs->tipcaddrs.srcnode = hdr->srcnode; + key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS; + } + return true; + } + + case htons(ETH_P_MPLS_UC): + case htons(ETH_P_MPLS_MC): { + struct mpls_label *hdr, _hdr[2]; +mpls: + hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, + hlen, &_hdr); + if (!hdr) + return false; + + if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) == + MPLS_LABEL_ENTROPY) { + if (skb_flow_dissector_uses_key(flow_dissector, + 
FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) { + key_keyid = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_MPLS_ENTROPY, + target_container); + key_keyid->keyid = hdr[1].entry & + htonl(MPLS_LS_LABEL_MASK); + } + + key_basic->n_proto = proto; + key_basic->ip_proto = ip_proto; + key_control->thoff = (u16)nhoff; + + return true; } + return true; } + case htons(ETH_P_FCOE): - key_basic->thoff = (u16)(nhoff + FCOE_HEADER_LEN); + key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN); /* fall through */ default: return false; @@ -308,30 +341,47 @@ bool __skb_flow_dissect(const struct sk_buff *skb, * Only look inside GRE if version zero and no * routing */ - if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) { - proto = hdr->proto; + if (hdr->flags & (GRE_VERSION | GRE_ROUTING)) + break; + + proto = hdr->proto; + nhoff += 4; + if (hdr->flags & GRE_CSUM) nhoff += 4; - if (hdr->flags & GRE_CSUM) - nhoff += 4; - if (hdr->flags & GRE_KEY) - nhoff += 4; - if (hdr->flags & GRE_SEQ) - nhoff += 4; - if (proto == htons(ETH_P_TEB)) { - const struct ethhdr *eth; - struct ethhdr _eth; - - eth = __skb_header_pointer(skb, nhoff, - sizeof(_eth), - data, hlen, &_eth); - if (!eth) - return false; - proto = eth->h_proto; - nhoff += sizeof(*eth); + if (hdr->flags & GRE_KEY) { + const __be32 *keyid; + __be32 _keyid; + + keyid = __skb_header_pointer(skb, nhoff, sizeof(_keyid), + data, hlen, &_keyid); + + if (!keyid) + return false; + + if (skb_flow_dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_GRE_KEYID)) { + key_keyid = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_GRE_KEYID, + target_container); + key_keyid->keyid = *keyid; } - goto again; + nhoff += 4; } - break; + if (hdr->flags & GRE_SEQ) + nhoff += 4; + if (proto == htons(ETH_P_TEB)) { + const struct ethhdr *eth; + struct ethhdr _eth; + + eth = __skb_header_pointer(skb, nhoff, + sizeof(_eth), + data, hlen, &_eth); + if (!eth) + return false; + proto = eth->h_proto; + nhoff += sizeof(*eth); + } + goto again; } case IPPROTO_IPIP: proto = htons(ETH_P_IP); @@ -339,19 +389,16 @@ bool __skb_flow_dissect(const struct sk_buff *skb, case IPPROTO_IPV6: proto = htons(ETH_P_IPV6); goto ipv6; + case IPPROTO_MPLS: + proto = htons(ETH_P_MPLS_UC); + goto mpls; default: break; } - /* It is ensured by skb_flow_dissector_init() that basic key will - * be always present. 
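The flow_dissector.c hunks above all follow one pattern: callers pre-register (key id, container offset) pairs, and the dissector fills in only the keys a caller asked for, via skb_flow_dissector_uses_key() and skb_flow_dissector_target(). A self-contained userspace analogue of that pattern, with made-up names:

#include <stddef.h>
#include <stdio.h>

enum key_id { KEY_CONTROL, KEY_BASIC, KEY_MAX };

struct control_key { unsigned short thoff; };
struct basic_key { unsigned short n_proto; };

struct my_keys {
	struct control_key control;
	struct basic_key basic;
};

struct dissector {
	unsigned int used_keys;	/* bitmask of registered key ids */
	size_t offset[KEY_MAX];	/* container offset for each key */
};

static int uses_key(const struct dissector *d, enum key_id id)
{
	return d->used_keys & (1u << id);
}

/* Return where a given key lives inside the caller's container. */
static void *key_target(const struct dissector *d, enum key_id id,
			void *container)
{
	return (char *)container + d->offset[id];
}

int main(void)
{
	struct dissector d = { 0, { 0 } };
	struct my_keys keys = { { 0 }, { 0 } };
	struct control_key *ctl;

	d.used_keys |= 1u << KEY_CONTROL;
	d.offset[KEY_CONTROL] = offsetof(struct my_keys, control);

	if (uses_key(&d, KEY_CONTROL)) {
		ctl = key_target(&d, KEY_CONTROL, &keys);
		ctl->thoff = 14;	/* e.g. transport header offset */
	}
	printf("thoff=%u\n", keys.control.thoff);
	return 0;
}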
- */ - key_basic = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_BASIC, - target_container); key_basic->n_proto = proto; key_basic->ip_proto = ip_proto; - key_basic->thoff = (u16) nhoff; + key_control->thoff = (u16)nhoff; if (skb_flow_dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) { @@ -372,27 +419,109 @@ static __always_inline void __flow_hash_secret_init(void) net_get_random_once(&hashrnd, sizeof(hashrnd)); } -static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c, u32 keyval) +static __always_inline u32 __flow_hash_words(u32 *words, u32 length, u32 keyval) +{ + return jhash2(words, length, keyval); +} + +static inline void *flow_keys_hash_start(struct flow_keys *flow) +{ + BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32)); + return (void *)flow + FLOW_KEYS_HASH_OFFSET; +} + +static inline size_t flow_keys_hash_length(struct flow_keys *flow) { - return jhash_3words(a, b, c, keyval); + size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs); + BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32)); + BUILD_BUG_ON(offsetof(typeof(*flow), addrs) != + sizeof(*flow) - sizeof(flow->addrs)); + + switch (flow->control.addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + diff -= sizeof(flow->addrs.v4addrs); + break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + diff -= sizeof(flow->addrs.v6addrs); + break; + case FLOW_DISSECTOR_KEY_TIPC_ADDRS: + diff -= sizeof(flow->addrs.tipcaddrs); + break; + } + return (sizeof(*flow) - diff) / sizeof(u32); +} + +__be32 flow_get_u32_src(const struct flow_keys *flow) +{ + switch (flow->control.addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + return flow->addrs.v4addrs.src; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + return (__force __be32)ipv6_addr_hash( + &flow->addrs.v6addrs.src); + case FLOW_DISSECTOR_KEY_TIPC_ADDRS: + return flow->addrs.tipcaddrs.srcnode; + default: + return 0; + } +} +EXPORT_SYMBOL(flow_get_u32_src); + +__be32 flow_get_u32_dst(const struct flow_keys *flow) +{ + switch (flow->control.addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + return flow->addrs.v4addrs.dst; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + return (__force __be32)ipv6_addr_hash( + &flow->addrs.v6addrs.dst); + default: + return 0; + } +} +EXPORT_SYMBOL(flow_get_u32_dst); + +static inline void __flow_hash_consistentify(struct flow_keys *keys) +{ + int addr_diff, i; + + switch (keys->control.addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + addr_diff = (__force u32)keys->addrs.v4addrs.dst - + (__force u32)keys->addrs.v4addrs.src; + if ((addr_diff < 0) || + (addr_diff == 0 && + ((__force u16)keys->ports.dst < + (__force u16)keys->ports.src))) { + swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst); + swap(keys->ports.src, keys->ports.dst); + } + break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + addr_diff = memcmp(&keys->addrs.v6addrs.dst, + &keys->addrs.v6addrs.src, + sizeof(keys->addrs.v6addrs.dst)); + if ((addr_diff < 0) || + (addr_diff == 0 && + ((__force u16)keys->ports.dst < + (__force u16)keys->ports.src))) { + for (i = 0; i < 4; i++) + swap(keys->addrs.v6addrs.src.s6_addr32[i], + keys->addrs.v6addrs.dst.s6_addr32[i]); + swap(keys->ports.src, keys->ports.dst); + } + break; + } } static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval) { u32 hash; - /* get a consistent hash (same value on both flow directions) */ - if (((__force u32)keys->addrs.dst < (__force u32)keys->addrs.src) || - (((__force u32)keys->addrs.dst == (__force u32)keys->addrs.src) && - ((__force u16)keys->ports.dst < (__force 
u16)keys->ports.src))) { - swap(keys->addrs.dst, keys->addrs.src); - swap(keys->ports.src, keys->ports.dst); - } + __flow_hash_consistentify(keys); - hash = __flow_hash_3words((__force u32)keys->addrs.dst, - (__force u32)keys->addrs.src, - (__force u32)keys->ports.ports, - keyval); + hash = __flow_hash_words((u32 *)flow_keys_hash_start(keys), + flow_keys_hash_length(keys), keyval); if (!hash) hash = 1; @@ -437,8 +566,8 @@ void make_flow_keys_digest(struct flow_keys_digest *digest, data->n_proto = flow->basic.n_proto; data->ip_proto = flow->basic.ip_proto; data->ports = flow->ports.ports; - data->src = flow->addrs.src; - data->dst = flow->addrs.dst; + data->src = flow->addrs.v4addrs.src; + data->dst = flow->addrs.v4addrs.dst; } EXPORT_SYMBOL(make_flow_keys_digest); @@ -479,7 +608,7 @@ EXPORT_SYMBOL(skb_get_hash_perturb); u32 __skb_get_poff(const struct sk_buff *skb, void *data, const struct flow_keys *keys, int hlen) { - u32 poff = keys->basic.thoff; + u32 poff = keys->control.thoff; switch (keys->basic.ip_proto) { case IPPROTO_TCP: { @@ -542,25 +671,49 @@ u32 skb_get_poff(const struct sk_buff *skb) } static const struct flow_dissector_key flow_keys_dissector_keys[] = { + { + .key_id = FLOW_DISSECTOR_KEY_CONTROL, + .offset = offsetof(struct flow_keys, control), + }, { .key_id = FLOW_DISSECTOR_KEY_BASIC, .offset = offsetof(struct flow_keys, basic), }, { .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS, - .offset = offsetof(struct flow_keys, addrs), + .offset = offsetof(struct flow_keys, addrs.v4addrs), + }, + { + .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS, + .offset = offsetof(struct flow_keys, addrs.v6addrs), }, { - .key_id = FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, - .offset = offsetof(struct flow_keys, addrs), + .key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS, + .offset = offsetof(struct flow_keys, addrs.tipcaddrs), }, { .key_id = FLOW_DISSECTOR_KEY_PORTS, .offset = offsetof(struct flow_keys, ports), }, + { + .key_id = FLOW_DISSECTOR_KEY_VLANID, + .offset = offsetof(struct flow_keys, tags), + }, + { + .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL, + .offset = offsetof(struct flow_keys, tags), + }, + { + .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID, + .offset = offsetof(struct flow_keys, keyid), + }, }; static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = { + { + .key_id = FLOW_DISSECTOR_KEY_CONTROL, + .offset = offsetof(struct flow_keys, control), + }, { .key_id = FLOW_DISSECTOR_KEY_BASIC, .offset = offsetof(struct flow_keys, basic), diff --git a/net/core/netevent.c b/net/core/netevent.c index f17ccd291d39..8b3bc4fac613 100644 --- a/net/core/netevent.c +++ b/net/core/netevent.c @@ -31,10 +31,7 @@ static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain); */ int register_netevent_notifier(struct notifier_block *nb) { - int err; - - err = atomic_notifier_chain_register(&netevent_notif_chain, nb); - return err; + return atomic_notifier_chain_register(&netevent_notif_chain, nb); } EXPORT_SYMBOL_GPL(register_netevent_notifier); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 9bac0e6f8dfa..b6a19ca0f99e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4467,7 +4467,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, while (order) { if (npages >= 1 << order) { - page = alloc_pages(gfp_mask | + page = alloc_pages((gfp_mask & ~__GFP_WAIT) | __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY, diff --git a/net/core/sock.c b/net/core/sock.c index 29124fcdc42a..7063c329c1b6 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -354,15 +354,12 @@ void sk_clear_memalloc(struct sock *sk) /* * SOCK_MEMALLOC 
is allowed to ignore rmem limits to ensure forward - * progress of swapping. However, if SOCK_MEMALLOC is cleared while - * it has rmem allocations there is a risk that the user of the - * socket cannot make forward progress due to exceeding the rmem - * limits. By rights, sk_clear_memalloc() should only be called - * on sockets being torn down but warn and reset the accounting if - * that assumption breaks. + * progress of swapping. SOCK_MEMALLOC may be cleared while + * it has rmem allocations due to the last swapfile being deactivated + * but there is a risk that the socket is unusable due to exceeding + * the rmem limits. Reclaim the reserves and obey rmem limits again. */ - if (WARN_ON(sk->sk_forward_alloc)) - sk_mem_reclaim(sk); + sk_mem_reclaim(sk); } EXPORT_SYMBOL_GPL(sk_clear_memalloc); @@ -1581,6 +1578,8 @@ EXPORT_SYMBOL_GPL(sk_clone_lock); void sk_setup_caps(struct sock *sk, struct dst_entry *dst) { + u32 max_segs = 1; + __sk_dst_set(sk, dst); sk->sk_route_caps = dst->dev->features; if (sk->sk_route_caps & NETIF_F_GSO) @@ -1592,9 +1591,10 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) } else { sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; sk->sk_gso_max_size = dst->dev->gso_max_size; - sk->sk_gso_max_segs = dst->dev->gso_max_segs; + max_segs = max_t(u32, dst->dev->gso_max_segs, 1); } } + sk->sk_gso_max_segs = max_segs; } EXPORT_SYMBOL_GPL(sk_setup_caps); @@ -1869,7 +1869,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp) pfrag->offset = 0; if (SKB_FRAG_PAGE_ORDER) { - pfrag->page = alloc_pages(gfp | __GFP_COMP | + pfrag->page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY, SKB_FRAG_PAGE_ORDER); if (likely(pfrag->page)) { diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index e6f6cc3a1bcf..392e29a0227d 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -359,7 +359,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, */ ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL); if (ds == NULL) - return NULL; + return ERR_PTR(-ENOMEM); ds->dst = dst; ds->index = index; @@ -370,7 +370,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, ret = dsa_switch_setup_one(ds, parent); if (ret) - return NULL; + return ERR_PTR(ret); return ds; } diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index c3325bd2f3fb..77e0f0e7a88e 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -133,7 +133,7 @@ u32 eth_get_headlen(void *data, unsigned int len) /* parse any remaining L2/L3 headers, check for L4 */ if (!skb_flow_dissect_flow_keys_buf(&keys, data, eth->h_proto, sizeof(*eth), len)) - return max_t(u32, keys.basic.thoff, sizeof(*eth)); + return max_t(u32, keys.control.thoff, sizeof(*eth)); /* parse for any L4 headers */ return min_t(u32, __skb_get_poff(NULL, data, &keys, len), len); @@ -470,6 +470,7 @@ EXPORT_SYMBOL(eth_gro_complete); static struct packet_offload eth_packet_offload __read_mostly = { .type = cpu_to_be16(ETH_P_TEB), + .priority = 10, .callbacks = { .gro_receive = eth_gro_receive, .gro_complete = eth_gro_complete, diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index 0ae5822ef944..f20a387a1011 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -55,27 +55,6 @@ LIST_HEAD(lowpan_devices); static int lowpan_open_count; -static __le16 lowpan_get_pan_id(const struct net_device *dev) -{ - struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; - - return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev); -} - -static __le16 
lowpan_get_short_addr(const struct net_device *dev) -{ - struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; - - return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev); -} - -static u8 lowpan_get_dsn(const struct net_device *dev) -{ - struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; - - return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev); -} - static struct header_ops lowpan_header_ops = { .create = lowpan_header_create, }; @@ -103,12 +82,6 @@ static const struct net_device_ops lowpan_netdev_ops = { .ndo_start_xmit = lowpan_xmit, }; -static struct ieee802154_mlme_ops lowpan_mlme = { - .get_pan_id = lowpan_get_pan_id, - .get_short_addr = lowpan_get_short_addr, - .get_dsn = lowpan_get_dsn, -}; - static void lowpan_setup(struct net_device *dev) { dev->addr_len = IEEE802154_ADDR_LEN; @@ -124,7 +97,6 @@ static void lowpan_setup(struct net_device *dev) dev->netdev_ops = &lowpan_netdev_ops; dev->header_ops = &lowpan_header_ops; - dev->ml_priv = &lowpan_mlme; dev->destructor = free_netdev; dev->features |= NETIF_F_NETNS_LOCAL; } diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c index 2349070bd534..98acf7319754 100644 --- a/net/ieee802154/6lowpan/tx.c +++ b/net/ieee802154/6lowpan/tx.c @@ -207,7 +207,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *dev) /* prepare wpan address data */ sa.mode = IEEE802154_ADDR_LONG; - sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); + sa.pan_id = lowpan_dev_info(dev)->real_dev->ieee802154_ptr->pan_id; sa.extended_addr = ieee802154_devaddr_from_raw(saddr); /* intra-PAN communications */ diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c index 2ee00e8a0308..b0248e934230 100644 --- a/net/ieee802154/core.c +++ b/net/ieee802154/core.c @@ -121,8 +121,6 @@ wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size) /* atomic_inc_return makes it start at 1, make it start at 0 */ rdev->wpan_phy_idx--; - mutex_init(&rdev->wpan_phy.pib_lock); - INIT_LIST_HEAD(&rdev->wpan_dev_list); device_initialize(&rdev->wpan_phy.dev); dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx); diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index 2b4955d7aae5..3503c38954f9 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -97,8 +97,10 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid, BUG_ON(!phy); get_device(&phy->dev); - short_addr = ops->get_short_addr(dev); - pan_id = ops->get_pan_id(dev); + rtnl_lock(); + short_addr = dev->ieee802154_ptr->short_addr; + pan_id = dev->ieee802154_ptr->pan_id; + rtnl_unlock(); if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || @@ -117,12 +119,12 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid, rtnl_unlock(); if (nla_put_s8(msg, IEEE802154_ATTR_TXPOWER, - params.transmit_power) || + params.transmit_power / 100) || nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, params.lbt) || nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE, params.cca.mode) || nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL, - params.cca_ed_level) || + params.cca_ed_level / 100) || nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES, params.csma_retries) || nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE, @@ -166,10 +168,7 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info) if (!dev) return NULL; - /* Check on mtu is currently a hacked solution because lowpan - * and wpan have the same ARPHRD type. 
- */ - if (dev->type != ARPHRD_IEEE802154 || dev->mtu != IEEE802154_MTU) { + if (dev->type != ARPHRD_IEEE802154) { dev_put(dev); return NULL; } @@ -244,7 +243,9 @@ int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info) addr.mode = IEEE802154_ADDR_LONG; addr.extended_addr = nla_get_hwaddr( info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]); - addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); + rtnl_lock(); + addr.pan_id = dev->ieee802154_ptr->pan_id; + rtnl_unlock(); ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr, nla_get_shortaddr(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]), @@ -281,7 +282,9 @@ int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info) addr.short_addr = nla_get_shortaddr( info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]); } - addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); + rtnl_lock(); + addr.pan_id = dev->ieee802154_ptr->pan_id; + rtnl_unlock(); ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr, nla_get_u8(info->attrs[IEEE802154_ATTR_REASON])); @@ -449,11 +452,7 @@ int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb) idx = 0; for_each_netdev(net, dev) { - /* Check on mtu is currently a hacked solution because lowpan - * and wpan have the same ARPHRD type. - */ - if (idx < s_idx || dev->type != ARPHRD_IEEE802154 || - dev->mtu != IEEE802154_MTU) + if (idx < s_idx || dev->type != ARPHRD_IEEE802154) goto cont; if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid, @@ -510,7 +509,7 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info) ops->get_mac_params(dev, ¶ms); if (info->attrs[IEEE802154_ATTR_TXPOWER]) - params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]); + params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]) * 100; if (info->attrs[IEEE802154_ATTR_LBT_ENABLED]) params.lbt = nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]); @@ -519,7 +518,7 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info) params.cca.mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]); if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) - params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]); + params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) * 100; if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES]) params.csma_retries = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_RETRIES]); @@ -783,11 +782,7 @@ ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb, int rc; for_each_netdev(net, dev) { - /* Check on mtu is currently a hacked solution because lowpan - * and wpan have the same ARPHRD type. 
- */ - if (idx < first_dev || dev->type != ARPHRD_IEEE802154 || - dev->mtu != IEEE802154_MTU) + if (idx < first_dev || dev->type != ARPHRD_IEEE802154) goto skip; data.ops = ieee802154_mlme_ops(dev); diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c index 346c6665d25e..77d73014bde3 100644 --- a/net/ieee802154/nl-phy.c +++ b/net/ieee802154/nl-phy.c @@ -50,26 +50,26 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid, if (!hdr) goto out; - mutex_lock(&phy->pib_lock); + rtnl_lock(); if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) || nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel)) goto nla_put_failure; for (i = 0; i < 32; i++) { - if (phy->channels_supported[i]) - buf[pages++] = phy->channels_supported[i] | (i << 27); + if (phy->supported.channels[i]) + buf[pages++] = phy->supported.channels[i] | (i << 27); } if (pages && nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST, pages * sizeof(uint32_t), buf)) goto nla_put_failure; - mutex_unlock(&phy->pib_lock); + rtnl_unlock(); kfree(buf); genlmsg_end(msg, hdr); return 0; nla_put_failure: - mutex_unlock(&phy->pib_lock); + rtnl_unlock(); genlmsg_cancel(msg, hdr); out: kfree(buf); diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index f3c12f6a4a39..7dbb1f4ce7df 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -207,10 +207,11 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = { [NL802154_ATTR_PAGE] = { .type = NLA_U8, }, [NL802154_ATTR_CHANNEL] = { .type = NLA_U8, }, - [NL802154_ATTR_TX_POWER] = { .type = NLA_S8, }, + [NL802154_ATTR_TX_POWER] = { .type = NLA_S32, }, [NL802154_ATTR_CCA_MODE] = { .type = NLA_U32, }, [NL802154_ATTR_CCA_OPT] = { .type = NLA_U32, }, + [NL802154_ATTR_CCA_ED_LEVEL] = { .type = NLA_S32, }, [NL802154_ATTR_SUPPORTED_CHANNEL] = { .type = NLA_U32, }, @@ -225,6 +226,8 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = { [NL802154_ATTR_MAX_FRAME_RETRIES] = { .type = NLA_S8, }, [NL802154_ATTR_LBT_MODE] = { .type = NLA_U8, }, + + [NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED }, }; /* message building helper */ @@ -235,6 +238,28 @@ static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq, return genlmsg_put(skb, portid, seq, &nl802154_fam, flags, cmd); } +static int +nl802154_put_flags(struct sk_buff *msg, int attr, u32 mask) +{ + struct nlattr *nl_flags = nla_nest_start(msg, attr); + int i; + + if (!nl_flags) + return -ENOBUFS; + + i = 0; + while (mask) { + if ((mask & 1) && nla_put_flag(msg, i)) + return -ENOBUFS; + + mask >>= 1; + i++; + } + + nla_nest_end(msg, nl_flags); + return 0; +} + static int nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev, struct sk_buff *msg) @@ -248,7 +273,7 @@ nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev, for (page = 0; page <= IEEE802154_MAX_PAGE; page++) { if (nla_put_u32(msg, NL802154_ATTR_SUPPORTED_CHANNEL, - rdev->wpan_phy.channels_supported[page])) + rdev->wpan_phy.supported.channels[page])) return -ENOBUFS; } nla_nest_end(msg, nl_page); @@ -256,6 +281,92 @@ nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev, return 0; } +static int +nl802154_put_capabilities(struct sk_buff *msg, + struct cfg802154_registered_device *rdev) +{ + const struct wpan_phy_supported *caps = &rdev->wpan_phy.supported; + struct nlattr *nl_caps, *nl_channels; + int i; + + nl_caps = nla_nest_start(msg, 
NL802154_ATTR_WPAN_PHY_CAPS); + if (!nl_caps) + return -ENOBUFS; + + nl_channels = nla_nest_start(msg, NL802154_CAP_ATTR_CHANNELS); + if (!nl_channels) + return -ENOBUFS; + + for (i = 0; i <= IEEE802154_MAX_PAGE; i++) { + if (caps->channels[i]) { + if (nl802154_put_flags(msg, i, caps->channels[i])) + return -ENOBUFS; + } + } + + nla_nest_end(msg, nl_channels); + + if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) { + struct nlattr *nl_ed_lvls; + + nl_ed_lvls = nla_nest_start(msg, + NL802154_CAP_ATTR_CCA_ED_LEVELS); + if (!nl_ed_lvls) + return -ENOBUFS; + + for (i = 0; i < caps->cca_ed_levels_size; i++) { + if (nla_put_s32(msg, i, caps->cca_ed_levels[i])) + return -ENOBUFS; + } + + nla_nest_end(msg, nl_ed_lvls); + } + + if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) { + struct nlattr *nl_tx_pwrs; + + nl_tx_pwrs = nla_nest_start(msg, NL802154_CAP_ATTR_TX_POWERS); + if (!nl_tx_pwrs) + return -ENOBUFS; + + for (i = 0; i < caps->tx_powers_size; i++) { + if (nla_put_s32(msg, i, caps->tx_powers[i])) + return -ENOBUFS; + } + + nla_nest_end(msg, nl_tx_pwrs); + } + + if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) { + if (nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_MODES, + caps->cca_modes) || + nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_OPTS, + caps->cca_opts)) + return -ENOBUFS; + } + + if (nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MINBE, caps->min_minbe) || + nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MINBE, caps->max_minbe) || + nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MAXBE, caps->min_maxbe) || + nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MAXBE, caps->max_maxbe) || + nla_put_u8(msg, NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS, + caps->min_csma_backoffs) || + nla_put_u8(msg, NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS, + caps->max_csma_backoffs) || + nla_put_s8(msg, NL802154_CAP_ATTR_MIN_FRAME_RETRIES, + caps->min_frame_retries) || + nla_put_s8(msg, NL802154_CAP_ATTR_MAX_FRAME_RETRIES, + caps->max_frame_retries) || + nl802154_put_flags(msg, NL802154_CAP_ATTR_IFTYPES, + caps->iftypes) || + nla_put_u32(msg, NL802154_CAP_ATTR_LBT, caps->lbt)) + return -ENOBUFS; + + nla_nest_end(msg, nl_caps); + + return 0; +} + static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev, enum nl802154_commands cmd, struct sk_buff *msg, u32 portid, u32 seq, @@ -286,23 +397,38 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev, rdev->wpan_phy.current_channel)) goto nla_put_failure; - /* supported channels array */ + /* TODO remove this behaviour; we still support it for a while + * so users can switch over to the new one.
+ */ if (nl802154_send_wpan_phy_channels(rdev, msg)) goto nla_put_failure; /* cca mode */ - if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE, - rdev->wpan_phy.cca.mode)) - goto nla_put_failure; + if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) { + if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE, + rdev->wpan_phy.cca.mode)) + goto nla_put_failure; + + if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) { + if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT, + rdev->wpan_phy.cca.opt)) + goto nla_put_failure; + } + } - if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) { - if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT, - rdev->wpan_phy.cca.opt)) + if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) { + if (nla_put_s32(msg, NL802154_ATTR_TX_POWER, + rdev->wpan_phy.transmit_power)) goto nla_put_failure; } - if (nla_put_s8(msg, NL802154_ATTR_TX_POWER, - rdev->wpan_phy.transmit_power)) + if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) { + if (nla_put_s32(msg, NL802154_ATTR_CCA_ED_LEVEL, + rdev->wpan_phy.cca_ed_level)) + goto nla_put_failure; + } + + if (nl802154_put_capabilities(msg, rdev)) goto nla_put_failure; finish: @@ -575,7 +701,8 @@ static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL802154_ATTR_IFTYPE]) { type = nla_get_u32(info->attrs[NL802154_ATTR_IFTYPE]); - if (type > NL802154_IFTYPE_MAX) + if (type > NL802154_IFTYPE_MAX || + !(rdev->wpan_phy.supported.iftypes & BIT(type))) return -EINVAL; } @@ -625,7 +752,8 @@ static int nl802154_set_channel(struct sk_buff *skb, struct genl_info *info) channel = nla_get_u8(info->attrs[NL802154_ATTR_CHANNEL]); /* check 802.15.4 constraints */ - if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL) + if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL || + !(rdev->wpan_phy.supported.channels[page] & BIT(channel))) return -EINVAL; return rdev_set_channel(rdev, page, channel); @@ -636,12 +764,17 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info) struct cfg802154_registered_device *rdev = info->user_ptr[0]; struct wpan_phy_cca cca; + if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE)) + return -EOPNOTSUPP; + if (!info->attrs[NL802154_ATTR_CCA_MODE]) return -EINVAL; cca.mode = nla_get_u32(info->attrs[NL802154_ATTR_CCA_MODE]); /* checking 802.15.4 constraints */ - if (cca.mode < NL802154_CCA_ENERGY || cca.mode > NL802154_CCA_ATTR_MAX) + if (cca.mode < NL802154_CCA_ENERGY || + cca.mode > NL802154_CCA_ATTR_MAX || + !(rdev->wpan_phy.supported.cca_modes & BIT(cca.mode))) return -EINVAL; if (cca.mode == NL802154_CCA_ENERGY_CARRIER) { @@ -649,13 +782,58 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info) return -EINVAL; cca.opt = nla_get_u32(info->attrs[NL802154_ATTR_CCA_OPT]); - if (cca.opt > NL802154_CCA_OPT_ATTR_MAX) + if (cca.opt > NL802154_CCA_OPT_ATTR_MAX || + !(rdev->wpan_phy.supported.cca_opts & BIT(cca.opt))) return -EINVAL; } return rdev_set_cca_mode(rdev, &cca); } +static int nl802154_set_cca_ed_level(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + s32 ed_level; + int i; + + if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL)) + return -EOPNOTSUPP; + + if (!info->attrs[NL802154_ATTR_CCA_ED_LEVEL]) + return -EINVAL; + + ed_level = nla_get_s32(info->attrs[NL802154_ATTR_CCA_ED_LEVEL]); + + for (i = 0; i < rdev->wpan_phy.supported.cca_ed_levels_size; i++) { + if (ed_level == rdev->wpan_phy.supported.cca_ed_levels[i]) + return 
rdev_set_cca_ed_level(rdev, ed_level); + } + + return -EINVAL; +} + +static int nl802154_set_tx_power(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + s32 power; + int i; + + if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER)) + return -EOPNOTSUPP; + + if (!info->attrs[NL802154_ATTR_TX_POWER]) + return -EINVAL; + + power = nla_get_s32(info->attrs[NL802154_ATTR_TX_POWER]); + + for (i = 0; i < rdev->wpan_phy.supported.tx_powers_size; i++) { + if (power == rdev->wpan_phy.supported.tx_powers[i]) + return rdev_set_tx_power(rdev, power); + } + + return -EINVAL; +} + static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info) { struct cfg802154_registered_device *rdev = info->user_ptr[0]; @@ -668,14 +846,22 @@ static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info) return -EBUSY; /* don't change address fields on monitor */ - if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) - return -EINVAL; - - if (!info->attrs[NL802154_ATTR_PAN_ID]) + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR || + !info->attrs[NL802154_ATTR_PAN_ID]) return -EINVAL; pan_id = nla_get_le16(info->attrs[NL802154_ATTR_PAN_ID]); + /* TODO + * I am not sure about checking for the broadcast pan_id here. + * Broadcast is a valid setting; quoting 802.15.4: + * If this value is 0xffff, the device is not associated. + * + * This could be useful to simply disassociate a device. + */ + if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST)) + return -EINVAL; + return rdev_set_pan_id(rdev, wpan_dev, pan_id); } @@ -691,14 +877,27 @@ static int nl802154_set_short_addr(struct sk_buff *skb, struct genl_info *info) return -EBUSY; /* don't change address fields on monitor */ - if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) - return -EINVAL; - - if (!info->attrs[NL802154_ATTR_SHORT_ADDR]) + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR || + !info->attrs[NL802154_ATTR_SHORT_ADDR]) return -EINVAL; short_addr = nla_get_le16(info->attrs[NL802154_ATTR_SHORT_ADDR]); + /* TODO + * I am not sure about checking for the broadcast short_addr here. + * Broadcast is a valid setting; quoting 802.15.4: + * A value of 0xfffe indicates that the device has + * associated but has not been allocated an address. A + * value of 0xffff indicates that the device does not + * have a short address. + * + * I think we should allow setting these values but + * disallow socket communication with them.
+ */ + if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) || + short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST)) + return -EINVAL; + return rdev_set_short_addr(rdev, wpan_dev, short_addr); } @@ -722,7 +921,11 @@ nl802154_set_backoff_exponent(struct sk_buff *skb, struct genl_info *info) max_be = nla_get_u8(info->attrs[NL802154_ATTR_MAX_BE]); /* check 802.15.4 constraints */ - if (max_be < 3 || max_be > 8 || min_be > max_be) + if (min_be < rdev->wpan_phy.supported.min_minbe || + min_be > rdev->wpan_phy.supported.max_minbe || + max_be < rdev->wpan_phy.supported.min_maxbe || + max_be > rdev->wpan_phy.supported.max_maxbe || + min_be > max_be) return -EINVAL; return rdev_set_backoff_exponent(rdev, wpan_dev, min_be, max_be); @@ -747,7 +950,8 @@ nl802154_set_max_csma_backoffs(struct sk_buff *skb, struct genl_info *info) info->attrs[NL802154_ATTR_MAX_CSMA_BACKOFFS]); /* check 802.15.4 constraints */ - if (max_csma_backoffs > 5) + if (max_csma_backoffs < rdev->wpan_phy.supported.min_csma_backoffs || + max_csma_backoffs > rdev->wpan_phy.supported.max_csma_backoffs) return -EINVAL; return rdev_set_max_csma_backoffs(rdev, wpan_dev, max_csma_backoffs); @@ -771,7 +975,8 @@ nl802154_set_max_frame_retries(struct sk_buff *skb, struct genl_info *info) info->attrs[NL802154_ATTR_MAX_FRAME_RETRIES]); /* check 802.15.4 constraints */ - if (max_frame_retries < -1 || max_frame_retries > 7) + if (max_frame_retries < rdev->wpan_phy.supported.min_frame_retries || + max_frame_retries > rdev->wpan_phy.supported.max_frame_retries) return -EINVAL; return rdev_set_max_frame_retries(rdev, wpan_dev, max_frame_retries); @@ -791,6 +996,9 @@ static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info) return -EINVAL; mode = !!nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]); + if (!wpan_phy_supported_bool(mode, rdev->wpan_phy.supported.lbt)) + return -EINVAL; + return rdev_set_lbt_mode(rdev, wpan_dev, mode); } @@ -936,6 +1144,22 @@ static const struct genl_ops nl802154_ops[] = { .internal_flags = NL802154_FLAG_NEED_WPAN_PHY | NL802154_FLAG_NEED_RTNL, }, + { + .cmd = NL802154_CMD_SET_CCA_ED_LEVEL, + .doit = nl802154_set_cca_ed_level, + .policy = nl802154_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL802154_FLAG_NEED_WPAN_PHY | + NL802154_FLAG_NEED_RTNL, + }, + { + .cmd = NL802154_CMD_SET_TX_POWER, + .doit = nl802154_set_tx_power, + .policy = nl802154_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL802154_FLAG_NEED_WPAN_PHY | + NL802154_FLAG_NEED_RTNL, + }, { .cmd = NL802154_CMD_SET_PAN_ID, .doit = nl802154_set_pan_id, diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h index 7b5a9dd94fe5..b2155a123f6c 100644 --- a/net/ieee802154/rdev-ops.h +++ b/net/ieee802154/rdev-ops.h @@ -74,6 +74,29 @@ rdev_set_cca_mode(struct cfg802154_registered_device *rdev, return ret; } +static inline int +rdev_set_cca_ed_level(struct cfg802154_registered_device *rdev, s32 ed_level) +{ + int ret; + + trace_802154_rdev_set_cca_ed_level(&rdev->wpan_phy, ed_level); + ret = rdev->ops->set_cca_ed_level(&rdev->wpan_phy, ed_level); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; +} + +static inline int +rdev_set_tx_power(struct cfg802154_registered_device *rdev, + s32 power) +{ + int ret; + + trace_802154_rdev_set_tx_power(&rdev->wpan_phy, power); + ret = rdev->ops->set_tx_power(&rdev->wpan_phy, power); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; +} + static inline int rdev_set_pan_id(struct cfg802154_registered_device *rdev, struct wpan_dev 
*wpan_dev, __le16 pan_id) diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 7aaaf967df58..02abef2c1621 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -64,10 +64,8 @@ ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr) if (tmp->type != ARPHRD_IEEE802154) continue; - pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp); - short_addr = - ieee802154_mlme_ops(tmp)->get_short_addr(tmp); - + pan_id = tmp->ieee802154_ptr->pan_id; + short_addr = tmp->ieee802154_ptr->short_addr; if (pan_id == addr->pan_id && short_addr == addr->short_addr) { dev = tmp; @@ -228,15 +226,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len) goto out; } - if (dev->type != ARPHRD_IEEE802154) { - err = -ENODEV; - goto out_put; - } - sk->sk_bound_dev_if = dev->ifindex; sk_dst_reset(sk); -out_put: dev_put(dev); out: release_sock(sk); @@ -286,7 +278,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) if (size > mtu) { pr_debug("size = %Zu, mtu = %u\n", size, mtu); - err = -EINVAL; + err = -EMSGSIZE; goto out_dev; } @@ -797,9 +789,9 @@ static int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb) /* Data frame processing */ BUG_ON(dev->type != ARPHRD_IEEE802154); - pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); - short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev); - hw_addr = ieee802154_devaddr_from_raw(dev->dev_addr); + pan_id = dev->ieee802154_ptr->pan_id; + short_addr = dev->ieee802154_ptr->short_addr; + hw_addr = dev->ieee802154_ptr->extended_addr; read_lock(&dgram_lock); sk_for_each(sk, &dgram_head) { diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h index 5ac25eb6ed17..73eb7605c1eb 100644 --- a/net/ieee802154/trace.h +++ b/net/ieee802154/trace.h @@ -1,4 +1,4 @@ -/* Based on net/wireless/tracing.h */ +/* Based on net/wireless/trace.h */ #undef TRACE_SYSTEM #define TRACE_SYSTEM cfg802154 @@ -93,6 +93,21 @@ TRACE_EVENT(802154_rdev_set_channel, __entry->page, __entry->channel) ); +TRACE_EVENT(802154_rdev_set_tx_power, + TP_PROTO(struct wpan_phy *wpan_phy, s32 power), + TP_ARGS(wpan_phy, power), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + __field(s32, power) + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + __entry->power = power; + ), + TP_printk(WPAN_PHY_PR_FMT ", power: %d", WPAN_PHY_PR_ARG, + __entry->power) +); + TRACE_EVENT(802154_rdev_set_cca_mode, TP_PROTO(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca), TP_ARGS(wpan_phy, cca), @@ -108,6 +123,21 @@ TRACE_EVENT(802154_rdev_set_cca_mode, WPAN_CCA_PR_ARG) ); +TRACE_EVENT(802154_rdev_set_cca_ed_level, + TP_PROTO(struct wpan_phy *wpan_phy, s32 ed_level), + TP_ARGS(wpan_phy, ed_level), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + __field(s32, ed_level) + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + __entry->ed_level = ed_level; + ), + TP_printk(WPAN_PHY_PR_FMT ", ed_level: %d", WPAN_PHY_PR_ARG, + __entry->ed_level) +); + DECLARE_EVENT_CLASS(802154_le16_template, TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le16 le16arg), diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 235d36afece3..cc858ef44451 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -488,7 +488,8 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) inet->inet_saddr = 0; /* Use device */ /* Make sure we are allowed to bind here. 
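The inet_bind() change just below skips local port allocation when binding to port 0 with inet->bind_address_no_port set, deferring the choice to connect() so that binding a source address no longer consumes an ephemeral port. A hedged userspace sketch, assuming the IP_BIND_ADDRESS_NO_PORT socket option that sets this flag (introduced elsewhere in this series; the fallback value 24 matches kernels of this vintage):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IP_BIND_ADDRESS_NO_PORT
#define IP_BIND_ADDRESS_NO_PORT 24	/* assumption: uapi value from this series */
#endif

int main(void)
{
	struct sockaddr_in src = { 0 }, dst = { 0 };
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one));

	src.sin_family = AF_INET;
	src.sin_port = 0;			/* defer port choice to connect() */
	inet_pton(AF_INET, "192.0.2.1", &src.sin_addr);
	if (bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0)
		perror("bind");

	dst.sin_family = AF_INET;
	dst.sin_port = htons(80);
	inet_pton(AF_INET, "198.51.100.1", &dst.sin_addr);
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");

	close(fd);
	return 0;
}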
*/ - if (sk->sk_prot->get_port(sk, snum)) { + if ((snum || !inet->bind_address_no_port) && + sk->sk_prot->get_port(sk, snum)) { inet->inet_saddr = inet->inet_rcv_saddr = 0; err = -EADDRINUSE; goto out_release_sock; @@ -1595,7 +1596,7 @@ static __net_init int inet_init_net(struct net *net) */ seqlock_init(&net->ipv4.ip_local_ports.lock); net->ipv4.ip_local_ports.range[0] = 32768; - net->ipv4.ip_local_ports.range[1] = 61000; + net->ipv4.ip_local_ports.range[1] = 60999; seqlock_init(&net->ipv4.ping_group_range.lock); /* diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 421a80b09b62..30b544f025ac 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -256,7 +256,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) aead_givcrypt_set_crypt(req, sg, sg, clen, iv); aead_givcrypt_set_assoc(req, asg, assoclen); aead_givcrypt_set_giv(req, esph->enc_data, - XFRM_SKB_CB(skb)->seq.output.low); + XFRM_SKB_CB(skb)->seq.output.low + + ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); ESP_SKB_CB(skb)->tmp = tmp; err = crypto_aead_givencrypt(req); diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 5a5d9bdeaeb4..3c699c4e90a4 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -72,6 +72,7 @@ #include #include #include +#include #include #include #include @@ -324,13 +325,15 @@ static inline void empty_child_dec(struct key_vector *n) static struct key_vector *leaf_new(t_key key, struct fib_alias *fa) { - struct tnode *kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL); - struct key_vector *l = kv->kv; + struct key_vector *l; + struct tnode *kv; + kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL); if (!kv) return NULL; /* initialize key vector */ + l = kv->kv; l->key = key; l->pos = 0; l->bits = 0; @@ -345,24 +348,26 @@ static struct key_vector *leaf_new(t_key key, struct fib_alias *fa) static struct key_vector *tnode_new(t_key key, int pos, int bits) { - struct tnode *tnode = tnode_alloc(bits); unsigned int shift = pos + bits; - struct key_vector *tn = tnode->kv; + struct key_vector *tn; + struct tnode *tnode; /* verify bits and pos their msb bits clear and values are valid */ BUG_ON(!bits || (shift > KEYLENGTH)); - pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0), - sizeof(struct key_vector *) << bits); - + tnode = tnode_alloc(bits); if (!tnode) return NULL; + pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0), + sizeof(struct key_vector *) << bits); + if (bits == KEYLENGTH) tnode->full_children = 1; else tnode->empty_children = 1ul << bits; + tn = tnode->kv; tn->key = (shift < KEYLENGTH) ? 
(key >> shift) << shift : 0; tn->pos = pos; tn->bits = bits; @@ -2053,11 +2058,12 @@ static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter) static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter, struct trie *t) { - struct key_vector *n, *pn = t->kv; + struct key_vector *n, *pn; if (!t) return NULL; + pn = t->kv; n = rcu_dereference(pn->tnode[0]); if (!n) return NULL; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 3766bddb3e8a..5f9b063bbe8a 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -393,9 +394,10 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, return -EADDRNOTAVAIL; } -static inline u32 inet_sk_port_offset(const struct sock *sk) +static u32 inet_sk_port_offset(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); + return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr, inet->inet_daddr, inet->inet_dport); @@ -501,8 +503,14 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; + /* By starting with offset being an even number, + * we tend to leave about 50% of ports for other uses, + * like bind(0). + */ + offset &= ~1; + local_bh_disable(); - for (i = 1; i <= remaining; i++) { + for (i = 0; i < remaining; i++) { port = low + (i + offset) % remaining; if (inet_is_local_reserved_port(net, port)) continue; @@ -546,7 +554,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, return -EADDRNOTAVAIL; ok: - hint += i; + hint += (i + 2) & ~1; /* Head lock still held and bh's disabled */ inet_bind_hash(sk, tb, port); @@ -593,7 +601,11 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, int inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { - return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk), + u32 port_offset = 0; + + if (!inet_sk(sk)->inet_num) + port_offset = inet_sk_port_offset(sk); + return __inet_hash_connect(death_row, sk, port_offset, __inet_check_established); } EXPORT_SYMBOL_GPL(inet_hash_connect); @@ -609,3 +621,33 @@ void inet_hashinfo_init(struct inet_hashinfo *h) } } EXPORT_SYMBOL_GPL(inet_hashinfo_init); + +int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) +{ + unsigned int i, nblocks = 1; + + if (sizeof(spinlock_t) != 0) { + /* allocate 2 cache lines or at least one spinlock per cpu */ + nblocks = max_t(unsigned int, + 2 * L1_CACHE_BYTES / sizeof(spinlock_t), + 1); + nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); + + /* no more locks than number of hash buckets */ + nblocks = min(nblocks, hashinfo->ehash_mask + 1); + + hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t), + GFP_KERNEL | __GFP_NOWARN); + if (!hashinfo->ehash_locks) + hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t)); + + if (!hashinfo->ehash_locks) + return -ENOMEM; + + for (i = 0; i < nblocks; i++) + spin_lock_init(&hashinfo->ehash_locks[i]); + } + hashinfo->ehash_locks_mask = nblocks - 1; + return 0; +} +EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 47fa64ee82b1..a50dc6d408d1 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -75,6 +75,7 @@ struct ipq { __be16 id; u8 protocol; u8 ecn; /* RFC3168 support */ + u16 max_df_size; /* largest frag with DF set seen */ int iif; unsigned int rid; struct inet_peer 
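/* A minimal userspace sketch of the even-first ephemeral port walk added
 * in __inet_hash_connect() above: the search offset is rounded down to an
 * even number, so lightly loaded machines tend to hand even ports to
 * connect() and leave the odd half of the range for explicit bind(0)
 * users.  The range and offset below are illustrative values only.
 */
#include <stdio.h>

int main(void)
{
	unsigned int low = 32768, high = 60999;	/* ip_local_port_range */
	unsigned int remaining = high - low + 1;
	unsigned int offset = 12345;	/* stand-in for the hash-derived offset */
	unsigned int i;

	offset &= ~1U;			/* start the scan on an even port */
	for (i = 0; i < 4; i++)		/* first few candidates tried */
		printf("candidate port: %u\n", low + (i + offset) % remaining);
	return 0;
}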
*peer; @@ -326,6 +327,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) { struct sk_buff *prev, *next; struct net_device *dev; + unsigned int fragsize; int flags, offset; int ihl, end; int err = -ENOENT; @@ -481,9 +483,14 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) if (offset == 0) qp->q.flags |= INET_FRAG_FIRST_IN; + fragsize = skb->len + ihl; + + if (fragsize > qp->q.max_size) + qp->q.max_size = fragsize; + if (ip_hdr(skb)->frag_off & htons(IP_DF) && - skb->len + ihl > qp->q.max_size) - qp->q.max_size = skb->len + ihl; + fragsize > qp->max_df_size) + qp->max_df_size = fragsize; if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && qp->q.meat == qp->q.len) { @@ -613,13 +620,27 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, head->next = NULL; head->dev = dev; head->tstamp = qp->q.stamp; - IPCB(head)->frag_max_size = qp->q.max_size; + IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size); iph = ip_hdr(head); - /* max_size != 0 implies at least one fragment had IP_DF set */ - iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0; iph->tot_len = htons(len); iph->tos |= ecn; + + /* When we set IP_DF on a refragmented skb we must also force a + * call to ip_fragment to avoid forwarding a DF-skb of size s while + * original sender only sent fragments of size f (where f < s). + * + * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest + * frag seen to avoid sending tiny DF-fragments in case skb was built + * from one very small df-fragment and one large non-df frag. + */ + if (qp->max_df_size == qp->q.max_size) { + IPCB(head)->flags |= IPSKB_FRAG_PMTU; + iph->frag_off = htons(IP_DF); + } else { + iph->frag_off = 0; + } + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); qp->q.fragments = NULL; qp->q.fragments_tail = NULL; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 451b009dae75..f5f5ef1cebd5 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -84,6 +84,7 @@ int sysctl_ip_default_ttl __read_mostly = IPDEFTTL; EXPORT_SYMBOL(sysctl_ip_default_ttl); static int ip_fragment(struct sock *sk, struct sk_buff *skb, + unsigned int mtu, int (*output)(struct sock *, struct sk_buff *)); /* Generate a checksum for an outgoing IP datagram. */ @@ -219,7 +220,8 @@ static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb) return -EINVAL; } -static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb) +static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb, + unsigned int mtu) { netdev_features_t features; struct sk_buff *segs; @@ -227,7 +229,7 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb) /* common case: locally created skb or seglen is <= mtu */ if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) || - skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb)) + skb_gso_network_seglen(skb) <= mtu) return ip_finish_output2(sk, skb); /* Slowpath - GSO segment length is exceeding the dst MTU. 
@@ -251,7 +253,7 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb) int err; segs->next = NULL; - err = ip_fragment(sk, segs, ip_finish_output2); + err = ip_fragment(sk, segs, mtu, ip_finish_output2); if (err && ret == 0) ret = err; @@ -263,6 +265,8 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb) static int ip_finish_output(struct sock *sk, struct sk_buff *skb) { + unsigned int mtu; + #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) /* Policy lookup after SNAT yielded a new policy */ if (skb_dst(skb)->xfrm) { @@ -270,11 +274,12 @@ static int ip_finish_output(struct sock *sk, struct sk_buff *skb) return dst_output_sk(sk, skb); } #endif + mtu = ip_skb_dst_mtu(skb); if (skb_is_gso(skb)) - return ip_finish_output_gso(sk, skb); + return ip_finish_output_gso(sk, skb, mtu); - if (skb->len > ip_skb_dst_mtu(skb)) - return ip_fragment(sk, skb, ip_finish_output2); + if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU)) + return ip_fragment(sk, skb, mtu, ip_finish_output2); return ip_finish_output2(sk, skb); } @@ -482,12 +487,15 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) } static int ip_fragment(struct sock *sk, struct sk_buff *skb, + unsigned int mtu, int (*output)(struct sock *, struct sk_buff *)) { struct iphdr *iph = ip_hdr(skb); - unsigned int mtu = ip_skb_dst_mtu(skb); - if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) || + if ((iph->frag_off & htons(IP_DF)) == 0) + return ip_do_fragment(sk, skb, output); + + if (unlikely(!skb->ignore_df || (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size > mtu))) { struct rtable *rt = skb_rtable(skb); @@ -532,6 +540,8 @@ int ip_do_fragment(struct sock *sk, struct sk_buff *skb, iph = ip_hdr(skb); mtu = ip_skb_dst_mtu(skb); + if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu) + mtu = IPCB(skb)->frag_max_size; /* * Setup starting values. @@ -727,6 +737,9 @@ int ip_do_fragment(struct sock *sk, struct sk_buff *skb, iph = ip_hdr(skb2); iph->frag_off = htons((offset >> 3)); + if (IPCB(skb)->flags & IPSKB_FRAG_PMTU) + iph->frag_off |= htons(IP_DF); + /* ANK: dirty, but effective trick. Upgrade options only if * the segment to be fragmented was THE FIRST (otherwise, * options are already fixed) and make it ONCE diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 7cfb0893f263..04ae2992a5cd 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -582,6 +582,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, case IP_TRANSPARENT: case IP_MINTTL: case IP_NODEFRAG: + case IP_BIND_ADDRESS_NO_PORT: case IP_UNICAST_IF: case IP_MULTICAST_TTL: case IP_MULTICAST_ALL: @@ -732,6 +733,9 @@ static int do_ip_setsockopt(struct sock *sk, int level, } inet->nodefrag = val ? 1 : 0; break; + case IP_BIND_ADDRESS_NO_PORT: + inet->bind_address_no_port = val ? 
1 : 0; + break; case IP_MTU_DISCOVER: if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT) goto e_inval; @@ -1324,6 +1328,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, case IP_NODEFRAG: val = inet->nodefrag; break; + case IP_BIND_ADDRESS_NO_PORT: + val = inet->bind_address_no_port; + break; case IP_MTU_DISCOVER: val = inet->pmtudisc; break; diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 9f7269f3c54a..0c152087ca15 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -65,7 +65,6 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi, goto drop; XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; - skb->mark = be32_to_cpu(tunnel->parms.i_key); return xfrm_input(skb, nexthdr, spi, encap_type); } @@ -91,6 +90,8 @@ static int vti_rcv_cb(struct sk_buff *skb, int err) struct pcpu_sw_netstats *tstats; struct xfrm_state *x; struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4; + u32 orig_mark = skb->mark; + int ret; if (!tunnel) return 1; @@ -107,7 +108,11 @@ static int vti_rcv_cb(struct sk_buff *skb, int err) x = xfrm_input_state(skb); family = x->inner_mode->afinfo->family; - if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family)) + skb->mark = be32_to_cpu(tunnel->parms.i_key); + ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family); + skb->mark = orig_mark; + + if (!ret) return -EPERM; skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev))); @@ -216,8 +221,6 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) memset(&fl, 0, sizeof(fl)); - skb->mark = be32_to_cpu(tunnel->parms.o_key); - switch (skb->protocol) { case htons(ETH_P_IP): xfrm_decode_session(skb, &fl, AF_INET); @@ -233,6 +236,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } + /* override mark with tunnel output key */ + fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key); + return vti_xmit(skb, dev, &fl); } diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 2d0e265fef6e..e7abf5145edc 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1444,7 +1444,6 @@ static int compat_find_calc_match(struct xt_entry_match *m, const char *name, const struct ipt_ip *ip, - unsigned int hookmask, int *size) { struct xt_match *match; @@ -1513,8 +1512,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { - ret = compat_find_calc_match(ematch, name, - &e->ip, e->comefrom, &off); + ret = compat_find_calc_match(ematch, name, &e->ip, &off); if (ret != 0) goto release_matches; ++j; diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index df849e5a10f1..d70b1f603692 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -219,9 +219,9 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, } EXPORT_SYMBOL_GPL(__cookie_v4_check); -static struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, - struct request_sock *req, - struct dst_entry *dst) +struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, + struct request_sock *req, + struct dst_entry *dst) { struct inet_connection_sock *icsk = inet_csk(sk); struct sock *child; @@ -235,7 +235,7 @@ static struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, } return child; } - +EXPORT_SYMBOL(tcp_get_cookie_sock); /* * when syncookies are in effect and tcp timestamps are enabled we stored @@ -391,7 +391,7 @@ struct sock 
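/* How the IP_BIND_ADDRESS_NO_PORT socket option added above is meant to
 * be used from userspace (a sketch; both addresses are documentation
 * placeholders): bind() pins the source address but defers port
 * allocation, so connect() picks the ephemeral port later, once the full
 * 4-tuple is known.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IP_BIND_ADDRESS_NO_PORT
#define IP_BIND_ADDRESS_NO_PORT 24	/* from linux/in.h on kernels with this patch */
#endif

int main(void)
{
	struct sockaddr_in src = { .sin_family = AF_INET, .sin_port = 0 };
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(80) };
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	inet_pton(AF_INET, "192.0.2.1", &src.sin_addr);	   /* placeholder */
	inet_pton(AF_INET, "198.51.100.1", &dst.sin_addr); /* placeholder */

	setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one));
	bind(fd, (struct sockaddr *)&src, sizeof(src));	   /* no port claimed yet */
	connect(fd, (struct sockaddr *)&dst, sizeof(dst)); /* port chosen here */
	close(fd);
	return 0;
}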
*cookie_v4_check(struct sock *sk, struct sk_buff *skb) ireq->rcv_wscale = rcv_wscale; ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst); - ret = get_cookie_sock(sk, skb, req, &rt->dst); + ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst); /* ip_queue_xmit() depends on our flow being setup * Normal sockets get it right from inet_csk_route_child_sock() */ diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 841de32f1fee..433231ccfb17 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -41,11 +41,19 @@ static int tcp_syn_retries_min = 1; static int tcp_syn_retries_max = MAX_TCP_SYNCNT; static int ip_ping_group_range_min[] = { 0, 0 }; static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; +static int min_sndbuf = SOCK_MIN_SNDBUF; +static int min_rcvbuf = SOCK_MIN_RCVBUF; /* Update system visible IP port range */ static void set_local_port_range(struct net *net, int range[2]) { + bool same_parity = !((range[0] ^ range[1]) & 1); + write_seqlock(&net->ipv4.ip_local_ports.lock); + if (same_parity && !net->ipv4.ip_local_ports.warned) { + net->ipv4.ip_local_ports.warned = true; + pr_err_ratelimited("ip_local_port_range: prefer different parity for start/end values.\n"); + } net->ipv4.ip_local_ports.range[0] = range[0]; net->ipv4.ip_local_ports.range[1] = range[1]; write_sequnlock(&net->ipv4.ip_local_ports.lock); @@ -522,7 +530,7 @@ static struct ctl_table ipv4_table[] = { .maxlen = sizeof(sysctl_tcp_wmem), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one, + .extra1 = &min_sndbuf, }, { .procname = "tcp_notsent_lowat", @@ -537,7 +545,7 @@ static struct ctl_table ipv4_table[] = { .maxlen = sizeof(sysctl_tcp_rmem), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one, + .extra1 = &min_rcvbuf, }, { .procname = "tcp_app_win", @@ -702,7 +710,7 @@ static struct ctl_table ipv4_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, + .extra1 = &one, .extra2 = &gso_max_segs, }, { @@ -750,7 +758,7 @@ static struct ctl_table ipv4_table[] = { .maxlen = sizeof(sysctl_udp_rmem_min), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one + .extra1 = &min_rcvbuf, }, { .procname = "udp_wmem_min", @@ -758,7 +766,7 @@ static struct ctl_table ipv4_table[] = { .maxlen = sizeof(sysctl_udp_wmem_min), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one + .extra1 = &min_sndbuf, }, { } }; diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 7a5ae50c80c8..84be008c945c 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -187,6 +187,7 @@ static void tcp_reinit_congestion_control(struct sock *sk, tcp_cleanup_congestion_control(sk); icsk->icsk_ca_ops = ca; + icsk->icsk_ca_setsockopt = 1; if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init) icsk->icsk_ca_ops->init(sk); @@ -335,8 +336,10 @@ int tcp_set_congestion_control(struct sock *sk, const char *name) rcu_read_lock(); ca = __tcp_ca_find_autoload(name); /* No change asking for existing value */ - if (ca == icsk->icsk_ca_ops) + if (ca == icsk->icsk_ca_ops) { + icsk->icsk_ca_setsockopt = 1; goto out; + } if (!ca) err = -ENOENT; else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index feb875769b8d..d7d4c2b79cf2 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1400,7 +1400,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) return 0; } - if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb)) + 
if (tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { @@ -1647,7 +1647,7 @@ int tcp_v4_rcv(struct sk_buff *skb) if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; - if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { + if (tcp_checksum_complete(skb)) { csum_error: TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS); bad_packet: @@ -1671,10 +1671,6 @@ int tcp_v4_rcv(struct sk_buff *skb) goto discard_it; } - if (skb->len < (th->doff << 2)) { - inet_twsk_put(inet_twsk(sk)); - goto bad_packet; - } if (tcp_checksum_complete(skb)) { inet_twsk_put(inet_twsk(sk)); goto csum_error; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index df7fe3c31162..4bc00cb79e60 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -420,7 +420,10 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst) rcu_read_unlock(); } - if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner)) + /* If no valid choice made yet, assign current system default ca. */ + if (!ca_got_dst && + (!icsk->icsk_ca_setsockopt || + !try_module_get(icsk->icsk_ca_ops->owner))) tcp_assign_congestion_control(sk); tcp_set_ca_state(sk, TCP_CA_Open); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 534e5fdb04c1..eeb59befaf06 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -50,8 +50,8 @@ int sysctl_tcp_retrans_collapse __read_mostly = 1; */ int sysctl_tcp_workaround_signed_windows __read_mostly = 0; -/* Default TSQ limit of two TSO segments */ -int sysctl_tcp_limit_output_bytes __read_mostly = 131072; +/* Default TSQ limit of four TSO segments */ +int sysctl_tcp_limit_output_bytes __read_mostly = 262144; /* This limits the percentage of the congestion window which we * will allow a single TSO frame to consume. Building TSO frames @@ -2088,7 +2088,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) break; - if (tso_segs == 1 || !max_segs) { + if (tso_segs == 1) { if (unlikely(!tcp_nagle_test(tp, skb, mss_now, (tcp_skb_is_last(sk, skb) ? 
nonagle : TCP_NAGLE_PUSH)))) @@ -2101,7 +2101,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, } limit = mss_now; - if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp)) + if (tso_segs > 1 && !tcp_urg_mode(tp)) limit = tcp_mss_split_point(sk, skb, mss_now, min_t(unsigned int, cwnd_quota, diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index d10b7e0112eb..83aa604f9273 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -90,6 +90,7 @@ #include #include #include +#include #include #include #include @@ -1345,10 +1346,8 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, } unlock_sock_fast(sk, slow); - if (noblock) - return -EAGAIN; - - /* starting over for a new packet */ + /* starting over for a new packet, but check if we need to yield */ + cond_resched(); msg->msg_flags &= ~MSG_TRUNC; goto try_again; } @@ -1962,6 +1961,7 @@ void udp_v4_early_demux(struct sk_buff *skb) struct sock *sk; struct dst_entry *dst; int dif = skb->dev->ifindex; + int ours; /* validate the packet */ if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) @@ -1971,14 +1971,24 @@ void udp_v4_early_demux(struct sk_buff *skb) uh = udp_hdr(skb); if (skb->pkt_type == PACKET_BROADCAST || - skb->pkt_type == PACKET_MULTICAST) + skb->pkt_type == PACKET_MULTICAST) { + struct in_device *in_dev = __in_dev_get_rcu(skb->dev); + + if (!in_dev) + return; + + ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr, + iph->protocol); + if (!ours) + return; sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, uh->source, iph->saddr, dif); - else if (skb->pkt_type == PACKET_HOST) + } else if (skb->pkt_type == PACKET_HOST) { sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, uh->source, iph->saddr, dif); - else + } else { return; + } if (!sk) return; diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index d873ceea86e6..ca09bf49ac68 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -133,6 +133,14 @@ static void snmp6_free_dev(struct inet6_dev *idev) free_percpu(idev->stats.ipv6); } +static void in6_dev_finish_destroy_rcu(struct rcu_head *head) +{ + struct inet6_dev *idev = container_of(head, struct inet6_dev, rcu); + + snmp6_free_dev(idev); + kfree(idev); +} + /* Nobody refers to this device, we may destroy it. */ void in6_dev_finish_destroy(struct inet6_dev *idev) @@ -151,7 +159,6 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) pr_warn("Freeing alive inet6 device %p\n", idev); return; } - snmp6_free_dev(idev); - kfree_rcu(idev, rcu); + call_rcu(&idev->rcu, in6_dev_finish_destroy_rcu); } EXPORT_SYMBOL(in6_dev_finish_destroy); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index f3866c0b6cfe..7de52b65173f 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -362,7 +362,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) np->saddr = addr->sin6_addr; /* Make sure we are allowed to bind here. 
*/ - if (sk->sk_prot->get_port(sk, snum)) { + if ((snum || !inet->bind_address_no_port) && + sk->sk_prot->get_port(sk, snum)) { inet_reset_saddr(sk); err = -EADDRINUSE; goto out; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 31f1b5d5e2ef..7c07ce36aae2 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -248,7 +248,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) aead_givcrypt_set_crypt(req, sg, sg, clen, iv); aead_givcrypt_set_assoc(req, asg, assoclen); aead_givcrypt_set_giv(req, esph->enc_data, - XFRM_SKB_CB(skb)->seq.output.low); + XFRM_SKB_CB(skb)->seq.output.low + + ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); ESP_SKB_CB(skb)->tmp = tmp; err = crypto_aead_givencrypt(req); diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 871641bc1ed4..b4fd96de97e6 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -257,7 +257,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, return -EADDRNOTAVAIL; } -static inline u32 inet6_sk_port_offset(const struct sock *sk) +static u32 inet6_sk_port_offset(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); @@ -269,7 +269,11 @@ static inline u32 inet6_sk_port_offset(const struct sock *sk) int inet6_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { - return __inet_hash_connect(death_row, sk, inet6_sk_port_offset(sk), + u32 port_offset = 0; + + if (!inet_sk(sk)->inet_num) + port_offset = inet6_sk_port_offset(sk); + return __inet_hash_connect(death_row, sk, port_offset, __inet6_check_established); } EXPORT_SYMBOL_GPL(inet6_hash_connect); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index ed9d681207fa..0224c032dca5 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -322,7 +322,6 @@ static int vti6_rcv(struct sk_buff *skb) } XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t; - skb->mark = be32_to_cpu(t->parms.i_key); rcu_read_unlock(); @@ -342,6 +341,8 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err) struct pcpu_sw_netstats *tstats; struct xfrm_state *x; struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6; + u32 orig_mark = skb->mark; + int ret; if (!t) return 1; @@ -358,7 +359,11 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err) x = xfrm_input_state(skb); family = x->inner_mode->afinfo->family; - if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family)) + skb->mark = be32_to_cpu(t->parms.i_key); + ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family); + skb->mark = orig_mark; + + if (!ret) return -EPERM; skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev))); @@ -430,6 +435,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) struct net_device *tdev; struct xfrm_state *x; int err = -1; + int mtu; if (!dst) goto tx_err_link_failure; @@ -463,6 +469,19 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) skb_dst_set(skb, dst); skb->dev = skb_dst(skb)->dev; + mtu = dst_mtu(dst); + if (!skb->ignore_df && skb->len > mtu) { + skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu); + + if (skb->protocol == htons(ETH_P_IPV6)) + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + else + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, + htonl(mtu)); + + return -EMSGSIZE; + } + err = dst_output(skb); if (net_xmit_eval(err) == 0) { struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); @@ -495,7 +514,6 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) int ret; memset(&fl, 0, sizeof(fl)); - skb->mark = 
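/* The esp4/esp6 hunks above seed the IV generator with the full 64-bit
 * extended sequence number rather than only seq.output.low.  The
 * composition, as a standalone sketch with arbitrary values:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = 0x00000002, hi = 0x00000001;	/* ESN low/high words */
	uint64_t seq = lo + ((uint64_t)hi << 32);	/* 0x0000000100000002 */

	printf("64-bit ESN: 0x%016llx\n", (unsigned long long)seq);
	return 0;
}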
be32_to_cpu(t->parms.o_key); switch (skb->protocol) { case htons(ETH_P_IPV6): @@ -516,6 +534,9 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_err; } + /* override mark with tunnel output key */ + fl.flowi_mark = be32_to_cpu(t->parms.o_key); + ret = vti6_xmit(skb, dev, &fl); if (ret < 0) goto tx_err; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 62f5b0d0bc9b..cdd085f8b770 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1459,7 +1459,6 @@ static int compat_find_calc_match(struct xt_entry_match *m, const char *name, const struct ip6t_ip6 *ipv6, - unsigned int hookmask, int *size) { struct xt_match *match; @@ -1528,8 +1527,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { - ret = compat_find_calc_match(ematch, name, - &e->ipv6, e->comefrom, &off); + ret = compat_find_calc_match(ematch, name, &e->ipv6, &off); if (ret != 0) goto release_matches; ++j; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 484a5c144073..ca4700cb26c4 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1327,13 +1327,7 @@ static struct inet_protosw rawv6_protosw = { int __init rawv6_init(void) { - int ret; - - ret = inet6_register_protosw(&rawv6_protosw); - if (ret) - goto out; -out: - return ret; + return inet6_register_protosw(&rawv6_protosw); } void rawv6_exit(void) diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 21bc2eb53c57..0909f4e0d53c 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -41,23 +41,6 @@ static __u16 const msstab[] = { 9000 - 60, }; -static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, - struct request_sock *req, - struct dst_entry *dst) -{ - struct inet_connection_sock *icsk = inet_csk(sk); - struct sock *child; - - child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst); - if (child) { - atomic_set(&req->rsk_refcnt, 1); - inet_csk_reqsk_queue_add(sk, req, child); - } else { - reqsk_free(req); - } - return child; -} - static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], ipv6_cookie_scratch); @@ -264,7 +247,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) ireq->rcv_wscale = rcv_wscale; ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst); - ret = get_cookie_sock(sk, skb, req, dst); + ret = tcp_get_cookie_sock(sk, skb, req, dst); out: return ret; out_free: diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 7be3d858cbf0..45a7176ed460 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1250,7 +1250,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) return 0; } - if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb)) + if (tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { @@ -1442,7 +1442,7 @@ static int tcp_v6_rcv(struct sk_buff *skb) tcp_v6_fill_cb(skb, hdr, th); - if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { + if (tcp_checksum_complete(skb)) { csum_error: TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS); bad_packet: @@ -1467,10 +1467,6 @@ static int tcp_v6_rcv(struct sk_buff *skb) tcp_v6_fill_cb(skb, hdr, th); - if (skb->len < (th->doff<<2)) { - inet_twsk_put(inet_twsk(sk)); - goto bad_packet; - } if (tcp_checksum_complete(skb)) { inet_twsk_put(inet_twsk(sk)); goto csum_error; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index c2ec41617a35..e51fc3eee6db 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -525,10 +525,8 @@ int 
udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, } unlock_sock_fast(sk, slow); - if (noblock) - return -EAGAIN; - - /* starting over for a new packet */ + /* starting over for a new packet, but check if we need to yield */ + cond_resched(); msg->msg_flags &= ~MSG_TRUNC; goto try_again; } diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 3469bbdc891c..690b9f640b41 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1411,6 +1411,7 @@ static int ieee80211_change_station(struct wiphy *wiphy, } sta->sdata = vlansdata; + ieee80211_check_fast_xmit(sta); if (sta->sta_state == IEEE80211_STA_AUTHORIZED && prev_4addr != new_4addr) { @@ -2538,51 +2539,22 @@ static bool ieee80211_coalesce_started_roc(struct ieee80211_local *local, struct ieee80211_roc_work *new_roc, struct ieee80211_roc_work *cur_roc) { - unsigned long j = jiffies; - unsigned long cur_roc_end = cur_roc->hw_start_time + - msecs_to_jiffies(cur_roc->duration); - struct ieee80211_roc_work *next_roc; - int new_dur; + unsigned long now = jiffies; + unsigned long remaining = cur_roc->hw_start_time + + msecs_to_jiffies(cur_roc->duration) - + now; if (WARN_ON(!cur_roc->started || !cur_roc->hw_begun)) return false; - if (time_after(j + IEEE80211_ROC_MIN_LEFT, cur_roc_end)) + /* if it doesn't fit entirely, schedule a new one */ + if (new_roc->duration > jiffies_to_msecs(remaining)) return false; ieee80211_handle_roc_started(new_roc); - new_dur = new_roc->duration - jiffies_to_msecs(cur_roc_end - j); - - /* cur_roc is long enough - add new_roc to the dependents list. */ - if (new_dur <= 0) { - list_add_tail(&new_roc->list, &cur_roc->dependents); - return true; - } - - new_roc->duration = new_dur; - - /* - * if cur_roc was already coalesced before, we might - * want to extend the next roc instead of adding - * a new one. - */ - next_roc = list_entry(cur_roc->list.next, - struct ieee80211_roc_work, list); - if (&next_roc->list != &local->roc_list && - next_roc->chan == new_roc->chan && - next_roc->sdata == new_roc->sdata && - !WARN_ON(next_roc->started)) { - list_add_tail(&new_roc->list, &next_roc->dependents); - next_roc->duration = max(next_roc->duration, - new_roc->duration); - next_roc->type = max(next_roc->type, new_roc->type); - return true; - } - - /* add right after cur_roc */ - list_add(&new_roc->list, &cur_roc->list); - + /* add to dependents so we send the expired event properly */ + list_add_tail(&new_roc->list, &cur_roc->dependents); return true; } @@ -2695,17 +2667,9 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, * In the offloaded ROC case, if it hasn't begun, add * this new one to the dependent list to be handled * when the master one begins. If it has begun, - * check that there's still a minimum time left and - * if so, start this one, transmitting the frame, but - * add it to the list directly after this one with - * a reduced time so we'll ask the driver to execute - * it right after finishing the previous one, in the - * hope that it'll also be executed right afterwards, - * effectively extending the old one. - * If there's no minimum time left, just add it to the - * normal list. - * TODO: the ROC type is ignored here, assuming that it - * is better to immediately use the current ROC. + * check if it fits entirely within the existing one, + * in which case it will just be dependent as well. + * Otherwise, schedule it by itself. 
*/ if (!tmp->hw_begun) { list_add_tail(&roc->list, &tmp->dependents); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 2c4fe45ea38a..b12f61507f9f 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -202,6 +202,8 @@ enum ieee80211_packet_rx_flags { * @IEEE80211_RX_CMNTR: received on cooked monitor already * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported * to cfg80211_report_obss_beacon(). + * @IEEE80211_RX_REORDER_TIMER: this frame is released by the + * reorder buffer timeout timer, not the normal RX path * * These flags are used across handling multiple interfaces * for a single frame. @@ -209,6 +211,7 @@ enum ieee80211_packet_rx_flags { enum ieee80211_rx_flags { IEEE80211_RX_CMNTR = BIT(0), IEEE80211_RX_BEACON_REPORTED = BIT(1), + IEEE80211_RX_REORDER_TIMER = BIT(2), }; struct ieee80211_rx_data { @@ -322,12 +325,6 @@ struct mesh_preq_queue { u8 flags; }; -#if HZ/100 == 0 -#define IEEE80211_ROC_MIN_LEFT 1 -#else -#define IEEE80211_ROC_MIN_LEFT (HZ/100) -#endif - struct ieee80211_roc_work { struct list_head list; struct list_head dependents; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 4ee8fea263ed..b2e85ffca7ed 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -522,6 +522,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) memcpy(sdata->vif.hw_queue, master->vif.hw_queue, sizeof(sdata->vif.hw_queue)); sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef; + + mutex_lock(&local->key_mtx); + sdata->crypto_tx_tailroom_needed_cnt += + master->crypto_tx_tailroom_needed_cnt; + mutex_unlock(&local->key_mtx); + break; } case NL80211_IFTYPE_AP: diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 2e677376c958..577a11a13cdf 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -58,6 +58,22 @@ static void assert_key_lock(struct ieee80211_local *local) lockdep_assert_held(&local->key_mtx); } +static void +update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta) +{ + struct ieee80211_sub_if_data *vlan; + + if (sdata->vif.type != NL80211_IFTYPE_AP) + return; + + mutex_lock(&sdata->local->mtx); + + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + vlan->crypto_tx_tailroom_needed_cnt += delta; + + mutex_unlock(&sdata->local->mtx); +} + static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata) { /* @@ -79,6 +95,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata) * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net */ + update_vlan_tailroom_need_count(sdata, 1); + if (!sdata->crypto_tx_tailroom_needed_cnt++) { /* * Flush all XMIT packets currently using HW encryption or no @@ -88,6 +106,15 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata) } } +static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata, + int delta) +{ + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta); + + update_vlan_tailroom_need_count(sdata, -delta); + sdata->crypto_tx_tailroom_needed_cnt -= delta; +} + static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) { struct ieee80211_sub_if_data *sdata; @@ -144,7 +171,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) - sdata->crypto_tx_tailroom_needed_cnt--; + decrease_tailroom_need_count(sdata, 1); WARN_ON((key->conf.flags & 
IEEE80211_KEY_FLAG_PUT_IV_SPACE) && (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)); @@ -545,7 +572,7 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key, schedule_delayed_work(&sdata->dec_tailroom_needed_wk, HZ/2); } else { - sdata->crypto_tx_tailroom_needed_cnt--; + decrease_tailroom_need_count(sdata, 1); } } @@ -635,6 +662,7 @@ void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom) void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) { struct ieee80211_key *key; + struct ieee80211_sub_if_data *vlan; ASSERT_RTNL(); @@ -643,7 +671,14 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) mutex_lock(&sdata->local->key_mtx); - sdata->crypto_tx_tailroom_needed_cnt = 0; + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || + sdata->crypto_tx_tailroom_pending_dec); + + if (sdata->vif.type == NL80211_IFTYPE_AP) { + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt || + vlan->crypto_tx_tailroom_pending_dec); + } list_for_each_entry(key, &sdata->key_list, list) { increment_tailroom_need_count(sdata); @@ -653,6 +688,22 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) mutex_unlock(&sdata->local->key_mtx); } +void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_sub_if_data *vlan; + + mutex_lock(&sdata->local->key_mtx); + + sdata->crypto_tx_tailroom_needed_cnt = 0; + + if (sdata->vif.type == NL80211_IFTYPE_AP) { + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + vlan->crypto_tx_tailroom_needed_cnt = 0; + } + + mutex_unlock(&sdata->local->key_mtx); +} + void ieee80211_iter_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void (*iter)(struct ieee80211_hw *hw, @@ -692,8 +743,8 @@ static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata, { struct ieee80211_key *key, *tmp; - sdata->crypto_tx_tailroom_needed_cnt -= - sdata->crypto_tx_tailroom_pending_dec; + decrease_tailroom_need_count(sdata, + sdata->crypto_tx_tailroom_pending_dec); sdata->crypto_tx_tailroom_pending_dec = 0; ieee80211_debugfs_key_remove_mgmt_default(sdata); @@ -713,6 +764,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *vlan; + struct ieee80211_sub_if_data *master; struct ieee80211_key *key, *tmp; LIST_HEAD(keys); @@ -732,8 +784,20 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, list_for_each_entry_safe(key, tmp, &keys, list) __ieee80211_key_destroy(key, false); - WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || - sdata->crypto_tx_tailroom_pending_dec); + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + if (sdata->bss) { + master = container_of(sdata->bss, + struct ieee80211_sub_if_data, + u.ap); + + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt != + master->crypto_tx_tailroom_needed_cnt); + } + } else { + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || + sdata->crypto_tx_tailroom_pending_dec); + } + if (sdata->vif.type == NL80211_IFTYPE_AP) { list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt || @@ -797,8 +861,8 @@ void ieee80211_delayed_tailroom_dec(struct work_struct *wk) */ mutex_lock(&sdata->local->key_mtx); - sdata->crypto_tx_tailroom_needed_cnt -= - sdata->crypto_tx_tailroom_pending_dec; + decrease_tailroom_need_count(sdata, + sdata->crypto_tx_tailroom_pending_dec); sdata->crypto_tx_tailroom_pending_dec = 0; 
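/* The tailroom accounting introduced above, reduced to its essentials:
 * every change to an AP's crypto_tx_tailroom_needed_cnt is mirrored into
 * all of its VLAN children, so the transmit fast path can test one
 * per-interface counter without walking the VLAN list.  A plain-C sketch
 * with a toy singly linked list; the struct and field names here are
 * illustrative, not the mac80211 ones.
 */
#include <stdio.h>

struct iface {
	int tailroom_needed_cnt;
	struct iface *next_vlan;	/* AP's children; NULL-terminated */
};

static void update_tailroom(struct iface *ap, int delta)
{
	struct iface *vlan;

	ap->tailroom_needed_cnt += delta;
	for (vlan = ap->next_vlan; vlan; vlan = vlan->next_vlan)
		vlan->tailroom_needed_cnt += delta;	/* keep mirrors in sync */
}

int main(void)
{
	struct iface vlan1 = { 0, NULL };
	struct iface ap = { 0, &vlan1 };

	update_tailroom(&ap, 1);	/* key installed, software crypto */
	printf("ap=%d vlan=%d\n", ap.tailroom_needed_cnt, vlan1.tailroom_needed_cnt);
	update_tailroom(&ap, -1);	/* key offloaded or removed */
	printf("ap=%d vlan=%d\n", ap.tailroom_needed_cnt, vlan1.tailroom_needed_cnt);
	return 0;
}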
mutex_unlock(&sdata->local->key_mtx); } diff --git a/net/mac80211/key.h b/net/mac80211/key.h index df430a618764..2119526db2f4 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h @@ -160,6 +160,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, void ieee80211_free_sta_keys(struct ieee80211_local *local, struct sta_info *sta); void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); +void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata); #define key_mtx_dereference(local, ref) \ rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx))) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 3c956c5f99b2..674164fe5cdb 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -246,6 +246,7 @@ static void ieee80211_restart_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, restart_work); + struct ieee80211_sub_if_data *sdata; /* wait for scan work complete */ flush_workqueue(local->workqueue); @@ -254,6 +255,8 @@ static void ieee80211_restart_work(struct work_struct *work) "%s called with hardware scan in progress\n", __func__); rtnl_lock(); + list_for_each_entry(sdata, &local->interfaces, list) + flush_delayed_work(&sdata->dec_tailroom_needed_wk); ieee80211_scan_cancel(local); ieee80211_reconfig(local); rtnl_unlock(); @@ -770,8 +773,10 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local) for (r = 0; r < local->hw.n_cipher_schemes; r++) { suites[w++] = cs[r].cipher; - if (WARN_ON(cs[r].pn_len > IEEE80211_MAX_PN_LEN)) + if (WARN_ON(cs[r].pn_len > IEEE80211_MAX_PN_LEN)) { + kfree(suites); return -EINVAL; + } } } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 3294666f599c..387fe70ab126 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1098,6 +1098,24 @@ static void ieee80211_chswitch_timer(unsigned long data) ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work); } +static void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata) +{ + struct sta_info *sta; + u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { + if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || + !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + continue; + + ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr, + NL80211_TDLS_TEARDOWN, reason, + GFP_ATOMIC); + } + rcu_read_unlock(); +} + static void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, u64 timestamp, u32 device_timestamp, @@ -1161,6 +1179,14 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, return; } + /* + * Drop all TDLS peers - either we disconnect or move to a different + * channel from this point on. There's no telling what our peer will do. + * The TDLS WIDER_BW scenario is also problematic, as peers might now + * have an incompatible wider chandef. 
+ */ + ieee80211_teardown_tdls_peers(sdata); + mutex_lock(&local->mtx); mutex_lock(&local->chanctx_mtx); conf = rcu_dereference_protected(sdata->vif.chanctx_conf, diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index aa35977a9c4d..7d85f7516324 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2108,7 +2108,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) /* deliver to local stack */ skb->protocol = eth_type_trans(skb, dev); memset(skb->cb, 0, sizeof(skb->cb)); - if (rx->local->napi) + if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) && + rx->local->napi) napi_gro_receive(rx->local->napi, skb); else netif_receive_skb(skb); @@ -3215,7 +3216,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) /* This is OK -- must be QoS data frame */ .security_idx = tid, .seqno_idx = tid, - .flags = 0, + .flags = IEEE80211_RX_REORDER_TIMER, }; struct tid_ampdu_rx *tid_agg_rx; diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 8a92a920ff17..75e8e3bba538 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -1183,6 +1183,12 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, switch (oper) { case NL80211_TDLS_ENABLE_LINK: + if (sdata->vif.csa_active) { + tdls_dbg(sdata, "TDLS: disallow link during CSA\n"); + ret = -EBUSY; + break; + } + rcu_read_lock(); sta = sta_info_get(sdata, peer); if (!sta) { diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 79412f16b61d..b864ebc6ab8f 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2022,6 +2022,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) mutex_unlock(&local->sta_mtx); /* add back keys */ + list_for_each_entry(sdata, &local->interfaces, list) + ieee80211_reset_crypto_tx_tailroom(sdata); + list_for_each_entry(sdata, &local->interfaces, list) if (ieee80211_sdata_running(sdata)) ieee80211_enable_keys(sdata); diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig index aa462b480a39..fb45287ebac3 100644 --- a/net/mac802154/Kconfig +++ b/net/mac802154/Kconfig @@ -2,6 +2,7 @@ config MAC802154 tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)" depends on IEEE802154 select CRC_CCITT + select CRYPTO select CRYPTO_AUTHENC select CRYPTO_CCM select CRYPTO_CTR diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c index 70be9c799f8a..317c4662e544 100644 --- a/net/mac802154/cfg.c +++ b/net/mac802154/cfg.c @@ -73,9 +73,9 @@ ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel) ASSERT_RTNL(); - /* check if phy support this setting */ - if (!(wpan_phy->channels_supported[page] & BIT(channel))) - return -EINVAL; + if (wpan_phy->current_page == page && + wpan_phy->current_channel == channel) + return 0; ret = drv_set_channel(local, page, channel); if (!ret) { @@ -95,9 +95,8 @@ ieee802154_set_cca_mode(struct wpan_phy *wpan_phy, ASSERT_RTNL(); - /* check if phy support this setting */ - if (!(local->hw.flags & IEEE802154_HW_CCA_MODE)) - return -EOPNOTSUPP; + if (wpan_phy_cca_cmp(&wpan_phy->cca, cca)) + return 0; ret = drv_set_cca_mode(local, cca); if (!ret) @@ -106,21 +105,50 @@ ieee802154_set_cca_mode(struct wpan_phy *wpan_phy, return ret; } +static int +ieee802154_set_cca_ed_level(struct wpan_phy *wpan_phy, s32 ed_level) +{ + struct ieee802154_local *local = wpan_phy_priv(wpan_phy); + int ret; + + ASSERT_RTNL(); + + if (wpan_phy->cca_ed_level == ed_level) + return 0; + + ret = drv_set_cca_ed_level(local, ed_level); + if (!ret) + wpan_phy->cca_ed_level = ed_level; + + return ret; +} + +static int +ieee802154_set_tx_power(struct wpan_phy 
*wpan_phy, s32 power) +{ + struct ieee802154_local *local = wpan_phy_priv(wpan_phy); + int ret; + + ASSERT_RTNL(); + + if (wpan_phy->transmit_power == power) + return 0; + + ret = drv_set_tx_power(local, power); + if (!ret) + wpan_phy->transmit_power = power; + + return ret; +} + static int ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le16 pan_id) { ASSERT_RTNL(); - /* TODO - * I am not sure about to check here on broadcast pan_id. - * Broadcast is a valid setting, comment from 802.15.4: - * If this value is 0xffff, the device is not associated. - * - * This could useful to simple deassociate an device. - */ - if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST)) - return -EINVAL; + if (wpan_dev->pan_id == pan_id) + return 0; wpan_dev->pan_id = pan_id; return 0; @@ -131,12 +159,11 @@ ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, u8 min_be, u8 max_be) { - struct ieee802154_local *local = wpan_phy_priv(wpan_phy); - ASSERT_RTNL(); - if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS)) - return -EOPNOTSUPP; + if (wpan_dev->min_be == min_be && + wpan_dev->max_be == max_be) + return 0; wpan_dev->min_be = min_be; wpan_dev->max_be = max_be; @@ -149,20 +176,8 @@ ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, { ASSERT_RTNL(); - /* TODO - * I am not sure about to check here on broadcast short_addr. - * Broadcast is a valid setting, comment from 802.15.4: - * A value of 0xfffe indicates that the device has - * associated but has not been allocated an address. A - * value of 0xffff indicates that the device does not - * have a short address. - * - * I think we should allow to set these settings but - * don't allow to allow socket communication with it. - */ - if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) || - short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST)) - return -EINVAL; + if (wpan_dev->short_addr == short_addr) + return 0; wpan_dev->short_addr = short_addr; return 0; @@ -173,12 +188,10 @@ ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, u8 max_csma_backoffs) { - struct ieee802154_local *local = wpan_phy_priv(wpan_phy); - ASSERT_RTNL(); - if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS)) - return -EOPNOTSUPP; + if (wpan_dev->csma_retries == max_csma_backoffs) + return 0; wpan_dev->csma_retries = max_csma_backoffs; return 0; @@ -189,12 +202,10 @@ ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, s8 max_frame_retries) { - struct ieee802154_local *local = wpan_phy_priv(wpan_phy); - ASSERT_RTNL(); - if (!(local->hw.flags & IEEE802154_HW_FRAME_RETRIES)) - return -EOPNOTSUPP; + if (wpan_dev->frame_retries == max_frame_retries) + return 0; wpan_dev->frame_retries = max_frame_retries; return 0; @@ -204,12 +215,10 @@ static int ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, bool mode) { - struct ieee802154_local *local = wpan_phy_priv(wpan_phy); - ASSERT_RTNL(); - if (!(local->hw.flags & IEEE802154_HW_LBT)) - return -EOPNOTSUPP; + if (wpan_dev->lbt == mode) + return 0; wpan_dev->lbt = mode; return 0; @@ -222,6 +231,8 @@ const struct cfg802154_ops mac802154_config_ops = { .del_virtual_intf = ieee802154_del_iface, .set_channel = ieee802154_set_channel, .set_cca_mode = ieee802154_set_cca_mode, + .set_cca_ed_level = ieee802154_set_cca_ed_level, + .set_tx_power = ieee802154_set_tx_power, .set_pan_id = ieee802154_set_pan_id, .set_short_addr = ieee802154_set_short_addr, 
.set_backoff_exponent = ieee802154_set_backoff_exponent, diff --git a/net/mac802154/driver-ops.h b/net/mac802154/driver-ops.h index a0533357b9ea..caecd5f43aa7 100644 --- a/net/mac802154/driver-ops.h +++ b/net/mac802154/driver-ops.h @@ -58,7 +58,7 @@ drv_set_channel(struct ieee802154_local *local, u8 page, u8 channel) return local->ops->set_channel(&local->hw, page, channel); } -static inline int drv_set_tx_power(struct ieee802154_local *local, s8 dbm) +static inline int drv_set_tx_power(struct ieee802154_local *local, s32 mbm) { might_sleep(); @@ -67,7 +67,7 @@ static inline int drv_set_tx_power(struct ieee802154_local *local, s8 dbm) return -EOPNOTSUPP; } - return local->ops->set_txpower(&local->hw, dbm); + return local->ops->set_txpower(&local->hw, mbm); } static inline int drv_set_cca_mode(struct ieee802154_local *local, @@ -96,7 +96,7 @@ static inline int drv_set_lbt_mode(struct ieee802154_local *local, bool mode) } static inline int -drv_set_cca_ed_level(struct ieee802154_local *local, s32 ed_level) +drv_set_cca_ed_level(struct ieee802154_local *local, s32 mbm) { might_sleep(); @@ -105,7 +105,7 @@ drv_set_cca_ed_level(struct ieee802154_local *local, s32 ed_level) return -EOPNOTSUPP; } - return local->ops->set_cca_ed_level(&local->hw, ed_level); + return local->ops->set_cca_ed_level(&local->hw, mbm); } static inline int drv_set_pan_id(struct ieee802154_local *local, __le16 pan_id) diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h index 127ba18386fc..eec668f3637f 100644 --- a/net/mac802154/ieee802154_i.h +++ b/net/mac802154/ieee802154_i.h @@ -86,8 +86,6 @@ struct ieee802154_sub_if_data { unsigned long state; char name[IFNAMSIZ]; - spinlock_t mib_lock; - /* protects sec from concurrent access by netlink. access by * encrypt/decrypt/header_create safe without additional protection. 
*/ @@ -136,12 +134,7 @@ ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer); /* MIB callbacks */ -void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val); -__le16 mac802154_dev_get_short_addr(const struct net_device *dev); -__le16 mac802154_dev_get_pan_id(const struct net_device *dev); -void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val); void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan); -u8 mac802154_dev_get_dsn(const struct net_device *dev); int mac802154_get_params(struct net_device *dev, struct ieee802154_llsec_params *params); diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index 91b75abbd1a1..b544b5dc4bfb 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -62,9 +62,10 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) (struct sockaddr_ieee802154 *)&ifr->ifr_addr; int err = -ENOIOCTLCMD; - ASSERT_RTNL(); + if (cmd != SIOCGIFADDR && cmd != SIOCSIFADDR) + return err; - spin_lock_bh(&sdata->mib_lock); + rtnl_lock(); switch (cmd) { case SIOCGIFADDR: @@ -89,7 +90,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } case SIOCSIFADDR: if (netif_running(dev)) { - spin_unlock_bh(&sdata->mib_lock); + rtnl_unlock(); return -EBUSY; } @@ -111,7 +112,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) break; } - spin_unlock_bh(&sdata->mib_lock); + rtnl_unlock(); return err; } @@ -241,7 +242,6 @@ static int mac802154_wpan_open(struct net_device *dev) struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); struct ieee802154_local *local = sdata->local; struct wpan_dev *wpan_dev = &sdata->wpan_dev; - struct wpan_phy *phy = sdata->local->phy; rc = ieee802154_check_concurrent_iface(sdata, sdata->vif.type); if (rc < 0) @@ -251,8 +251,6 @@ static int mac802154_wpan_open(struct net_device *dev) if (rc < 0) return rc; - mutex_lock(&phy->pib_lock); - if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) { rc = drv_set_promiscuous_mode(local, wpan_dev->promiscuous_mode); @@ -294,11 +292,7 @@ static int mac802154_wpan_open(struct net_device *dev) goto out; } - mutex_unlock(&phy->pib_lock); - return 0; - out: - mutex_unlock(&phy->pib_lock); return rc; } @@ -374,14 +368,12 @@ static int mac802154_header_create(struct sk_buff *skb, hdr.fc.type = cb->type; hdr.fc.security_enabled = cb->secen; hdr.fc.ack_request = cb->ackreq; - hdr.seq = ieee802154_mlme_ops(dev)->get_dsn(dev); + hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF; if (mac802154_set_header_security(sdata, &hdr, cb) < 0) return -EINVAL; if (!saddr) { - spin_lock_bh(&sdata->mib_lock); - if (wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST) || wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) || wpan_dev->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) { @@ -393,8 +385,6 @@ static int mac802154_header_create(struct sk_buff *skb, } hdr.source.pan_id = wpan_dev->pan_id; - - spin_unlock_bh(&sdata->mib_lock); } else { hdr.source = *(const struct ieee802154_addr *)saddr; } @@ -474,13 +464,16 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata, enum nl802154_iftype type) { struct wpan_dev *wpan_dev = &sdata->wpan_dev; + u8 tmp; /* set some type-dependent values */ sdata->vif.type = type; sdata->wpan_dev.iftype = type; - get_random_bytes(&wpan_dev->bsn, 1); - get_random_bytes(&wpan_dev->dsn, 1); + get_random_bytes(&tmp, sizeof(tmp)); + 
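/* The per-interface mib_lock can be dropped above because the data
 * sequence number is now an atomic counter: atomic_inc_return(&dsn) & 0xFF
 * hands out the next 8-bit sequence number without taking a spinlock.  A
 * C11 sketch of the same wrap-around behaviour:
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint dsn;		/* seeded from random bytes at setup time */

static unsigned int next_dsn(void)
{
	/* fetch_add returns the old value; +1 matches atomic_inc_return() */
	return (atomic_fetch_add(&dsn, 1) + 1) & 0xFF;
}

int main(void)
{
	atomic_init(&dsn, 0xFE);
	printf("%u %u %u\n", next_dsn(), next_dsn(), next_dsn());	/* 255 0 1 */
	return 0;
}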
atomic_set(&wpan_dev->bsn, tmp); + get_random_bytes(&tmp, sizeof(tmp)); + atomic_set(&wpan_dev->dsn, tmp); /* defaults per 802.15.4-2011 */ wpan_dev->min_be = 3; @@ -503,7 +496,6 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata, sdata->dev->ml_priv = &mac802154_mlme_wpan; wpan_dev->promiscuous_mode = false; - spin_lock_init(&sdata->mib_lock); mutex_init(&sdata->sec_mtx); mac802154_llsec_init(&sdata->sec); diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c index bdccb4ecd30f..8606da459ff3 100644 --- a/net/mac802154/mac_cmd.c +++ b/net/mac802154/mac_cmd.c @@ -36,37 +36,30 @@ static int mac802154_mlme_start_req(struct net_device *dev, u8 pan_coord, u8 blx, u8 coord_realign) { - struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev); - int rc = 0; + struct ieee802154_llsec_params params; + int changed = 0; ASSERT_RTNL(); BUG_ON(addr->mode != IEEE802154_ADDR_SHORT); - mac802154_dev_set_pan_id(dev, addr->pan_id); - mac802154_dev_set_short_addr(dev, addr->short_addr); + dev->ieee802154_ptr->pan_id = addr->pan_id; + dev->ieee802154_ptr->short_addr = addr->short_addr; mac802154_dev_set_page_channel(dev, page, channel); - if (ops->llsec) { - struct ieee802154_llsec_params params; - int changed = 0; + params.pan_id = addr->pan_id; + changed |= IEEE802154_LLSEC_PARAM_PAN_ID; - params.coord_shortaddr = addr->short_addr; - changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR; + params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr); + changed |= IEEE802154_LLSEC_PARAM_HWADDR; - params.pan_id = addr->pan_id; - changed |= IEEE802154_LLSEC_PARAM_PAN_ID; + params.coord_hwaddr = params.hwaddr; + changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR; - params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr); - changed |= IEEE802154_LLSEC_PARAM_HWADDR; + params.coord_shortaddr = addr->short_addr; + changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR; - params.coord_hwaddr = params.hwaddr; - changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR; - - rc = ops->llsec->set_params(dev, &params, changed); - } - - return rc; + return mac802154_set_params(dev, &params, changed); } static int mac802154_set_mac_params(struct net_device *dev, @@ -91,19 +84,19 @@ static int mac802154_set_mac_params(struct net_device *dev, wpan_dev->frame_retries = params->frame_retries; wpan_dev->lbt = params->lbt; - if (local->hw.flags & IEEE802154_HW_TXPOWER) { + if (local->hw.phy->flags & WPAN_PHY_FLAG_TXPOWER) { ret = drv_set_tx_power(local, params->transmit_power); if (ret < 0) return ret; } - if (local->hw.flags & IEEE802154_HW_CCA_MODE) { + if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_MODE) { ret = drv_set_cca_mode(local, &params->cca); if (ret < 0) return ret; } - if (local->hw.flags & IEEE802154_HW_CCA_ED_LEVEL) { + if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) { ret = drv_set_cca_ed_level(local, params->cca_ed_level); if (ret < 0) return ret; @@ -151,9 +144,6 @@ static struct ieee802154_llsec_ops mac802154_llsec_ops = { struct ieee802154_mlme_ops mac802154_mlme_wpan = { .start_req = mac802154_mlme_start_req, - .get_pan_id = mac802154_dev_get_pan_id, - .get_short_addr = mac802154_dev_get_short_addr, - .get_dsn = mac802154_dev_get_dsn, .llsec = &mac802154_llsec_ops, diff --git a/net/mac802154/main.c b/net/mac802154/main.c index 08cb32dc8fd3..356b346e1ee8 100644 --- a/net/mac802154/main.c +++ b/net/mac802154/main.c @@ -107,6 +107,18 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops) skb_queue_head_init(&local->skb_queue); + /* init supported flags with 802.15.4 default ranges */ + 
phy->supported.max_minbe = 8; + phy->supported.min_maxbe = 3; + phy->supported.max_maxbe = 8; + phy->supported.min_frame_retries = -1; + phy->supported.max_frame_retries = 7; + phy->supported.max_csma_backoffs = 5; + phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE; + + /* always supported */ + phy->supported.iftypes = BIT(NL802154_IFTYPE_NODE); + return &local->hw; } EXPORT_SYMBOL(ieee802154_alloc_hw); @@ -155,6 +167,26 @@ int ieee802154_register_hw(struct ieee802154_hw *hw) ieee802154_setup_wpan_phy_pib(local->phy); + if (!(hw->flags & IEEE802154_HW_CSMA_PARAMS)) { + local->phy->supported.min_csma_backoffs = 4; + local->phy->supported.max_csma_backoffs = 4; + local->phy->supported.min_maxbe = 5; + local->phy->supported.max_maxbe = 5; + local->phy->supported.min_minbe = 3; + local->phy->supported.max_minbe = 3; + } + + if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) { + /* TODO should be 3, but our default value is -1 which means + * no ARET handling. + */ + local->phy->supported.min_frame_retries = -1; + local->phy->supported.max_frame_retries = -1; + } + + if (hw->flags & IEEE802154_HW_PROMISCUOUS) + local->phy->supported.iftypes |= BIT(NL802154_IFTYPE_MONITOR); + rc = wpan_phy_register(local->phy); if (rc < 0) goto out_wq; diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c index 5cf019a57fd7..73f94fbf8785 100644 --- a/net/mac802154/mib.c +++ b/net/mac802154/mib.c @@ -26,81 +26,22 @@ #include "ieee802154_i.h" #include "driver-ops.h" -void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val) -{ - struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); - - BUG_ON(dev->type != ARPHRD_IEEE802154); - - spin_lock_bh(&sdata->mib_lock); - sdata->wpan_dev.short_addr = val; - spin_unlock_bh(&sdata->mib_lock); -} - -__le16 mac802154_dev_get_short_addr(const struct net_device *dev) -{ - struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); - __le16 ret; - - BUG_ON(dev->type != ARPHRD_IEEE802154); - - spin_lock_bh(&sdata->mib_lock); - ret = sdata->wpan_dev.short_addr; - spin_unlock_bh(&sdata->mib_lock); - - return ret; -} - -__le16 mac802154_dev_get_pan_id(const struct net_device *dev) -{ - struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); - __le16 ret; - - BUG_ON(dev->type != ARPHRD_IEEE802154); - - spin_lock_bh(&sdata->mib_lock); - ret = sdata->wpan_dev.pan_id; - spin_unlock_bh(&sdata->mib_lock); - - return ret; -} - -void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val) -{ - struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); - - BUG_ON(dev->type != ARPHRD_IEEE802154); - - spin_lock_bh(&sdata->mib_lock); - sdata->wpan_dev.pan_id = val; - spin_unlock_bh(&sdata->mib_lock); -} - -u8 mac802154_dev_get_dsn(const struct net_device *dev) -{ - struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); - - BUG_ON(dev->type != ARPHRD_IEEE802154); - - return sdata->wpan_dev.dsn++; -} - void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan) { struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); struct ieee802154_local *local = sdata->local; int res; + ASSERT_RTNL(); + BUG_ON(dev->type != ARPHRD_IEEE802154); res = drv_set_channel(local, page, chan); if (res) { pr_debug("set_channel failed\n"); } else { - mutex_lock(&local->phy->pib_lock); local->phy->current_channel = chan; local->phy->current_page = page; - mutex_unlock(&local->phy->pib_lock); } } diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c index c0d67b2b4132..e0f10063cac3 100644 --- 
a/net/mac802154/rx.c +++ b/net/mac802154/rx.c @@ -47,8 +47,6 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata, pr_debug("getting packet via slave interface %s\n", sdata->dev->name); - spin_lock_bh(&sdata->mib_lock); - span = wpan_dev->pan_id; sshort = wpan_dev->short_addr; @@ -83,13 +81,10 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata, skb->pkt_type = PACKET_OTHERHOST; break; default: - spin_unlock_bh(&sdata->mib_lock); pr_debug("invalid dest mode\n"); goto fail; } - spin_unlock_bh(&sdata->mib_lock); - skb->dev = sdata->dev; rc = mac802154_llsec_decrypt(&sdata->sec, skb); diff --git a/net/mac802154/util.c b/net/mac802154/util.c index 150bf807e572..583435f38930 100644 --- a/net/mac802154/util.c +++ b/net/mac802154/util.c @@ -85,11 +85,10 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, hrtimer_start(&local->ifs_timer, ktime_set(0, hw->phy->sifs_period * NSEC_PER_USEC), HRTIMER_MODE_REL); - - consume_skb(skb); } else { ieee802154_wake_queue(hw); - consume_skb(skb); } + + dev_consume_skb_any(skb); } EXPORT_SYMBOL(ieee802154_xmit_complete); diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 7b3f732269e4..1f93a5978f2a 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -541,7 +541,7 @@ static void mpls_ifdown(struct net_device *dev) RCU_INIT_POINTER(dev->mpls_ptr, NULL); - kfree(mdev); + kfree_rcu(mdev, rcu); } static int mpls_dev_notify(struct notifier_block *this, unsigned long event, @@ -564,6 +564,17 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, case NETDEV_UNREGISTER: mpls_ifdown(dev); break; + case NETDEV_CHANGENAME: + mdev = mpls_dev_get(dev); + if (mdev) { + int err; + + mpls_dev_sysctl_unregister(mdev); + err = mpls_dev_sysctl_register(dev, mdev); + if (err) + return notifier_from_errno(err); + } + break; } return NOTIFY_OK; } diff --git a/net/mpls/internal.h b/net/mpls/internal.h index b064c345042c..8cabeb5a1cb9 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -16,6 +16,7 @@ struct mpls_dev { int input_enabled; struct ctl_table_header *sysctl; + struct rcu_head rcu; }; struct sk_buff; diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c index 809df534a720..0183b32da942 100644 --- a/net/mpls/mpls_gso.c +++ b/net/mpls/mpls_gso.c @@ -62,6 +62,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, static struct packet_offload mpls_mc_offload __read_mostly = { .type = cpu_to_be16(ETH_P_MPLS_MC), + .priority = 15, .callbacks = { .gso_segment = mpls_gso_segment, }, @@ -69,6 +70,7 @@ static struct packet_offload mpls_mc_offload __read_mostly = { static struct packet_offload mpls_uc_offload __read_mostly = { .type = cpu_to_be16(ETH_P_MPLS_UC), + .priority = 15, .callbacks = { .gso_segment = mpls_gso_segment, }, diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 1c78d7fb1da7..fbc8d15c7fda 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -3,6 +3,7 @@ menu "Core Netfilter Configuration" config NETFILTER_INGRESS bool "Netfilter ingress support" + default y select NET_INGRESS help This allows you to classify packets from ingress using the Netfilter @@ -455,6 +456,11 @@ config NF_TABLES_INET help This option enables support for a mixed IPv4/IPv6 "inet" table. +config NF_TABLES_NETDEV + tristate "Netfilter nf_tables netdev tables support" + help + This option enables support for the "netdev" table. 
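The "netdev" table enabled by this option attaches nf_tables base chains directly to a single network device at the new ingress hook, as wired up by the nf_tables_api.c and nf_tables_netdev.c hunks further below. As a rough sketch of that plumbing (hypothetical helper, simplified; real table creation goes through the nf_tables transaction machinery, and NF_NETDEV_INGRESS plus the nf_hook_ops dev field come from earlier patches in this series):

	/* Sketch only, not part of the patch: bind an ingress base chain
	 * to one device, roughly what the netdev family does internally.
	 */
	static int example_bind_ingress_chain(struct net *net, const char *ifname,
					      struct nf_hook_ops *ops)
	{
		struct net_device *dev = dev_get_by_name(net, ifname);

		if (!dev)
			return -ENOENT;	/* NFTA_TABLE_DEV must name an existing device */

		ops->hooknum = NF_NETDEV_INGRESS;	/* per-device hook point */
		ops->dev = dev;		/* consumed when the hook is registered */

		return nf_register_hook(ops);
	}

The device reference taken here is dropped again in nf_tables_table_destroy(), as the nf_tables_api.c hunk below shows.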
+ config NFT_EXTHDR tristate "Netfilter nf_tables IPv6 exthdr module" help diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index a87d8b8ec730..70d026d46fe7 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -75,6 +75,7 @@ nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o obj-$(CONFIG_NF_TABLES) += nf_tables.o obj-$(CONFIG_NF_TABLES_INET) += nf_tables_inet.o +obj-$(CONFIG_NF_TABLES_NETDEV) += nf_tables_netdev.o obj-$(CONFIG_NFT_COMPAT) += nft_compat.o obj-$(CONFIG_NFT_EXTHDR) += nft_exthdr.o obj-$(CONFIG_NFT_META) += nft_meta.o diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 34ded09317e7..4528f122bcd2 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -399,6 +399,8 @@ static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = { [NFTA_TABLE_NAME] = { .type = NLA_STRING, .len = NFT_TABLE_MAXNAMELEN - 1 }, [NFTA_TABLE_FLAGS] = { .type = NLA_U32 }, + [NFTA_TABLE_DEV] = { .type = NLA_STRING, + .len = IFNAMSIZ - 1 }, }; static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net, @@ -423,6 +425,10 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net, nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use))) goto nla_put_failure; + if (table->dev && + nla_put_string(skb, NFTA_TABLE_DEV, table->dev->name)) + goto nla_put_failure; + nlmsg_end(skb, nlh); return 0; @@ -608,6 +614,11 @@ static int nf_tables_updtable(struct nft_ctx *ctx) if (flags == ctx->table->flags) return 0; + if ((ctx->afi->flags & NFT_AF_NEEDS_DEV) && + ctx->nla[NFTA_TABLE_DEV] && + nla_strcmp(ctx->nla[NFTA_TABLE_DEV], ctx->table->dev->name)) + return -EOPNOTSUPP; + trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE, sizeof(struct nft_trans_table)); if (trans == NULL) @@ -645,6 +656,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, struct nft_table *table; struct net *net = sock_net(skb->sk); int family = nfmsg->nfgen_family; + struct net_device *dev = NULL; u32 flags = 0; struct nft_ctx ctx; int err; @@ -679,30 +691,50 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, return -EINVAL; } + if (afi->flags & NFT_AF_NEEDS_DEV) { + char ifname[IFNAMSIZ]; + + if (!nla[NFTA_TABLE_DEV]) + return -EOPNOTSUPP; + + nla_strlcpy(ifname, nla[NFTA_TABLE_DEV], IFNAMSIZ); + dev = dev_get_by_name(net, ifname); + if (!dev) + return -ENOENT; + } else if (nla[NFTA_TABLE_DEV]) { + return -EOPNOTSUPP; + } + + err = -EAFNOSUPPORT; if (!try_module_get(afi->owner)) - return -EAFNOSUPPORT; + goto err1; err = -ENOMEM; table = kzalloc(sizeof(*table), GFP_KERNEL); if (table == NULL) - goto err1; + goto err2; nla_strlcpy(table->name, name, NFT_TABLE_MAXNAMELEN); INIT_LIST_HEAD(&table->chains); INIT_LIST_HEAD(&table->sets); table->flags = flags; + table->dev = dev; nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE); if (err < 0) - goto err2; + goto err3; list_add_tail_rcu(&table->list, &afi->tables); return 0; -err2: +err3: kfree(table); -err1: +err2: module_put(afi->owner); +err1: + if (dev != NULL) + dev_put(dev); + return err; } @@ -806,6 +838,9 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx) { BUG_ON(ctx->table->use > 0); + if (ctx->table->dev) + dev_put(ctx->table->dev); + kfree(ctx->table); module_put(ctx->afi->owner); } @@ -1361,6 +1396,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, ops->priority = priority; ops->priv = chain; ops->hook = afi->hooks[ops->hooknum]; + ops->dev 
= table->dev; if (hookfn) ops->hook = hookfn; if (afi->hook_ops_init) diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c new file mode 100644 index 000000000000..04cb17057f46 --- /dev/null +++ b/net/netfilter/nf_tables_netdev.c @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/ip.h> +#include <net/ipv6.h> +#include <net/netfilter/nf_tables.h> +#include <net/netfilter/nf_tables_core.h> + +static inline void +nft_netdev_set_pktinfo_ipv4(struct nft_pktinfo *pkt, + const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct iphdr *iph, _iph; + u32 len, thoff; + + nft_set_pktinfo(pkt, ops, skb, state); + + iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph), + &_iph); + if (!iph) + return; + + iph = ip_hdr(skb); + if (iph->ihl < 5 || iph->version != 4) + return; + + len = ntohs(iph->tot_len); + thoff = iph->ihl * 4; + if (skb->len < len) + return; + else if (len < thoff) + return; + + pkt->tprot = iph->protocol; + pkt->xt.thoff = thoff; + pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET; +} + +static inline void +__nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt, + const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ +#if IS_ENABLED(CONFIG_IPV6) + struct ipv6hdr *ip6h, _ip6h; + unsigned int thoff = 0; + unsigned short frag_off; + int protohdr; + u32 pkt_len; + + ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h), + &_ip6h); + if (!ip6h) + return; + + if (ip6h->version != 6) + return; + + pkt_len = ntohs(ip6h->payload_len); + if (pkt_len + sizeof(*ip6h) > skb->len) + return; + + protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); + if (protohdr < 0) + return; + + pkt->tprot = protohdr; + pkt->xt.thoff = thoff; + pkt->xt.fragoff = frag_off; +#endif +} + +static inline void nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt, + const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + nft_set_pktinfo(pkt, ops, skb, state); + __nft_netdev_set_pktinfo_ipv6(pkt, ops, skb, state); +} + +static unsigned int +nft_do_chain_netdev(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct nft_pktinfo pkt; + + switch (eth_hdr(skb)->h_proto) { + case htons(ETH_P_IP): + nft_netdev_set_pktinfo_ipv4(&pkt, ops, skb, state); + break; + case htons(ETH_P_IPV6): + nft_netdev_set_pktinfo_ipv6(&pkt, ops, skb, state); + break; + default: + nft_set_pktinfo(&pkt, ops, skb, state); + break; + } + + return nft_do_chain(&pkt, ops); } + +static struct nft_af_info nft_af_netdev __read_mostly = { + .family = NFPROTO_NETDEV, + .nhooks = NF_NETDEV_NUMHOOKS, + .owner = THIS_MODULE, + .flags = NFT_AF_NEEDS_DEV, + .nops = 1, + .hooks = { + [NF_NETDEV_INGRESS] = nft_do_chain_netdev, + }, +}; + +static int nf_tables_netdev_init_net(struct net *net) +{ + net->nft.netdev = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL); + if (net->nft.netdev == NULL) + return -ENOMEM; + + memcpy(net->nft.netdev, &nft_af_netdev, sizeof(nft_af_netdev)); + + if (nft_register_afinfo(net, net->nft.netdev) < 0) + goto err; + + return 0; +err: + kfree(net->nft.netdev); + return -ENOMEM; +} + +static void nf_tables_netdev_exit_net(struct net *net) +{ + nft_unregister_afinfo(net->nft.netdev); + kfree(net->nft.netdev); +} + +static struct 
pernet_operations nf_tables_netdev_net_ops = { + .init = nf_tables_netdev_init_net, + .exit = nf_tables_netdev_exit_net, +}; + +static const struct nf_chain_type nft_filter_chain_netdev = { + .name = "filter", + .type = NFT_CHAIN_T_DEFAULT, + .family = NFPROTO_NETDEV, + .owner = THIS_MODULE, + .hook_mask = (1 << NF_NETDEV_INGRESS), +}; + +static int __init nf_tables_netdev_init(void) +{ + int ret; + + nft_register_chain_type(&nft_filter_chain_netdev); + ret = register_pernet_subsys(&nf_tables_netdev_net_ops); + if (ret < 0) + nft_unregister_chain_type(&nft_filter_chain_netdev); + + return ret; +} + +static void __exit nf_tables_netdev_exit(void) +{ + unregister_pernet_subsys(&nf_tables_netdev_net_ops); + nft_unregister_chain_type(&nft_filter_chain_netdev); +} + +module_init(nf_tables_netdev_init); +module_exit(nf_tables_netdev_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); +MODULE_ALIAS_NFT_FAMILY(5); /* NFPROTO_NETDEV */ diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index b491c1c296fe..8a8c0b8b4f63 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -608,17 +608,16 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port) } static int output_userspace(struct datapath *dp, struct sk_buff *skb, - struct sw_flow_key *key, const struct nlattr *attr) + struct sw_flow_key *key, const struct nlattr *attr, + const struct nlattr *actions, int actions_len) { struct ovs_tunnel_info info; struct dp_upcall_info upcall; const struct nlattr *a; int rem; + memset(&upcall, 0, sizeof(upcall)); upcall.cmd = OVS_PACKET_CMD_ACTION; - upcall.userdata = NULL; - upcall.portid = 0; - upcall.egress_tun_info = NULL; for (a = nla_data(attr), rem = nla_len(attr); rem > 0; a = nla_next(a, &rem)) { @@ -647,6 +646,13 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb, break; } + case OVS_USERSPACE_ATTR_ACTIONS: { + /* Include actions. */ + upcall.actions = actions; + upcall.actions_len = actions_len; + break; + } + } /* End of switch. 
*/ } @@ -654,7 +660,8 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb, } static int sample(struct datapath *dp, struct sk_buff *skb, - struct sw_flow_key *key, const struct nlattr *attr) + struct sw_flow_key *key, const struct nlattr *attr, + const struct nlattr *actions, int actions_len) { const struct nlattr *acts_list = NULL; const struct nlattr *a; @@ -688,7 +695,7 @@ static int sample(struct datapath *dp, struct sk_buff *skb, */ if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE && nla_is_last(a, rem))) - return output_userspace(dp, skb, key, a); + return output_userspace(dp, skb, key, a, actions, actions_len); skb = skb_clone(skb, GFP_ATOMIC); if (!skb) @@ -872,7 +879,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, break; case OVS_ACTION_ATTR_USERSPACE: - output_userspace(dp, skb, key, a); + output_userspace(dp, skb, key, a, attr, len); break; case OVS_ACTION_ATTR_HASH: @@ -916,7 +923,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, break; case OVS_ACTION_ATTR_SAMPLE: - err = sample(dp, skb, key, a); + err = sample(dp, skb, key, a, attr, len); break; } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 3b90461317ec..ff8c4a4c1609 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -272,10 +272,9 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) struct dp_upcall_info upcall; int error; + memset(&upcall, 0, sizeof(upcall)); upcall.cmd = OVS_PACKET_CMD_MISS; - upcall.userdata = NULL; upcall.portid = ovs_vport_find_upcall_portid(p, skb); - upcall.egress_tun_info = NULL; error = ovs_dp_upcall(dp, skb, key, &upcall); if (unlikely(error)) kfree_skb(skb); @@ -397,6 +396,10 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info, if (upcall_info->egress_tun_info) size += nla_total_size(ovs_tun_key_attr_size()); + /* OVS_PACKET_ATTR_ACTIONS */ + if (upcall_info->actions_len) + size += nla_total_size(upcall_info->actions_len); + return size; } @@ -478,6 +481,17 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, nla_nest_end(user_skb, nla); } + if (upcall_info->actions_len) { + nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS); + err = ovs_nla_put_actions(upcall_info->actions, + upcall_info->actions_len, + user_skb); + if (!err) + nla_nest_end(user_skb, nla); + else + nla_nest_cancel(user_skb, nla); + } + /* Only reserve room for attribute header, packet data is added * in skb_zerocopy() */ if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) { diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 4ec4a480b147..cd691e935e08 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -116,6 +116,8 @@ struct ovs_skb_cb { struct dp_upcall_info { const struct ovs_tunnel_info *egress_tun_info; const struct nlattr *userdata; + const struct nlattr *actions; + int actions_len; u32 portid; u8 cmd; }; diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 4776282c6417..33e6d6e2908f 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -125,6 +125,7 @@ static struct vport *netdev_create(const struct vport_parms *parms) if (err) goto error_master_upper_dev_unlink; + dev_disable_lro(netdev_vport->dev); dev_set_promiscuity(netdev_vport->dev, 1); netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH; rtnl_unlock(); diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 3d83641f2861..2ad9032372b2 100644 --- 
a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -270,6 +270,28 @@ static int rds_cong_monitor(struct rds_sock *rs, char __user *optval, return ret; } +static int rds_set_transport(struct rds_sock *rs, char __user *optval, + int optlen) +{ + int t_type; + + if (rs->rs_transport) + return -EOPNOTSUPP; /* previously attached to transport */ + + if (optlen != sizeof(int)) + return -EINVAL; + + if (copy_from_user(&t_type, (int __user *)optval, sizeof(t_type))) + return -EFAULT; + + if (t_type < 0 || t_type >= RDS_TRANS_COUNT) + return -EINVAL; + + rs->rs_transport = rds_trans_get(t_type); + + return rs->rs_transport ? 0 : -ENOPROTOOPT; +} + static int rds_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { @@ -300,6 +322,11 @@ static int rds_setsockopt(struct socket *sock, int level, int optname, case RDS_CONG_MONITOR: ret = rds_cong_monitor(rs, optval, optlen); break; + case SO_RDS_TRANSPORT: + lock_sock(sock->sk); + ret = rds_set_transport(rs, optval, optlen); + release_sock(sock->sk); + break; default: ret = -ENOPROTOOPT; } @@ -312,6 +339,7 @@ static int rds_getsockopt(struct socket *sock, int level, int optname, { struct rds_sock *rs = rds_sk_to_rs(sock->sk); int ret = -ENOPROTOOPT, len; + int trans; if (level != SOL_RDS) goto out; @@ -337,6 +365,19 @@ static int rds_getsockopt(struct socket *sock, int level, int optname, else ret = 0; break; + case SO_RDS_TRANSPORT: + if (len < sizeof(int)) { + ret = -EINVAL; + break; + } + trans = (rs->rs_transport ? rs->rs_transport->t_type : + RDS_TRANS_NONE); /* unbound */ + if (put_user(trans, (int __user *)optval) || + put_user(sizeof(int), optlen)) + ret = -EFAULT; + else + ret = 0; + break; default: break; } diff --git a/net/rds/bind.c b/net/rds/bind.c index a2e6562da751..4ebd29c128b6 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -181,6 +181,10 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (ret) goto out; + if (rs->rs_transport) { /* previously bound */ + ret = 0; + goto out; + } trans = rds_trans_get_preferred(sin->sin_addr.s_addr); if (!trans) { ret = -EADDRNOTAVAIL; diff --git a/net/rds/rds.h b/net/rds/rds.h index 0d41155a2258..a33fb4ad3535 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -408,11 +408,6 @@ struct rds_notifier { * should try hard not to block. 
*/ -#define RDS_TRANS_IB 0 -#define RDS_TRANS_IWARP 1 -#define RDS_TRANS_TCP 2 -#define RDS_TRANS_COUNT 3 - struct rds_transport { char t_name[TRANSNAMSIZ]; struct list_head t_item; @@ -803,6 +798,7 @@ struct rds_transport *rds_trans_get_preferred(__be32 addr); void rds_trans_put(struct rds_transport *trans); unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter, unsigned int avail); +struct rds_transport *rds_trans_get(int t_type); int rds_trans_init(void); void rds_trans_exit(void); diff --git a/net/rds/transport.c b/net/rds/transport.c index 7f2ac4fec367..8b4a6cd2c3a7 100644 --- a/net/rds/transport.c +++ b/net/rds/transport.c @@ -101,6 +101,27 @@ struct rds_transport *rds_trans_get_preferred(__be32 addr) return ret; } +struct rds_transport *rds_trans_get(int t_type) +{ + struct rds_transport *ret = NULL; + struct rds_transport *trans; + unsigned int i; + + down_read(&rds_trans_sem); + for (i = 0; i < RDS_TRANS_COUNT; i++) { + trans = transports[i]; + + if (trans && trans->t_type == t_type && + (!trans->t_owner || try_module_get(trans->t_owner))) { + ret = trans; + break; + } + } + up_read(&rds_trans_sem); + + return ret; +} + /* * This returns the number of stats entries in the snapshot and only * copies them using the iter if there is enough space for them. The diff --git a/net/rfkill/core.c b/net/rfkill/core.c index fa7cd792791c..f12149a29cb1 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -794,7 +794,8 @@ void rfkill_resume_polling(struct rfkill *rfkill) } EXPORT_SYMBOL(rfkill_resume_polling); -static int rfkill_suspend(struct device *dev, pm_message_t state) +#ifdef CONFIG_PM_SLEEP +static int rfkill_suspend(struct device *dev) { struct rfkill *rfkill = to_rfkill(dev); @@ -818,13 +819,18 @@ static int rfkill_resume(struct device *dev) return 0; } +static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume); +#define RFKILL_PM_OPS (&rfkill_pm_ops) +#else +#define RFKILL_PM_OPS NULL +#endif + static struct class rfkill_class = { .name = "rfkill", .dev_release = rfkill_release, .dev_groups = rfkill_dev_groups, .dev_uevent = rfkill_dev_uevent, - .suspend = rfkill_suspend, - .resume = rfkill_resume, + .pm = RFKILL_PM_OPS, }; bool rfkill_blocked(struct rfkill *rfkill) diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index d978f2f46ff3..d5d58d919552 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c @@ -112,21 +112,17 @@ static int rfkill_gpio_probe(struct platform_device *pdev) rfkill->clk = devm_clk_get(&pdev->dev, NULL); - gpio = devm_gpiod_get(&pdev->dev, "reset"); - if (!IS_ERR(gpio)) { - ret = gpiod_direction_output(gpio, 0); - if (ret) - return ret; - rfkill->reset_gpio = gpio; - } + gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); - gpio = devm_gpiod_get(&pdev->dev, "shutdown"); - if (!IS_ERR(gpio)) { - ret = gpiod_direction_output(gpio, 0); - if (ret) - return ret; - rfkill->shutdown_gpio = gpio; - } + rfkill->reset_gpio = gpio; + + gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + rfkill->shutdown_gpio = gpio; /* Make sure at-least one of the GPIO is defined and that * a name is specified for this instance diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index dc6a2d324bd8..1d56903fd4c7 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -37,6 +37,7 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, { struct tcf_bpf *prog = act->priv; int 
action, filter_res; + bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS; if (unlikely(!skb_mac_header_was_set(skb))) return TC_ACT_UNSPEC; @@ -48,7 +49,13 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, /* Needed here for accessing maps. */ rcu_read_lock(); - filter_res = BPF_PROG_RUN(prog->filter, skb); + if (at_ingress) { + __skb_push(skb, skb->mac_len); + filter_res = BPF_PROG_RUN(prog->filter, skb); + __skb_pull(skb, skb->mac_len); + } else { + filter_res = BPF_PROG_RUN(prog->filter, skb); + } rcu_read_unlock(); /* A BPF program may overwrite the default action opcode. diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 91bd9c19471d..c79ecfd36e0f 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -64,6 +64,11 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, { struct cls_bpf_head *head = rcu_dereference_bh(tp->root); struct cls_bpf_prog *prog; +#ifdef CONFIG_NET_CLS_ACT + bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS; +#else + bool at_ingress = false; +#endif int ret = -1; if (unlikely(!skb_mac_header_was_set(skb))) @@ -72,7 +77,16 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, /* Needed here for accessing maps. */ rcu_read_lock(); list_for_each_entry_rcu(prog, &head->plist, link) { - int filter_res = BPF_PROG_RUN(prog->filter, skb); + int filter_res; + + if (at_ingress) { + /* It is safe to push/pull even if skb_shared() */ + __skb_push(skb, skb->mac_len); + filter_res = BPF_PROG_RUN(prog->filter, skb); + __skb_pull(skb, skb->mac_len); + } else { + filter_res = BPF_PROG_RUN(prog->filter, skb); + } if (filter_res == 0) continue; diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index b4359924846c..76bc3a20ffdb 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -68,15 +68,21 @@ static inline u32 addr_fold(void *addr) static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow) { - if (flow->addrs.src) - return ntohl(flow->addrs.src); + __be32 src = flow_get_u32_src(flow); + + if (src) + return ntohl(src); + return addr_fold(skb->sk); } static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow) { - if (flow->addrs.dst) - return ntohl(flow->addrs.dst); + __be32 dst = flow_get_u32_dst(flow); + + if (dst) + return ntohl(dst); + return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb); } diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 8c8f34ef6980..b92d3f49c23e 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -25,10 +25,12 @@ struct fl_flow_key { int indev_ifindex; + struct flow_dissector_key_control control; struct flow_dissector_key_basic basic; struct flow_dissector_key_eth_addrs eth; + struct flow_dissector_key_addrs ipaddrs; union { - struct flow_dissector_key_addrs ipv4; + struct flow_dissector_key_ipv4_addrs ipv4; struct flow_dissector_key_ipv6_addrs ipv6; }; struct flow_dissector_key_ports tp; @@ -259,14 +261,14 @@ static int fl_set_key(struct net *net, struct nlattr **tb, &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, sizeof(key->basic.ip_proto)); } - if (key->basic.n_proto == htons(ETH_P_IP)) { + if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, sizeof(key->ipv4.src)); fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, sizeof(key->ipv4.dst)); - } else if (key->basic.n_proto == 
htons(ETH_P_IPV6)) { + } else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, sizeof(key->ipv6.src)); @@ -347,6 +349,7 @@ static void fl_init_dissector(struct cls_fl_head *head, struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX]; size_t cnt = 0; + FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control); FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic); FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, FLOW_DISSECTOR_KEY_ETH_ADDRS, eth); @@ -608,7 +611,7 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, sizeof(key->basic.ip_proto))) goto nla_put_failure; - if (key->basic.n_proto == htons(ETH_P_IP) && + if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, sizeof(key->ipv4.src)) || @@ -616,7 +619,7 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, sizeof(key->ipv4.dst)))) goto nla_put_failure; - else if (key->basic.n_proto == htons(ETH_P_IPV6) && + else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, sizeof(key->ipv6.src)) || diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 0b74dc0ede9c..c5b9db84d069 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -815,10 +815,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, if (dev->flags & IFF_UP) dev_deactivate(dev); - if (new && new->ops->attach) { - new->ops->attach(new); - num_q = 0; - } + if (new && new->ops->attach) + goto skip; for (i = 0; i < num_q; i++) { struct netdev_queue *dev_queue = dev_ingress_queue(dev); @@ -834,12 +832,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, qdisc_destroy(old); } +skip: if (!ingress) { notify_and_destroy(net, skb, n, classid, dev->qdisc, new); if (new && !new->ops->attach) atomic_inc(&new->refcnt); dev->qdisc = new ? : &noop_qdisc; + + if (new && new->ops->attach) + new->ops->attach(new); } else { notify_and_destroy(net, skb, n, classid, old, new); } diff --git a/net/sctp/auth.c b/net/sctp/auth.c index fb7976aee61c..4f15b7d730e1 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -381,13 +381,14 @@ int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep, } -/* Public interface to creat the association shared key. +/* Public interface to create the association shared key. * See code above for the algorithm. */ int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp) { struct sctp_auth_bytes *secret; struct sctp_shared_key *ep_key; + struct sctp_chunk *chunk; /* If we don't support AUTH, or peer is not capable * we don't need to do anything. 
@@ -410,6 +411,14 @@ int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp) sctp_auth_key_put(asoc->asoc_shared_key); asoc->asoc_shared_key = secret; + /* Update send queue in case any chunk already in there now + * needs authenticating + */ + list_for_each_entry(chunk, &asoc->outqueue.out_chunk_list, list) { + if (sctp_auth_send_cid(chunk->chunk_hdr->type, asoc)) + chunk->auth = 1; + } + return 0; } diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index ac853acbe211..e008057dab46 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -803,7 +803,7 @@ static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi) } /** - * switchdev_fib_ipv4_add - Add IPv4 route entry to switch + * switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry * * @dst: route's IPv4 destination address * @dst_len: destination address length (prefix length) @@ -813,7 +813,7 @@ static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi) * @nlflags: netlink flags passed in (NLM_F_*) * @tb_id: route table ID * - * Add IPv4 route entry to switch device. + * Add/modify switch IPv4 route entry. */ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, u8 tos, u8 type, u32 nlflags, u32 tb_id) diff --git a/net/tipc/link.c b/net/tipc/link.c index fb2a003c8e6d..ca8b8e0f49b5 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1320,8 +1320,6 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, if (!tipc_link_is_up(l_ptr)) return; - if (skb_queue_len(&l_ptr->backlogq)) - next_sent = buf_seqno(skb_peek(&l_ptr->backlogq)); msg_set_next_sent(msg, next_sent); if (!skb_queue_empty(&l_ptr->deferdq)) { last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq)); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 9370f953e16f..46b6ed534ef2 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -410,7 +410,7 @@ static int tipc_release(struct socket *sock) struct net *net; struct tipc_sock *tsk; struct sk_buff *skb; - u32 dnode, probing_state; + u32 dnode; /* * Exit if socket isn't fully initialized (occurs when a failed accept() @@ -448,10 +448,7 @@ static int tipc_release(struct socket *sock) } tipc_sk_withdraw(tsk, 0, NULL); - probing_state = tsk->probing_state; - if (del_timer_sync(&sk->sk_timer) && - probing_state != TIPC_CONN_PROBING) - sock_put(sk); + sk_stop_timer(sk, &sk->sk_timer); tipc_sk_remove(tsk); if (tsk->connected) { skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, @@ -2143,11 +2140,17 @@ static void tipc_sk_timeout(unsigned long data) peer_node = tsk_peer_node(tsk); if (tsk->probing_state == TIPC_CONN_PROBING) { - /* Previous probe not answered -> self abort */ - skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, - TIPC_CONN_MSG, SHORT_H_SIZE, 0, - own_node, peer_node, tsk->portid, - peer_port, TIPC_ERR_NO_PORT); + if (!sock_owned_by_user(sk)) { + sk->sk_socket->state = SS_DISCONNECTING; + tsk->connected = 0; + tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk), + tsk_peer_port(tsk)); + sk->sk_state_change(sk); + } else { + /* Try again later */ + sk_reset_timer(sk, &sk->sk_timer, (HZ / 20)); + } + } else { skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0, peer_node, own_node, diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index b8c44076c776..f25e1675b865 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1984,6 +1984,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, unix_state_unlock(sk); timeo = freezable_schedule_timeout(timeo); 
unix_state_lock(sk); + + if (sock_flag(sk, SOCK_DEAD)) + break; + clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); } @@ -2055,6 +2059,10 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state) struct sk_buff *skb, *last; unix_state_lock(sk); + if (sock_flag(sk, SOCK_DEAD)) { + err = -ECONNRESET; + goto unlock; + } last = skb = skb_peek(&sk->sk_receive_queue); last_len = last ? last->len : 0; again: diff --git a/net/wireless/core.h b/net/wireless/core.h index 801cd49c5a0c..311eef26bf88 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -222,6 +222,7 @@ struct cfg80211_event { const u8 *ie; size_t ie_len; u16 reason; + bool locally_generated; } dc; struct { u8 bssid[ETH_ALEN]; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index d11454f87bac..8020b5b094d4 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -938,7 +938,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, } void cfg80211_disconnected(struct net_device *dev, u16 reason, - const u8 *ie, size_t ie_len, gfp_t gfp) + const u8 *ie, size_t ie_len, + bool locally_generated, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); @@ -954,6 +955,7 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason, ev->dc.ie_len = ie_len; memcpy((void *)ev->dc.ie, ie, ie_len); ev->dc.reason = reason; + ev->dc.locally_generated = locally_generated; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 9ee6bc1a7610..9cee0220665d 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -86,7 +86,7 @@ static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static void cfg80211_leave_all(struct cfg80211_registered_device *rdev) { struct wireless_dev *wdev; @@ -95,7 +95,7 @@ static void cfg80211_leave_all(struct cfg80211_registered_device *rdev) cfg80211_leave(rdev, wdev); } -static int wiphy_suspend(struct device *dev, pm_message_t state) +static int wiphy_suspend(struct device *dev) { struct cfg80211_registered_device *rdev = dev_to_rdev(dev); int ret = 0; @@ -136,6 +136,11 @@ static int wiphy_resume(struct device *dev) return ret; } + +static SIMPLE_DEV_PM_OPS(wiphy_pm_ops, wiphy_suspend, wiphy_resume); +#define WIPHY_PM_OPS (&wiphy_pm_ops) +#else +#define WIPHY_PM_OPS NULL #endif static const void *wiphy_namespace(struct device *d) @@ -151,10 +156,7 @@ struct class ieee80211_class = { .dev_release = wiphy_dev_release, .dev_groups = ieee80211_groups, .dev_uevent = wiphy_uevent, -#ifdef CONFIG_PM - .suspend = wiphy_suspend, - .resume = wiphy_resume, -#endif + .pm = WIPHY_PM_OPS, .ns_type = &net_ns_type_operations, .namespace = wiphy_namespace, }; diff --git a/net/wireless/util.c b/net/wireless/util.c index 70051ab52f4f..baf7218cec15 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -887,7 +887,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev) case EVENT_DISCONNECTED: __cfg80211_disconnected(wdev->netdev, ev->dc.ie, ev->dc.ie_len, - ev->dc.reason, true); + ev->dc.reason, + !ev->dc.locally_generated); break; case EVENT_IBSS_JOINED: __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid, @@ -944,7 +945,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, ntype == NL80211_IFTYPE_P2P_CLIENT)) return -EBUSY; - if (ntype != otype && netif_running(dev)) { + if (ntype != otype) { 
dev->ieee80211_ptr->use_4addr = false; dev->ieee80211_ptr->mesh_id_up_len = 0; wdev_lock(dev->ieee80211_ptr); diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index fff1bef6ed6d..fd682832a0e3 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -1333,6 +1333,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); wdev_unlock(wdev); + memset(&sinfo, 0, sizeof(sinfo)); + if (rdev_get_station(rdev, dev, bssid, &sinfo)) return NULL; diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 526c4feb3b50..60ce7014e1b0 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -13,6 +13,8 @@ #include <net/dst.h> #include <net/ip.h> #include <net/xfrm.h> +#include <net/ip_tunnels.h> +#include <net/ip6_tunnel.h> static struct kmem_cache *secpath_cachep __read_mostly; @@ -29,7 +31,7 @@ int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo) return -EAFNOSUPPORT; spin_lock_bh(&xfrm_input_afinfo_lock); if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL)) - err = -ENOBUFS; + err = -EEXIST; else rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo); spin_unlock_bh(&xfrm_input_afinfo_lock); @@ -186,6 +188,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) struct xfrm_state *x = NULL; xfrm_address_t *daddr; struct xfrm_mode *inner_mode; + u32 mark = skb->mark; unsigned int family; int decaps = 0; int async = 0; @@ -203,6 +206,18 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) XFRM_SPI_SKB_CB(skb)->daddroff); family = XFRM_SPI_SKB_CB(skb)->family; + /* if tunnel is present override skb->mark value with tunnel i_key */ + if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) { + switch (family) { + case AF_INET: + mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key); + break; + case AF_INET6: + mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key); + break; + } + } + /* Allocate new secpath or COW existing one. */ if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { struct sec_path *sp; @@ -229,7 +244,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) goto drop; } - x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family); + x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family); if (x == NULL) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES); xfrm_audit_state_notfound(skb, family, spi, seq); @@ -239,13 +254,13 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) skb->sp->xvec[skb->sp->len++] = x; spin_lock(&x->lock); - if (unlikely(x->km.state == XFRM_STATE_ACQ)) { - XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR); - goto drop_unlock; - } if (unlikely(x->km.state != XFRM_STATE_VALID)) { - XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID); + if (x->km.state == XFRM_STATE_ACQ) + XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR); + else + XFRM_INC_STATS(net, + LINUX_MIB_XFRMINSTATEINVALID); goto drop_unlock; } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 638af0655aaf..18cead7645be 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -315,14 +315,6 @@ void xfrm_policy_destroy(struct xfrm_policy *policy) } EXPORT_SYMBOL(xfrm_policy_destroy); -static void xfrm_queue_purge(struct sk_buff_head *list) -{ - struct sk_buff *skb; - - while ((skb = skb_dequeue(list)) != NULL) - kfree_skb(skb); -} - /* Rule must be locked. Release descentant resources, announce * entry dead. The rule must be unlinked from lists to the moment. 
*/ @@ -335,7 +327,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy) if (del_timer(&policy->polq.hold_timer)) xfrm_pol_put(policy); - xfrm_queue_purge(&policy->polq.hold_queue); + skb_queue_purge(&policy->polq.hold_queue); if (del_timer(&policy->timer)) xfrm_pol_put(policy); @@ -708,6 +700,9 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, struct xfrm_policy_queue *pq = &old->polq; struct sk_buff_head list; + if (skb_queue_empty(&pq->hold_queue)) + return; + __skb_queue_head_init(&list); spin_lock_bh(&pq->hold_queue.lock); @@ -716,9 +711,6 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, xfrm_pol_put(old); spin_unlock_bh(&pq->hold_queue.lock); - if (skb_queue_empty(&list)) - return; - pq = &new->polq; spin_lock_bh(&pq->hold_queue.lock); @@ -1012,7 +1004,9 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, if (list_empty(&walk->walk.all)) x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all); else - x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all); + x = list_first_entry(&walk->walk.all, + struct xfrm_policy_walk_entry, all); + list_for_each_entry_from(x, &net->xfrm.policy_all, all) { if (x->dead) continue; @@ -1120,6 +1114,9 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, } chain = &net->xfrm.policy_inexact[dir]; hlist_for_each_entry(pol, chain, bydst) { + if ((pol->priority >= priority) && ret) + break; + err = xfrm_policy_match(pol, fl, type, family, dir); if (err) { if (err == -ESRCH) @@ -1128,13 +1125,13 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, ret = ERR_PTR(err); goto fail; } - } else if (pol->priority < priority) { + } else { ret = pol; break; } } - if (ret) - xfrm_pol_hold(ret); + + xfrm_pol_hold(ret); fail: read_unlock_bh(&net->xfrm.xfrm_policy_lock); @@ -1955,7 +1952,7 @@ static void xfrm_policy_queue_process(unsigned long arg) purge_queue: pq->timeout = 0; - xfrm_queue_purge(&pq->hold_queue); + skb_queue_purge(&pq->hold_queue); xfrm_pol_put(pol); } @@ -2814,7 +2811,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) return -EAFNOSUPPORT; spin_lock(&xfrm_policy_afinfo_lock); if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) - err = -ENOBUFS; + err = -EEXIST; else { struct dst_ops *dst_ops = afinfo->dst_ops; if (likely(dst_ops->kmem_cachep == NULL)) @@ -3209,16 +3206,17 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector * } chain = &net->xfrm.policy_inexact[dir]; hlist_for_each_entry(pol, chain, bydst) { + if ((pol->priority >= priority) && ret) + break; + if (xfrm_migrate_selector_match(sel, &pol->selector) && - pol->type == type && - pol->priority < priority) { + pol->type == type) { ret = pol; break; } } - if (ret) - xfrm_pol_hold(ret); + xfrm_pol_hold(ret); read_unlock_bh(&net->xfrm.xfrm_policy_lock); diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index dab57daae408..4fd725a0c500 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -99,6 +99,7 @@ static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb) if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq; + XFRM_SKB_CB(skb)->seq.output.hi = 0; if (unlikely(x->replay.oseq == 0)) { x->replay.oseq--; xfrm_audit_state_replay_overflow(x, skb); @@ -177,6 +178,7 @@ static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb) if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { 
XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq; + XFRM_SKB_CB(skb)->seq.output.hi = 0; if (unlikely(replay_esn->oseq == 0)) { replay_esn->oseq--; xfrm_audit_state_replay_overflow(x, skb); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index f5e39e35d73a..9895a8c56d8c 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -927,8 +927,8 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi, x->id.spi != spi) continue; - spin_unlock_bh(&net->xfrm.xfrm_state_lock); xfrm_state_hold(x); + spin_unlock_bh(&net->xfrm.xfrm_state_lock); return x; } spin_unlock_bh(&net->xfrm.xfrm_state_lock); @@ -1626,7 +1626,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk, if (list_empty(&walk->all)) x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all); else - x = list_entry(&walk->all, struct xfrm_state_walk, all); + x = list_first_entry(&walk->all, struct xfrm_state_walk, all); list_for_each_entry_from(x, &net->xfrm.state_all, all) { if (x->state == XFRM_STATE_DEAD) continue; @@ -1908,7 +1908,7 @@ int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo) return -EAFNOSUPPORT; spin_lock_bh(&xfrm_state_afinfo_lock); if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL)) - err = -ENOBUFS; + err = -EEXIST; else rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo); spin_unlock_bh(&xfrm_state_afinfo_lock); diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c index 2625b987944f..41ae2fd21b13 100644 --- a/samples/bpf/sockex3_kern.c +++ b/samples/bpf/sockex3_kern.c @@ -89,7 +89,6 @@ static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off) struct globals { struct flow_keys flow; - __u32 nhoff; }; struct bpf_map_def SEC("maps") percpu_map = { @@ -139,7 +138,7 @@ static void update_stats(struct __sk_buff *skb, struct globals *g) static __always_inline void parse_ip_proto(struct __sk_buff *skb, struct globals *g, __u32 ip_proto) { - __u32 nhoff = g->nhoff; + __u32 nhoff = skb->cb[0]; int poff; switch (ip_proto) { @@ -165,7 +164,7 @@ static __always_inline void parse_ip_proto(struct __sk_buff *skb, if (gre_flags & GRE_SEQ) nhoff += 4; - g->nhoff = nhoff; + skb->cb[0] = nhoff; parse_eth_proto(skb, gre_proto); break; } @@ -195,7 +194,7 @@ PROG(PARSE_IP)(struct __sk_buff *skb) if (!g) return 0; - nhoff = g->nhoff; + nhoff = skb->cb[0]; if (unlikely(ip_is_fragment(skb, nhoff))) return 0; @@ -210,7 +209,7 @@ PROG(PARSE_IP)(struct __sk_buff *skb) verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/); nhoff += (verlen & 0xF) << 2; - g->nhoff = nhoff; + skb->cb[0] = nhoff; parse_ip_proto(skb, g, ip_proto); return 0; } @@ -223,7 +222,7 @@ PROG(PARSE_IPV6)(struct __sk_buff *skb) if (!g) return 0; - nhoff = g->nhoff; + nhoff = skb->cb[0]; ip_proto = load_byte(skb, nhoff + offsetof(struct ipv6hdr, nexthdr)); @@ -233,25 +232,21 @@ PROG(PARSE_IPV6)(struct __sk_buff *skb) nhoff + offsetof(struct ipv6hdr, daddr)); nhoff += sizeof(struct ipv6hdr); - g->nhoff = nhoff; + skb->cb[0] = nhoff; parse_ip_proto(skb, g, ip_proto); return 0; } PROG(PARSE_VLAN)(struct __sk_buff *skb) { - struct globals *g = this_cpu_globals(); __u32 nhoff, proto; - if (!g) - return 0; - - nhoff = g->nhoff; + nhoff = skb->cb[0]; proto = load_half(skb, nhoff + offsetof(struct vlan_hdr, h_vlan_encapsulated_proto)); nhoff += sizeof(struct vlan_hdr); - g->nhoff = nhoff; + skb->cb[0] = nhoff; parse_eth_proto(skb, proto); @@ -260,17 +255,13 @@ PROG(PARSE_VLAN)(struct __sk_buff *skb) PROG(PARSE_MPLS)(struct __sk_buff *skb) { - struct 
globals *g = this_cpu_globals(); __u32 nhoff, label; - if (!g) - return 0; - - nhoff = g->nhoff; + nhoff = skb->cb[0]; label = load_word(skb, nhoff); nhoff += sizeof(struct mpls_label); - g->nhoff = nhoff; + skb->cb[0] = nhoff; if (label & MPLS_LS_S_MASK) { __u8 verlen = load_byte(skb, nhoff); @@ -288,14 +279,10 @@ PROG(PARSE_MPLS)(struct __sk_buff *skb) SEC("socket/0") int main_prog(struct __sk_buff *skb) { - struct globals *g = this_cpu_globals(); __u32 nhoff = ETH_HLEN; __u32 proto = load_half(skb, 12); - if (!g) - return 0; - - g->nhoff = nhoff; + skb->cb[0] = nhoff; parse_eth_proto(skb, proto); return 0; } diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c index 7c27710f8296..9bfb2eb34563 100644 --- a/samples/bpf/tcbpf1_kern.c +++ b/samples/bpf/tcbpf1_kern.c @@ -21,7 +21,7 @@ static inline void set_dst_mac(struct __sk_buff *skb, char *mac) static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos) { - __u8 old_tos = load_byte(skb, BPF_LL_OFF + TOS_OFF); + __u8 old_tos = load_byte(skb, TOS_OFF); bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2); bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0); @@ -34,7 +34,7 @@ static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos) static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip) { - __u32 old_ip = _htonl(load_word(skb, BPF_LL_OFF + IP_SRC_OFF)); + __u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF)); bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip)); bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip)); @@ -44,7 +44,7 @@ static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip) #define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest)) static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port) { - __u16 old_port = htons(load_half(skb, BPF_LL_OFF + TCP_DPORT_OFF)); + __u16 old_port = htons(load_half(skb, TCP_DPORT_OFF)); bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port)); bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0); @@ -53,7 +53,7 @@ static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port) SEC("classifier") int bpf_prog1(struct __sk_buff *skb) { - __u8 proto = load_byte(skb, BPF_LL_OFF + ETH_HLEN + offsetof(struct iphdr, protocol)); + __u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)); long *value; if (proto == IPPROTO_TCP) { diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c index 12f3780af73f..693605997abc 100644 --- a/samples/bpf/test_verifier.c +++ b/samples/bpf/test_verifier.c @@ -29,6 +29,7 @@ struct bpf_test { ACCEPT, REJECT } result; + enum bpf_prog_type prog_type; }; static struct bpf_test tests[] = { @@ -743,6 +744,84 @@ static struct bpf_test tests[] = { .errstr = "different pointers", .result = REJECT, }, + { + "check skb->mark is not writeable by sockets", + .insns = { + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check skb->tc_index is not writeable by sockets", + .insns = { + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, tc_index)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check non-u32 access to cb", + .insns = { + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_EXIT_INSN(), + }, 
+ .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check out of range skb->cb access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[60])), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_ACT, + }, + { + "write skb fields from socket prog", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4])), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_index)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, cb[2])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "write skb fields from tc_cls_act prog", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, mark)), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_index)), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, tc_index)), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, }; static int probe_filter_length(struct bpf_insn *fp) @@ -775,6 +854,7 @@ static int test(void) for (i = 0; i < ARRAY_SIZE(tests); i++) { struct bpf_insn *prog = tests[i].insns; + int prog_type = tests[i].prog_type; int prog_len = probe_filter_length(prog); int *fixup = tests[i].fixup; int map_fd = -1; @@ -789,8 +869,8 @@ static int test(void) } printf("#%d %s ", i, tests[i].descr); - prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog, - prog_len * sizeof(struct bpf_insn), + prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER, + prog, prog_len * sizeof(struct bpf_insn), "GPL", 0); if (tests[i].result == ACCEPT) { diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 89b1df4e72ab..c5ec977b9c37 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3169,12 +3169,12 @@ sub process { } # check for global initialisers. - if ($line =~ /^\+(\s*$Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/) { + if ($line =~ /^\+$Type\s*$Ident(?:\s+$Modifier)*\s*=\s*(?:0|NULL|false)\s*;/) { if (ERROR("GLOBAL_INITIALISERS", "do not initialise globals to 0 or NULL\n" . $herecurr) && $fix) { - $fixed[$fixlinenr] =~ s/($Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/$1;/; + $fixed[$fixlinenr] =~ s/(^.$Type\s*$Ident(?:\s+$Modifier)*)\s*=\s*(0|NULL|false)\s*;/$1;/; } } # check for static initialisers. 
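To make the corrected GLOBAL_INITIALISERS test above concrete, here is a hypothetical snippet (not taken from the patch) of the code it flags and the form its --fix option produces:

	/* Flagged by the check: file-scope variables explicitly initialised
	 * to 0, NULL or false.  Statics are zero-initialised anyway, so the
	 * explicit initialiser is redundant:
	 *
	 *	static void *example_ptr = NULL;
	 *	static int example_count = 0;
	 *
	 * After checkpatch.pl --fix, only the declarations remain:
	 */
	static void *example_ptr;
	static int example_count;

The old expression wrapped the whole declaration in a starred group, so the --fix substitution could be applied with an empty $1 and eat the declaration; the rewrite makes the declaration mandatory and keeps it intact in $1.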
diff --git a/scripts/gdb/linux/modules.py b/scripts/gdb/linux/modules.py index a1504c4f1900..25db8cff44a2 100644 --- a/scripts/gdb/linux/modules.py +++ b/scripts/gdb/linux/modules.py @@ -73,18 +73,11 @@ def invoke(self, arg, from_tty): " " if utils.get_long_type().sizeof == 8 else "")) for module in module_list(): - ref = 0 - module_refptr = module['refptr'] - for cpu in cpus.cpu_list("cpu_possible_mask"): - refptr = cpus.per_cpu(module_refptr, cpu) - ref += refptr['incs'] - ref -= refptr['decs'] - gdb.write("{address} {name:<19} {size:>8} {ref}".format( address=str(module['module_core']).split()[0], name=module['name'].string(), size=str(module['core_size']), - ref=str(ref))) + ref=str(module['refcnt']['counter']))) source_list = module['source_list'] t = self._module_use_type.get_type().pointer() diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c index 7371e0c3926f..1eabcdf69457 100644 --- a/sound/hda/hdac_regmap.c +++ b/sound/hda/hdac_regmap.c @@ -246,6 +246,9 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val) return hda_reg_read_stereo_amp(codec, reg, val); if (verb == AC_VERB_GET_PROC_COEF) return hda_reg_read_coef(codec, reg, val); + if ((verb & 0x700) == AC_VERB_SET_AMP_GAIN_MUTE) + reg &= ~AC_AMP_FAKE_MUTE; + err = snd_hdac_exec_verb(codec, reg, 0, val); if (err < 0) return err; @@ -265,6 +268,9 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val) unsigned int verb; int i, bytes, err; + if (codec->caps_overwriting) + return 0; + reg &= ~0x00080000U; /* drop GET bit */ reg |= (codec->addr << 28); verb = get_verb(reg); @@ -280,6 +286,8 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val) switch (verb & 0xf00) { case AC_VERB_SET_AMP_GAIN_MUTE: + if ((reg & AC_AMP_FAKE_MUTE) && (val & AC_AMP_MUTE)) + val = 0; verb = AC_VERB_SET_AMP_GAIN_MUTE; if (reg & AC_AMP_GET_LEFT) verb |= AC_AMP_SET_LEFT >> 8; diff --git a/sound/mips/Kconfig b/sound/mips/Kconfig index d2f615ab177a..2153d31fb663 100644 --- a/sound/mips/Kconfig +++ b/sound/mips/Kconfig @@ -12,12 +12,14 @@ if SND_MIPS config SND_SGI_O2 tristate "SGI O2 Audio" depends on SGI_IP32 + select SND_PCM help Sound support for the SGI O2 Workstation. config SND_SGI_HAL2 tristate "SGI HAL2 Audio" depends on SGI_HAS_HAL2 + select SND_PCM help Sound support for the SGI Indy and Indigo2 Workstation. diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index b49feff0a319..5645481af3d9 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -436,7 +436,7 @@ static unsigned int get_num_devices(struct hda_codec *codec, hda_nid_t nid) get_wcaps_type(wcaps) != AC_WID_PIN) return 0; - parm = snd_hda_param_read(codec, nid, AC_PAR_DEVLIST_LEN); + parm = snd_hdac_read_parm_uncached(&codec->core, nid, AC_PAR_DEVLIST_LEN); if (parm == -1 && codec->bus->rirb_error) parm = 0; return parm & AC_DEV_LIST_LEN_MASK; @@ -1375,6 +1375,31 @@ int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir, } EXPORT_SYMBOL_GPL(snd_hda_override_amp_caps); +/** + * snd_hda_codec_amp_update - update the AMP mono value + * @codec: HD-audio codec + * @nid: NID to read the AMP value + * @ch: channel to update (0 or 1) + * @dir: #HDA_INPUT or #HDA_OUTPUT + * @idx: the index value (only for input direction) + * @mask: bit mask to set + * @val: the bits value to set + * + * Update the AMP values for the given channel, direction and index. 
+ */ +int snd_hda_codec_amp_update(struct hda_codec *codec, hda_nid_t nid, + int ch, int dir, int idx, int mask, int val) +{ + unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx); + + /* enable fake mute if no h/w mute but min=mute */ + if ((query_amp_caps(codec, nid, dir) & + (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) == AC_AMPCAP_MIN_MUTE) + cmd |= AC_AMP_FAKE_MUTE; + return snd_hdac_regmap_update_raw(&codec->core, cmd, mask, val); +} +EXPORT_SYMBOL_GPL(snd_hda_codec_amp_update); + /** * snd_hda_codec_amp_stereo - update the AMP stereo values * @codec: HD-audio codec diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 1c8678775f40..ac0db1679f09 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -4926,9 +4926,12 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec, dig_only: parse_digital(codec); - if (spec->power_down_unused || codec->power_save_node) + if (spec->power_down_unused || codec->power_save_node) { if (!codec->power_filter) codec->power_filter = snd_hda_gen_path_power_filter; + if (!codec->patch_ops.stream_pm) + codec->patch_ops.stream_pm = snd_hda_gen_stream_pm; + } if (!spec->no_analog && spec->beep_nid) { err = snd_hda_attach_beep_device(codec, spec->beep_nid); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 34040d26c94f..b6db25b23dd3 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -340,6 +340,11 @@ enum { #define use_vga_switcheroo(chip) 0 #endif +#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \ + ((pci)->device == 0x0c0c) || \ + ((pci)->device == 0x0d0c) || \ + ((pci)->device == 0x160c)) + static char *driver_short_names[] = { [AZX_DRIVER_ICH] = "HDA Intel", [AZX_DRIVER_PCH] = "HDA Intel PCH", @@ -1854,8 +1859,17 @@ static int azx_probe_continue(struct azx *chip) if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { #ifdef CONFIG_SND_HDA_I915 err = hda_i915_init(hda); - if (err < 0) - goto out_free; + if (err < 0) { + /* if the controller is bound only with HDMI/DP + * (for HSW and BDW), we need to abort the probe; + * for other chips, still continue probing as other + * codecs can be on the same link. 
+ */ + if (CONTROLLER_IN_GPU(pci)) + goto out_free; + else + goto skip_i915; + } err = hda_display_power(hda, true); if (err < 0) { dev_err(chip->card->dev, @@ -1865,6 +1879,9 @@ static int azx_probe_continue(struct azx *chip) #endif } +#ifdef CONFIG_SND_HDA_I915 + skip_i915: +#endif err = azx_first_init(chip); if (err < 0) goto out_free; @@ -2089,6 +2106,8 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, { PCI_DEVICE(0x1002, 0xaab0), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, + { PCI_DEVICE(0x1002, 0xaac8), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, /* VIA VT8251/VT8237A */ { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA }, diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h index 3b567f42296b..bed66c314431 100644 --- a/sound/pci/hda/hda_local.h +++ b/sound/pci/hda/hda_local.h @@ -129,8 +129,8 @@ int snd_hda_mixer_amp_switch_put_beep(struct snd_kcontrol *kcontrol, /* lowlevel accessor with caching; use carefully */ #define snd_hda_codec_amp_read(codec, nid, ch, dir, idx) \ snd_hdac_regmap_get_amp(&(codec)->core, nid, ch, dir, idx) -#define snd_hda_codec_amp_update(codec, nid, ch, dir, idx, mask, val) \ - snd_hdac_regmap_update_amp(&(codec)->core, nid, ch, dir, idx, mask, val) +int snd_hda_codec_amp_update(struct hda_codec *codec, hda_nid_t nid, + int ch, int dir, int idx, int mask, int val); int snd_hda_codec_amp_stereo(struct hda_codec *codec, hda_nid_t nid, int dir, int idx, int mask, int val); int snd_hda_codec_amp_init(struct hda_codec *codec, hda_nid_t nid, int ch, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 31f8f13be907..6d010452c1f5 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -884,6 +884,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = { { 0x10ec0275, 0x1028, 0, "ALC3260" }, { 0x10ec0899, 0x1028, 0, "ALC3861" }, { 0x10ec0298, 0x1028, 0, "ALC3266" }, + { 0x10ec0256, 0x1028, 0, "ALC3246" }, { 0x10ec0670, 0x1025, 0, "ALC669X" }, { 0x10ec0676, 0x1025, 0, "ALC679X" }, { 0x10ec0282, 0x1043, 0, "ALC3229" }, @@ -2167,6 +2168,7 @@ static const struct hda_fixup alc882_fixups[] = { static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD), SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD), + SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD), SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD), SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD), SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD), @@ -4227,6 +4229,11 @@ static void alc_fixup_headset_mode_alc662(struct hda_codec *codec, if (action == HDA_FIXUP_ACT_PRE_PROBE) { spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; spec->gen.hp_mic = 1; /* Mic-in is same pin as headphone */ + + /* Disable boost for mic-in permanently. (This code is only called + from quirks that guarantee that the headphone is at NID 0x1b.) 
*/ + snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000); + snd_hda_override_wcaps(codec, 0x1b, get_wcaps(codec, 0x1b) & ~AC_WCAP_IN_AMP); } else alc_fixup_headset_mode(codec, fix, action); } @@ -4508,6 +4515,8 @@ enum { ALC288_FIXUP_DELL_HEADSET_MODE, ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, ALC288_FIXUP_DELL_XPS_13_GPIO6, + ALC292_FIXUP_DELL_E7X, + ALC292_FIXUP_DISABLE_AAMIX, }; static const struct hda_fixup alc269_fixups[] = { @@ -5030,6 +5039,16 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC288_FIXUP_DELL1_MIC_NO_PRESENCE }, + [ALC292_FIXUP_DISABLE_AAMIX] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_disable_aamix, + }, + [ALC292_FIXUP_DELL_E7X] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_dell_xps13, + .chained = true, + .chain_id = ALC292_FIXUP_DISABLE_AAMIX + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -5042,6 +5061,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), + SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X), + SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER), SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), @@ -5051,6 +5072,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK), SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), @@ -5370,6 +5392,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x17, 0x40000000}, {0x1d, 0x40700001}, {0x21, 0x02211040}), + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC255_STANDARD_PINS, + {0x12, 0x90a60160}, + {0x14, 0x90170120}, + {0x17, 0x40000000}, + {0x1d, 0x40700001}, + {0x21, 0x02211030}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, ALC256_STANDARD_PINS, {0x13, 0x40000000}), @@ -5623,8 +5652,7 @@ static int patch_alc269(struct hda_codec *codec) spec = codec->spec; spec->gen.shared_mic_vref_pin = 0x18; - if (codec->core.vendor_id != 0x10ec0292) - codec->power_save_node = 1; + codec->power_save_node = 1; snd_hda_pick_fixup(codec, alc269_fixup_models, alc269_fixup_tbl, alc269_fixups); diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 43c99ce4a520..6c66d7e16439 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -100,6 +100,7 @@ enum { STAC_HP_ENVY_BASS, STAC_HP_BNB13_EQ, STAC_HP_ENVY_TS_BASS, + STAC_HP_ENVY_TS_DAC_BIND, STAC_92HD83XXX_GPIO10_EAPD, STAC_92HD83XXX_MODELS }; @@ -2171,6 +2172,22 @@ static void stac92hd83xxx_fixup_gpio10_eapd(struct hda_codec *codec, spec->eapd_switch = 0; } +static void hp_envy_ts_fixup_dac_bind(struct 
hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + struct sigmatel_spec *spec = codec->spec; + static hda_nid_t preferred_pairs[] = { + 0xd, 0x13, + 0 + }; + + if (action != HDA_FIXUP_ACT_PRE_PROBE) + return; + + spec->gen.preferred_dacs = preferred_pairs; +} + static const struct hda_verb hp_bnb13_eq_verbs[] = { /* 44.1KHz base */ { 0x22, 0x7A6, 0x3E }, @@ -2686,6 +2703,12 @@ static const struct hda_fixup stac92hd83xxx_fixups[] = { {} }, }, + [STAC_HP_ENVY_TS_DAC_BIND] = { + .type = HDA_FIXUP_FUNC, + .v.func = hp_envy_ts_fixup_dac_bind, + .chained = true, + .chain_id = STAC_HP_ENVY_TS_BASS, + }, [STAC_92HD83XXX_GPIO10_EAPD] = { .type = HDA_FIXUP_FUNC, .v.func = stac92hd83xxx_fixup_gpio10_eapd, @@ -2764,6 +2787,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = { "HP bNB13", STAC_HP_BNB13_EQ), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x190e, "HP ENVY TS", STAC_HP_ENVY_TS_BASS), + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1967, + "HP ENVY TS", STAC_HP_ENVY_TS_DAC_BIND), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1940, "HP bNB13", STAC_HP_BNB13_EQ), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1941, @@ -4403,7 +4428,6 @@ static const struct hda_codec_ops stac_patch_ops = { #ifdef CONFIG_PM .suspend = stac_suspend, #endif - .stream_pm = snd_hda_gen_stream_pm, .reboot_notify = stac_shutup, }; @@ -4697,7 +4721,8 @@ static int patch_stac92hd71bxx(struct hda_codec *codec) return err; spec = codec->spec; - codec->power_save_node = 1; + /* disabled power_save_node since it causes noises on a Dell machine */ + /* codec->power_save_node = 1; */ spec->linear_tone_beep = 0; spec->gen.own_eapd_ctl = 1; spec->gen.power_down_unused = 1; diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 31a95cca015d..bab6c04932aa 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -449,6 +449,15 @@ static int via_suspend(struct hda_codec *codec) return 0; } + +static int via_resume(struct hda_codec *codec) +{ + /* some delay here to make jack detection working (bko#98921) */ + msleep(10); + codec->patch_ops.init(codec); + regcache_sync(codec->core.regmap); + return 0; +} #endif #ifdef CONFIG_PM @@ -475,6 +484,7 @@ static const struct hda_codec_ops via_patch_ops = { .stream_pm = snd_hda_gen_stream_pm, #ifdef CONFIG_PM .suspend = via_suspend, + .resume = via_resume, .check_power_status = via_check_power_status, #endif }; diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c index d51703e30523..0a4ad5feb82e 100644 --- a/sound/pci/hda/thinkpad_helper.c +++ b/sound/pci/hda/thinkpad_helper.c @@ -72,7 +72,6 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec, if (led_set_func(TPACPI_LED_MUTE, false) >= 0) { old_vmaster_hook = spec->vmaster_mute.hook; spec->vmaster_mute.hook = update_tpacpi_mute_led; - spec->vmaster_mute_enum = 1; removefunc = false; } if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) { diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 3e2ef61c627b..8b7e391dd0b8 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -918,6 +918,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */ + case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */ case USB_ID(0x046d, 0x0991): /* Most audio usb devices lie about volume resolution. * Most Logitech webcams have res = 384. 
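For context on these quirk tables: sound/usb matches devices on a single 32-bit key that packs the USB vendor ID into the upper 16 bits and the product ID into the lower 16. The helper macros (mirroring the definitions in sound/usb/usbaudio.h) look like:

	#define USB_ID(vendor, product) (((vendor) << 16) | (product))
	#define USB_ID_VENDOR(id)	((id) >> 16)
	#define USB_ID_PRODUCT(id)	((u16)(id))

so the new USB_ID(0x046d, 0x08ca) case above matches the Logitech QuickCam Fusion at 046d:08ca and gives it the same volume-resolution override as the neighbouring Logitech webcams.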
@@ -1582,12 +1583,6 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, unitid); return -EINVAL; } - /* no bmControls field (e.g. Maya44) -> ignore */ - if (desc->bLength <= 10 + input_pins) { - usb_audio_dbg(state->chip, "MU %d has no bmControls field\n", - unitid); - return 0; - } num_ins = 0; ich = 0; @@ -1595,6 +1590,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, err = parse_audio_unit(state, desc->baSourceID[pin]); if (err < 0) continue; + /* no bmControls field (e.g. Maya44) -> ignore */ + if (desc->bLength <= 10 + input_pins) + continue; err = check_input_term(state, desc->baSourceID[pin], &iterm); if (err < 0) return err; diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c index b703cb3cda19..e5000da9e9d7 100644 --- a/sound/usb/mixer_maps.c +++ b/sound/usb/mixer_maps.c @@ -436,6 +436,11 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { .id = USB_ID(0x200c, 0x1018), .map = ebox44_map, }, + { + /* MAYA44 USB+ */ + .id = USB_ID(0x2573, 0x0008), + .map = maya44_map, + }, { /* KEF X300A */ .id = USB_ID(0x27ac, 0x1000), diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 46facfc9aec1..754e689596a2 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1118,7 +1118,9 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */ + case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ + case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ return true; } return false; @@ -1265,8 +1267,9 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, if (fp->altsetting == 2) return SNDRV_PCM_FMTBIT_DSD_U32_BE; break; - /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */ - case USB_ID(0x20b1, 0x2009): + + case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */ + case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */ if (fp->altsetting == 3) return SNDRV_PCM_FMTBIT_DSD_U32_BE; break; diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c index c5baf9c591b7..618c2bcd4eab 100644 --- a/tools/net/bpf_jit_disasm.c +++ b/tools/net/bpf_jit_disasm.c @@ -123,6 +123,8 @@ static int get_last_jit_image(char *haystack, size_t hlen, assert(ret == 0); ptr = haystack; + memset(pmatch, 0, sizeof(pmatch)); + while (1) { ret = regexec(®ex, ptr, 1, pmatch, 0); if (ret == 0) { diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index bac98ca3d4ca..323b65edfc97 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -52,6 +52,7 @@ unsigned int skip_c0; unsigned int skip_c1; unsigned int do_nhm_cstates; unsigned int do_snb_cstates; +unsigned int do_knl_cstates; unsigned int do_pc2; unsigned int do_pc3; unsigned int do_pc6; @@ -91,6 +92,7 @@ unsigned int do_gfx_perf_limit_reasons; unsigned int do_ring_perf_limit_reasons; unsigned int crystal_hz; unsigned long long tsc_hz; +int base_cpu; #define RAPL_PKG (1 << 0) /* 0x610 MSR_PKG_POWER_LIMIT */ @@ -316,7 +318,7 @@ void print_header(void) if (do_nhm_cstates) outp += sprintf(outp, " CPU%%c1"); - if (do_nhm_cstates && !do_slm_cstates) + if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) outp += sprintf(outp, " CPU%%c3"); if (do_nhm_cstates) outp += sprintf(outp, " CPU%%c6"); @@ -546,7 +548,7 @@ int format_counters(struct thread_data *t, struct 
core_data *c, if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) goto done; - if (do_nhm_cstates && !do_slm_cstates) + if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc); if (do_nhm_cstates) outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc); @@ -1018,14 +1020,17 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) return 0; - if (do_nhm_cstates && !do_slm_cstates) { + if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) { if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) return -6; } - if (do_nhm_cstates) { + if (do_nhm_cstates && !do_knl_cstates) { if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) return -7; + } else if (do_knl_cstates) { + if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) + return -7; } if (do_snb_cstates) @@ -1150,7 +1155,7 @@ dump_nhm_platform_info(void) unsigned long long msr; unsigned int ratio; - get_msr(0, MSR_NHM_PLATFORM_INFO, &msr); + get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr); @@ -1162,7 +1167,7 @@ dump_nhm_platform_info(void) fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n", ratio, bclk, ratio * bclk); - get_msr(0, MSR_IA32_POWER_CTL, &msr); + get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr); fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", msr, msr & 0x2 ? "EN" : "DIS"); @@ -1175,7 +1180,7 @@ dump_hsw_turbo_ratio_limits(void) unsigned long long msr; unsigned int ratio; - get_msr(0, MSR_TURBO_RATIO_LIMIT2, &msr); + get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr); fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr); @@ -1197,7 +1202,7 @@ dump_ivt_turbo_ratio_limits(void) unsigned long long msr; unsigned int ratio; - get_msr(0, MSR_TURBO_RATIO_LIMIT1, &msr); + get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr); fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr); @@ -1249,7 +1254,7 @@ dump_nhm_turbo_ratio_limits(void) unsigned long long msr; unsigned int ratio; - get_msr(0, MSR_TURBO_RATIO_LIMIT, &msr); + get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr); fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr); @@ -1295,12 +1300,73 @@ dump_nhm_turbo_ratio_limits(void) return; } +static void +dump_knl_turbo_ratio_limits(void) +{ + int cores; + unsigned int ratio; + unsigned long long msr; + int delta_cores; + int delta_ratio; + int i; + + get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr); + + fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", + msr); + + /** + * Turbo encoding in KNL is as follows: + * [7:0] -- Base value of number of active cores of bucket 1. + * [15:8] -- Base value of freq ratio of bucket 1. + * [20:16] -- +ve delta of number of active cores of bucket 2. + * i.e. active cores of bucket 2 = + * active cores of bucket 1 + delta + * [23:21] -- Negative delta of freq ratio of bucket 2. + * i.e. freq ratio of bucket 2 = + * freq ratio of bucket 1 - delta + * [28:24]-- +ve delta of number of active cores of bucket 3. + * [31:29]-- -ve delta of freq ratio of bucket 3. + * [36:32]-- +ve delta of number of active cores of bucket 4. + * [39:37]-- -ve delta of freq ratio of bucket 4. + * [44:40]-- +ve delta of number of active cores of bucket 5. + * [47:45]-- -ve delta of freq ratio of bucket 5. + * [52:48]-- +ve delta of number of active cores of bucket 6. + * [55:53]-- -ve delta of freq ratio of bucket 6. 
+	 * [60:56]-- +ve delta of number of active cores of bucket 7.
+	 * [63:61]-- -ve delta of freq ratio of bucket 7.
+	 */
+	cores = msr & 0xFF;
+	ratio = (msr >> 8) & 0xFF;
+	if (ratio > 0)
+		fprintf(stderr,
+			"%d * %.0f = %.0f MHz max turbo %d active cores\n",
+			ratio, bclk, ratio * bclk, cores);
+
+	for (i = 16; i < 64; i = i + 8) {
+		delta_cores = (msr >> i) & 0x1F;
+		delta_ratio = (msr >> (i + 5)) & 0x7;
+		if (!delta_cores || !delta_ratio)
+			return;
+		cores = cores + delta_cores;
+		ratio = ratio - delta_ratio;
+
+		/* -ve ratios will make successive ratio calculations
+		 * negative. Hence return instead of carrying on.
+		 */
+		if (ratio > 0)
+			fprintf(stderr,
+				"%d * %.0f = %.0f MHz max turbo %d active cores\n",
+				ratio, bclk, ratio * bclk, cores);
+	}
+}
+
 static void
 dump_nhm_cst_cfg(void)
 {
 	unsigned long long msr;
 
-	get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
+	get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
 
 #define SNB_C1_AUTO_UNDEMOTE	(1UL << 27)
 #define SNB_C3_AUTO_UNDEMOTE	(1UL << 28)
@@ -1381,12 +1447,41 @@ int parse_int_file(const char *fmt, ...)
 }
 
 /*
- * cpu_is_first_sibling_in_core(cpu)
- * return 1 if given CPU is 1st HT sibling in the core
+ * get_cpu_position_in_core(cpu)
+ * return the position of the CPU among its HT siblings in the core
+ * return -1 if the CPU is not found among the siblings
 */
-int cpu_is_first_sibling_in_core(int cpu)
+int get_cpu_position_in_core(int cpu)
 {
-	return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
+	char path[64];
+	FILE *filep;
+	int this_cpu;
+	char character;
+	int i;
+
+	sprintf(path,
+		"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list",
+		cpu);
+	filep = fopen(path, "r");
+	if (filep == NULL) {
+		perror(path);
+		exit(1);
+	}
+
+	for (i = 0; i < topo.num_threads_per_core; i++) {
+		fscanf(filep, "%d", &this_cpu);
+		if (this_cpu == cpu) {
+			fclose(filep);
+			return i;
+		}
+
+		/* Account for no separator after last thread */
+		if (i != (topo.num_threads_per_core - 1))
+			fscanf(filep, "%c", &character);
+	}
+
+	fclose(filep);
+	return -1;
 }
 
 /*
@@ -1412,25 +1507,31 @@ int get_num_ht_siblings(int cpu)
 {
 	char path[80];
 	FILE *filep;
-	int sib1, sib2;
-	int matches;
+	int sib1;
+	int matches = 0;
 	char character;
+	char str[100];
+	char *ch;
 
 	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
 	filep = fopen_or_die(path, "r");
+
 	/*
 	 * file format:
-	 * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4)
-	 * otherwinse 1 sibling (self).
+ * A ',' separated or '-' separated set of numbers + * (eg 1-2 or 1,3,4,5) */ - matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2); + fscanf(filep, "%d%c\n", &sib1, &character); + fseek(filep, 0, SEEK_SET); + fgets(str, 100, filep); + ch = strchr(str, character); + while (ch != NULL) { + matches++; + ch = strchr(ch+1, character); + } fclose(filep); - - if (matches == 3) - return 2; - else - return 1; + return matches+1; } /* @@ -1594,8 +1695,10 @@ void turbostat_loop() void check_dev_msr() { struct stat sb; + char pathname[32]; - if (stat("/dev/cpu/0/msr", &sb)) + sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); + if (stat(pathname, &sb)) if (system("/sbin/modprobe msr > /dev/null 2>&1")) err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); } @@ -1608,6 +1711,7 @@ void check_permissions() cap_user_data_t cap_data = &cap_data_data; extern int capget(cap_user_header_t hdrp, cap_user_data_t datap); int do_exit = 0; + char pathname[32]; /* check for CAP_SYS_RAWIO */ cap_header->pid = getpid(); @@ -1622,7 +1726,8 @@ void check_permissions() } /* test file permissions */ - if (euidaccess("/dev/cpu/0/msr", R_OK)) { + sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); + if (euidaccess(pathname, R_OK)) { do_exit++; warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr"); } @@ -1704,7 +1809,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) default: return 0; } - get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); + get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; @@ -1753,6 +1858,21 @@ int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model) } } +int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model) +{ + if (!genuine_intel) + return 0; + + if (family != 6) + return 0; + + switch (model) { + case 0x57: /* Knights Landing */ + return 1; + default: + return 0; + } +} static void dump_cstate_pstate_config_info(family, model) { @@ -1770,6 +1890,9 @@ dump_cstate_pstate_config_info(family, model) if (has_nhm_turbo_ratio_limit(family, model)) dump_nhm_turbo_ratio_limits(); + if (has_knl_turbo_ratio_limit(family, model)) + dump_knl_turbo_ratio_limits(); + dump_nhm_cst_cfg(); } @@ -1801,7 +1924,7 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr)) return 0; - switch (msr & 0x7) { + switch (msr & 0xF) { case ENERGY_PERF_BIAS_PERFORMANCE: epb_string = "performance"; break; @@ -1925,7 +2048,7 @@ double get_tdp(model) unsigned long long msr; if (do_rapl & RAPL_PKG_POWER_INFO) - if (!get_msr(0, MSR_PKG_POWER_INFO, &msr)) + if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr)) return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; switch (model) { @@ -1950,6 +2073,7 @@ rapl_dram_energy_units_probe(int model, double rapl_energy_units) case 0x3F: /* HSX */ case 0x4F: /* BDX */ case 0x56: /* BDX-DE */ + case 0x57: /* KNL */ return (rapl_dram_energy_units = 15.3 / 1000000); default: return (rapl_energy_units); @@ -1991,6 +2115,7 @@ void rapl_probe(unsigned int family, unsigned int model) case 0x3F: /* HSX */ case 0x4F: /* BDX */ case 0x56: /* BDX-DE */ + case 0x57: /* KNL */ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; break; case 0x2D: @@ -2006,7 +2131,7 @@ void rapl_probe(unsigned int family, unsigned int model) } /* units on package 0, verify later other packages match */ - if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr)) + if 
(get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr)) return; rapl_power_units = 1.0 / (1 << (msr & 0xF)); @@ -2331,6 +2456,17 @@ int is_slm(unsigned int family, unsigned int model) return 0; } +int is_knl(unsigned int family, unsigned int model) +{ + if (!genuine_intel) + return 0; + switch (model) { + case 0x57: /* KNL */ + return 1; + } + return 0; +} + #define SLM_BCLK_FREQS 5 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; @@ -2340,7 +2476,7 @@ double slm_bclk(void) unsigned int i; double freq; - if (get_msr(0, MSR_FSB_FREQ, &msr)) + if (get_msr(base_cpu, MSR_FSB_FREQ, &msr)) fprintf(stderr, "SLM BCLK: unknown\n"); i = msr & 0xf; @@ -2408,7 +2544,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk if (!do_nhm_platform_info) goto guess; - if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr)) + if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr)) goto guess; target_c_local = (msr >> 16) & 0xFF; @@ -2541,6 +2677,7 @@ void process_cpuid() do_c8_c9_c10 = has_hsw_msrs(family, model); do_skl_residency = has_skl_msrs(family, model); do_slm_cstates = is_slm(family, model); + do_knl_cstates = is_knl(family, model); bclk = discover_bclk(family, model); rapl_probe(family, model); @@ -2755,13 +2892,9 @@ int initialize_counters(int cpu_id) my_package_id = get_physical_package_id(cpu_id); my_core_id = get_core_id(cpu_id); - - if (cpu_is_first_sibling_in_core(cpu_id)) { - my_thread_id = 0; + my_thread_id = get_cpu_position_in_core(cpu_id); + if (!my_thread_id) topo.num_cores++; - } else { - my_thread_id = 1; - } init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); @@ -2785,13 +2918,24 @@ void setup_all_buffers(void) for_all_proc_cpus(initialize_counters); } +void set_base_cpu(void) +{ + base_cpu = sched_getcpu(); + if (base_cpu < 0) + err(-ENODEV, "No valid cpus found"); + + if (debug > 1) + fprintf(stderr, "base_cpu = %d\n", base_cpu); +} + void turbostat_init() { + setup_all_buffers(); + set_base_cpu(); check_dev_msr(); check_permissions(); process_cpuid(); - setup_all_buffers(); if (debug) for_all_cpus(print_epb, ODD_COUNTERS); @@ -2870,7 +3014,7 @@ int get_and_dump_counters(void) } void print_version() { - fprintf(stderr, "turbostat version 4.5 2 Apr, 2015" + fprintf(stderr, "turbostat version 4.7 27-May, 2015" " - Len Brown \n"); } diff --git a/tools/testing/libos/Makefile b/tools/testing/libos/Makefile index 3da25429382b..a27eb84e7712 100644 --- a/tools/testing/libos/Makefile +++ b/tools/testing/libos/Makefile @@ -19,7 +19,7 @@ testbin: bake check_pkgs cd buildtop ; \ ../bake/bake.py download ; \ ../bake/bake.py update ; \ - ../bake/bake.py build + ../bake/bake.py build $(BAKEBUILD_PARAMS) test: @./dce-test.sh ADD_PARAM=$(ADD_PARAM) diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index 5bdb781163d1..9b0d8baf2934 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -5,8 +5,10 @@ include ../lib.mk .PHONY: all all_32 all_64 warn_32bit_failure clean TARGETS_C_BOTHBITS := sigreturn single_step_syscall +TARGETS_C_32BIT_ONLY := entry_from_vm86 -BINARIES_32 := $(TARGETS_C_BOTHBITS:%=%_32) +TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) +BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32) BINARIES_64 := $(TARGETS_C_BOTHBITS:%=%_64) CFLAGS := -O2 -g -std=gnu99 -pthread -Wall @@ -32,7 +34,7 @@ all_64: $(BINARIES_64) clean: $(RM) $(BINARIES_32) 
$(BINARIES_64)

-$(TARGETS_C_BOTHBITS:%=%_32): %_32: %.c
+$(TARGETS_C_32BIT_ALL:%=%_32): %_32: %.c
 	$(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
 
 $(TARGETS_C_BOTHBITS:%=%_64): %_64: %.c
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
new file mode 100644
index 000000000000..5c38a187677b
--- /dev/null
+++ b/tools/testing/selftests/x86/entry_from_vm86.c
@@ -0,0 +1,114 @@
+/*
+ * entry_from_vm86.c - tests kernel entries from vm86 mode
+ * Copyright (c) 2014-2015 Andrew Lutomirski
+ *
+ * This exercises a few paths that need to special-case vm86 mode.
+ *
+ * GPL v2.
+ */
+
+#define _GNU_SOURCE
+
+#include <assert.h>
+#include <stdlib.h>
+#include <sys/syscall.h>
+#include <sys/signal.h>
+#include <sys/ucontext.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <sys/mman.h>
+#include <err.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <sys/vm86.h>
+
+static unsigned long load_addr = 0x10000;
+static int nerrs = 0;
+
+asm (
+	".pushsection .rodata\n\t"
+	".type vmcode_bound, @object\n\t"
+	"vmcode:\n\t"
+	"vmcode_bound:\n\t"
+	".code16\n\t"
+	"bound %ax, (2048)\n\t"
+	"int3\n\t"
+	"vmcode_sysenter:\n\t"
+	"sysenter\n\t"
+	".size vmcode, . - vmcode\n\t"
+	"end_vmcode:\n\t"
+	".code32\n\t"
+	".popsection"
+	);
+
+extern unsigned char vmcode[], end_vmcode[];
+extern unsigned char vmcode_bound[], vmcode_sysenter[];
+
+static void do_test(struct vm86plus_struct *v86, unsigned long eip,
+		    const char *text)
+{
+	long ret;
+
+	printf("[RUN]\t%s from vm86 mode\n", text);
+	v86->regs.eip = eip;
+	ret = vm86(VM86_ENTER, v86);
+
+	if (ret == -1 && errno == ENOSYS) {
+		printf("[SKIP]\tvm86 not supported\n");
+		return;
+	}
+
+	if (VM86_TYPE(ret) == VM86_INTx) {
+		char trapname[32];
+		int trapno = VM86_ARG(ret);
+		if (trapno == 13)
+			strcpy(trapname, "GP");
+		else if (trapno == 5)
+			strcpy(trapname, "BR");
+		else if (trapno == 14)
+			strcpy(trapname, "PF");
+		else
+			sprintf(trapname, "%d", trapno);
+
+		printf("[OK]\tExited vm86 mode due to #%s\n", trapname);
+	} else if (VM86_TYPE(ret) == VM86_UNKNOWN) {
+		printf("[OK]\tExited vm86 mode due to unhandled GP fault\n");
+	} else {
+		printf("[OK]\tExited vm86 mode due to type %ld, arg %ld\n",
+		       VM86_TYPE(ret), VM86_ARG(ret));
+	}
+}
+
+int main(void)
+{
+	struct vm86plus_struct v86;
+	unsigned char *addr = mmap((void *)load_addr, 4096,
+				   PROT_READ | PROT_WRITE | PROT_EXEC,
+				   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	if (addr != (unsigned char *)load_addr)
+		err(1, "mmap");
+
+	memcpy(addr, vmcode, end_vmcode - vmcode);
+	addr[2048] = 2;
+	addr[2050] = 3;
+
+	memset(&v86, 0, sizeof(v86));
+
+	v86.regs.cs = load_addr / 16;
+	v86.regs.ss = load_addr / 16;
+	v86.regs.ds = load_addr / 16;
+	v86.regs.es = load_addr / 16;
+
+	assert((v86.regs.cs & 3) == 0);	/* Looks like RPL = 0 */
+
+	/* #BR -- should deliver SIG??? */
+	do_test(&v86, vmcode_bound - vmcode, "#BR");
+
+	/* SYSENTER -- should cause #GP or #UD depending on CPU */
+	do_test(&v86, vmcode_sysenter - vmcode, "SYSENTER");
+
+	return (nerrs == 0 ? 0 : 1);
+}
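For readers unfamiliar with the ABI the new selftest drives: vm86() returns a value whose low byte encodes the exit reason and whose upper bits carry an argument. The decoding macros come from the uapi header; a reference sketch follows (see arch/x86/include/uapi/asm/vm86.h for the authoritative definitions):

	#define VM86_TYPE(retval)	((retval) & 0xff)
	#define VM86_ARG(retval)	((retval) >> 8)

	#define VM86_SIGNAL	0	/* return due to signal */
	#define VM86_UNKNOWN	1	/* unhandled GP fault */
	#define VM86_INTx	2	/* int3/int x instruction (ARG = x) */
	#define VM86_STI	3	/* sti/popf/iret enabled virtual interrupts */

A trap reported as VM86_INTx carries the trap number in VM86_ARG(ret), which do_test() above turns into a readable name (5 -> "BR", 13 -> "GP", 14 -> "PF"). With the Makefile change earlier in this patch, the test builds as a 32-bit binary via "make entry_from_vm86_32" in tools/testing/selftests/x86, assuming a toolchain that can produce -m32 objects.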